"""
Record Arrays
=============
Record arrays expose the fields of structured arrays as properties.
Most commonly, ndarrays contain elements of a single type, e.g. floats,
integers, bools, etc.  However, it is possible for elements to be combinations
of these using structured types, such as::
>>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])
>>> a
array([(1, 2.0), (1, 2.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Here, each element consists of two fields: x (an int) and y (a float).
This is known as a structured array. The different fields are analogous
to columns in a spread-sheet. The different fields can be accessed as
one would a dictionary::
>>> a['x']
array([1, 1])
>>> a['y']
array([ 2., 2.])
Record arrays allow us to access fields as properties::
>>> ar = np.rec.array(a)
>>> ar.x
array([1, 1])
>>> ar.y
array([ 2., 2.])
"""
from __future__ import division, absolute_import, print_function
import sys
import os
from . import numeric as sb
from . import numerictypes as nt
from numpy.compat import isfileobj, bytes, long
# All of the functions allow formats to be a dtype
__all__ = ['record', 'recarray', 'format_parser']
ndarray = sb.ndarray
_byteorderconv = {'b':'>',
'l':'<',
'n':'=',
'B':'>',
'L':'<',
'N':'=',
'S':'s',
's':'s',
'>':'>',
'<':'<',
'=':'=',
'|':'|',
'I':'|',
'i':'|'}
# formats regular expression
# allows multidimension spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
numfmt = nt.typeDict
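# A minimal sketch of the tuple-prefixed spec mentioned above (illustrative,
# using the public np.dtype constructor rather than this module's internals):
#     >>> np.dtype('(2,3)f4')
#     dtype(('<f4', (2, 3)))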
def find_duplicate(list):
"""Find duplication in a list, return a list of duplicated elements"""
dup = []
for i in range(len(list)):
if (list[i] in list[i + 1:]):
if (list[i] not in dup):
dup.append(list[i])
return dup
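# Example (illustrative):
#     >>> find_duplicate(['a', 'b', 'a', 'c', 'b'])
#     ['a', 'b']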
class format_parser:
"""
Class to convert formats, names, titles description to a dtype.
After constructing the format_parser object, the dtype attribute is
the converted data-type:
``dtype = format_parser(formats, names, titles).dtype``
Attributes
----------
dtype : dtype
The converted data-type.
Parameters
----------
formats : str or list of str
The format description, either specified as a string with
comma-separated format descriptions in the form ``'f8, i4, a5'``, or
a list of format description strings in the form
``['f8', 'i4', 'a5']``.
names : str or list/tuple of str
The field names, either specified as a comma-separated string in the
form ``'col1, col2, col3'``, or as a list or tuple of strings in the
form ``['col1', 'col2', 'col3']``.
        An empty list can be used; in that case, default field names
        ('f0', 'f1', ...) are used.
titles : sequence
Sequence of title strings. An empty list can be used to leave titles
out.
aligned : bool, optional
If True, align the fields by padding as the C-compiler would.
Default is False.
byteorder : str, optional
If specified, all the fields will be changed to the
provided byte-order. Otherwise, the default byte-order is
used. For all available string specifiers, see `dtype.newbyteorder`.
See Also
--------
dtype, typename, sctype2char
Examples
--------
>>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
... ['T1', 'T2', 'T3']).dtype
dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'),
(('T3', 'col3'), '|S5')])
`names` and/or `titles` can be empty lists. If `titles` is an empty list,
titles will simply not appear. If `names` is empty, default field names
will be used.
>>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
... []).dtype
dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')])
>>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype
dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')])
"""
def __init__(self, formats, names, titles, aligned=False, byteorder=None):
self._parseFormats(formats, aligned)
self._setfieldnames(names, titles)
self._createdescr(byteorder)
self.dtype = self._descr
def _parseFormats(self, formats, aligned=0):
""" Parse the field formats """
if formats is None:
raise ValueError("Need formats argument")
if isinstance(formats, list):
if len(formats) < 2:
formats.append('')
formats = ','.join(formats)
dtype = sb.dtype(formats, aligned)
fields = dtype.fields
if fields is None:
dtype = sb.dtype([('f1', dtype)], aligned)
fields = dtype.fields
keys = dtype.names
self._f_formats = [fields[key][0] for key in keys]
self._offsets = [fields[key][1] for key in keys]
self._nfields = len(keys)
def _setfieldnames(self, names, titles):
"""convert input field names into a list and assign to the _names
attribute """
if (names):
if (type(names) in [list, tuple]):
pass
elif isinstance(names, str):
names = names.split(',')
else:
raise NameError("illegal input names %s" % repr(names))
self._names = [n.strip() for n in names[:self._nfields]]
else:
self._names = []
# if the names are not specified, they will be assigned as
# "f0, f1, f2,..."
        # if not enough names are specified, the remainder will be assigned
        # as "f[n]", "f[n+1]", ..., where n is the number of specified names
self._names += ['f%d' % i for i in range(len(self._names),
self._nfields)]
# check for redundant names
_dup = find_duplicate(self._names)
if _dup:
raise ValueError("Duplicate field names: %s" % _dup)
if (titles):
self._titles = [n.strip() for n in titles[:self._nfields]]
else:
self._titles = []
titles = []
if (self._nfields > len(titles)):
self._titles += [None] * (self._nfields - len(titles))
def _createdescr(self, byteorder):
descr = sb.dtype({'names':self._names,
'formats':self._f_formats,
'offsets':self._offsets,
'titles':self._titles})
if (byteorder is not None):
byteorder = _byteorderconv[byteorder[0]]
descr = descr.newbyteorder(byteorder)
self._descr = descr
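# Example (illustrative): forcing a byte order on every parsed field.
#     >>> format_parser(['f8', 'i4'], ['a', 'b'], [], byteorder='>').dtype
#     dtype([('a', '>f8'), ('b', '>i4')])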
class record(nt.void):
"""A data-type scalar that allows field access as attribute lookup.
"""
# manually set name and module so that this class's type shows up
# as numpy.record when printed
__name__ = 'record'
__module__ = 'numpy'
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.item())
def __getattribute__(self, attr):
if attr in ['setfield', 'getfield', 'dtype']:
return nt.void.__getattribute__(self, attr)
try:
return nt.void.__getattribute__(self, attr)
except AttributeError:
pass
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
obj = self.getfield(*res[:2])
# if it has fields return a record,
# otherwise return the object
try:
dt = obj.dtype
except AttributeError:
#happens if field is Object type
return obj
if dt.fields:
return obj.view((self.__class__, obj.dtype.fields))
return obj
else:
raise AttributeError("'record' object has no "
"attribute '%s'" % attr)
def __setattr__(self, attr, val):
if attr in ['setfield', 'getfield', 'dtype']:
raise AttributeError("Cannot set '%s' attribute" % attr)
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
return self.setfield(val, *res[:2])
else:
if getattr(self, attr, None):
return nt.void.__setattr__(self, attr, val)
else:
raise AttributeError("'record' object has no "
"attribute '%s'" % attr)
def __getitem__(self, indx):
obj = nt.void.__getitem__(self, indx)
# copy behavior of record.__getattribute__,
if isinstance(obj, nt.void) and obj.dtype.fields:
return obj.view((self.__class__, obj.dtype.fields))
else:
# return a single element
return obj
def pprint(self):
"""Pretty-print all fields."""
# pretty-print all fields
names = self.dtype.names
maxlen = max(len(name) for name in names)
rows = []
fmt = '%% %ds: %%s' % maxlen
for name in names:
rows.append(fmt % (name, getattr(self, name)))
return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
# named fields already).  The biggest difference is that it can use
# attribute-lookup to find the fields and it is constructed using
# a record.
# If byteorder is given it forces a particular byteorder on all
# the fields (and any subfields)
class recarray(ndarray):
"""Construct an ndarray that allows field access using attributes.
    Arrays may have a data-type containing fields, analogous
    to columns in a spreadsheet.  An example is ``[(x, int), (y, float)]``,
where each entry in the array is a pair of ``(int, float)``. Normally,
these attributes are accessed using dictionary lookups such as ``arr['x']``
and ``arr['y']``. Record arrays allow the fields to be accessed as members
of the array, using ``arr.x`` and ``arr.y``.
Parameters
----------
shape : tuple
Shape of output array.
dtype : data-type, optional
The desired data-type. By default, the data-type is determined
from `formats`, `names`, `titles`, `aligned` and `byteorder`.
formats : list of data-types, optional
A list containing the data-types for the different columns, e.g.
``['i4', 'f8', 'i4']``. `formats` does *not* support the new
convention of using types directly, i.e. ``(int, float, int)``.
Note that `formats` must be a list, not a tuple.
Given that `formats` is somewhat limited, we recommend specifying
`dtype` instead.
names : tuple of str, optional
The name of each column, e.g. ``('x', 'y', 'z')``.
buf : buffer, optional
By default, a new array is created of the given shape and data-type.
If `buf` is specified and is an object exposing the buffer interface,
the array will use the memory from the existing buffer. In this case,
the `offset` and `strides` keywords are available.
Other Parameters
----------------
titles : tuple of str, optional
Aliases for column names. For example, if `names` were
``('x', 'y', 'z')`` and `titles` is
``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
byteorder : {'<', '>', '='}, optional
Byte-order for all fields.
aligned : bool, optional
Align the fields in memory as the C-compiler would.
strides : tuple of ints, optional
Buffer (`buf`) is interpreted according to these strides (strides
define how many bytes each array element, row, column, etc.
occupy in memory).
offset : int, optional
Start reading buffer (`buf`) from this offset onwards.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Returns
-------
rec : recarray
Empty array of the given shape and type.
See Also
--------
rec.fromrecords : Construct a record array from data.
record : fundamental data-type for `recarray`.
format_parser : determine a data-type from formats, names, titles.
Notes
-----
This constructor can be compared to ``empty``: it creates a new record
array but does not fill it with data. To create a record array from data,
use one of the following methods:
1. Create a standard ndarray and convert it to a record array,
using ``arr.view(np.recarray)``
2. Use the `buf` keyword.
3. Use `np.rec.fromrecords`.
Examples
--------
Create an array with two fields, ``x`` and ``y``:
>>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
>>> x
array([(1.0, 2), (3.0, 4)],
dtype=[('x', '<f8'), ('y', '<i4')])
>>> x['x']
array([ 1., 3.])
View the array as a record array:
>>> x = x.view(np.recarray)
>>> x.x
array([ 1., 3.])
>>> x.y
array([2, 4])
Create a new, empty record array:
>>> np.recarray((2,),
... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
(3471280, 1.2134086255804012e-316, 0)],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
"""
# manually set name and module so that this class's type shows
# up as "numpy.recarray" when printed
__name__ = 'recarray'
__module__ = 'numpy'
def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False, order='C'):
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
if buf is None:
self = ndarray.__new__(subtype, shape, (record, descr), order=order)
else:
self = ndarray.__new__(subtype, shape, (record, descr),
buffer=buf, offset=offset,
strides=strides, order=order)
return self
def __array_finalize__(self, obj):
if self.dtype.type is not record:
# if self.dtype is not np.record, invoke __setattr__ which will
# convert it to a record if it is a void dtype.
self.dtype = self.dtype
def __getattribute__(self, attr):
# See if ndarray has this attr, and return it if so. (note that this
# means a field with the same name as an ndarray attr cannot be
# accessed by attribute).
try:
return object.__getattribute__(self, attr)
except AttributeError: # attr must be a fieldname
pass
# look for a field with this name
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("recarray has no attribute %s" % attr)
obj = self.getfield(*res)
# At this point obj will always be a recarray, since (see
# PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is
# non-structured, convert it to an ndarray. Then if obj is structured
# with void type convert it to the same dtype.type (eg to preserve
# numpy.record type if present), since nested structured fields do not
# inherit type. Don't do this for non-void structures though.
if obj.dtype.fields:
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
else:
return obj.view(ndarray)
# Save the dictionary.
# If the attr is a field name and not in the saved dictionary
# Undo any "setting" of the attribute and do a setfield
# Thus, you can't create attributes on-the-fly that are field names.
def __setattr__(self, attr, val):
# Automatically convert (void) structured types to records
# (but not non-void structures, subarrays, or non-structured voids)
if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields:
val = sb.dtype((record, val))
newattr = attr not in self.__dict__
try:
ret = object.__setattr__(self, attr, val)
except:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
exctype, value = sys.exc_info()[:2]
raise exctype(value)
else:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
return ret
if newattr:
# We just added this one or this setattr worked on an
# internal attribute.
try:
object.__delattr__(self, attr)
except:
return ret
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
return self.setfield(val, *res)
def __getitem__(self, indx):
obj = ndarray.__getitem__(self, indx)
# copy behavior of getattr, except that here
# we might also be returning a single element
if isinstance(obj, ndarray):
if obj.dtype.fields:
obj = obj.view(recarray)
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
else:
return obj.view(type=ndarray)
else:
# return a single element
return obj
def __repr__(self):
# get data/shape string. logic taken from numeric.array_repr
if self.size > 0 or self.shape == (0,):
lst = sb.array2string(self, separator=', ')
else:
# show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(self.shape),)
if (self.dtype.type is record
or (not issubclass(self.dtype.type, nt.void))):
# If this is a full record array (has numpy.record dtype),
# or if it has a scalar (non-void) dtype with no records,
# represent it using the rec.array function. Since rec.array
# converts dtype to a numpy.record for us, convert back
# to non-record before printing
plain_dtype = self.dtype
if plain_dtype.type is record:
plain_dtype = sb.dtype((nt.void, plain_dtype))
lf = '\n'+' '*len("rec.array(")
return ('rec.array(%s, %sdtype=%s)' %
(lst, lf, plain_dtype))
else:
# otherwise represent it using np.array plus a view
# This should only happen if the user is playing
# strange games with dtypes.
lf = '\n'+' '*len("array(")
return ('array(%s, %sdtype=%s).view(numpy.recarray)' %
(lst, lf, str(self.dtype)))
def field(self, attr, val=None):
if isinstance(attr, int):
names = ndarray.__getattribute__(self, 'dtype').names
attr = names[attr]
fielddict = ndarray.__getattribute__(self, 'dtype').fields
res = fielddict[attr][:2]
if val is None:
obj = self.getfield(*res)
if obj.dtype.fields:
return obj
return obj.view(ndarray)
else:
return self.setfield(val, *res)
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a record array from a (flat) list of arrays
>>> x1=np.array([1,2,3,4])
>>> x2=np.array(['a','dd','xyz','12'])
>>> x3=np.array([1.1,2,3,4])
>>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
    >>> print(r[1])
(2, 'dd', 2.0)
>>> x1[1]=34
>>> r.a
array([1, 2, 3, 4])
"""
arrayList = [sb.asarray(x) for x in arrayList]
if shape is None or shape == 0:
shape = arrayList[0].shape
if isinstance(shape, int):
shape = (shape,)
if formats is None and dtype is None:
# go through each object in the list to see if it is an ndarray
# and determine the formats.
formats = []
for obj in arrayList:
if not isinstance(obj, ndarray):
raise ValueError("item in the array list must be an ndarray.")
formats.append(obj.dtype.str)
formats = ','.join(formats)
if dtype is not None:
descr = sb.dtype(dtype)
_names = descr.names
else:
parsed = format_parser(formats, names, titles, aligned, byteorder)
_names = parsed._names
descr = parsed._descr
# Determine shape from data-type.
if len(descr) != len(arrayList):
raise ValueError("mismatch between the number of fields "
"and the number of arrays")
d0 = descr[0].shape
nn = len(d0)
if nn > 0:
shape = shape[:-nn]
for k, obj in enumerate(arrayList):
nn = len(descr[k].shape)
testshape = obj.shape[:len(obj.shape) - nn]
if testshape != shape:
raise ValueError("array-shape mismatch in array %d" % k)
_array = recarray(shape, descr)
# populate the record array (makes a copy)
for i in range(len(arrayList)):
_array[_names[i]] = arrayList[i]
return _array
# shape must be 1-d if you use list of lists...
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None):
""" create a recarray from a list of records in text form
The data in the same field can be heterogeneous, they will be promoted
to the highest data type. This method is intended for creating
smaller record arrays. If used to create large array without formats
defined
r=fromrecords([(2,3.,'abc')]*100000)
it can be slow.
If formats is None, then this will auto-detect formats. Use list of
tuples rather than list of lists for faster processing.
>>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
... names='col1,col2,col3')
    >>> print(r[0])
(456, 'dbe', 1.2)
>>> r.col1
array([456, 2])
>>> r.col2
array(['dbe', 'de'],
dtype='|S3')
>>> import pickle
    >>> print(pickle.loads(pickle.dumps(r)))
[(456, 'dbe', 1.2) (2, 'de', 1.3)]
"""
nfields = len(recList[0])
if formats is None and dtype is None: # slower
obj = sb.array(recList, dtype=object)
arrlist = [sb.array(obj[..., i].tolist()) for i in range(nfields)]
return fromarrays(arrlist, formats=formats, shape=shape, names=names,
titles=titles, aligned=aligned, byteorder=byteorder)
if dtype is not None:
descr = sb.dtype((record, dtype))
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
try:
retval = sb.array(recList, dtype=descr)
except TypeError: # list of lists instead of list of tuples
if (shape is None or shape == 0):
shape = len(recList)
if isinstance(shape, (int, long)):
shape = (shape,)
if len(shape) > 1:
raise ValueError("Can only deal with 1-d array.")
_array = recarray(shape, descr)
for k in range(_array.size):
_array[k] = tuple(recList[k])
return _array
else:
if shape is not None and retval.shape != shape:
retval.shape = shape
res = retval.view(recarray)
return res
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a (read-only) record array from binary data contained in
a string"""
if dtype is None and formats is None:
raise ValueError("Must have dtype= or formats=")
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
itemsize = descr.itemsize
if (shape is None or shape == 0 or shape == -1):
        shape = (len(datastring) - offset) // itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
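# Example (illustrative): round-trip a structured array through raw bytes.
#     >>> buf = np.array([(1, 2.0)], dtype='i4,f8').tostring()
#     >>> fromstring(buf, dtype='i4,f8').f0   # -> array([1], dtype=int32)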
def get_remaining_size(fd):
try:
fn = fd.fileno()
except AttributeError:
return os.path.getsize(fd.name) - fd.tell()
st = os.fstat(fn)
size = st.st_size - fd.tell()
return size
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create an array from binary file data
If file is a string then that file is opened, else it is assumed
to be a file object.
>>> from tempfile import TemporaryFile
>>> a = np.empty(10,dtype='f8,i4,a5')
>>> a[5] = (0.5,10,'abcde')
>>>
>>> fd=TemporaryFile()
>>> a = a.newbyteorder('<')
>>> a.tofile(fd)
>>>
>>> fd.seek(0)
>>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
... byteorder='<')
    >>> print(r[5])
(0.5, 10, 'abcde')
>>> r.shape
(10,)
"""
if (shape is None or shape == 0):
shape = (-1,)
elif isinstance(shape, (int, long)):
shape = (shape,)
name = 0
if isinstance(fd, str):
name = 1
fd = open(fd, 'rb')
if (offset > 0):
fd.seek(offset, 1)
size = get_remaining_size(fd)
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
itemsize = descr.itemsize
shapeprod = sb.array(shape).prod()
shapesize = shapeprod * itemsize
if shapesize < 0:
shape = list(shape)
        shape[shape.index(-1)] = size // -shapesize
shape = tuple(shape)
shapeprod = sb.array(shape).prod()
nbytes = shapeprod * itemsize
if nbytes > size:
raise ValueError(
"Not enough bytes left in file for specified shape and type")
# create the array
_array = recarray(shape, descr)
nbytesread = fd.readinto(_array.data)
if nbytesread != nbytes:
raise IOError("Didn't read as many bytes as expected")
if name:
fd.close()
return _array
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None, copy=True):
"""Construct a record array from a wide-variety of objects.
"""
if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and
(formats is None) and (dtype is None)):
raise ValueError("Must define formats (or dtype) if object is "
"None, string, or an open file")
kwds = {}
if dtype is not None:
dtype = sb.dtype(dtype)
elif formats is not None:
dtype = format_parser(formats, names, titles,
aligned, byteorder)._descr
else:
kwds = {'formats': formats,
'names': names,
'titles': titles,
'aligned': aligned,
'byteorder': byteorder
}
if obj is None:
if shape is None:
raise ValueError("Must define a shape if obj is None")
return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
elif isinstance(obj, bytes):
return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
elif isinstance(obj, (list, tuple)):
if isinstance(obj[0], (tuple, list)):
return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
else:
return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
elif isinstance(obj, recarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
return new
elif isfileobj(obj):
return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
elif isinstance(obj, ndarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
return new.view(recarray)
else:
interface = getattr(obj, "__array_interface__", None)
if interface is None or not isinstance(interface, dict):
raise ValueError("Unknown input type")
obj = sb.array(obj)
if dtype is not None and (obj.dtype != dtype):
obj = obj.view(dtype)
return obj.view(recarray)
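# Example (illustrative): np.rec.array dispatches on the input type; a list
# of tuples is routed through fromrecords.
#     >>> np.rec.array([(1, 2.0), (3, 4.0)], dtype=[('a', int), ('b', float)]).a
#     array([1, 3])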
# --- source: valexandersaulys/prudential_insurance_kaggle / venv/lib/python2.7/site-packages/numpy/core/records.py (Python, gpl-2.0) ---
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import os
import signal
import socket
import time
import traceback
from datetime import datetime
from multiprocessing import Process
from os.path import abspath
from os.path import dirname
from os.path import expanduser
from os.path import join
from os.path import realpath
import mock
import pyotp
import requests
import tbselenium.common as cm
from selenium import webdriver
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.remote_connection import LOGGER
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
from sqlalchemy.exc import IntegrityError
from tbselenium.tbdriver import TorBrowserDriver
import journalist_app
import source_app
import tests.utils.env as env
from db import db
from models import Journalist
from sdconfig import config
os.environ["SECUREDROP_ENV"] = "test"
FUNCTIONAL_TEST_DIR = abspath(dirname(__file__))
LOGFILE_PATH = abspath(join(FUNCTIONAL_TEST_DIR, "firefox.log"))
FILES_DIR = abspath(join(dirname(realpath(__file__)), "../..", "tests/files"))
FIREFOX_PATH = "/usr/bin/firefox/firefox"
TBB_PATH = abspath(join(expanduser("~"), ".local/tbb/tor-browser_en-US/"))
os.environ["TBB_PATH"] = TBB_PATH
TBBRC = join(TBB_PATH, "Browser/TorBrowser/Data/Tor/torrc")
LOGGER.setLevel(logging.WARNING)
# https://stackoverflow.com/a/34795883/837471
class alert_is_not_present(object):
""" Expect an alert to not be present."""
def __call__(self, driver):
try:
alert = driver.switch_to.alert
alert.text
return False
except NoAlertPresentException:
return True
class FunctionalTest(object):
use_firefox = False
driver = None
accept_languages = None
_firefox_driver = None
_torbrowser_driver = None
gpg = None
new_totp = None
timeout = 10
secret_message = "These documents outline a major government invasion of privacy."
def _unused_port(self):
s = socket.socket()
s.bind(("127.0.0.1", 0))
port = s.getsockname()[1]
s.close()
return port
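    # Note: binding to port 0 asks the OS for a free ephemeral port. The
    # socket is closed immediately, so there is a small race window before
    # the test servers below bind the port again.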
def _create_torbrowser_driver(self):
logging.info("Creating TorBrowserDriver")
log_file = open(LOGFILE_PATH, "a")
log_file.write("\n\n[%s] Running Functional Tests\n" % str(datetime.now()))
log_file.flush()
# Don't use Tor when reading from localhost, and turn off private
# browsing. We need to turn off private browsing because we won't be
# able to access the browser's cookies in private browsing mode. Since
# we use session cookies in SD anyway (in private browsing mode all
# cookies are set as session cookies), this should not affect session
# lifetime.
pref_dict = {
"network.proxy.no_proxies_on": "127.0.0.1",
"browser.privatebrowsing.autostart": False,
}
if self.accept_languages is not None:
pref_dict["intl.accept_languages"] = self.accept_languages
self._torbrowser_driver = TorBrowserDriver(
TBB_PATH, tor_cfg=cm.USE_RUNNING_TOR, pref_dict=pref_dict, tbb_logfile_path=LOGFILE_PATH
)
logging.info("Created Tor Browser driver")
def _create_firefox_driver(self, profile=None):
logging.info("Creating Firefox driver")
if profile is None:
profile = webdriver.FirefoxProfile()
if self.accept_languages is not None:
profile.set_preference("intl.accept_languages", self.accept_languages)
profile.update_preferences()
self._firefox_driver = webdriver.Firefox(
firefox_binary=FIREFOX_PATH, firefox_profile=profile
)
self._firefox_driver.set_window_position(0, 0)
self._firefox_driver.set_window_size(1024, 768)
self._firefox_driver.implicitly_wait(self.timeout)
logging.info("Created Firefox driver")
def disable_javascript(self):
self.driver.profile.set_preference("javascript.enabled", False)
def enable_javascript(self):
self.driver.profile.set_preference("javascript.enabled", True)
def switch_to_firefox_driver(self):
self.driver = self._firefox_driver
def switch_to_torbrowser_driver(self):
self.driver = self._torbrowser_driver
def setup(self, session_expiration=30):
env.create_directories()
self.gpg = env.init_gpg()
self.__context = journalist_app.create_app(config).app_context()
self.__context.push()
# Patch the two-factor verification to avoid intermittent errors
self.patcher = mock.patch("models.Journalist.verify_token")
self.mock_journalist_verify_token = self.patcher.start()
self.mock_journalist_verify_token.return_value = True
self.patcher2 = mock.patch("source_app.main.get_entropy_estimate")
self.mock_get_entropy_estimate = self.patcher2.start()
self.mock_get_entropy_estimate.return_value = 8192
signal.signal(signal.SIGUSR1, lambda _, s: traceback.print_stack(s))
env.create_directories()
db.create_all()
# Add our test user
try:
valid_password = "correct horse battery staple profanity oil chewy"
user = Journalist(username="journalist", password=valid_password, is_admin=True)
user.otp_secret = "JHCOGO7VCER3EJ4L"
db.session.add(user)
db.session.commit()
except IntegrityError:
logging.error("Test user already added")
db.session.rollback()
        # This user is required for our test cases to log in
self.admin_user = {
"name": "journalist",
"password": ("correct horse battery staple" " profanity oil chewy"),
"secret": "JHCOGO7VCER3EJ4L",
}
self.admin_user["totp"] = pyotp.TOTP(self.admin_user["secret"])
source_port = self._unused_port()
journalist_port = self._unused_port()
self.source_location = "http://127.0.0.1:%d" % source_port
self.journalist_location = "http://127.0.0.1:%d" % journalist_port
# Allow custom session expiration lengths
self.session_expiration = session_expiration
self.source_app = source_app.create_app(config)
self.journalist_app = journalist_app.create_app(config)
def start_source_server(app):
config.SESSION_EXPIRATION_MINUTES = self.session_expiration
app.run(port=source_port, debug=True, use_reloader=False, threaded=True)
def start_journalist_server(app):
app.run(port=journalist_port, debug=True, use_reloader=False, threaded=True)
self.source_process = Process(target=lambda: start_source_server(self.source_app))
self.journalist_process = Process(
target=lambda: start_journalist_server(self.journalist_app)
)
self.source_process.start()
self.journalist_process.start()
for tick in range(30):
try:
requests.get(self.source_location, timeout=1)
requests.get(self.journalist_location, timeout=1)
except Exception:
time.sleep(0.5)
else:
break
self._create_torbrowser_driver()
self._create_firefox_driver()
if self.use_firefox:
self.switch_to_firefox_driver()
else:
self.switch_to_torbrowser_driver()
# Polls the DOM to wait for elements. To read more about why
# this is necessary:
#
# http://www.obeythetestinggoat.com/how-to-get-selenium-to-wait-for-page-load-after-a-click.html
#
        # A value of 5 is known not to be enough in some cases, when
        # the machine hosting the tests is slow, which is why it was
        # raised to 10. Setting the value to 60 or more would surely
        # cover even the slowest of machines. However, it also means
        # that a test failing to find the desired element in the DOM
        # will only report failure after 60 seconds, which is painful
        # for quick debugging.
#
self.driver.implicitly_wait(self.timeout)
def wait_for_source_key(self, source_name):
filesystem_id = self.source_app.crypto_util.hash_codename(source_name)
def key_available(filesystem_id):
assert self.source_app.crypto_util.getkey(filesystem_id)
self.wait_for(lambda: key_available(filesystem_id), timeout=60)
def teardown(self):
if self._torbrowser_driver:
self._torbrowser_driver.quit()
if self._firefox_driver:
self._firefox_driver.quit()
self.patcher.stop()
env.teardown()
self.source_process.terminate()
self.journalist_process.terminate()
self.__context.pop()
def create_new_totp(self, secret):
self.new_totp = pyotp.TOTP(secret)
def wait_for(self, function_with_assertion, timeout=None):
"""Polling wait for an arbitrary assertion."""
# Thanks to
# http://chimera.labs.oreilly.com/books/1234000000754/ch20.html#_a_common_selenium_problem_race_conditions
if timeout is None:
timeout = self.timeout
start_time = time.time()
while time.time() - start_time < timeout:
try:
return function_with_assertion()
except (AssertionError, WebDriverException):
time.sleep(0.1)
# one more try, which will raise any errors if they are outstanding
return function_with_assertion()
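    # Example (illustrative, hypothetical element id) of the polling helper:
    #     self.wait_for(lambda: self.driver.find_element_by_id("login-form"))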
def safe_click_by_id(self, element_id):
WebDriverWait(self.driver, self.timeout).until(
expected_conditions.element_to_be_clickable((By.ID, element_id))
)
el = self.wait_for(lambda: self.driver.find_element_by_id(element_id))
el.location_once_scrolled_into_view
ActionChains(self.driver).move_to_element(el).click().perform()
def safe_click_by_css_selector(self, selector):
WebDriverWait(self.driver, self.timeout).until(
expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, selector))
)
el = self.wait_for(lambda: self.driver.find_element_by_css_selector(selector))
el.location_once_scrolled_into_view
ActionChains(self.driver).move_to_element(el).click().perform()
def safe_click_all_by_css_selector(self, selector, root=None):
if root is None:
root = self.driver
els = self.wait_for(lambda: root.find_elements_by_css_selector(selector))
for el in els:
el.location_once_scrolled_into_view
self.wait_for(lambda: el.is_enabled() and el.is_displayed())
ActionChains(self.driver).move_to_element(el).click().perform()
def _alert_wait(self, timeout=None):
if timeout is None:
timeout = self.timeout
WebDriverWait(self.driver, timeout).until(
expected_conditions.alert_is_present(), "Timed out waiting for confirmation popup."
)
def _alert_accept(self):
self.driver.switch_to.alert.accept()
WebDriverWait(self.driver, self.timeout).until(
alert_is_not_present(), "Timed out waiting for confirmation popup to disappear."
)
def _alert_dismiss(self):
self.driver.switch_to.alert.dismiss()
WebDriverWait(self.driver, self.timeout).until(
alert_is_not_present(), "Timed out waiting for confirmation popup to disappear."
)
# --- source: ehartsuyker/securedrop / securedrop/tests/functional/functional_test.py (Python, agpl-3.0) ---
import time
import serial
ser = serial.Serial(port=29, baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_TWO, timeout=1)
ser.isOpen()
connected=False
cash_reg = []
my_dict = []
reg = ['@r3', '@r1', '@r2', '@r4']
flag = 1
start_rec = 0
wrong_id = 0
start_count = 0
barcode_flag = 0
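# Protocol sketch (inferred from the loop below): each poll writes '@rN/' to
# the serial port and expects a reply framed as '@' ... '/'. A '0' arriving
# right after the '@' marks a wrong register id, and five reads without a
# closing '/' abort the round.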
def handle_data(data):
print(data)
print 'start transmission'
while 1 :
for item in reg:
try:
send_pkg = item+'/'
ser.write(send_pkg)
print 'sending '+ send_pkg
while flag :
start_count += 1
buffer = ser.read() #blocking call
print 'received '+buffer
if start_rec == 1:
if buffer == item[1] :
barcode_flag = 1
if buffer == '/' :
#print 'end round'
flag = 0
break
if buffer == '@' :
start_rec = 1
if buffer == '0' :
if start_rec == 1:
start_rec = 0
wrong_id = 1
print 'wrong id'
if start_count == 5 :
start_count = 0
flag = 0
break
start_rec = 0
wrong_id = 0
flag = 1
start_count = 0
        except serial.SerialTimeoutException:
print 'Serial time out'
continue
# --- source: CG3002/Hardware-Bootloader-Timer / reg.py (Python, mit) ---
#!/usr/bin/env python
import random
'''\
The computer will pick a number between 1 and 100. (You can choose any high
number you want.) The purpose of the game is to guess the number the computer
picked in as few guesses as possible.
source: http://openbookproject.net/pybiblio/practice/\
'''
high_or_low = {True: "Too high. Try again:",
               False: "Too low. Try again:"}
def main():
    choice = random.randint(1, 100)  # randint includes both endpoints, matching the docstring
user_choice = -1
while user_choice != choice:
user_choice = int(input("Please enter your choice: "))
is_high = user_choice > choice
if user_choice == choice:
break
print(high_or_low[is_high])
print("You guessed {0} correctly".format(choice))
if __name__ == "__main__":
main()
# --- source: CompSoc-NUIG/python_tutorials_2013 / guess.py (Python, unlicense) ---
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(764, 593)
MainWindow.setMinimumSize(QtCore.QSize(650, 500))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.mediaView = QtGui.QFrame(self.centralwidget)
self.mediaView.setGeometry(QtCore.QRect(0, 0, 461, 231))
self.mediaView.setStyleSheet(_fromUtf8(""))
self.mediaView.setFrameShape(QtGui.QFrame.StyledPanel)
self.mediaView.setFrameShadow(QtGui.QFrame.Raised)
self.mediaView.setObjectName(_fromUtf8("mediaView"))
self.subtitle = QtGui.QLabel(self.centralwidget)
self.subtitle.setGeometry(QtCore.QRect(250, 240, 261, 17))
font = QtGui.QFont()
font.setPointSize(12)
self.subtitle.setFont(font)
self.subtitle.setStyleSheet(_fromUtf8("color:white;"))
self.subtitle.setText(_fromUtf8(""))
self.subtitle.setObjectName(_fromUtf8("subtitle"))
self.controlView = QtGui.QWidget(self.centralwidget)
self.controlView.setGeometry(QtCore.QRect(30, 270, 661, 130))
self.controlView.setMinimumSize(QtCore.QSize(510, 130))
self.controlView.setMaximumSize(QtCore.QSize(16777215, 130))
self.controlView.setObjectName(_fromUtf8("controlView"))
self.verticalLayout = QtGui.QVBoxLayout(self.controlView)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.gridLayout_8 = QtGui.QGridLayout()
self.gridLayout_8.setMargin(1)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.timeDone = QtGui.QLabel(self.controlView)
self.timeDone.setMinimumSize(QtCore.QSize(60, 0))
self.timeDone.setMaximumSize(QtCore.QSize(60, 16777215))
self.timeDone.setAlignment(QtCore.Qt.AlignCenter)
self.timeDone.setObjectName(_fromUtf8("timeDone"))
self.gridLayout_8.addWidget(self.timeDone, 0, 0, 1, 1)
self.seekBar = QtGui.QSlider(self.controlView)
self.seekBar.setMinimumSize(QtCore.QSize(365, 18))
self.seekBar.setMaximumSize(QtCore.QSize(16777215, 18))
self.seekBar.setOrientation(QtCore.Qt.Horizontal)
self.seekBar.setObjectName(_fromUtf8("seekBar"))
self.gridLayout_8.addWidget(self.seekBar, 0, 1, 1, 1)
self.timeLeft = QtGui.QLabel(self.controlView)
self.timeLeft.setMinimumSize(QtCore.QSize(60, 18))
self.timeLeft.setMaximumSize(QtCore.QSize(60, 18))
self.timeLeft.setAlignment(QtCore.Qt.AlignCenter)
self.timeLeft.setObjectName(_fromUtf8("timeLeft"))
self.gridLayout_8.addWidget(self.timeLeft, 0, 2, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_8)
self.gridLayout_4 = QtGui.QGridLayout()
self.gridLayout_4.setMargin(1)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.muteButton = QtGui.QPushButton(self.controlView)
self.muteButton.setMinimumSize(QtCore.QSize(30, 30))
self.muteButton.setMaximumSize(QtCore.QSize(30, 30))
self.muteButton.setText(_fromUtf8(""))
self.muteButton.setObjectName(_fromUtf8("muteButton"))
self.gridLayout_4.addWidget(self.muteButton, 0, 4, 1, 1)
self.expansionWidget_3 = QtGui.QWidget(self.controlView)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.expansionWidget_3.sizePolicy().hasHeightForWidth())
self.expansionWidget_3.setSizePolicy(sizePolicy)
self.expansionWidget_3.setObjectName(_fromUtf8("expansionWidget_3"))
self.gridLayout_7 = QtGui.QGridLayout(self.expansionWidget_3)
self.gridLayout_7.setMargin(0)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.gridLayout_4.addWidget(self.expansionWidget_3, 0, 1, 1, 1)
self.volumeBar = QtGui.QSlider(self.controlView)
self.volumeBar.setMinimumSize(QtCore.QSize(175, 0))
self.volumeBar.setMaximumSize(QtCore.QSize(100, 16777215))
self.volumeBar.setOrientation(QtCore.Qt.Horizontal)
self.volumeBar.setObjectName(_fromUtf8("volumeBar"))
self.gridLayout_4.addWidget(self.volumeBar, 0, 5, 1, 1)
self.mediaSettingsWidget = QtGui.QWidget(self.controlView)
self.mediaSettingsWidget.setMinimumSize(QtCore.QSize(140, 60))
self.mediaSettingsWidget.setMaximumSize(QtCore.QSize(140, 60))
self.mediaSettingsWidget.setObjectName(_fromUtf8("mediaSettingsWidget"))
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.mediaSettingsWidget)
self.horizontalLayout_6.setMargin(0)
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.fullscreenButton = QtGui.QPushButton(self.mediaSettingsWidget)
self.fullscreenButton.setMinimumSize(QtCore.QSize(30, 30))
self.fullscreenButton.setMaximumSize(QtCore.QSize(30, 30))
self.fullscreenButton.setText(_fromUtf8(""))
self.fullscreenButton.setObjectName(_fromUtf8("fullscreenButton"))
self.horizontalLayout_6.addWidget(self.fullscreenButton)
self.playlistButton = QtGui.QPushButton(self.mediaSettingsWidget)
self.playlistButton.setMinimumSize(QtCore.QSize(30, 30))
self.playlistButton.setMaximumSize(QtCore.QSize(30, 30))
self.playlistButton.setText(_fromUtf8(""))
self.playlistButton.setObjectName(_fromUtf8("playlistButton"))
self.horizontalLayout_6.addWidget(self.playlistButton)
self.stopButton = QtGui.QPushButton(self.mediaSettingsWidget)
self.stopButton.setMinimumSize(QtCore.QSize(30, 30))
self.stopButton.setMaximumSize(QtCore.QSize(30, 30))
self.stopButton.setText(_fromUtf8(""))
self.stopButton.setObjectName(_fromUtf8("stopButton"))
self.horizontalLayout_6.addWidget(self.stopButton)
self.gridLayout_4.addWidget(self.mediaSettingsWidget, 0, 0, 1, 1)
self.mediaControlWidget = QtGui.QWidget(self.controlView)
self.mediaControlWidget.setMinimumSize(QtCore.QSize(225, 70))
self.mediaControlWidget.setMaximumSize(QtCore.QSize(225, 70))
self.mediaControlWidget.setObjectName(_fromUtf8("mediaControlWidget"))
self.horizontalLayout_7 = QtGui.QHBoxLayout(self.mediaControlWidget)
self.horizontalLayout_7.setMargin(0)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.previous = QtGui.QPushButton(self.mediaControlWidget)
self.previous.setMinimumSize(QtCore.QSize(40, 40))
self.previous.setMaximumSize(QtCore.QSize(40, 40))
self.previous.setText(_fromUtf8(""))
self.previous.setObjectName(_fromUtf8("previous"))
self.horizontalLayout_7.addWidget(self.previous)
self.playState = QtGui.QPushButton(self.mediaControlWidget)
self.playState.setMinimumSize(QtCore.QSize(50, 50))
self.playState.setMaximumSize(QtCore.QSize(50, 50))
self.playState.setText(_fromUtf8(""))
icon = QtGui.QIcon.fromTheme(_fromUtf8("play-2.svg"))
self.playState.setIcon(icon)
self.playState.setObjectName(_fromUtf8("playState"))
self.horizontalLayout_7.addWidget(self.playState)
self.next = QtGui.QPushButton(self.mediaControlWidget)
self.next.setMinimumSize(QtCore.QSize(40, 40))
self.next.setMaximumSize(QtCore.QSize(40, 40))
self.next.setText(_fromUtf8(""))
self.next.setObjectName(_fromUtf8("next"))
self.horizontalLayout_7.addWidget(self.next)
self.gridLayout_4.addWidget(self.mediaControlWidget, 0, 2, 1, 1)
self.expansionWidget_4 = QtGui.QWidget(self.controlView)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.expansionWidget_4.sizePolicy().hasHeightForWidth())
self.expansionWidget_4.setSizePolicy(sizePolicy)
self.expansionWidget_4.setObjectName(_fromUtf8("expansionWidget_4"))
self.gridLayout_4.addWidget(self.expansionWidget_4, 0, 3, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_4)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 764, 29))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuPlayback = QtGui.QMenu(self.menubar)
self.menuPlayback.setObjectName(_fromUtf8("menuPlayback"))
self.menuSpeed = QtGui.QMenu(self.menuPlayback)
self.menuSpeed.setObjectName(_fromUtf8("menuSpeed"))
self.menu_Subtitles = QtGui.QMenu(self.menubar)
self.menu_Subtitles.setObjectName(_fromUtf8("menu_Subtitles"))
self.menu_Audio = QtGui.QMenu(self.menubar)
self.menu_Audio.setObjectName(_fromUtf8("menu_Audio"))
self.menu_Video = QtGui.QMenu(self.menubar)
self.menu_Video.setObjectName(_fromUtf8("menu_Video"))
MainWindow.setMenuBar(self.menubar)
self.actionOpen_File = QtGui.QAction(MainWindow)
self.actionOpen_File.setShortcutContext(QtCore.Qt.WindowShortcut)
self.actionOpen_File.setObjectName(_fromUtf8("actionOpen_File"))
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.actionOpen_Multiple_Files = QtGui.QAction(MainWindow)
self.actionOpen_Multiple_Files.setObjectName(_fromUtf8("actionOpen_Multiple_Files"))
self.actionAdd_Subtitle_File = QtGui.QAction(MainWindow)
self.actionAdd_Subtitle_File.setObjectName(_fromUtf8("actionAdd_Subtitle_File"))
self.actionJump_Forward = QtGui.QAction(MainWindow)
self.actionJump_Forward.setObjectName(_fromUtf8("actionJump_Forward"))
self.actionJump_Backward = QtGui.QAction(MainWindow)
self.actionJump_Backward.setObjectName(_fromUtf8("actionJump_Backward"))
self.actionX0_5 = QtGui.QAction(MainWindow)
self.actionX0_5.setObjectName(_fromUtf8("actionX0_5"))
self.actionX_1 = QtGui.QAction(MainWindow)
self.actionX_1.setObjectName(_fromUtf8("actionX_1"))
self.actionX_2 = QtGui.QAction(MainWindow)
self.actionX_2.setObjectName(_fromUtf8("actionX_2"))
self.actionX_4 = QtGui.QAction(MainWindow)
self.actionX_4.setObjectName(_fromUtf8("actionX_4"))
self.actionX_8 = QtGui.QAction(MainWindow)
self.actionX_8.setObjectName(_fromUtf8("actionX_8"))
self.actionAdd_Subtitle_Track = QtGui.QAction(MainWindow)
self.actionAdd_Subtitle_Track.setObjectName(_fromUtf8("actionAdd_Subtitle_Track"))
self.actionPlay = QtGui.QAction(MainWindow)
self.actionPlay.setObjectName(_fromUtf8("actionPlay"))
self.actionPause = QtGui.QAction(MainWindow)
self.actionPause.setObjectName(_fromUtf8("actionPause"))
self.actionStop = QtGui.QAction(MainWindow)
self.actionStop.setObjectName(_fromUtf8("actionStop"))
self.actionPrevious = QtGui.QAction(MainWindow)
self.actionPrevious.setObjectName(_fromUtf8("actionPrevious"))
self.actionNext = QtGui.QAction(MainWindow)
self.actionNext.setObjectName(_fromUtf8("actionNext"))
self.actionJump_to_specific_time = QtGui.QAction(MainWindow)
self.actionJump_to_specific_time.setObjectName(_fromUtf8("actionJump_to_specific_time"))
self.actionIncrease_Volume = QtGui.QAction(MainWindow)
self.actionIncrease_Volume.setObjectName(_fromUtf8("actionIncrease_Volume"))
self.actionDecrease_Volume = QtGui.QAction(MainWindow)
self.actionDecrease_Volume.setObjectName(_fromUtf8("actionDecrease_Volume"))
self.actionMute = QtGui.QAction(MainWindow)
self.actionMute.setObjectName(_fromUtf8("actionMute"))
self.actionFullscreen = QtGui.QAction(MainWindow)
self.actionFullscreen.setCheckable(False)
self.actionFullscreen.setObjectName(_fromUtf8("actionFullscreen"))
self.actionShift_forward_by_1_second = QtGui.QAction(MainWindow)
self.actionShift_forward_by_1_second.setObjectName(_fromUtf8("actionShift_forward_by_1_second"))
self.actionShift_backward_by_1_second = QtGui.QAction(MainWindow)
self.actionShift_backward_by_1_second.setObjectName(_fromUtf8("actionShift_backward_by_1_second"))
self.menuFile.addAction(self.actionOpen_File)
self.menuFile.addAction(self.actionOpen_Multiple_Files)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuSpeed.addAction(self.actionX0_5)
self.menuSpeed.addAction(self.actionX_1)
self.menuSpeed.addAction(self.actionX_2)
self.menuSpeed.addAction(self.actionX_4)
self.menuSpeed.addAction(self.actionX_8)
self.menuPlayback.addAction(self.actionJump_Forward)
self.menuPlayback.addAction(self.actionJump_Backward)
self.menuPlayback.addAction(self.menuSpeed.menuAction())
self.menuPlayback.addSeparator()
self.menuPlayback.addAction(self.actionPlay)
self.menuPlayback.addAction(self.actionStop)
self.menuPlayback.addSeparator()
self.menuPlayback.addAction(self.actionPrevious)
self.menuPlayback.addAction(self.actionNext)
self.menuPlayback.addSeparator()
self.menuPlayback.addAction(self.actionJump_to_specific_time)
self.menu_Subtitles.addAction(self.actionAdd_Subtitle_Track)
self.menu_Subtitles.addSeparator()
self.menu_Subtitles.addAction(self.actionShift_forward_by_1_second)
self.menu_Subtitles.addAction(self.actionShift_backward_by_1_second)
self.menu_Audio.addAction(self.actionIncrease_Volume)
self.menu_Audio.addAction(self.actionDecrease_Volume)
self.menu_Audio.addAction(self.actionMute)
self.menu_Audio.addSeparator()
self.menu_Video.addAction(self.actionFullscreen)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuPlayback.menuAction())
self.menubar.addAction(self.menu_Subtitles.menuAction())
self.menubar.addAction(self.menu_Audio.menuAction())
self.menubar.addAction(self.menu_Video.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.timeDone.setText(_translate("MainWindow", "00:00:00", None))
self.timeLeft.setText(_translate("MainWindow", "00:00:00", None))
self.muteButton.setToolTip(_translate("MainWindow", "volume", None))
self.fullscreenButton.setToolTip(_translate("MainWindow", "Fullscreen", None))
self.playlistButton.setToolTip(_translate("MainWindow", "Playlist", None))
self.stopButton.setToolTip(_translate("MainWindow", "Stop", None))
self.previous.setToolTip(_translate("MainWindow", "Previous", None))
self.playState.setToolTip(_translate("MainWindow", "Play/Pause", None))
self.next.setToolTip(_translate("MainWindow", "Next", None))
self.menuFile.setTitle(_translate("MainWindow", "&Media", None))
self.menuPlayback.setTitle(_translate("MainWindow", "P&layback", None))
self.menuSpeed.setTitle(_translate("MainWindow", "&Speed", None))
self.menu_Subtitles.setTitle(_translate("MainWindow", "&Subtitles", None))
self.menu_Audio.setTitle(_translate("MainWindow", "&Audio ", None))
self.menu_Video.setTitle(_translate("MainWindow", "&Video", None))
self.actionOpen_File.setText(_translate("MainWindow", "&Open File", None))
self.actionOpen_File.setShortcut(_translate("MainWindow", "Ctrl+O", None))
self.actionExit.setText(_translate("MainWindow", "&Exit", None))
self.actionExit.setShortcut(_translate("MainWindow", "Ctrl+Q", None))
self.actionOpen_Multiple_Files.setText(_translate("MainWindow", "Open &Multiple Files", None))
self.actionOpen_Multiple_Files.setShortcut(_translate("MainWindow", "Ctrl+Shift+O", None))
self.actionAdd_Subtitle_File.setText(_translate("MainWindow", "&Add Subtitle File", None))
self.actionJump_Forward.setText(_translate("MainWindow", "&Jump Forward", None))
self.actionJump_Forward.setShortcut(_translate("MainWindow", "Ctrl+Shift++", None))
self.actionJump_Backward.setText(_translate("MainWindow", "Jump &Backward", None))
self.actionJump_Backward.setShortcut(_translate("MainWindow", "Ctrl+Shift+-", None))
self.actionX0_5.setText(_translate("MainWindow", "&x 0.5", None))
self.actionX_1.setText(_translate("MainWindow", "&Normal Speed", None))
self.actionX_2.setText(_translate("MainWindow", "x &2", None))
self.actionX_4.setText(_translate("MainWindow", "x &4", None))
self.actionX_8.setText(_translate("MainWindow", "x &8", None))
self.actionAdd_Subtitle_Track.setText(_translate("MainWindow", "&Add Subtitle Track", None))
self.actionPlay.setText(_translate("MainWindow", "&Play/Pause", None))
self.actionPlay.setShortcut(_translate("MainWindow", "Space", None))
self.actionPause.setText(_translate("MainWindow", "Pause", None))
self.actionPause.setShortcut(_translate("MainWindow", "Space", None))
self.actionStop.setText(_translate("MainWindow", "St&op", None))
self.actionStop.setShortcut(_translate("MainWindow", "Ctrl+Shift+S", None))
self.actionPrevious.setText(_translate("MainWindow", "P&revious", None))
self.actionPrevious.setShortcut(_translate("MainWindow", "Ctrl+Shift+Left", None))
self.actionNext.setText(_translate("MainWindow", "&Next", None))
self.actionNext.setShortcut(_translate("MainWindow", "Ctrl+Shift+Right", None))
self.actionJump_to_specific_time.setText(_translate("MainWindow", "J&ump to specific time", None))
self.actionJump_to_specific_time.setShortcut(_translate("MainWindow", "Ctrl+T", None))
self.actionIncrease_Volume.setText(_translate("MainWindow", "&Increase Volume", None))
self.actionIncrease_Volume.setShortcut(_translate("MainWindow", "Ctrl+Up", None))
self.actionDecrease_Volume.setText(_translate("MainWindow", "&Decrease Volume", None))
self.actionDecrease_Volume.setShortcut(_translate("MainWindow", "Ctrl+Down", None))
self.actionMute.setText(_translate("MainWindow", "&Mute", None))
self.actionMute.setShortcut(_translate("MainWindow", "M", None))
self.actionFullscreen.setText(_translate("MainWindow", "&Fullscreen", None))
self.actionFullscreen.setShortcut(_translate("MainWindow", "F", None))
self.actionShift_forward_by_1_second.setText(_translate("MainWindow", "&Shift Forward By 1 Second", None))
self.actionShift_forward_by_1_second.setShortcut(_translate("MainWindow", "H", None))
self.actionShift_backward_by_1_second.setText(_translate("MainWindow", "Shift &Backward By 1 Second", None))
self.actionShift_backward_by_1_second.setShortcut(_translate("MainWindow", "G", None))
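# Example (illustrative): typical boilerplate for driving a generated Ui_*
# class from an application entry point.
#     import sys
#     app = QtGui.QApplication(sys.argv)
#     window = QtGui.QMainWindow()
#     ui = Ui_MainWindow()
#     ui.setupUi(window)
#     window.show()
#     sys.exit(app.exec_())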
# --- source: kanishkarj/Rave / Qt_Designer_files/main_design.py (Python, gpl-3.0) ---
# -*- coding: utf-8 -*-
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="UserActivateKey",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"activation_key",
models.CharField(max_length=40, null=True, blank=True),
),
("key_expires", models.DateTimeField(null=True, blank=True)),
(
"user",
models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
),
],
options={
"db_table": "tcms_user_activate_keys",
},
),
]
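# For reference (illustrative), the model created by this initial migration
# is roughly equivalent to:
#     class UserActivateKey(models.Model):
#         activation_key = models.CharField(max_length=40, null=True, blank=True)
#         key_expires = models.DateTimeField(null=True, blank=True)
#         user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#
#         class Meta:
#             db_table = "tcms_user_activate_keys"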
# --- source: Nitrate/Nitrate / src/tcms/auth/migrations/0001_initial.py (Python, gpl-2.0) ---
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp852',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
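# Example (illustrative): once the encodings package registers this module,
# the tables below drive lookups by name.
#     >>> b'\x9f'.decode('cp852')   # -> u'\u010d', LATIN SMALL LETTER C WITH CARON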
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
0x00f2: 0x02db, # OGONEK
0x00f3: 0x02c7, # CARON
0x00f4: 0x02d8, # BREVE
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
'\xac' # 0x00aa -> NOT SIGN
'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
'\xb4' # 0x00ef -> ACUTE ACCENT
'\xad' # 0x00f0 -> SOFT HYPHEN
'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
'\u02db' # 0x00f2 -> OGONEK
'\u02c7' # 0x00f3 -> CARON
'\u02d8' # 0x00f4 -> BREVE
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\xb8' # 0x00f7 -> CEDILLA
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u02d9' # 0x00fa -> DOT ABOVE
'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b8: 0x00f7, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
0x02c7: 0x00f3, # CARON
0x02d8: 0x00f4, # BREVE
0x02d9: 0x00fa, # DOT ABOVE
0x02db: 0x00f2, # OGONEK
0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
=======
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp852',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
0x00f2: 0x02db, # OGONEK
0x00f3: 0x02c7, # CARON
0x00f4: 0x02d8, # BREVE
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
'\xac' # 0x00aa -> NOT SIGN
'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
'\xb4' # 0x00ef -> ACUTE ACCENT
'\xad' # 0x00f0 -> SOFT HYPHEN
'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
'\u02db' # 0x00f2 -> OGONEK
'\u02c7' # 0x00f3 -> CARON
'\u02d8' # 0x00f4 -> BREVE
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\xb8' # 0x00f7 -> CEDILLA
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u02d9' # 0x00fa -> DOT ABOVE
'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b8: 0x00f7, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
0x02c7: 0x00f3, # CARON
0x02d8: 0x00f4, # BREVE
0x02d9: 0x00fa, # DOT ABOVE
0x02db: 0x00f2, # OGONEK
0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs

### Codec APIs

class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass

### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp852',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
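# Usage sketch (for context): the standard library exposes this module as the
# built-in 'cp852' codec, so getregentry() is normally invoked by the codec
# machinery rather than called directly.  Assuming the maps below are
# mutually consistent, as gencodec.py produces them:
#
#     b'\x9e'.decode('cp852')    # -> '\xd7'  (MULTIPLICATION SIGN)
#     '\u0150'.encode('cp852')   # -> b'\x8a'
#     bytes(range(256)).decode('cp852').encode('cp852')  # round-trips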
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
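# make_identity_dict(range(256)) maps every byte value 0x00-0xff to itself;
# the update below overrides the 0x80-0xff range with CP852's accented
# letters and box-drawing characters, e.g. decoding_map[0x80] -> 0x00c7 ('Ç').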
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
0x00f2: 0x02db, # OGONEK
0x00f3: 0x02c7, # CARON
0x00f4: 0x02d8, # BREVE
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
'\xac' # 0x00aa -> NOT SIGN
'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
'\xb4' # 0x00ef -> ACUTE ACCENT
'\xad' # 0x00f0 -> SOFT HYPHEN
'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
'\u02db' # 0x00f2 -> OGONEK
'\u02c7' # 0x00f3 -> CARON
'\u02d8' # 0x00f4 -> BREVE
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\xb8' # 0x00f7 -> CEDILLA
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u02d9' # 0x00fa -> DOT ABOVE
'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b8: 0x00f7, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
0x02c7: 0x00f3, # CARON
0x02d8: 0x00f4, # BREVE
0x02d9: 0x00fa, # DOT ABOVE
0x02db: 0x00f2, # OGONEK
0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
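### Usage sketch (an editorial addition, not emitted by gencodec.py): a quick
### self-check of the tables above using only the stdlib codecs API.
if __name__ == '__main__':
    # 0x8b is LATIN SMALL LETTER O WITH DOUBLE ACUTE (U+0151) in cp852.
    assert codecs.charmap_encode('\u0151', 'strict', encoding_map) == (b'\x8b', 1)
    assert codecs.charmap_decode(b'\x8b', 'strict', decoding_table) == ('\u0151', 1)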
| ArcherSys/ArcherSys | Lib/encodings/cp852.py | Python | mit | 105,146 | 0.01923 |
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
import struct
import time
import unittest
from .address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
hash256,
ser_uint256,
tx_from_hex,
uint256_from_str,
)
from .script import (
CScript,
CScriptNum,
CScriptOp,
OP_1,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
)
from .script_util import (
key_to_p2wpkh_script,
script_to_p2wsh_script,
)
from .util import assert_equal
WITNESS_SCALE_FACTOR = 4
MAX_BLOCK_SIGOPS = 20000
MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR
# Genesis block time (regtest)
TIME_GENESIS_BLOCK = 1296688602
# Coinbase transaction outputs can only be spent after this number of new blocks (network rule)
COINBASE_MATURITY = 100
# Soft-fork activation heights
DERSIG_HEIGHT = 102 # BIP 66
CLTV_HEIGHT = 111 # BIP 65
CSV_ACTIVATION_HEIGHT = 432
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]}
VERSIONBITS_LAST_OLD_BLOCK_VERSION = 4
def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None):
"""Create a block (with regtest difficulty)."""
block = CBlock()
if tmpl is None:
tmpl = {}
block.nVersion = version or tmpl.get('version') or VERSIONBITS_LAST_OLD_BLOCK_VERSION
block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600)
block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
    if tmpl.get('bits') is not None:
block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0]
else:
block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
if coinbase is None:
coinbase = create_coinbase(height=tmpl['height'])
block.vtx.append(coinbase)
if txlist:
for tx in txlist:
if not hasattr(tx, 'calc_sha256'):
tx = tx_from_hex(tx)
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
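# A minimal usage sketch for create_block(), assuming a framework `node` RPC
# handle (hypothetical name): the regtest template supplies the height,
# previous hash, time and bits, and the helper builds a matching coinbase.
#
#     tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
#     block = create_block(tmpl=tmpl)
#     block.solve()  # grind the nonce against the trivial regtest target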
def get_witness_script(witness_root, witness_nonce):
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
return CScript([OP_RETURN, output_data])
def add_witness_commitment(block, nonce=0):
"""Add a witness commitment to the block's coinbase transaction.
According to BIP141, blocks with witness rules active must commit to the
hash of all in-block transactions including witness."""
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
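# Sketch of the usual call order when a block carries segwit transactions
# (`tmpl` and `wtx` are assumed inputs, as in the create_block sketch above):
#
#     block = create_block(tmpl=tmpl, txlist=[wtx])
#     add_witness_commitment(block)
#     block.solve()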
def script_BIP34_coinbase_height(height):
if height <= 16:
res = CScriptOp.encode_op_n(height)
# Append dummy to increase scriptSig size above 2 (see bad-cb-length consensus rule)
return CScript([res, OP_1])
return CScript([CScriptNum(height)])
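# Worked example of the two BIP34 branches above (a sketch; CScript is a bytes
# subclass, so bytes() exposes the raw script): height 1 becomes OP_1 plus the
# OP_1 pad, while height 120 falls through to a one-byte CScriptNum push.
#
#     assert bytes(script_BIP34_coinbase_height(1)) == b'\x51\x51'
#     assert bytes(script_BIP34_coinbase_height(120)) == b'\x01\x78'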
def create_coinbase(height, pubkey=None, extra_output_script=None, fees=0, nValue=50):
"""Create a coinbase transaction.
If pubkey is passed in, the coinbase output will be a P2PK output;
otherwise an anyone-can-spend output.
If extra_output_script is given, make a 0-value output to that
script. This is useful to pad block weight/sigops as needed. """
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = nValue * COIN
if nValue == 50:
halvings = int(height / 150) # regtest
coinbaseoutput.nValue >>= halvings
coinbaseoutput.nValue += fees
if pubkey is not None:
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
if extra_output_script is not None:
coinbaseoutput2 = CTxOut()
coinbaseoutput2.nValue = 0
coinbaseoutput2.scriptPubKey = extra_output_script
coinbase.vout.append(coinbaseoutput2)
coinbase.calc_sha256()
return coinbase
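# Subsidy arithmetic sketch for the default nValue=50 path: regtest halves
# every 150 blocks, so a height-320 coinbase has seen two halvings and pays
# (50 * COIN) >> 2 = 12.5 BTC plus any fees passed in.
#
#     cb = create_coinbase(height=320)
#     assert cb.vout[0].nValue == (50 * COIN) >> 2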
def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
"""Return one-input, one-output transaction object
spending the prevtx's n-th output with the given amount.
Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
"""
tx = CTransaction()
assert n < len(prevtx.vout)
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
tx.vout.append(CTxOut(amount, script_pub_key))
tx.calc_sha256()
return tx
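# Sketch: one-hop spend of an existing transaction object `prev` (an assumed
# name; every helper in this file runs calc_sha256() before returning, so
# prev.sha256 is already populated):
#
#     tx = create_tx_with_script(prev, 0, amount=prev.vout[0].nValue - 1000)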
def create_transaction(node, txid, to_address, *, amount):
""" Return signed transaction spending the first output of the
input txid. Note that the node must have a wallet that can
sign for the output that is being spent.
"""
raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
tx = tx_from_hex(raw_tx)
return tx
def create_raw_transaction(node, txid, to_address, *, amount):
""" Return raw signed transaction spending the first output of the
input txid. Note that the node must have a wallet that can sign
for the output that is being spent.
"""
psbt = node.createpsbt(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
    # Two signing passes over every wallet: with multisig inputs, a later
    # wallet may be able to complete a PSBT that an earlier one could only
    # sign partially.
    for _ in range(2):
for w in node.listwallets():
wrpc = node.get_wallet_rpc(w)
signed_psbt = wrpc.walletprocesspsbt(psbt)
psbt = signed_psbt['psbt']
final_psbt = node.finalizepsbt(psbt)
assert_equal(final_psbt["complete"], True)
return final_psbt['hex']
def get_legacy_sigopcount_block(block, accurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, accurate)
return count
def get_legacy_sigopcount_tx(tx, accurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(accurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(accurate)
return count
def witness_script(use_p2wsh, pubkey):
"""Create a scriptPubKey for a pay-to-witness TxOut.
This is either a P2WPKH output for the given pubkey, or a P2WSH output of a
1-of-1 multisig for the given pubkey. Returns the hex encoding of the
scriptPubKey."""
if not use_p2wsh:
# P2WPKH instead
pkscript = key_to_p2wpkh_script(pubkey)
else:
# 1-of-1 multisig
witness_script = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG])
pkscript = script_to_p2wsh_script(witness_script)
return pkscript.hex()
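# Usage sketch for witness_script(), with `pubkey_hex` standing in for a
# 33-byte compressed pubkey in hex (an assumed input): the P2WPKH branch
# yields the 22-byte program 0014{20-byte key hash} as 44 hex characters.
#
#     spk = witness_script(False, pubkey_hex)
#     assert spk.startswith('0014') and len(spk) == 44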
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
"""Return a transaction (in hex) that spends the given utxo to a segwit output.
Optionally wrap the segwit output using P2SH."""
if use_p2wsh:
program = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG])
addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
else:
addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
if not encode_p2sh:
assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
return node.createrawtransaction([utxo], {addr: amount})
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
"""Create a transaction spending a given utxo to a segwit output.
The output corresponds to the given pubkey: use_p2wsh determines whether to
use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
sign=True will have the given node sign the transaction.
insert_redeem_script will be added to the scriptSig, if given."""
tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
    if sign:
        signed = node.signrawtransactionwithwallet(tx_to_witness)
        assert "errors" not in signed or len(signed["errors"]) == 0
        return node.sendrawtransaction(signed["hex"])
    else:
        if insert_redeem_script:
            tx = tx_from_hex(tx_to_witness)
            tx.vin[0].scriptSig += CScript([bytes.fromhex(insert_redeem_script)])
            tx_to_witness = tx.serialize().hex()
        return node.sendrawtransaction(tx_to_witness)
class TestFrameworkBlockTools(unittest.TestCase):
def test_create_coinbase(self):
height = 20
coinbase_tx = create_coinbase(height=height)
assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), height)
| yenliangl/bitcoin | test/functional/test_framework/blocktools.py | Python | mit | 9,688 | 0.003509 |
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.emu', cpp_namespace='::ns3')
return root_module
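# Sketch of how a generated binding file like this one is typically driven;
# the main() below is an assumption about the surrounding build glue, but
# FileCodeSink and Module.generate are the real pybindgen API:
#
#     def main():
#         out = FileCodeSink(sys.stdout)
#         root_module = module_init()
#         register_types(root_module)
#         register_methods(root_module)
#         root_module.generate(out)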
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## system-mutex.h (module 'core'): ns3::CriticalSection [class]
module.add_class('CriticalSection', import_from_module='ns.core')
## data-rate.h (module 'network'): ns3::DataRate [class]
module.add_class('DataRate', import_from_module='ns.network')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## pcap-file.h (module 'network'): ns3::PcapFile [class]
module.add_class('PcapFile', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [class]
module.add_class('PcapHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration]
module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class]
module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## system-mutex.h (module 'core'): ns3::SystemMutex [class]
module.add_class('SystemMutex', import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## emu-helper.h (module 'emu'): ns3::EmuHelper [class]
module.add_class('EmuHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class]
module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## system-thread.h (module 'core'): ns3::SystemThread [class]
module.add_class('SystemThread', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## data-rate.h (module 'network'): ns3::DataRateChecker [class]
module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## data-rate.h (module 'network'): ns3::DataRateValue [class]
module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## nstime.h (module 'core'): ns3::TimeChecker [class]
module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## emu-net-device.h (module 'emu'): ns3::EmuNetDevice [class]
module.add_class('EmuNetDevice', parent=root_module['ns3::NetDevice'])
## emu-net-device.h (module 'emu'): ns3::EmuNetDevice::EncapsulationMode [enumeration]
module.add_enum('EncapsulationMode', ['ILLEGAL', 'DIX', 'LLC'], outer_class=root_module['ns3::EmuNetDevice'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
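## register_types_ns3_FatalImpl below is effectively a stub: the FatalImpl
## namespace is declared so the module hierarchy mirrors ns-3, but no
## additional types are registered inside it by this module.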
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
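## register_methods dispatches to one registration function per class
## declared above. For orientation, a hedged sketch of the usual pybindgen
## driver flow for a modulegen script of this shape (module_init and
## register_functions are assumed by convention; they are not shown in this
## excerpt):
##
##   def main():
##       out = FileCodeSink(sys.stdout)
##       root_module = module_init()     # assumed: creates the emu Module
##       register_types(root_module)     # declare classes/enums first
##       register_methods(root_module)   # then bind their members
##       register_functions(root_module) # assumed: free functions last
##       root_module.generate(out)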
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3CriticalSection_methods(root_module, root_module['ns3::CriticalSection'])
register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3SystemMutex_methods(root_module, root_module['ns3::SystemMutex'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3EmuHelper_methods(root_module, root_module['ns3::EmuHelper'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3EmuNetDevice_methods(root_module, root_module['ns3::EmuNetDevice'])
return
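## Per-class registration functions follow. Each first declares any
## comparison and output-stream operators for the class, then mirrors the
## constructors and member functions quoted from the C++ headers in the
## ## comments preceding each call.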
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
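## A hedged usage sketch of the AsciiTraceHelper binding (the ns.network
## module name comes from the import_from_module strings above):
##
##   import ns.network
##   ascii = ns.network.AsciiTraceHelper()
##   stream = ascii.CreateFileStream("emu.tr")  # Ptr<OutputStreamWrapper>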
def register_Ns3AsciiTraceHelper_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(std::string filename, std::_Ios_Openmode filemode=std::ios_base::out) [member function]
cls.add_method('CreateFileStream',
'ns3::Ptr< ns3::OutputStreamWrapper >',
[param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDequeueSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDequeueSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDropSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDropSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultEnqueueSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultEnqueueSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultReceiveSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultReceiveSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromDevice',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromInterfacePair',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
return
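## AsciiTraceHelperForDevice exposes the EnableAscii overloads (by device,
## by name string, by NetDeviceContainer/NodeContainer, and by node/device
## id). EnableAsciiInternal at the end is pure virtual: it is the hook a
## concrete helper (in this module, presumably EmuHelper) overrides to
## attach the actual trace sinks.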
def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename=false) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Ptr<ns3::NetDevice> nd) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, std::string ndName, bool explicitFilename=false) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string ndName) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NetDeviceContainer d) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NetDeviceContainer d) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NodeContainer n) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NodeContainer n) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool explicitFilename) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, uint32_t nodeid, uint32_t deviceid) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(std::string prefix) [member function]
cls.add_method('EnableAsciiAll',
'void',
[param('std::string', 'prefix')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
cls.add_method('EnableAsciiAll',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
cls.add_method('EnableAsciiInternal',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
is_pure_virtual=True, is_virtual=True)
return
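## Note that Begin()/End() below are bound with the libstdc++ internal
## iterator name std::_List_const_iterator, exactly as it appeared in the
## scanned API; the generated wrapper therefore depends on that
## implementation detail.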
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
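## ns3::Buffer is the byte buffer underlying packet serialization; the
## AddAtStart/AddAtEnd and RemoveAtStart/RemoveAtEnd pairs grow and shrink
## it at either end, and Begin()/End() hand out iterators for reading and
## writing.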
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'bool',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'bool',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
cls.add_method('CreateFullCopy',
'ns3::Buffer',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
cls.add_method('GetCurrentEndOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
cls.add_method('GetCurrentStartOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
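## A hedged round-trip sketch using the Buffer and Iterator bindings
## registered above (network byte order via WriteHtonU32/ReadNtohU32):
##
##   buf = ns.network.Buffer()
##   buf.AddAtStart(4)                    # reserve four bytes
##   buf.Begin().WriteHtonU32(0xdeadbeef)
##   value = buf.Begin().ReadNtohU32()    # -> 0xdeadbeef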
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
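## ByteTagIterator and its Item give read-only access to the byte tags on a
## packet; an iterator is typically obtained from ns3::Packet (whose methods
## are registered further below) and walked with HasNext()/Next().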
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
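## CallbackBase is the type-erased root of the ns-3 callback hierarchy:
## GetImpl() exposes the reference-counted CallbackImplBase, and the
## protected static Demangle() helper is used when printing callback type
## names in diagnostics.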
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CriticalSection_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::CriticalSection const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CriticalSection const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::SystemMutex & mutex) [constructor]
cls.add_constructor([param('ns3::SystemMutex &', 'mutex')])
return
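## A hedged usage sketch of the DataRate binding (string constructor and
## accessors registered below):
##
##   rate = ns.network.DataRate("5Mbps")
##   rate.GetBitRate()          # -> 5000000
##   rate.CalculateTxTime(1000) # seconds to send 1000 bytes, as a double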
def register_Ns3DataRate_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('>=')
## data-rate.h (module 'network'): ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRate const &', 'arg0')])
## data-rate.h (module 'network'): ns3::DataRate::DataRate() [constructor]
cls.add_constructor([])
## data-rate.h (module 'network'): ns3::DataRate::DataRate(uint64_t bps) [constructor]
cls.add_constructor([param('uint64_t', 'bps')])
## data-rate.h (module 'network'): ns3::DataRate::DataRate(std::string rate) [constructor]
cls.add_constructor([param('std::string', 'rate')])
## data-rate.h (module 'network'): double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function]
cls.add_method('CalculateTxTime',
'double',
[param('uint32_t', 'bytes')],
is_const=True)
## data-rate.h (module 'network'): uint64_t ns3::DataRate::GetBitRate() const [member function]
cls.add_method('GetBitRate',
'uint64_t',
[],
is_const=True)
return
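## EventId is the value handle returned when scheduling simulator events;
## the bindings expose Cancel(), IsRunning() and IsExpired() so Python code
## can manage pending events without touching EventImpl directly.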
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
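## A hedged sketch combining the Ipv4Address and Ipv4Mask bindings
## registered here and below:
##
##   addr = ns.network.Ipv4Address("10.1.1.7")
##   mask = ns.network.Ipv4Mask("255.255.255.0")
##   net  = addr.CombineMask(mask)   # -> 10.1.1.0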
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
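## Ipv6Address mirrors the Ipv4Address pattern above: string and raw-byte
## constructors, static well-known addresses (GetAny, GetLoopback, ...),
## and Is* predicates for classifying an address.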
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[])
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
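# Illustrative usage sketch (editorial; never invoked by this generator):
# exercising a few of the Ipv6Address bindings above. The string
# constructor and the static factories are the ones registered in this
# function; ns.network is the assumed import name of the built module.
def _example_ipv6_address_usage():
    import ns.network
    loopback = ns.network.Ipv6Address.GetLoopback()
    assert loopback.IsLocalhost()
    addr = ns.network.Ipv6Address("2001:db8::1")   # char const * constructor
    assert not addr.IsMulticast()
    assert addr.IsEqual(ns.network.Ipv6Address("2001:db8::1"))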
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
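# Illustrative usage sketch (editorial; never invoked by this generator):
# an Ipv6Prefix built from a prefix length, then used to compare two
# addresses under the same /64, via the bindings registered above.
def _example_ipv6_prefix_usage():
    import ns.network
    prefix = ns.network.Ipv6Prefix(64)             # uint8_t length constructor
    a = ns.network.Ipv6Address("2001:db8::1")
    b = ns.network.Ipv6Address("2001:db8::2")
    assert prefix.IsMatch(a, b)                    # both fall in 2001:db8::/64
    assert prefix.GetPrefixLength() == 64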
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
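# Illustrative usage sketch (editorial; never invoked by this generator):
# the Mac48Address statics and predicates bound above. Allocate() draws
# from a process-wide counter, so the exact address depends on prior calls.
def _example_mac48_address_usage():
    import ns.network
    assert ns.network.Mac48Address.GetBroadcast().IsBroadcast()
    mac = ns.network.Mac48Address.Allocate()       # next address from the pool
    assert not mac.IsGroup()                       # unicast: group bit is clear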
def register_Ns3NetDeviceContainer_methods(root_module, cls):
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
cls.add_constructor([])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
cls.add_constructor([param('std::string', 'devName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NetDeviceContainer', 'other')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'deviceName')])
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True)
## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
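# Illustrative usage sketch (editorial; never invoked by this generator):
# NetDeviceContainer is an append-only vector of NetDevice pointers; the
# Add overloads and the (a, b) constructor bound above simply concatenate.
def _example_net_device_container_usage():
    import ns.network
    a = ns.network.NetDeviceContainer()
    b = ns.network.NetDeviceContainer()
    merged = ns.network.NetDeviceContainer(a, b)   # concatenating constructor
    merged.Add(a)                                  # both empty, nothing added
    assert merged.GetN() == 0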
def register_Ns3NodeContainer_methods(root_module, cls):
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
cls.add_constructor([])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
cls.add_constructor([param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NodeContainer', 'other')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'nodeName')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n')])
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n'), param('uint32_t', 'systemId')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Node >',
[param('uint32_t', 'i')],
is_const=True)
## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
cls.add_method('GetGlobal',
'ns3::NodeContainer',
[],
is_static=True)
## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
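# Illustrative usage sketch (editorial; never invoked by this generator):
# the typical NodeContainer workflow with the bindings above: Create()
# instantiates ns3::Node objects, Get()/GetN() index into the container.
def _example_node_container_usage():
    import ns.network
    nodes = ns.network.NodeContainer()
    nodes.Create(3)                                # three fresh ns3::Node objects
    assert nodes.GetN() == 3
    first = nodes.Get(0)                           # ns3::Ptr<ns3::Node>
    everything = ns.network.NodeContainer.GetGlobal()
    assert everything.GetN() >= 3                  # includes nodes made elsewhere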
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
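# Illustrative usage sketch (editorial; never invoked by this generator):
# GetInstanceTypeId() is pure virtual above and overridden by every
# concrete Object subclass; ns3::Node (bound elsewhere in this module
# set) serves as the concrete example below. The Get/SetAttribute
# methods additionally need the AttributeValue hierarchy from the core
# bindings.
def _example_object_base_usage():
    import ns.network
    nodes = ns.network.NodeContainer()
    nodes.Create(1)
    tid = nodes.Get(0).GetInstanceTypeId()         # virtual dispatch to ns3::Node
    assert tid.GetName() == "ns3::Node"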
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
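# Illustrative usage sketch (editorial; never invoked by this generator):
# ObjectFactory resolves a TypeId by name and instantiates it; any
# TypeId with a registered constructor works, ns3::Node shown here.
def _example_object_factory_usage():
    import ns.core
    factory = ns.core.ObjectFactory()
    factory.SetTypeId("ns3::Node")                 # std::string overload above
    assert factory.GetTypeId().GetName() == "ns3::Node"
    obj = factory.Create()                         # ns3::Ptr<ns3::Object>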
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PcapFile_methods(root_module, cls):
## pcap-file.h (module 'network'): ns3::PcapFile::PcapFile() [constructor]
cls.add_constructor([])
## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function]
cls.add_method('Close',
'void',
[])
## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function]
cls.add_method('Diff',
'bool',
[param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')],
is_static=True)
## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function]
cls.add_method('Eof',
'bool',
[],
is_const=True)
## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member function]
cls.add_method('Fail',
'bool',
[],
is_const=True)
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function]
cls.add_method('GetDataLinkType',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function]
cls.add_method('GetMagic',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function]
cls.add_method('GetSigFigs',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function]
cls.add_method('GetSnapLen',
'uint32_t',
[])
## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function]
cls.add_method('GetSwapMode',
'bool',
[])
## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function]
cls.add_method('GetTimeZoneOffset',
'int32_t',
[])
## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function]
cls.add_method('GetVersionMajor',
'uint16_t',
[])
## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function]
cls.add_method('GetVersionMinor',
'uint16_t',
[])
## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false) [member function]
cls.add_method('Init',
'void',
[param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
cls.add_method('Open',
'void',
[param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function]
cls.add_method('Read',
'void',
[param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable]
cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True)
## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable]
cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True)
return
def register_Ns3PcapHelper_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper(ns3::PcapHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): ns3::Ptr<ns3::PcapFileWrapper> ns3::PcapHelper::CreateFile(std::string filename, std::_Ios_Openmode filemode, uint32_t dataLinkType, uint32_t snapLen=65535, int32_t tzCorrection=0) [member function]
cls.add_method('CreateFile',
'ns3::Ptr< ns3::PcapFileWrapper >',
[param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode'), param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='65535'), param('int32_t', 'tzCorrection', default_value='0')])
## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromDevice',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromInterfacePair',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
return
def register_Ns3PcapHelperForDevice_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function]
cls.add_method('EnablePcapAll',
'void',
[param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
cls.add_method('EnablePcapInternal',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
is_pure_virtual=True, is_virtual=True)
return
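# Illustrative usage sketch (editorial; never invoked by this generator):
# PcapHelperForDevice is a mixin -- EnablePcapInternal is pure virtual
# above -- so pcap tracing is driven through a concrete device helper.
# PointToPointHelper and the ns.point_to_point import below belong to a
# different module's bindings and are assumptions here.
def _example_pcap_helper_for_device_usage():
    import ns.network
    import ns.point_to_point                       # assumed separate module
    nodes = ns.network.NodeContainer()
    nodes.Create(2)
    helper = ns.point_to_point.PointToPointHelper()
    devices = helper.Install(nodes)                # PointToPointHelper API
    helper.EnablePcap("trace", devices)            # inherited from this mixin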
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_static=True)
return
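# Illustrative usage sketch (editorial; never invoked by this generator):
# the static Simulator facade bound above. Note that Run() and the
# Schedule() family are absent from this list -- they are bound through
# custom wrappers elsewhere. Seconds() and Time::GetSeconds() are
# assumed from the core bindings.
def _example_simulator_usage():
    import ns.core
    ns.core.Simulator.Stop(ns.core.Seconds(1.0))   # Stop(Time const &) overload
    assert not ns.core.Simulator.IsFinished()      # stop event is still pending
    now = ns.core.Simulator.Now()                  # zero before Run() is called
    assert now.GetSeconds() == 0.0
    ns.core.Simulator.Destroy()                    # release simulator state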
def register_Ns3SystemMutex_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex(ns3::SystemMutex const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemMutex const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex() [constructor]
cls.add_constructor([])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Lock() [member function]
cls.add_method('Lock',
'void',
[])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Unlock() [member function]
cls.add_method('Unlock',
'void',
[])
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
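## Illustrative sketch, not generated output: once this module is built, the
## TypeId registry wrapped above can be queried from Python. The `ns.core`
## import alias and the concrete type name are assumptions for illustration:
##
##   tid = ns.core.TypeId.LookupByName("ns3::EmuNetDevice")
##   if tid.HasParent():
##       print(tid.GetParent().GetName())
##   for i in range(tid.GetAttributeN()):
##       print(tid.GetAttributeFullName(i))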
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
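## Sketch under the same assumptions: AttributeInformation is a plain struct,
## so the fields registered above read as instance attributes:
##
##   info = tid.GetAttribute(0)
##   print(info.name, info.help, info.flags)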
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
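## Likewise for trace sources (a hedged sketch, not generated code):
##
##   for i in range(tid.GetTraceSourceN()):
##       src = tid.GetTraceSource(i)
##       print(src.name, src.help)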
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
return
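## Hedged usage sketch for the fixed-point type wrapped above (the `ns.core`
## alias is an assumption):
##
##   q = ns.core.int64x64_t(1.5)          # double constructor
##   r = (q * 3 + q) / 2                  # binary operators registered above
##   print(r.GetDouble(), r.GetHigh(), r.GetLow())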
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
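## Note: Deserialize and Print are registered as pure virtual above, so
## ns3::Chunk is an abstract base here; Python code works with concrete
## subclasses such as Header and Trailer (registered below) rather than
## with Chunk itself.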
def register_Ns3EmuHelper_methods(root_module, cls):
## emu-helper.h (module 'emu'): ns3::EmuHelper::EmuHelper(ns3::EmuHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmuHelper const &', 'arg0')])
## emu-helper.h (module 'emu'): ns3::EmuHelper::EmuHelper() [constructor]
cls.add_constructor([])
## emu-helper.h (module 'emu'): ns3::NetDeviceContainer ns3::EmuHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_const=True)
## emu-helper.h (module 'emu'): ns3::NetDeviceContainer ns3::EmuHelper::Install(std::string nodeName) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('std::string', 'nodeName')],
is_const=True)
## emu-helper.h (module 'emu'): ns3::NetDeviceContainer ns3::EmuHelper::Install(ns3::NodeContainer const & c) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::NodeContainer const &', 'c')],
is_const=True)
## emu-helper.h (module 'emu'): void ns3::EmuHelper::SetAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
## emu-helper.h (module 'emu'): void ns3::EmuHelper::SetQueue(std::string type, std::string n1="", ns3::AttributeValue const & v1=ns3::EmptyAttributeValue(), std::string n2="", ns3::AttributeValue const & v2=ns3::EmptyAttributeValue(), std::string n3="", ns3::AttributeValue const & v3=ns3::EmptyAttributeValue(), std::string n4="", ns3::AttributeValue const & v4=ns3::EmptyAttributeValue()) [member function]
cls.add_method('SetQueue',
'void',
[param('std::string', 'type'), param('std::string', 'n1', default_value='""'), param('ns3::AttributeValue const &', 'v1', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n2', default_value='""'), param('ns3::AttributeValue const &', 'v2', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n3', default_value='""'), param('ns3::AttributeValue const &', 'v3', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n4', default_value='""'), param('ns3::AttributeValue const &', 'v4', default_value='ns3::EmptyAttributeValue()')])
## emu-helper.h (module 'emu'): void ns3::EmuHelper::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
cls.add_method('EnableAsciiInternal',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
visibility='private', is_virtual=True)
## emu-helper.h (module 'emu'): void ns3::EmuHelper::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
cls.add_method('EnablePcapInternal',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
visibility='private', is_virtual=True)
return
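## Illustrative sketch of the usual EmuHelper flow (assumptions: `ns.emu` and
## `ns.core` aliases, a NodeContainer `nodes` built elsewhere, and the
## "DeviceName" attribute name of the underlying emu device):
##
##   emu = ns.emu.EmuHelper()
##   emu.SetAttribute("DeviceName", ns.core.StringValue("eth1"))
##   devices = emu.Install(nodes)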
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
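## Note: like Chunk, Header registers Deserialize, GetSerializedSize, Print
## and Serialize as pure virtual, so only concrete header subclasses from
## other modules are instantiable through these bindings.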
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
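## Hedged sketch of the aggregation API wrapped above (`obj` and `other`
## stand for any two ns3::Object instances created elsewhere):
##
##   obj.AggregateObject(other)   # other becomes reachable from obj
##   obj.Initialize()             # runs DoInitialize across the aggregate
##   obj.Dispose()                # breaks reference cycles when done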
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
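## Iterating an aggregate from Python (sketch, same assumptions as above):
##
##   it = obj.GetAggregateIterator()
##   while it.HasNext():
##       print(it.Next().GetInstanceTypeId().GetName())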
def register_Ns3PcapFileWrapper_methods(root_module, cls):
## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
cls.add_constructor([])
## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
cls.add_method('Fail',
'bool',
[],
is_const=True)
## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
cls.add_method('Eof',
'bool',
[],
is_const=True)
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
cls.add_method('Open',
'void',
[param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
cls.add_method('Close',
'void',
[])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
cls.add_method('Init',
'void',
[param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
cls.add_method('GetMagic',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
cls.add_method('GetVersionMajor',
'uint16_t',
[])
## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
cls.add_method('GetVersionMinor',
'uint16_t',
[])
## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
cls.add_method('GetTimeZoneOffset',
'int32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
cls.add_method('GetSigFigs',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
cls.add_method('GetSnapLen',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
cls.add_method('GetDataLinkType',
'uint32_t',
[])
return
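## Hedged sketch of writing a pcap file through the wrapper above. How the
## std::_Ios_Openmode argument of Open() is passed from Python depends on the
## toolchain, so it is shown symbolically; 1 is the standard DLT_EN10MB link
## type for Ethernet, and the Seconds() helper from the core module is an
## assumption:
##
##   pcap = ns.network.PcapFileWrapper()
##   pcap.Open("trace.pcap", <ios-out-mode>)
##   pcap.Init(1)
##   pcap.Write(ns.core.Seconds(0.0), packet)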
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
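## The nearly identical register_Ns3SimpleRefCount__* functions above all
## follow one pattern: each SimpleRefCount<T> instantiation only needs its
## default constructor, copy constructor and static Cleanup() exposed so that
## ns3::Ptr<T> reference counting works for the wrapped classes. They are
## emitted separately because pybindgen registers each template instantiation
## as a distinct class.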
def register_Ns3SystemThread_methods(root_module, cls):
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [constructor]
cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## system-thread.h (module 'core'): static bool ns3::SystemThread::Equals(pthread_t id) [member function]
cls.add_method('Equals',
'bool',
[param('pthread_t', 'id')],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Join() [member function]
cls.add_method('Join',
'void',
[])
## system-thread.h (module 'core'): static pthread_t ns3::SystemThread::Self() [member function]
cls.add_method('Self',
'pthread_t',
[],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Start() [member function]
cls.add_method('Start',
'void',
[])
return
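## Hedged sketch (the callback is assumed to be built with ns-3's MakeCallback
## machinery, which is wrapped elsewhere):
##
##   t = ns.core.SystemThread(callback)
##   t.Start()
##   t.Join()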
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
return
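## Hedged usage sketch for the Time API wrapped above (enum member names such
## as Time.S follow ns3::Time::Unit and are assumptions about the generated
## spelling):
##
##   d = ns.core.Time("5ms")               # string constructor
##   print(d.GetMilliSeconds(), d.GetSeconds())
##   total = d + ns.core.Time.FromDouble(1.0, ns.core.Time.S)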
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
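## Hedged sketch: an accessor is normally obtained from a TypeId and then
## connected to a callback ("PhyRxDrop" is a hypothetical trace source name):
##
##   acc = tid.LookupTraceSourceByName("PhyRxDrop")
##   acc.ConnectWithoutContext(obj, cb)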
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
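## Sketch of the round trip every AttributeValue subclass supports (`value`
## and `checker` would come from an AttributeInformation, see above):
##
##   s = value.SerializeToString(checker)
##   ok = value.DeserializeFromString(s, checker)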
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3DataRateChecker_methods(root_module, cls):
## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor]
cls.add_constructor([])
## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')])
return
def register_Ns3DataRateValue_methods(root_module, cls):
## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor]
cls.add_constructor([])
## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')])
## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor]
cls.add_constructor([param('ns3::DataRate const &', 'value')])
## data-rate.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## data-rate.h (module 'network'): bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function]
cls.add_method('Get',
'ns3::DataRate',
[],
is_const=True)
## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::DataRate const &', 'value')])
return
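## Hedged sketch ("DataRate" is a hypothetical attribute name on some device):
##
##   v = ns.network.DataRateValue(ns.network.DataRate("5Mbps"))
##   device.SetAttribute("DataRate", v)
##   rate = v.Get()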
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
cls.add_constructor([param('std::ostream *', 'os')])
## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
cls.add_method('GetStream',
'std::ostream *',
[])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
deprecated=True, is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'arg0')])
return
def register_Ns3TimeChecker_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3EmuNetDevice_methods(root_module, cls):
## emu-net-device.h (module 'emu'): static ns3::TypeId ns3::EmuNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## emu-net-device.h (module 'emu'): ns3::EmuNetDevice::EmuNetDevice() [constructor]
cls.add_constructor([])
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetDataRate(ns3::DataRate bps) [member function]
cls.add_method('SetDataRate',
'void',
[param('ns3::DataRate', 'bps')])
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::Start(ns3::Time tStart) [member function]
cls.add_method('Start',
'void',
[param('ns3::Time', 'tStart')])
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::Stop(ns3::Time tStop) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time', 'tStop')])
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetQueue(ns3::Ptr<ns3::Queue> queue) [member function]
cls.add_method('SetQueue',
'void',
[param('ns3::Ptr< ns3::Queue >', 'queue')])
## emu-net-device.h (module 'emu'): ns3::Ptr<ns3::Queue> ns3::EmuNetDevice::GetQueue() const [member function]
cls.add_method('GetQueue',
'ns3::Ptr< ns3::Queue >',
[],
is_const=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## emu-net-device.h (module 'emu'): uint32_t ns3::EmuNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Ptr<ns3::Channel> ns3::EmuNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Address ns3::EmuNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## emu-net-device.h (module 'emu'): uint16_t ns3::EmuNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Address ns3::EmuNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Address ns3::EmuNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Address ns3::EmuNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## emu-net-device.h (module 'emu'): ns3::Ptr<ns3::Node> ns3::EmuNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## emu-net-device.h (module 'emu'): bool ns3::EmuNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::SetEncapsulationMode(ns3::EmuNetDevice::EncapsulationMode mode) [member function]
cls.add_method('SetEncapsulationMode',
'void',
[param('ns3::EmuNetDevice::EncapsulationMode', 'mode')])
## emu-net-device.h (module 'emu'): ns3::EmuNetDevice::EncapsulationMode ns3::EmuNetDevice::GetEncapsulationMode() const [member function]
cls.add_method('GetEncapsulationMode',
'ns3::EmuNetDevice::EncapsulationMode',
[],
is_const=True)
## emu-net-device.h (module 'emu'): void ns3::EmuNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
# ---- end of src/emu/bindings/modulegen__gcc_LP64.py (SiderZhang/p2pns3, GPL-2.0) ----
#from flask.templating import render_template
# Note: redis is also installed as a dependency of this app
from app import app
from flask import Flask, request, url_for, Response, redirect
from extended_client import extended_client
import json
from jinja2 import Environment, PackageLoader
import logging
from time import sleep
#To access JMX Rest api
import requests
#To allow calling of sh commands from python
import commands
#Threading purposes
import threading
#For async tasks
from celery import Celery
#For doing msg_out rate calculations
import math
#For the timing of things
import datetime
#messages_in_topic_per_second = 'java -cp $JAVA_HOME/lib/tools.jar:../target/scala-2.10/cjmx.jar cjmx.Main 3628 \"mbeans \'kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,*\' select *\"'
#For getting the process id of kafka
#import os
#PIDs = os.system("ps aux | grep \"kafka.Kafka\" | grep -v grep | awk '{print $2}'")
#For getting ipaddress
import socket
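# Resolve this machine's primary IP; note that gethostbyname(gethostname()) can
# return 127.0.0.1 depending on how /etc/hosts is configured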
ip = socket.gethostbyname(socket.gethostname()) + ""
host = {}
host["ip"] = ip
#Jinja templating
env = Environment(loader=PackageLoader('app','templates'))
ext_client=None
json_data=None
json_nodes=None
zk=None
json_topics=None
remote_server = {}
remote_server["host"]= "John"
local = "local"
remote = "remote"
#CSS File
reading_from=""
#Store the offsets for each topic all consumers consume from
#Objects for keeping track of rates for CONSUMERS
prev_consumer_info = {}
prev_consumer_counts = {}
#Store the accumulated offset
accumulated_topic_rates = {}
consumers = ""
#Stores information for msgs_out
second_counter = 0
seconds_in_a_day = 86400 #(60*60*24)
#Objects for keeping track of rates for TOPICS
topic_sums = {}
prev_topic_info = {}
prev_topic_counts = {}
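# Shape of the rate holders (values below are illustrative):
#   prev_topic_counts[t]  -> [0, 5, 10, ...]   raw offset samples, one per tick
#   prev_topic_info[t]    -> {"count": 10, "min_rate": 5.0, "mean_rate": 5.0}
# and likewise prev_consumer_counts[c] / prev_consumer_info[c][t] per consumer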
#Proxy server
proxy = None
#
#
# FUNCTIONS
#
#
# The thing that the user sees
@app.route('/')
@app.route('/index')
def index():
print "Index called"
template = env.get_template('index.html')
title = "Fufuka"
client_url = ""#ext_client.url_port
return template.render(page_title=title, zk_client=client_url)
# Gets all the form data from the "Start visualization page"
@app.route('/', methods=['POST'])
def index_return_values():
print "/ with data. Form received"
start = datetime.datetime.now()
#hostname = request.local
dictionary = request.form
print "Dict: " + str(dictionary) + " :" + str(len(dictionary))
#print list(v for k,v in dictionary.iteritems() if 'jmx' in k)
if len(dictionary) > 1:
#Dealing with a remote connection
print "Remotely"
global reading_from
global proxy
reading_from = str(remote)
hostname = request.form.get("hostname", None)
zkhostnamePort = request.form.get("zkhostnameport", None)
proxy = request.form.get("proxy", None)
print "Connecting to: " + hostname
print "With zk at: " + zkhostnamePort
print "Proxy: " + proxy
#Set the remote host
remote_server["host"] = str(hostname)
#Set all the JMX ports that need to be listened to
jmx_ports = list(v for k,v in dictionary.iteritems() if 'jmx' in k)
remote_server["ports"] = []
for port in jmx_ports:
print "JMX ports: " + str(port)
remote_server["ports"].append(str(port))
else:
#Dealing with a local connection
global reading_from
reading_from = str(local)
print "Local"
zkhostnamePort = request.form.get("zkhostnameport", None)
print "Connecting to: " + zkhostnamePort
# Parse the zookeeper "host:port" string
split = zkhostnamePort.index(':')
hostname = zkhostnamePort[:split]
port = int(zkhostnamePort[split+1:])
#Start an instance of the extended_client
global ext_client
ext_client = extended_client(hostname, port)
#Start zookeeper client
global zk
zk = ext_client.zk
zk.start()
#Once the returned values are found, set them all
#Get consumers and producers
topics = ext_client.show_all_topics(zk)
#Populate topic holder
for t in topics:
topic_sums[t] = 0
prev_topic_info[t] = {}
prev_topic_counts[t] = []
global json_topics
json_topics = json.dumps(topics)
#Get the json data and store it
global json_data
json_data = json.dumps(ext_client.get_json(zk))
global json_nodes
json_nodes = json.dumps(ext_client.get_nodes_json(zk))
json_edges = json.dumps(ext_client.get_edges_json(zk))
end = datetime.datetime.now()
print "Total time to load zk information: " + str(end-start)
return redirect("/zk")
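# Example: the local branch can be exercised with a single form field, e.g.
#   curl -X POST -d "zkhostnameport=localhost:2181" http://<app-host>:<port>/
# (hypothetical host/port; posting any second field, such as a jmx port,
# triggers the remote branch instead)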
# Main viewing area for zks
@app.route('/zk')
def zk_client():
print "/zk called"
#Set the consumers then continuously calculate their offsets
print "Creating consumer holders:"
start_time = datetime.datetime.now()
global consumers
consumers = ext_client.show_all_consumers(zk)
#Populate consumer holders
for c in consumers:
prev_consumer_info[c] = {}
prev_consumer_counts[c] = []
for c in consumers:
topics = ext_client.show_topics_consumed(zk, c)
for t in topics:
prev_consumer_info[c][t] = {}
#print prev_consumer_info
end_time = datetime.datetime.now()
calculate_offsets()
#Set the template of the page
template = env.get_template('zk_client.html')
#brokers = ext_client.show_brokers_ids(zk)
#Get the information of the current zookeeper instance
data = {}
data["zkinfo"] = str(ext_client.url_port)
print "Total con: " + str(len(consumers))
print "Total time to load /zk page: " + str(end_time-start_time)
return template.render(data=data)#consumers=consumers, brokers=brokers, producers=producers, topics=topics)#, r=r.content)
# Loads the d3 graph onto the iframe
@app.route('/test')
def test_2():
print "/test called"
start = datetime.datetime.now()
template = env.get_template('test2_graph.html')
js_url = url_for('static', filename='js/loadGraph.js')
# graph={}
# graph["nodes"] = json_nodes
# graph["edges"] = json_edges
data = {}
data["json_data"] = json_data
data["json_nodes"] = json_nodes
data["json_topics"] = json_topics
data["js_url"] = js_url
data["host"] = host
data["remote_server"] = remote_server
data["reading_from"] = reading_from
data["largest_weight"] = ext_client.get_largest_weight(zk)
data["smallest_weight"] = ext_client.get_smallest_weight(zk)
data["proxy"] = proxy
sendData = json.dumps(data)
# print "---------------------------"
# print "---------------------------"
# print "---------------------------"
end = datetime.datetime.now()
print "Total time to load /test page: " + str(end-start)
#print data
return template.render(data=sendData)#json_data=json_data, json_nodes=json_nodes, json_topics=json_topics, js_url=js_url, host=host, remote_server=remote_server, readingFrom=reading_from)
# Method to return offset rates
def get_rate(rate_type, prevData):
one_minute = 60
if rate_type == "minute":
#Get the minute rate
if len(prevData) > one_minute:
#print " Min rate "
#print "L: " + str(prevData[second_counter+1]) + " S: " + str(prevData[second_counter-one_minute])
#min_rate = abs(prevData[second_counter+1] - prevData[second_counter-one_minute])
min_rate = abs(prevData[second_counter] - prevData[second_counter-one_minute])/(one_minute + 0.0)
return min_rate
else:
min_rate = 0
return min_rate
if rate_type == "mean":
#Get the mean rate
global second_counter
if second_counter > 0:
#print " Mean rate"
#Method 1
#global predata_sum
#mean_rate = predata_sum/(second_counter+0.0)
#Method 2
# print "L: " + str(prevData[second_counter+1]) + " S: " + str(prevData[0])
# mean_rate = abs(prevData[second_counter+1] - prevData[0])/(second_counter+0.0)
#Method 3
# print " ArrLen: " + str(len(prevData))
# print " SC: " + str(second_counter)
# print " L: " + str(prevData[second_counter])+ " S: " + str(prevData[0])
mean_rate = abs(prevData[second_counter] - prevData[0])/(second_counter+0.0)
#print " MeanR " + str(mean_rate)
return mean_rate
else:
mean_rate = -1
return mean_rate
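# Example (illustrative, not part of the original module): with one count per
# second and counts that grow by 10 each second, both rates work out to 10/s.
#   prevData = [10 * i for i in range(62)]   # hypothetical offset samples
#   second_counter = 61
#   get_rate("minute", prevData)  -> abs(610 - 10) / 60.0 = 10.0
#   get_rate("mean", prevData)    -> abs(610 - 0) / 61.0  = 10.0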
# Threaded method which calculates the offsets
def calculate_offsets():
#Get individual offsets of a consumer
for c in consumers:
global prev_consumer_info
#prev_consumer_info[c] = {}
topics = ext_client.show_topics_consumed(zk, c)
for t in topics:
#
#
# Consumer Rates
#
#
            # Get the offsets for every consumer and corresponding topic
offset = ext_client.get_consumer_offset(zk, c, t)
#Append count to the array holder
prev_consumer_counts[c].append(offset)
#Get the msg_out_minute_rate for this topic
min_rate = get_rate("minute", prev_consumer_counts[c])
#print "Min: " + str(min_rate)
mean_rate = get_rate("mean", prev_consumer_counts[c])
#print "Mean: " + str(mean_rate)
if mean_rate == -1:
mean_rate = 0
#Update the holder for this topic
global prev_consumer_info
prev_consumer_info[c][t]["count"] = offset
prev_consumer_info[c][t]["min_rate"] = min_rate
prev_consumer_info[c][t]["mean_rate"] = mean_rate
#
#
# Topic rates
#
#
#Get the count for this topic
count = ext_client.get_accumulated_topic_offset(zk, t)
#Update the sum for this topic
topic_sums[t] = topic_sums[t] + count
#Append count to the array holder
prev_topic_counts[t].append(count)
#Get the msg_out_minute_rate for this topic
min_rate = get_rate("minute", prev_topic_counts[t])
mean_rate = get_rate("mean", prev_topic_counts[t])
if mean_rate == -1:
mean_rate = 0
#Update the holder for this topic
global prev_topic_info
prev_topic_info[t]["count"] = count
prev_topic_info[t]["min_rate"] = min_rate
prev_topic_info[t]["mean_rate"] = mean_rate
global second_counter
second_counter = second_counter + 1
#Reset the rate calculations every 24hrs
if second_counter == seconds_in_a_day:
second_counter = 0
threading.Timer(1, calculate_offsets).start()
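# Minimal sketch of the self-rearming timer pattern used by calculate_offsets
# (illustrative only): each run schedules the next one a second later, giving a
# periodic task without a dedicated polling loop.
#   def tick():
#       do_work()                          # hypothetical work function
#       threading.Timer(1, tick).start()   # re-arm for the next second
#   tick()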
# Returns the consumer offsets
@app.route('/getconsumerrates')
def get_consumer_offsets():
return json.dumps(prev_consumer_info)
# Returns the accumulated offsets for each topic
@app.route('/getaccumulatedrates')
def get_accumulated_offsets():
return json.dumps(prev_topic_info)
# Takes care of the currently selected node
@app.route('/current_node')
def draw_node():
print "Draw node called"
template = env.get_template('node.html')
return template.render(json_data=json_data)
@app.route('/orgraph')
def or_graph():
template = env.get_template('orgraph.html')
return template.render(json_data=json_data)
| johankaito/fufuka | microblog/app/views.py | Python | apache-2.0 | 10,442 | 0.032561 |
def load_resources(app):
# import all our Resources to get them registered
import home
import facebook
import fblogin
home.load_resources(app)
fblogin.load_resources(app)
| dcsan/ltel | resources/__init__.py | Python | mit | 197 | 0.005076 |
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
from django.forms import widgets
from django.core.mail import send_mail
from django.conf import settings
from form_designer import app_settings
import re
from form_designer.pickled_object_field import PickledObjectField
from form_designer.model_name_field import ModelNameField
from form_designer.template_field import TemplateTextField, TemplateCharField
#==============================================================================
class FormDefinition(models.Model):
"""
A model that defines a form and its components and properties.
"""
name = models.SlugField(_('Name'), max_length=255, unique=True)
title = models.CharField(_('Title'), max_length=255, blank=True, null=True)
action = models.URLField(_('Target URL'), help_text=_('If you leave this empty, the page where the form resides will be requested, and you can use the mail form and logging features. You can also send data to external sites: For instance, enter "http://www.google.ch/search" to create a search form.'), max_length=255, blank=True, null=True)
mail_to = TemplateCharField(_('Send form data to e-mail address'), help_text=('Separate several addresses with a comma. Your form fields are available as template context. Example: "admin@domain.com, {{ from_email }}" if you have a field named `from_email`.'), max_length=255, blank=True, null=True)
mail_from = TemplateCharField(_('Sender address'), max_length=255, help_text=('Your form fields are available as template context. Example: "{{ firstname }} {{ lastname }} <{{ from_email }}>" if you have fields named `first_name`, `last_name`, `from_email`.'), blank=True, null=True)
mail_subject = TemplateCharField(_('e-Mail subject'), max_length=255, help_text=('Your form fields are available as template context. Example: "Contact form {{ subject }}" if you have a field named `subject`.'), blank=True, null=True)
method = models.CharField(_('Method'), max_length=10, default="POST", choices = (('POST', 'POST'), ('GET', 'GET')))
success_message = models.CharField(_('Success message'), max_length=255, blank=True, null=True)
error_message = models.CharField(_('Error message'), max_length=255, blank=True, null=True)
submit_label = models.CharField(_('Submit button label'), max_length=255, blank=True, null=True)
log_data = models.BooleanField(_('Log form data'), help_text=_('Logs all form submissions to the database.'), default=True)
success_redirect = models.BooleanField(_('Redirect after success'), help_text=_('You should install django_notify if you want to enable this.') if not 'django_notify' in settings.INSTALLED_APPS else None, default=False)
success_clear = models.BooleanField(_('Clear form after success'), default=True)
allow_get_initial = models.BooleanField(_('Allow initial values via URL'), help_text=_('If enabled, you can fill in form fields by adding them to the query string.'), default=True)
message_template = TemplateTextField(_('Message template'), help_text=_('Your form fields are available as template context. Example: "{{ message }}" if you have a field named `message`. To iterate over all fields, use the variable `data` (a list containing a dictionary for each form field, each containing the elements `name`, `label`, `value`).'), blank=True, null=True)
form_template_name = models.CharField(_('Form template'), max_length=255, choices=app_settings.get('FORM_DESIGNER_FORM_TEMPLATES'), blank=True, null=True)
#--------------------------------------------------------------------------
class Meta:
verbose_name = _('form')
verbose_name_plural = _('forms')
#--------------------------------------------------------------------------
    def get_field_dict(self):
        # keyed by field name; avoids shadowing the builtin `dict`
        field_dict = {}
        for field in self.fields.all():
            field_dict[field.name] = field
        return field_dict
#--------------------------------------------------------------------------
def get_form_data(self, form):
data = []
field_dict = self.get_field_dict()
form_keys = form.fields.keys()
def_keys = field_dict.keys()
for key in form_keys:
if key in def_keys and field_dict[key].include_result:
value = form.cleaned_data[key]
if getattr(value, '__form_data__', False):
value = value.__form_data__()
data.append({'name': key, 'label': form.fields[key].label, 'value': value})
return data
#--------------------------------------------------------------------------
    def get_form_data_dict(self, form_data):
        data_dict = {}
        for field in form_data:
            data_dict[field['name']] = field['value']
        return data_dict
#--------------------------------------------------------------------------
def compile_message(self, form_data, template=None):
from django.template.loader import get_template
from django.template import Context, Template
if template:
t = get_template(template)
elif not self.message_template:
t = get_template('txt/formdefinition/data_message.txt')
else:
t = Template(self.message_template)
context = Context(self.get_form_data_dict(form_data))
context['data'] = form_data
return t.render(context)
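    # Example (illustrative; assumes a definition whose message_template is
    # "Received: {{ message }}" and a form with a single `message` field):
    #   >>> fd.compile_message([{'name': 'message', 'label': 'Message', 'value': 'hi'}])
    #   u'Received: hi'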
#--------------------------------------------------------------------------
def count_fields(self):
return self.fields.count()
count_fields.short_description = _('Fields')
#--------------------------------------------------------------------------
def __unicode__(self):
return self.title or self.name
#--------------------------------------------------------------------------
def log(self, form):
"""
Saves the form submission.
"""
form_data = self.get_form_data(form)
field_dict = self.get_field_dict()
# create a submission
submission = FormSubmission()
submission.save()
# log each field's value individually
for field_data in form_data:
field_submission = FormFieldSubmission(submission=submission, definition_field=field_dict[field_data['name']],
value=field_data['value'])
field_submission.save()
return submission
#--------------------------------------------------------------------------
def string_template_replace(self, text, context_dict):
from django.template import Context, Template, TemplateSyntaxError
try:
t = Template(text)
return t.render(Context(context_dict))
except TemplateSyntaxError:
return text
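    # Example (illustrative):
    #   >>> fd.string_template_replace('{{ first_name }} <{{ from_email }}>',
    #   ...     {'first_name': 'Ada', 'from_email': 'ada@example.com'})
    #   u'Ada <ada@example.com>'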
#--------------------------------------------------------------------------
def send_mail(self, form):
form_data = self.get_form_data(form)
message = self.compile_message(form_data)
context_dict = self.get_form_data_dict(form_data)
        mail_to = re.compile(r'\s*[,;]+\s*').split(self.mail_to)
for key, email in enumerate(mail_to):
mail_to[key] = self.string_template_replace(email, context_dict)
mail_from = self.mail_from or None
if mail_from:
mail_from = self.string_template_replace(mail_from, context_dict)
if self.mail_subject:
mail_subject = self.string_template_replace(self.mail_subject, context_dict)
else:
mail_subject = self.title
        import logging
        logging.debug('Mail: ' + repr(mail_from) + ' --> ' + repr(mail_to))
        send_mail(mail_subject, message, mail_from or None, mail_to, fail_silently=False)
#--------------------------------------------------------------------------
@property
def submit_flag_name(self):
name = app_settings.get('FORM_DESIGNER_SUBMIT_FLAG_NAME') % self.name
while self.fields.filter(name__exact=name).count() > 0:
name += '_'
return name
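    # Example (illustrative; assumes FORM_DESIGNER_SUBMIT_FLAG_NAME is
    # 'submit_%s' and a form named "contact" that already defines a field
    # called "submit_contact", so an underscore is appended to avoid a clash):
    #   >>> fd.submit_flag_name
    #   'submit_contact_'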
#--------------------------------------------------------------------------
def to_field_list(self):
"""
Converts this form definition into a list of dictionaries, each
dictionary representing a field and its components.
@param fields A list of fields to include. By default, if this is
None, all fields will be generated.
@param field_name_replacements
"""
field_arr = []
# run through all of the fields associated with this definition
for field in self.fields.all():
choices = []
if field.choices.count():
choices = [{'value': u'%s' % choice.value, 'label': u'%s' % choice.label} for choice in field.choices.all()]
elif field.choice_model:
choices = [{'value': u'%s' % obj.id, 'label': u'%s' % obj} for obj in ModelNameField.get_model_from_string(field.choice_model).objects.all()]
field_item = {
'name': u'%s' % field.name,
'label': u'%s' % field.label,
'class': u'%s' % field.field_class,
'position': u'%s' % field.position,
'widget': u'%s' % field.widget,
'initial': u'%s' % field.initial,
'help_text': u'%s' % field.help_text,
}
            if choices:
                field_item['choices'] = choices
            field_arr.append(field_item)
        return field_arr
#==============================================================================
class FormDefinitionFieldChoice(models.Model):
"""
A single choice available for a form definition field.
"""
label = models.TextField(_('Label'), help_text=_('A descriptive value for the choice'), blank=True, null=True)
value = models.TextField(_('Value'), help_text=_('The value of the choice when submitting the form'), blank=True, null=True)
#--------------------------------------------------------------------------
def __unicode__(self):
return u'%s (%s)' % (self.label, self.value)
#==============================================================================
class FieldChoiceContainer(object):
def __init__(self, value='', label=''):
self.value = value
self.label = label
#==============================================================================
class FormDefinitionField(models.Model):
"""
A single field within a form definition.
"""
form_definition = models.ForeignKey(FormDefinition, verbose_name=_('Form definition'), related_name='fields')
field_class = models.CharField(_('Field class'), choices=app_settings.get('FORM_DESIGNER_FIELD_CLASSES'), max_length=32)
position = models.IntegerField(_('Position'), blank=True, null=True)
name = models.SlugField(_('Name'), max_length=255)
label = models.CharField(_('Label'), max_length=255, blank=True, null=True)
required = models.BooleanField(_('Required'), default=True)
include_result = models.BooleanField(_('Include in result'), help_text=('If this is disabled, the field value will not be included in logs and e-mails generated from form data.'), default=True)
widget = models.CharField(_('Widget'), default='', choices=app_settings.get('FORM_DESIGNER_WIDGET_CLASSES'), max_length=255, blank=True, null=True)
initial = models.TextField(_('Initial value'), blank=True, null=True)
help_text = models.CharField(_('Help text'), max_length=255, blank=True, null=True)
# the new model
choices = models.ManyToManyField(FormDefinitionFieldChoice, verbose_name=_('Choices'), help_text=_('The various options from which the user can choose'), blank=True, null=True)
max_length = models.IntegerField(_('Max. length'), blank=True, null=True)
min_length = models.IntegerField(_('Min. length'), blank=True, null=True)
max_value = models.FloatField(_('Max. value'), blank=True, null=True)
min_value = models.FloatField(_('Min. value'), blank=True, null=True)
max_digits = models.IntegerField(_('Max. digits'), blank=True, null=True)
decimal_places = models.IntegerField(_('Decimal places'), blank=True, null=True)
regex = models.CharField(_('Regular Expression'), max_length=255, blank=True, null=True)
choice_model_choices = app_settings.get('FORM_DESIGNER_CHOICE_MODEL_CHOICES')
choice_model = ModelNameField(_('Data model'), max_length=255, blank=True, null=True, choices=choice_model_choices, help_text=_('your_app.models.ModelName' if not choice_model_choices else None))
choice_model_empty_label = models.CharField(_('Empty label'), max_length=255, blank=True, null=True)
#--------------------------------------------------------------------------
def save(self, *args, **kwargs):
if self.position == None:
self.position = 0
super(FormDefinitionField, self).save()
#--------------------------------------------------------------------------
def ____init__(self, field_class=None, name=None, required=None, widget=None, label=None, initial=None, help_text=None, *args, **kwargs):
super(FormDefinitionField, self).__init__(*args, **kwargs)
self.name = name
self.field_class = field_class
self.required = required
self.widget = widget
self.label = label
self.initial = initial
self.help_text = help_text
#--------------------------------------------------------------------------
def get_choices(self, filter=None, order_by=None):
queryset = None
if self.field_class in ('forms.ModelChoiceField', 'forms.ModelMultipleChoiceField'):
            if filter:
                # `filter` is a queryset-method string such as "filter(flag=True)";
                # Python 2 exec can rebind the local `queryset` here.
                exec('queryset = ModelNameField.get_model_from_string(self.choice_model).objects.%s' % filter)
else:
queryset = ModelNameField.get_model_from_string(self.choice_model).objects.all()
if order_by:
queryset = queryset.order_by(order_by)
return [FieldChoiceContainer(value=item.id, label=item.title) for item in queryset]
else:
return self.choices.order_by('value')
#--------------------------------------------------------------------------
def get_form_field_init_args(self):
args = {
'required': self.required,
'label': self.label if self.label else '',
'initial': self.initial if self.initial else None,
'help_text': self.help_text,
}
if self.field_class in ('forms.CharField', 'forms.EmailField', 'forms.RegexField'):
args.update({
'max_length': self.max_length,
'min_length': self.min_length,
})
if self.field_class in ('forms.IntegerField', 'forms.DecimalField'):
args.update({
'max_value': int(self.max_value) if self.max_value != None else None,
'min_value': int(self.min_value) if self.min_value != None else None,
})
if self.field_class == 'forms.DecimalField':
args.update({
'max_value': self.max_value,
'min_value': self.min_value,
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
})
if self.field_class == 'forms.RegexField':
if self.regex:
args.update({
'regex': self.regex
})
if self.field_class in ('forms.ChoiceField', 'forms.MultipleChoiceField'):
#print "Choices count:", self.choices.count()
if self.choices.count():
# new method of creating choices
choices = [(choice.value, choice.label) for choice in self.choices.all()]
args.update({
'choices': tuple(choices)
})
#print "Choices:", choices
if self.field_class in ('forms.ModelChoiceField', 'forms.ModelMultipleChoiceField'):
args.update({
'queryset': ModelNameField.get_model_from_string(self.choice_model).objects.all()
})
if self.field_class == 'forms.ModelChoiceField':
args.update({
'empty_label': self.choice_model_empty_label
})
if self.widget:
args.update({
'widget': eval(self.widget)()
})
return args
#--------------------------------------------------------------------------
class Meta:
verbose_name = _('field')
verbose_name_plural = _('fields')
ordering = ['position']
#--------------------------------------------------------------------------
def __unicode__(self):
return self.label if self.label else self.name
#==============================================================================
class FormSubmission(models.Model):
"""
Represents a single submission of a particular type of form definition.
"""
created = models.DateTimeField(_('Created'), auto_now=True)
#--------------------------------------------------------------------------
class Meta:
verbose_name = _('form submission')
verbose_name_plural = _('form submissions')
ordering = ['-created']
#--------------------------------------------------------------------------
def __unicode__(self):
form_definition = self.form_definition
# if this submission has fields attached to it
if form_definition:
return u'%s at %s' % (form_definition, self.created)
else:
return u'Empty submission at %s' % self.created
#--------------------------------------------------------------------------
@property
def form_definition(self):
return self.fields.all()[0].definition_field.form_definition if self.fields.count() else None
#==============================================================================
class FormFieldSubmission(models.Model):
"""
Represents the content of a single submission's field.
"""
submission = models.ForeignKey(FormSubmission, verbose_name=_('Form submission'), help_text=_('The submission to which this particular submission component belongs'),
related_name='fields')
definition_field = models.ForeignKey(FormDefinitionField, verbose_name=_('Form definition field'),
help_text=_('The field in the form definition to which this submitted value belongs'),
related_name='submissions')
value = models.TextField(_('Value'), help_text=_('The actual submitted value'))
#--------------------------------------------------------------------------
def __unicode__(self):
value = u'%s' % self.value
truncated_value = value if len(value) < 10 else value[:10]+'...'
return u'%s: %s (%s)' % (self.definition_field, u'%s=%s' % (truncated_value, self.choice_label) if self.choice_label else truncated_value, self.submission)
#--------------------------------------------------------------------------
@property
def choice_label(self):
"""
Retrieves the label of the choice made by the user, should this
submission's field be linked to a set of choices.
TODO: Account for model choice fields.
"""
try:
# get the first choice that matches the available ones
choice = self.definition_field.choices.filter(value=self.value)[0]
        except Exception:
return None
return u'%s' % choice.label
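    # Example (illustrative; assumes the definition field has a choice with
    # value "1" and label "Yes", and the submitted value was "1"):
    #   >>> field_submission.choice_label
    #   u'Yes'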
#==============================================================================
if 'cms' in settings.INSTALLED_APPS:
from cms.models import CMSPlugin
class CMSFormDefinition(CMSPlugin):
form_definition = models.ForeignKey(FormDefinition, verbose_name=_('Form'))
def __unicode__(self):
return self.form_definition.__unicode__()
| praekelt/django-form-designer | form_designer/models.py | Python | bsd-3-clause | 20,394 | 0.010052 |
import ctypes
import sys
if sys.platform.startswith("win"):
_dwf = ctypes.cdll.dwf
elif sys.platform.startswith("darwin"):
_dwf = ctypes.cdll.LoadLibrary("libdwf.dylib")
else:
_dwf = ctypes.cdll.LoadLibrary("libdwf.so")
class _types(object):
c_byte_p = ctypes.POINTER(ctypes.c_byte)
c_double_p = ctypes.POINTER(ctypes.c_double)
c_int_p = ctypes.POINTER(ctypes.c_int)
c_uint_p = ctypes.POINTER(ctypes.c_uint)
class HDWF(ctypes.c_int):
pass
hdwfNone = HDWF(0)
class ENUMFILTER(ctypes.c_int):
pass
enumfilterAll = ENUMFILTER(0)
enumfilterEExplorer = ENUMFILTER(1)
enumfilterDiscovery = ENUMFILTER(2)
class DEVID(ctypes.c_int):
pass
devidEExplorer = DEVID(1)
devidDiscovery = DEVID(2)
class DEVVER(ctypes.c_int):
pass
devverEExplorerC = DEVVER(2)
devverEExplorerE = DEVVER(4)
devverEExplorerF = DEVVER(5)
devverDiscoveryA = DEVVER(1)
devverDiscoveryB = DEVVER(2)
devverDiscoveryC = DEVVER(3)
class TRIGSRC(ctypes.c_byte):
pass
trigsrcNone = TRIGSRC(0)
trigsrcPC = TRIGSRC(1)
trigsrcDetectorAnalogIn = TRIGSRC(2)
trigsrcDetectorDigitalIn = TRIGSRC(3)
trigsrcAnalogIn = TRIGSRC(4)
trigsrcDigitalIn = TRIGSRC(5)
trigsrcDigitalOut = TRIGSRC(6)
trigsrcAnalogOut1 = TRIGSRC(7)
trigsrcAnalogOut2 = TRIGSRC(8)
trigsrcAnalogOut3 = TRIGSRC(9)
trigsrcAnalogOut4 = TRIGSRC(10)
trigsrcExternal1 = TRIGSRC(11)
trigsrcExternal2 = TRIGSRC(12)
trigsrcExternal3 = TRIGSRC(13)
trigsrcExternal4 = TRIGSRC(14)
class DwfState(ctypes.c_byte):
pass
DwfStateReady = DwfState(0)
DwfStateConfig = DwfState(4)
DwfStatePrefill = DwfState(5)
DwfStateArmed = DwfState(1)
DwfStateWait = DwfState(7)
DwfStateTriggered = DwfState(3)
DwfStateRunning = DwfState(3)  # same value as Triggered: the SDK uses one code for both
DwfStateDone = DwfState(2)
class ACQMODE(ctypes.c_int):
pass
acqmodeSingle = ACQMODE(0)
acqmodeScanShift = ACQMODE(1)
acqmodeScanScreen = ACQMODE(2)
acqmodeRecord = ACQMODE(3)
class FILTER(ctypes.c_int):
pass
filterDecimate = FILTER(0)
filterAverage = FILTER(1)
filterMinMax = FILTER(2)
class TRIGTYPE(ctypes.c_int):
pass
trigtypeEdge = TRIGTYPE(0)
trigtypePulse = TRIGTYPE(1)
trigtypeTransition = TRIGTYPE(2)
class TRIGCOND(ctypes.c_int):
pass
trigcondRisingPositive = TRIGCOND(0)
trigcondFallingNegative = TRIGCOND(1)
class TRIGLEN(ctypes.c_int):
pass
triglenLess = TRIGLEN(0)
triglenTimeout = TRIGLEN(1)
triglenMore = TRIGLEN(2)
class DWFERC(ctypes.c_int):
pass
dwfercNoErc = DWFERC(0) # No error occurred
dwfercUnknownError = DWFERC(1) # Unknown error occurred
dwfercApiLockTimeout = DWFERC(2) # API waiting on pending API timed out
dwfercAlreadyOpened = DWFERC(3) # Device already opened
dwfercNotSupported = DWFERC(4) # Device not supported
dwfercInvalidParameter0 = DWFERC(0x10) # Invalid parameter sent in API call
dwfercInvalidParameter1 = DWFERC(0x11) # Invalid parameter sent in API call
dwfercInvalidParameter2 = DWFERC(0x12) # Invalid parameter sent in API call
dwfercInvalidParameter3 = DWFERC(0x13) # Invalid parameter sent in API call
dwfercInvalidParameter4 = DWFERC(0x14) # Invalid parameter sent in API call
class FUNC(ctypes.c_byte):
pass
funcDC = FUNC(0)
funcSine = FUNC(1)
funcSquare = FUNC(2)
funcTriangle = FUNC(3)
funcRampUp = FUNC(4)
funcRampDown = FUNC(5)
funcNoise = FUNC(6)
funcCustom = FUNC(30)
funcPlay = FUNC(31)
class ANALOGIO(ctypes.c_byte):
pass
analogioEnable = ANALOGIO(1)
analogioVoltage = ANALOGIO(2)
analogioCurrent = ANALOGIO(3)
analogioPower = ANALOGIO(4)
analogioTemperature = ANALOGIO(5)
class AnalogOutNode(ctypes.c_int):
pass
AnalogOutNodeCarrier = AnalogOutNode(0)
AnalogOutNodeFM = AnalogOutNode(1)
AnalogOutNodeAM = AnalogOutNode(2)
class DwfDigitalInClockSource(ctypes.c_int):
pass
DwfDigitalInClockSourceInternal = DwfDigitalInClockSource(0)
DwfDigitalInClockSourceExternal = DwfDigitalInClockSource(1)
class DwfDigitalInSampleMode(ctypes.c_int):
pass
DwfDigitalInSampleModeSimple = DwfDigitalInSampleMode(0)
DwfDigitalInSampleModeNoise = DwfDigitalInSampleMode(1)
class DwfDigitalOutOutput(ctypes.c_int):
pass
DwfDigitalOutOutputPushPull = DwfDigitalOutOutput(0)
DwfDigitalOutOutputOpenDrain = DwfDigitalOutOutput(1)
DwfDigitalOutOutputOpenSource = DwfDigitalOutOutput(2)
DwfDigitalOutOutputThreeState = DwfDigitalOutOutput(3)
class DwfDigitalOutType(ctypes.c_int):
pass
DwfDigitalOutTypePulse = DwfDigitalOutType(0)
DwfDigitalOutTypeCustom = DwfDigitalOutType(1)
DwfDigitalOutTypeRandom = DwfDigitalOutType(2)
class DwfDigitalOutIdle(ctypes.c_int):
pass
DwfDigitalOutIdleInit = DwfDigitalOutIdle(0)
DwfDigitalOutIdleLow = DwfDigitalOutIdle(1)
DwfDigitalOutIdleHigh = DwfDigitalOutIdle(2)
DwfDigitalOutIdleZet = DwfDigitalOutIdle(3)
def IsBitSet(fs, bit):
return ((fs & (1 << bit)) != 0)
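# Example (illustrative): testing individual bits of an info bitfield.
#   >>> IsBitSet(0b0100, 2)
#   True
#   >>> IsBitSet(0b0100, 1)
#   False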
# Error and version APIs:
_FDwfGetLastError = _dwf.FDwfGetLastError
_FDwfGetLastError.argtypes = [ctypes.POINTER(DWFERC)]
_FDwfGetLastError.restype = bool
def FDwfGetLastError():
erc = DWFERC()
return (_FDwfGetLastError(ctypes.byref(erc)), erc)
_FDwfGetLastErrorMsg = _dwf.FDwfGetLastErrorMsg
_FDwfGetLastErrorMsg.argtypes = [ctypes.POINTER(ctypes.c_char * 512)]
_FDwfGetLastErrorMsg.restype = bool
def FDwfGetLastErrorMsg():
buf = ctypes.create_string_buffer(512)
return (_FDwfGetLastErrorMsg(ctypes.byref(buf)), buf.value)
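# Illustrative sketch (not part of the original binding): inspecting the last
# error after any wrapper reports failure via its boolean first element.
#   _, erc = FDwfGetLastError()
#   _, msg = FDwfGetLastErrorMsg()
#   print("last error (%d): %s" % (erc.value, msg))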
_FDwfGetVersion = _dwf.FDwfGetVersion # Returns DLL version, for instance: "2.4.3"
_FDwfGetVersion.argtypes = [ctypes.POINTER(ctypes.c_char * 32)]
_FDwfGetVersion.restype = bool
def FDwfGetVersion():
buf = ctypes.create_string_buffer(32)
return (_FDwfGetVersion(ctypes.byref(buf)), buf.value)
# DEVICE MANAGEMENT FUNCTIONS
# Enumeration:
_FDwfEnum = _dwf.FDwfEnum
_FDwfEnum.argtypes = [ENUMFILTER, _types.c_int_p]
_FDwfEnum.restype = bool
def FDwfEnum(enumFilter):
tmp = ctypes.c_int()
return (_FDwfEnum(enumFilter, ctypes.byref(tmp)), tmp.value)
_FDwfEnumDeviceType = _dwf.FDwfEnumDeviceType
_FDwfEnumDeviceType.argtypes = [ctypes.c_int, ctypes.POINTER(DEVID), ctypes.POINTER(DEVVER)]
_FDwfEnumDeviceType.restype = bool
def FDwfEnumDeviceType(idxDevice):
devid = DEVID()
devver = DEVVER()
return (_FDwfEnumDeviceType(idxDevice, ctypes.byref(devid), ctypes.byref(devver)), devid, devver)
_FDwfEnumDeviceIsOpened = _dwf.FDwfEnumDeviceIsOpened
_FDwfEnumDeviceIsOpened.argtypes = [ctypes.c_int, _types.c_byte_p]
_FDwfEnumDeviceIsOpened.restype = bool
def FDwfEnumDeviceIsOpened(idxDevice):
isopen = ctypes.c_byte()
return (_FDwfEnumDeviceIsOpened(idxDevice, ctypes.byref(isopen)), bool(isopen.value))
_FDwfEnumUserName = _dwf.FDwfEnumUserName
_FDwfEnumUserName.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char * 32)]
_FDwfEnumUserName.restype = bool
def FDwfEnumUserName(idxDevice):
name = ctypes.create_string_buffer(32)
return (_FDwfEnumUserName(idxDevice, ctypes.byref(name)), name.value)
_FDwfEnumDeviceName = _dwf.FDwfEnumDeviceName
_FDwfEnumDeviceName.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char * 32)]
_FDwfEnumDeviceName.restype = bool
def FDwfEnumDeviceName(idxDevice):
name = ctypes.create_string_buffer(32)
return (_FDwfEnumDeviceName(idxDevice, ctypes.byref(name)), name.value)
_FDwfEnumSN = _dwf.FDwfEnumSN
_FDwfEnumSN.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char * 32)]
_FDwfEnumSN.restype = bool
def FDwfEnumSN(idxDevice):
sn = ctypes.create_string_buffer(32)
return (_FDwfEnumSN(idxDevice, ctypes.byref(sn)), sn.value)
# Open/Close:
_FDwfDeviceOpen = _dwf.FDwfDeviceOpen
_FDwfDeviceOpen.argtypes = [ctypes.c_int, ctypes.POINTER(HDWF)]
_FDwfDeviceOpen.restype = bool
def FDwfDeviceOpen(idxDevice):
hdwf = HDWF()
return (_FDwfDeviceOpen(idxDevice, ctypes.byref(hdwf)), hdwf)
FDwfDeviceClose = _dwf.FDwfDeviceClose
FDwfDeviceClose.argtypes = [HDWF]
FDwfDeviceClose.restype = bool
FDwfDeviceCloseAll = _dwf.FDwfDeviceCloseAll
FDwfDeviceCloseAll.argtypes = []
FDwfDeviceCloseAll.restype = bool
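# Illustrative usage sketch (not part of the original binding): every wrapper
# returns a tuple whose first element is the API success flag.
#   ok, num_devices = FDwfEnum(enumfilterAll)
#   if ok and num_devices > 0:
#       ok, hdwf = FDwfDeviceOpen(0)   # open the first enumerated device
#       if ok:
#           FDwfDeviceClose(hdwf)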
FDwfDeviceAutoConfigureSet = _dwf.FDwfDeviceAutoConfigureSet
FDwfDeviceAutoConfigureSet.argtypes = [HDWF, ctypes.c_byte]
FDwfDeviceAutoConfigureSet.restype = bool
_FDwfDeviceAutoConfigureGet = _dwf.FDwfDeviceAutoConfigureGet
_FDwfDeviceAutoConfigureGet.argtypes = [HDWF, _types.c_byte_p]
_FDwfDeviceAutoConfigureGet.restype = bool
def FDwfDeviceAutoConfigureGet(hdwf):
value = ctypes.c_byte()
return (_FDwfDeviceAutoConfigureGet(hdwf, ctypes.byref(value)), bool(value.value))
FDwfDeviceReset = _dwf.FDwfDeviceReset
FDwfDeviceReset.argtypes = [HDWF]
FDwfDeviceReset.restype = bool
_FDwfDeviceTriggerInfo = _dwf.FDwfDeviceTriggerInfo # use IsBitSet
_FDwfDeviceTriggerInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfDeviceTriggerInfo.restype = bool
def FDwfDeviceTriggerInfo(hdwf):
info = ctypes.c_int()
if not _FDwfDeviceTriggerInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(TRIGSRC(i))
return (True, supported)
FDwfDeviceTriggerSet = _dwf.FDwfDeviceTriggerSet
FDwfDeviceTriggerSet.argtypes = [HDWF, ctypes.c_int, TRIGSRC]
FDwfDeviceTriggerSet.restype = bool
_FDwfDeviceTriggerGet = _dwf.FDwfDeviceTriggerGet
_FDwfDeviceTriggerGet.argtypes = [HDWF, ctypes.c_int, ctypes.POINTER(TRIGSRC)]
_FDwfDeviceTriggerGet.restype = bool
def FDwfDeviceTriggerGet(hdwf, idxPin):
src = TRIGSRC()
return (_FDwfDeviceTriggerGet(hdwf, idxPin, ctypes.byref(src)), src)
FDwfDeviceTriggerPC = _dwf.FDwfDeviceTriggerPC
FDwfDeviceTriggerPC.argtypes = [HDWF]
FDwfDeviceTriggerPC.restype = bool
# ANALOG IN INSTRUMENT FUNCTIONS
# Control and status:
FDwfAnalogInReset = _dwf.FDwfAnalogInReset
FDwfAnalogInReset.argtypes = [HDWF]
FDwfAnalogInReset.restype = bool
FDwfAnalogInConfigure = _dwf.FDwfAnalogInConfigure
FDwfAnalogInConfigure.argtypes = [HDWF, ctypes.c_byte, ctypes.c_byte]
FDwfAnalogInConfigure.restype = bool
_FDwfAnalogInStatus = _dwf.FDwfAnalogInStatus
_FDwfAnalogInStatus.argtypes = [HDWF, ctypes.c_byte, ctypes.POINTER(DwfState)]
_FDwfAnalogInStatus.restype = bool
def FDwfAnalogInStatus(hdwf, fReadData):
state = DwfState()
return (_FDwfAnalogInStatus(hdwf, fReadData, ctypes.byref(state)), state)
_FDwfAnalogInStatusSamplesLeft = _dwf.FDwfAnalogInStatusSamplesLeft
_FDwfAnalogInStatusSamplesLeft.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInStatusSamplesLeft.restype = bool
def FDwfAnalogInStatusSamplesLeft(hdwf):
value = ctypes.c_int()
return (_FDwfAnalogInStatusSamplesLeft(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInStatusSamplesValid = _dwf.FDwfAnalogInStatusSamplesValid
_FDwfAnalogInStatusSamplesValid.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInStatusSamplesValid.restype = bool
def FDwfAnalogInStatusSamplesValid(hdwf):
value = ctypes.c_int()
return (_FDwfAnalogInStatusSamplesValid(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInStatusIndexWrite = _dwf.FDwfAnalogInStatusIndexWrite
_FDwfAnalogInStatusIndexWrite.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInStatusIndexWrite.restype = bool
def FDwfAnalogInStatusIndexWrite(hdwf):
value = ctypes.c_int()
return (_FDwfAnalogInStatusIndexWrite(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInStatusAutoTriggered = _dwf.FDwfAnalogInStatusAutoTriggered
_FDwfAnalogInStatusAutoTriggered.argtypes = [HDWF, _types.c_byte_p]
_FDwfAnalogInStatusAutoTriggered.restype = bool
def FDwfAnalogInStatusAutoTriggered(hdwf):
value = ctypes.c_byte()
return (_FDwfAnalogInStatusAutoTriggered(hdwf, ctypes.byref(value)), bool(value.value))
FDwfAnalogInStatusData = _dwf.FDwfAnalogInStatusData
FDwfAnalogInStatusData.argtypes = [HDWF, ctypes.c_int, _types.c_double_p, ctypes.c_int]
FDwfAnalogInStatusData.restype = bool
_FDwfAnalogInStatusSample = _dwf.FDwfAnalogInStatusSample
_FDwfAnalogInStatusSample.argtypes = [HDWF, ctypes.c_int, _types.c_double_p]
_FDwfAnalogInStatusSample.restype = bool
def FDwfAnalogInStatusSample(hdwf, idxChannel):
value = ctypes.c_double()
return (_FDwfAnalogInStatusSample(hdwf, idxChannel, ctypes.byref(value)), value.value)
_FDwfAnalogInStatusRecord = _dwf.FDwfAnalogInStatusRecord
_FDwfAnalogInStatusRecord.argtypes = [HDWF, _types.c_int_p, _types.c_int_p, _types.c_int_p]
_FDwfAnalogInStatusRecord.restype = bool
def FDwfAnalogInStatusRecord(hdwf):
available = ctypes.c_int()
lost = ctypes.c_int()
corrupt = ctypes.c_int()
return (_FDwfAnalogInStatusRecord(hdwf, ctypes.byref(available), ctypes.byref(lost), ctypes.byref(corrupt)), available.value, lost.value, corrupt.value)
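# Illustrative record-mode polling sketch (not part of the original binding):
# read the acquisition status, then fetch whatever samples have accumulated.
#   ok, state = FDwfAnalogInStatus(hdwf, True)
#   ok, available, lost, corrupt = FDwfAnalogInStatusRecord(hdwf)
#   if available > 0:
#       buf = (ctypes.c_double * available)()
#       FDwfAnalogInStatusData(hdwf, 0, buf, available)   # channel 0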
FDwfAnalogInRecordLengthSet = _dwf.FDwfAnalogInRecordLengthSet
FDwfAnalogInRecordLengthSet.argtypes = [HDWF, ctypes.c_double]
FDwfAnalogInRecordLengthSet.restype = bool
_FDwfAnalogInRecordLengthGet = _dwf.FDwfAnalogInRecordLengthGet
_FDwfAnalogInRecordLengthGet.argtypes = [HDWF, _types.c_double_p]
_FDwfAnalogInRecordLengthGet.restype = bool
def FDwfAnalogInRecordLengthGet(hdwf):
value = ctypes.c_double()
return (_FDwfAnalogInRecordLengthGet(hdwf, ctypes.byref(value)), value.value)
# Acquisition configuration:
_FDwfAnalogInFrequencyInfo = _dwf.FDwfAnalogInFrequencyInfo
_FDwfAnalogInFrequencyInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p]
_FDwfAnalogInFrequencyInfo.restype = bool
def FDwfAnalogInFrequencyInfo(hdwf):
min = ctypes.c_double()
max = ctypes.c_double()
return (_FDwfAnalogInFrequencyInfo(hdwf, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogInFrequencySet = _dwf.FDwfAnalogInFrequencySet
FDwfAnalogInFrequencySet.argtypes = [HDWF, ctypes.c_double]
FDwfAnalogInFrequencySet.restype = bool
_FDwfAnalogInFrequencyGet = _dwf.FDwfAnalogInFrequencyGet
_FDwfAnalogInFrequencyGet.argtypes = [HDWF, _types.c_double_p]
_FDwfAnalogInFrequencyGet.restype = bool
def FDwfAnalogInFrequencyGet(hdwf):
value = ctypes.c_double()
return (_FDwfAnalogInFrequencyGet(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInBitsInfo = _dwf.FDwfAnalogInBitsInfo
_FDwfAnalogInBitsInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInBitsInfo.restype = bool
def FDwfAnalogInBitsInfo(hdwf):
value = ctypes.c_int()
return (_FDwfAnalogInBitsInfo(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInBufferSizeInfo = _dwf.FDwfAnalogInBufferSizeInfo
_FDwfAnalogInBufferSizeInfo.argtypes = [HDWF, _types.c_int_p, _types.c_int_p]
_FDwfAnalogInBufferSizeInfo.restype = bool
def FDwfAnalogInBufferSizeInfo(hdwf):
min = ctypes.c_int()
max = ctypes.c_int()
return (_FDwfAnalogInBufferSizeInfo(hdwf, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogInBufferSizeSet = _dwf.FDwfAnalogInBufferSizeSet
FDwfAnalogInBufferSizeSet.argtypes = [HDWF, ctypes.c_int]
FDwfAnalogInBufferSizeSet.restype = bool
_FDwfAnalogInBufferSizeGet = _dwf.FDwfAnalogInBufferSizeGet
_FDwfAnalogInBufferSizeGet.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInBufferSizeGet.restype = bool
def FDwfAnalogInBufferSizeGet(hdwf):
value = ctypes.c_int()
return (_FDwfAnalogInBufferSizeGet(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInAcquisitionModeInfo = _dwf.FDwfAnalogInAcquisitionModeInfo # use IsBitSet
_FDwfAnalogInAcquisitionModeInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInAcquisitionModeInfo.restype = bool
def FDwfAnalogInAcquisitionModeInfo(hdwf):
info = ctypes.c_int()
if not _FDwfAnalogInAcquisitionModeInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(ACQMODE(i))
return (True, supported)
FDwfAnalogInAcquisitionModeSet = _dwf.FDwfAnalogInAcquisitionModeSet
FDwfAnalogInAcquisitionModeSet.argtypes = [HDWF, ACQMODE]
FDwfAnalogInAcquisitionModeSet.restype = bool
_FDwfAnalogInAcquisitionModeGet = _dwf.FDwfAnalogInAcquisitionModeGet
_FDwfAnalogInAcquisitionModeGet.argtypes = [HDWF, ctypes.POINTER(ACQMODE)]
_FDwfAnalogInAcquisitionModeGet.restype = bool
def FDwfAnalogInAcquisitionModeGet(hdwf):
    value = ACQMODE()
return (_FDwfAnalogInAcquisitionModeGet(hdwf, ctypes.byref(value)), value)
# Channel configuration:
_FDwfAnalogInChannelCount = _dwf.FDwfAnalogInChannelCount
_FDwfAnalogInChannelCount.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInChannelCount.restype = bool
def FDwfAnalogInChannelCount(hdwf):
value = ctypes.c_int()
return (_FDwfAnalogInChannelCount(hdwf, ctypes.byref(value)), value.value)
FDwfAnalogInChannelEnableSet = _dwf.FDwfAnalogInChannelEnableSet
FDwfAnalogInChannelEnableSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_byte]
FDwfAnalogInChannelEnableSet.restype = bool
_FDwfAnalogInChannelEnableGet = _dwf.FDwfAnalogInChannelEnableGet
_FDwfAnalogInChannelEnableGet.argtypes = [HDWF, ctypes.c_int, _types.c_byte_p]
_FDwfAnalogInChannelEnableGet.restype = bool
def FDwfAnalogInChannelEnableGet(hdwf, idxChannel):
value = ctypes.c_byte()
return (_FDwfAnalogInChannelEnableGet(hdwf, idxChannel, ctypes.byref(value)), bool(value.value))
_FDwfAnalogInChannelFilterInfo = _dwf.FDwfAnalogInChannelFilterInfo # use IsBitSet
_FDwfAnalogInChannelFilterInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInChannelFilterInfo.restype = bool
def FDwfAnalogInChannelFilterInfo(hdwf):
info = ctypes.c_int()
if not _FDwfAnalogInChannelFilterInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(FILTER(i))
return (True, supported)
FDwfAnalogInChannelFilterSet = _dwf.FDwfAnalogInChannelFilterSet
FDwfAnalogInChannelFilterSet.argtypes = [HDWF, ctypes.c_int, FILTER]
FDwfAnalogInChannelFilterSet.restype = bool
_FDwfAnalogInChannelFilterGet = _dwf.FDwfAnalogInChannelFilterGet
_FDwfAnalogInChannelFilterGet.argtypes = [HDWF, ctypes.c_int, ctypes.POINTER(FILTER)]
_FDwfAnalogInChannelFilterGet.restype = bool
def FDwfAnalogInChannelFilterGet(hdwf, idxChannel):
    value = FILTER()
return (_FDwfAnalogInChannelFilterGet(hdwf, idxChannel, ctypes.byref(value)), value)
_FDwfAnalogInChannelRangeInfo = _dwf.FDwfAnalogInChannelRangeInfo
_FDwfAnalogInChannelRangeInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p, _types.c_double_p]
_FDwfAnalogInChannelRangeInfo.restype = bool
def FDwfAnalogInChannelRangeInfo(hdwf):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_double()
return (_FDwfAnalogInChannelRangeInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
_FDwfAnalogInChannelRangeSteps = _dwf.FDwfAnalogInChannelRangeSteps
_FDwfAnalogInChannelRangeSteps.argtypes = [HDWF, ctypes.POINTER(ctypes.c_double * 32), _types.c_int_p]
_FDwfAnalogInChannelRangeSteps.restype = bool
def FDwfAnalogInChannelRangeSteps(hdwf):
steps = (ctypes.c_double * 32)()
numSteps = ctypes.c_int()
if not _FDwfAnalogInChannelRangeSteps(hdwf, ctypes.byref(steps), ctypes.byref(numSteps)):
return (False, [])
return (True, steps[:numSteps.value])
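# Example (illustrative; the actual steps are device dependent):
#   >>> ok, steps = FDwfAnalogInChannelRangeSteps(hdwf)
#   >>> steps
#   [0.5, 1.0, 2.0, 5.0, ...]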
FDwfAnalogInChannelRangeSet = _dwf.FDwfAnalogInChannelRangeSet
FDwfAnalogInChannelRangeSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_double]
FDwfAnalogInChannelRangeSet.restype = bool
_FDwfAnalogInChannelRangeGet = _dwf.FDwfAnalogInChannelRangeGet
_FDwfAnalogInChannelRangeGet.argtypes = [HDWF, ctypes.c_int, _types.c_double_p]
_FDwfAnalogInChannelRangeGet.restype = bool
def FDwfAnalogInChannelRangeGet(hdwf, idxChannel):
value = ctypes.c_double()
return (_FDwfAnalogInChannelRangeGet(hdwf, idxChannel, ctypes.byref(value)), value.value)
_FDwfAnalogInChannelOffsetInfo = _dwf.FDwfAnalogInChannelOffsetInfo
_FDwfAnalogInChannelOffsetInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p, _types.c_double_p]
_FDwfAnalogInChannelOffsetInfo.restype = bool
def FDwfAnalogInChannelOffsetInfo(hdwf):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_double()
return (_FDwfAnalogInChannelOffsetInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
FDwfAnalogInChannelOffsetSet = _dwf.FDwfAnalogInChannelOffsetSet
FDwfAnalogInChannelOffsetSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_double]
FDwfAnalogInChannelOffsetSet.restype = bool
_FDwfAnalogInChannelOffsetGet = _dwf.FDwfAnalogInChannelOffsetGet
_FDwfAnalogInChannelOffsetGet.argtypes = [HDWF, ctypes.c_int, _types.c_double_p]
_FDwfAnalogInChannelOffsetGet.restype = bool
def FDwfAnalogInChannelOffsetGet(hdwf, idxChannel):
value = ctypes.c_double()
return (_FDwfAnalogInChannelOffsetGet(hdwf, idxChannel, ctypes.byref(value)), value.value)
# Trigger configuration:
_FDwfAnalogInTriggerSourceInfo = _dwf.FDwfAnalogInTriggerSourceInfo # use IsBitSet
_FDwfAnalogInTriggerSourceInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInTriggerSourceInfo.restype = bool
def FDwfAnalogInTriggerSourceInfo(hdwf):
info = ctypes.c_int()
if not _FDwfAnalogInTriggerSourceInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(TRIGSRC(i))
return (True, supported)
FDwfAnalogInTriggerSourceSet = _dwf.FDwfAnalogInTriggerSourceSet
FDwfAnalogInTriggerSourceSet.argtypes = [HDWF, TRIGSRC]
FDwfAnalogInTriggerSourceSet.restype = bool
_FDwfAnalogInTriggerSourceGet = _dwf.FDwfAnalogInTriggerSourceGet
_FDwfAnalogInTriggerSourceGet.argtypes = [HDWF, ctypes.POINTER(TRIGSRC)]
_FDwfAnalogInTriggerSourceGet.restype = bool
def FDwfAnalogInTriggerSourceGet(hdwf):
    value = TRIGSRC()
return (_FDwfAnalogInTriggerSourceGet(hdwf, ctypes.byref(value)), value)
_FDwfAnalogInTriggerPositionInfo = _dwf.FDwfAnalogInTriggerPositionInfo
_FDwfAnalogInTriggerPositionInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p, _types.c_double_p]
_FDwfAnalogInTriggerPositionInfo.restype = bool
def FDwfAnalogInTriggerPositionInfo(hdwf):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_double()
return (_FDwfAnalogInTriggerPositionInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
FDwfAnalogInTriggerPositionSet = _dwf.FDwfAnalogInTriggerPositionSet
FDwfAnalogInTriggerPositionSet.argtypes = [HDWF, ctypes.c_double]
FDwfAnalogInTriggerPositionSet.restype = bool
_FDwfAnalogInTriggerPositionGet = _dwf.FDwfAnalogInTriggerPositionGet
_FDwfAnalogInTriggerPositionGet.argtypes = [HDWF, _types.c_double_p]
_FDwfAnalogInTriggerPositionGet.restype = bool
def FDwfAnalogInTriggerPositionGet(hdwf):
value = ctypes.c_double()
return (_FDwfAnalogInTriggerPositionGet(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInTriggerPositionStatus = _dwf.FDwfAnalogInTriggerPositionStatus
_FDwfAnalogInTriggerPositionStatus.argtypes = [HDWF, _types.c_double_p]
_FDwfAnalogInTriggerPositionStatus.restype = bool
def FDwfAnalogInTriggerPositionStatus(hdwf):
value = ctypes.c_double()
return (_FDwfAnalogInTriggerPositionStatus(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInTriggerAutoTimeoutInfo = _dwf.FDwfAnalogInTriggerAutoTimeoutInfo
_FDwfAnalogInTriggerAutoTimeoutInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p, _types.c_double_p]
_FDwfAnalogInTriggerAutoTimeoutInfo.restype = bool
def FDwfAnalogInTriggerAutoTimeoutInfo(hdwf):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_double()
return (_FDwfAnalogInTriggerAutoTimeoutInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
FDwfAnalogInTriggerAutoTimeoutSet = _dwf.FDwfAnalogInTriggerAutoTimeoutSet
FDwfAnalogInTriggerAutoTimeoutSet.argtypes = [HDWF, ctypes.c_double]
FDwfAnalogInTriggerAutoTimeoutSet.restype = bool
_FDwfAnalogInTriggerAutoTimeoutGet = _dwf.FDwfAnalogInTriggerAutoTimeoutGet
_FDwfAnalogInTriggerAutoTimeoutGet.argtypes = [HDWF, _types.c_double_p]
_FDwfAnalogInTriggerAutoTimeoutGet.restype = bool
def FDwfAnalogInTriggerAutoTimeoutGet(hdwf):
value = ctypes.c_double()
return (_FDwfAnalogInTriggerAutoTimeoutGet(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInTriggerHoldOffInfo = _dwf.FDwfAnalogInTriggerHoldOffInfo
_FDwfAnalogInTriggerHoldOffInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p, _types.c_double_p]
_FDwfAnalogInTriggerHoldOffInfo.restype = bool
def FDwfAnalogInTriggerHoldOffInfo(hdwf):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_double()
return (_FDwfAnalogInTriggerHoldOffInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
FDwfAnalogInTriggerHoldOffSet = _dwf.FDwfAnalogInTriggerHoldOffSet
FDwfAnalogInTriggerHoldOffSet.argtypes = [HDWF, ctypes.c_double]
FDwfAnalogInTriggerHoldOffSet.restype = bool
_FDwfAnalogInTriggerHoldOffGet = _dwf.FDwfAnalogInTriggerHoldOffGet
_FDwfAnalogInTriggerHoldOffGet.argtypes = [HDWF, _types.c_double_p]
_FDwfAnalogInTriggerHoldOffGet.restype = bool
def FDwfAnalogInTriggerHoldOffGet(hdwf):
value = ctypes.c_double()
return (_FDwfAnalogInTriggerHoldOffGet(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInTriggerTypeInfo = _dwf.FDwfAnalogInTriggerTypeInfo # use IsBitSet
_FDwfAnalogInTriggerTypeInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInTriggerTypeInfo.restype = bool
def FDwfAnalogInTriggerTypeInfo(hdwf):
info = ctypes.c_int()
if not _FDwfAnalogInTriggerTypeInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(TRIGTYPE(i))
return (True, supported)
FDwfAnalogInTriggerTypeSet = _dwf.FDwfAnalogInTriggerTypeSet
FDwfAnalogInTriggerTypeSet.argtypes = [HDWF, TRIGTYPE]
FDwfAnalogInTriggerTypeSet.restype = bool
_FDwfAnalogInTriggerTypeGet = _dwf.FDwfAnalogInTriggerTypeGet
_FDwfAnalogInTriggerTypeGet.argtypes = [HDWF, ctypes.POINTER(TRIGTYPE)]
_FDwfAnalogInTriggerTypeGet.restype = bool
def FDwfAnalogInTriggerTypeGet(hdwf):
    value = TRIGTYPE()
return (_FDwfAnalogInTriggerTypeGet(hdwf, ctypes.byref(value)), value)
_FDwfAnalogInTriggerChannelInfo = _dwf.FDwfAnalogInTriggerChannelInfo
_FDwfAnalogInTriggerChannelInfo.argtypes = [HDWF, _types.c_int_p, _types.c_int_p]
_FDwfAnalogInTriggerChannelInfo.restype = bool
def FDwfAnalogInTriggerChannelInfo(hdwf):
min = ctypes.c_int()
max = ctypes.c_int()
return (_FDwfAnalogInTriggerChannelInfo(hdwf, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogInTriggerChannelSet = _dwf.FDwfAnalogInTriggerChannelSet
FDwfAnalogInTriggerChannelSet.argtypes = [HDWF, ctypes.c_int]
FDwfAnalogInTriggerChannelSet.restype = bool
_FDwfAnalogInTriggerChannelGet = _dwf.FDwfAnalogInTriggerChannelGet
_FDwfAnalogInTriggerChannelGet.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInTriggerChannelGet.restype = bool
def FDwfAnalogInTriggerChannelGet(hdwf):
value = ctypes.c_int()
return (_FDwfAnalogInTriggerChannelGet(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInTriggerFilterInfo = _dwf.FDwfAnalogInTriggerFilterInfo # use IsBitSet
_FDwfAnalogInTriggerFilterInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInTriggerFilterInfo.restype = bool
def FDwfAnalogInTriggerFilterInfo(hdwf):
info = ctypes.c_int()
if not _FDwfAnalogInTriggerFilterInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(FILTER(i))
return (True, supported)
FDwfAnalogInTriggerFilterSet = _dwf.FDwfAnalogInTriggerFilterSet
FDwfAnalogInTriggerFilterSet.argtypes = [HDWF, FILTER]
FDwfAnalogInTriggerFilterSet.restype = bool
_FDwfAnalogInTriggerFilterGet = _dwf.FDwfAnalogInTriggerFilterGet
_FDwfAnalogInTriggerFilterGet.argtypes = [HDWF, ctypes.POINTER(FILTER)]
_FDwfAnalogInTriggerFilterGet.restype = bool
def FDwfAnalogInTriggerFilterGet(hdwf):
    value = FILTER()
return (_FDwfAnalogInTriggerFilterGet(hdwf, ctypes.byref(value)), value)
_FDwfAnalogInTriggerLevelInfo = _dwf.FDwfAnalogInTriggerLevelInfo
_FDwfAnalogInTriggerLevelInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p, _types.c_double_p]
_FDwfAnalogInTriggerLevelInfo.restype = bool
def FDwfAnalogInTriggerLevelInfo(hdwf):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_double()
return (_FDwfAnalogInTriggerLevelInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
FDwfAnalogInTriggerLevelSet = _dwf.FDwfAnalogInTriggerLevelSet
FDwfAnalogInTriggerLevelSet.argtypes = [HDWF, ctypes.c_double]
FDwfAnalogInTriggerLevelSet.restype = bool
_FDwfAnalogInTriggerLevelGet = _dwf.FDwfAnalogInTriggerLevelGet
_FDwfAnalogInTriggerLevelGet.argtypes = [HDWF, _types.c_double_p]
_FDwfAnalogInTriggerLevelGet.restype = bool
def FDwfAnalogInTriggerLevelGet(hdwf):
value = ctypes.c_double()
return (_FDwfAnalogInTriggerLevelGet(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInTriggerHysteresisInfo = _dwf.FDwfAnalogInTriggerHysteresisInfo
_FDwfAnalogInTriggerHysteresisInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p, _types.c_double_p]
_FDwfAnalogInTriggerHysteresisInfo.restype = bool
def FDwfAnalogInTriggerHysteresisInfo(hdwf):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_double()
return (_FDwfAnalogInTriggerHysteresisInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
FDwfAnalogInTriggerHysteresisSet = _dwf.FDwfAnalogInTriggerHysteresisSet
FDwfAnalogInTriggerHysteresisSet.argtypes = [HDWF, ctypes.c_double]
FDwfAnalogInTriggerHysteresisSet.restype = bool
_FDwfAnalogInTriggerHysteresisGet = _dwf.FDwfAnalogInTriggerHysteresisGet
_FDwfAnalogInTriggerHysteresisGet.argtypes = [HDWF, _types.c_double_p]
_FDwfAnalogInTriggerHysteresisGet.restype = bool
def FDwfAnalogInTriggerHysteresisGet(hdwf):
value = ctypes.c_double()
return (_FDwfAnalogInTriggerHysteresisGet(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInTriggerConditionInfo = _dwf.FDwfAnalogInTriggerConditionInfo # use IsBitSet
_FDwfAnalogInTriggerConditionInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInTriggerConditionInfo.restype = bool
def FDwfAnalogInTriggerConditionInfo(hdwf):
info = ctypes.c_int()
if not _FDwfAnalogInTriggerConditionInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(TRIGCOND(i))
return (True, supported)
FDwfAnalogInTriggerConditionSet = _dwf.FDwfAnalogInTriggerConditionSet
FDwfAnalogInTriggerConditionSet.argtypes = [HDWF, TRIGCOND]
FDwfAnalogInTriggerConditionSet.restype = bool
_FDwfAnalogInTriggerConditionGet = _dwf.FDwfAnalogInTriggerConditionGet
_FDwfAnalogInTriggerConditionGet.argtypes = [HDWF, ctypes.POINTER(TRIGCOND)]
_FDwfAnalogInTriggerConditionGet.restype = bool
def FDwfAnalogInTriggerConditionGet(hdwf):
    value = TRIGCOND()
return (_FDwfAnalogInTriggerConditionGet(hdwf, ctypes.byref(value)), value)
_FDwfAnalogInTriggerLengthInfo = _dwf.FDwfAnalogInTriggerLengthInfo
_FDwfAnalogInTriggerLengthInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p, _types.c_double_p]
_FDwfAnalogInTriggerLengthInfo.restype = bool
def FDwfAnalogInTriggerLengthInfo(hdwf):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_double()
return (_FDwfAnalogInTriggerLengthInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
FDwfAnalogInTriggerLengthSet = _dwf.FDwfAnalogInTriggerLengthSet
FDwfAnalogInTriggerLengthSet.argtypes = [HDWF, ctypes.c_double]
FDwfAnalogInTriggerLengthSet.restype = bool
_FDwfAnalogInTriggerLengthGet = _dwf.FDwfAnalogInTriggerLengthGet
_FDwfAnalogInTriggerLengthGet.argtypes = [HDWF, _types.c_double_p]
_FDwfAnalogInTriggerLengthGet.restype = bool
def FDwfAnalogInTriggerLengthGet(hdwf):
value = ctypes.c_double()
return (_FDwfAnalogInTriggerLengthGet(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogInTriggerLengthConditionInfo = _dwf.FDwfAnalogInTriggerLengthConditionInfo # use IsBitSet
_FDwfAnalogInTriggerLengthConditionInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogInTriggerLengthConditionInfo.restype = bool
def FDwfAnalogInTriggerLengthConditionInfo(hdwf):
info = ctypes.c_int()
if not _FDwfAnalogInTriggerLengthConditionInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(TRIGLEN(i))
return (True, supported)
FDwfAnalogInTriggerLengthConditionSet = _dwf.FDwfAnalogInTriggerLengthConditionSet
FDwfAnalogInTriggerLengthConditionSet.argtypes = [HDWF, TRIGLEN]
FDwfAnalogInTriggerLengthConditionSet.restype = bool
_FDwfAnalogInTriggerLengthConditionGet = _dwf.FDwfAnalogInTriggerLengthConditionGet
_FDwfAnalogInTriggerLengthConditionGet.argtypes = [HDWF, ctypes.POINTER(TRIGLEN)]
_FDwfAnalogInTriggerLengthConditionGet.restype = bool
def FDwfAnalogInTriggerLengthConditionGet(hdwf):
    value = TRIGLEN()
return (_FDwfAnalogInTriggerLengthConditionGet(hdwf, ctypes.byref(value)), value)
# ANALOG OUT INSTRUMENT FUNCTIONS
# Configuration:
_FDwfAnalogOutCount = _dwf.FDwfAnalogOutCount
_FDwfAnalogOutCount.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogOutCount.restype = bool
def FDwfAnalogOutCount(hdwf):
value = ctypes.c_int()
return (_FDwfAnalogOutCount(hdwf, ctypes.byref(value)), value.value)
FDwfAnalogOutMasterSet = _dwf.FDwfAnalogOutMasterSet
FDwfAnalogOutMasterSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_int]
FDwfAnalogOutMasterSet.restype = bool
_FDwfAnalogOutMasterGet = _dwf.FDwfAnalogOutMasterGet
_FDwfAnalogOutMasterGet.argtypes = [HDWF, ctypes.c_int, _types.c_int_p]
_FDwfAnalogOutMasterGet.restype = bool
def FDwfAnalogOutMasterGet(hdwf, idxChannel):
value = ctypes.c_int()
return (_FDwfAnalogOutMasterGet(hdwf, idxChannel, ctypes.byref(value)), value.value)
_FDwfAnalogOutTriggerSourceInfo = _dwf.FDwfAnalogOutTriggerSourceInfo # use IsBitSet
_FDwfAnalogOutTriggerSourceInfo.argtypes = [HDWF, ctypes.c_int, _types.c_int_p]
_FDwfAnalogOutTriggerSourceInfo.restype = bool
def FDwfAnalogOutTriggerSourceInfo(hdwf, idxChannel):
info = ctypes.c_int()
if not _FDwfAnalogOutTriggerSourceInfo(hdwf, idxChannel, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(TRIGSRC(i))
return (True, supported)
FDwfAnalogOutTriggerSourceSet = _dwf.FDwfAnalogOutTriggerSourceSet
FDwfAnalogOutTriggerSourceSet.argtypes = [HDWF, ctypes.c_int, TRIGSRC]
FDwfAnalogOutTriggerSourceSet.restype = bool
_FDwfAnalogOutTriggerSourceGet = _dwf.FDwfAnalogOutTriggerSourceGet
_FDwfAnalogOutTriggerSourceGet.argtypes = [HDWF, ctypes.c_int, ctypes.POINTER(TRIGSRC)]
_FDwfAnalogOutTriggerSourceGet.restype = bool
def FDwfAnalogOutTriggerSourceGet(hdwf, idxChannel):
value = TRIGSRC()
return (_FDwfAnalogOutTriggerSourceGet(hdwf, idxChannel, ctypes.byref(value)), value)
_FDwfAnalogOutRunInfo = _dwf.FDwfAnalogOutRunInfo
_FDwfAnalogOutRunInfo.argtypes = [HDWF, ctypes.c_int, _types.c_double_p, _types.c_double_p]
_FDwfAnalogOutRunInfo.restype = bool
def FDwfAnalogOutRunInfo(hdwf, idxChannel):
min = ctypes.c_double()
max = ctypes.c_double()
return (_FDwfAnalogOutRunInfo(hdwf, idxChannel, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogOutRunSet = _dwf.FDwfAnalogOutRunSet
FDwfAnalogOutRunSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_double]
FDwfAnalogOutRunSet.restype = bool
_FDwfAnalogOutRunGet = _dwf.FDwfAnalogOutRunGet
_FDwfAnalogOutRunGet.argtypes = [HDWF, ctypes.c_int, _types.c_double_p]
_FDwfAnalogOutRunGet.restype = bool
def FDwfAnalogOutRunGet(hdwf, idxChannel):
value = ctypes.c_double()
return (_FDwfAnalogOutRunGet(hdwf, idxChannel, ctypes.byref(value)), value.value)
_FDwfAnalogOutRunStatus = _dwf.FDwfAnalogOutRunStatus
_FDwfAnalogOutRunStatus.argtypes = [HDWF, ctypes.c_int, _types.c_double_p]
_FDwfAnalogOutRunStatus.restype = bool
def FDwfAnalogOutRunStatus(hdwf, idxChannel):
value = ctypes.c_double()
return (_FDwfAnalogOutRunStatus(hdwf, idxChannel, ctypes.byref(value)), value.value)
_FDwfAnalogOutWaitInfo = _dwf.FDwfAnalogOutWaitInfo
_FDwfAnalogOutWaitInfo.argtypes = [HDWF, ctypes.c_int, _types.c_double_p, _types.c_double_p]
_FDwfAnalogOutWaitInfo.restype = bool
def FDwfAnalogOutWaitInfo(hdwf, idxChannel):
min = ctypes.c_double()
max = ctypes.c_double()
return (_FDwfAnalogOutWaitInfo(hdwf, idxChannel, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogOutWaitSet = _dwf.FDwfAnalogOutWaitSet
FDwfAnalogOutWaitSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_double]
FDwfAnalogOutWaitSet.restype = bool
_FDwfAnalogOutWaitGet = _dwf.FDwfAnalogOutWaitGet
_FDwfAnalogOutWaitGet.argtypes = [HDWF, ctypes.c_int, _types.c_double_p]
_FDwfAnalogOutWaitGet.restype = bool
def FDwfAnalogOutWaitGet(hdwf, idxChannel):
value = ctypes.c_double()
return (_FDwfAnalogOutWaitGet(hdwf, idxChannel, ctypes.byref(value)), value.value)
_FDwfAnalogOutRepeatInfo = _dwf.FDwfAnalogOutRepeatInfo
_FDwfAnalogOutRepeatInfo.argtypes = [HDWF, ctypes.c_int, _types.c_int_p, _types.c_int_p]
_FDwfAnalogOutRepeatInfo.restype = bool
def FDwfAnalogOutRepeatInfo(hdwf, idxChannel):
    min = ctypes.c_int()
    max = ctypes.c_int()
    return (_FDwfAnalogOutRepeatInfo(hdwf, idxChannel, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogOutRepeatSet = _dwf.FDwfAnalogOutRepeatSet
FDwfAnalogOutRepeatSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_int]
FDwfAnalogOutRepeatSet.restype = bool
_FDwfAnalogOutRepeatGet = _dwf.FDwfAnalogOutRepeatGet
_FDwfAnalogOutRepeatGet.argtypes = [HDWF, ctypes.c_int, _types.c_int_p]
_FDwfAnalogOutRepeatGet.restype = bool
def FDwfAnalogOutRepeatGet(hdwf, idxChannel):
value = ctypes.c_int()
return (_FDwfAnalogOutRepeatGet(hdwf, idxChannel, ctypes.byref(value)), value.value)
_FDwfAnalogOutRepeatStatus = _dwf.FDwfAnalogOutRepeatStatus
_FDwfAnalogOutRepeatStatus.argtypes = [HDWF, ctypes.c_int, _types.c_int_p]
_FDwfAnalogOutRepeatStatus.restype = bool
def FDwfAnalogOutRepeatStatus(hdwf, idxChannel):
value = ctypes.c_int()
return (_FDwfAnalogOutRepeatStatus(hdwf, idxChannel, ctypes.byref(value)), value.value)
FDwfAnalogOutRepeatTriggerSet = _dwf.FDwfAnalogOutRepeatTriggerSet
FDwfAnalogOutRepeatTriggerSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_byte]
FDwfAnalogOutRepeatTriggerSet.restype = bool
_FDwfAnalogOutRepeatTriggerGet = _dwf.FDwfAnalogOutRepeatTriggerGet
_FDwfAnalogOutRepeatTriggerGet.argtypes = [HDWF, ctypes.c_int, _types.c_byte_p]
_FDwfAnalogOutRepeatTriggerGet.restype = bool
def FDwfAnalogOutRepeatTriggerGet(hdwf, idxChannel):
value = ctypes.c_byte()
return (_FDwfAnalogOutRepeatTriggerGet(hdwf, idxChannel, ctypes.byref(value)), bool(value.value))
_FDwfAnalogOutNodeInfo = _dwf.FDwfAnalogOutNodeInfo # use IsBitSet
_FDwfAnalogOutNodeInfo.argtypes = [HDWF, ctypes.c_int, _types.c_int_p]
_FDwfAnalogOutNodeInfo.restype = bool
def FDwfAnalogOutNodeInfo(hdwf, idxChannel):
info = ctypes.c_int()
if not _FDwfAnalogOutNodeInfo(hdwf, idxChannel, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(AnalogOutNode(i))
return (True, supported)
FDwfAnalogOutNodeEnableSet = _dwf.FDwfAnalogOutNodeEnableSet
FDwfAnalogOutNodeEnableSet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, ctypes.c_byte]
FDwfAnalogOutNodeEnableSet.restype = bool
_FDwfAnalogOutNodeEnableGet = _dwf.FDwfAnalogOutNodeEnableGet
_FDwfAnalogOutNodeEnableGet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_byte_p]
_FDwfAnalogOutNodeEnableGet.restype = bool
def FDwfAnalogOutNodeEnableGet(hdwf, idxChannel, node):
value = ctypes.c_byte()
    return (_FDwfAnalogOutNodeEnableGet(hdwf, idxChannel, node, ctypes.byref(value)), bool(value.value))
_FDwfAnalogOutNodeFunctionInfo = _dwf.FDwfAnalogOutNodeFunctionInfo # use IsBitSet
_FDwfAnalogOutNodeFunctionInfo.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_int_p]
_FDwfAnalogOutNodeFunctionInfo.restype = bool
def FDwfAnalogOutNodeFunctionInfo(hdwf, idxChannel, node):
    info = ctypes.c_int()
    if not _FDwfAnalogOutNodeFunctionInfo(hdwf, idxChannel, node, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(FUNC(i))
return (True, supported)
FDwfAnalogOutNodeFunctionSet = _dwf.FDwfAnalogOutNodeFunctionSet
FDwfAnalogOutNodeFunctionSet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, FUNC]
FDwfAnalogOutNodeFunctionSet.restype = bool
_FDwfAnalogOutNodeFunctionGet = _dwf.FDwfAnalogOutNodeFunctionGet
_FDwfAnalogOutNodeFunctionGet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, ctypes.POINTER(FUNC)]
_FDwfAnalogOutNodeFunctionGet.restype = bool
def FDwfAnalogOutNodeFunctionGet(hdwf, idxChannel, node):
value = FUNC()
return (_FDwfAnalogOutNodeFunctionGet(hdwf, idxChannel, node, ctypes.byref(value)), value)
_FDwfAnalogOutNodeFrequencyInfo = _dwf.FDwfAnalogOutNodeFrequencyInfo
_FDwfAnalogOutNodeFrequencyInfo.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p, _types.c_double_p]
_FDwfAnalogOutNodeFrequencyInfo.restype = bool
def FDwfAnalogOutNodeFrequencyInfo(hdwf, idxChannel, node):
min = ctypes.c_double()
max = ctypes.c_double()
return (_FDwfAnalogOutNodeFrequencyInfo(hdwf, idxChannel, node, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogOutNodeFrequencySet = _dwf.FDwfAnalogOutNodeFrequencySet
FDwfAnalogOutNodeFrequencySet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, ctypes.c_double]
FDwfAnalogOutNodeFrequencySet.restype = bool
_FDwfAnalogOutNodeFrequencyGet = _dwf.FDwfAnalogOutNodeFrequencyGet
_FDwfAnalogOutNodeFrequencyGet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p]
_FDwfAnalogOutNodeFrequencyGet.restype = bool
def FDwfAnalogOutNodeFrequencyGet(hdwf, idxChannel, node):
value = ctypes.c_double()
return (_FDwfAnalogOutNodeFrequencyGet(hdwf, idxChannel, node, ctypes.byref(value)), value.value)
# Carrier Amplitude or Modulation Index
_FDwfAnalogOutNodeAmplitudeInfo = _dwf.FDwfAnalogOutNodeAmplitudeInfo
_FDwfAnalogOutNodeAmplitudeInfo.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p, _types.c_double_p]
_FDwfAnalogOutNodeAmplitudeInfo.restype = bool
def FDwfAnalogOutNodeAmplitudeInfo(hdwf, idxChannel, node):
min = ctypes.c_double()
max = ctypes.c_double()
return (_FDwfAnalogOutNodeAmplitudeInfo(hdwf, idxChannel, node, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogOutNodeAmplitudeSet = _dwf.FDwfAnalogOutNodeAmplitudeSet
FDwfAnalogOutNodeAmplitudeSet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, ctypes.c_double]
FDwfAnalogOutNodeAmplitudeSet.restype = bool
_FDwfAnalogOutNodeAmplitudeGet = _dwf.FDwfAnalogOutNodeAmplitudeGet
_FDwfAnalogOutNodeAmplitudeGet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p]
_FDwfAnalogOutNodeAmplitudeGet.restype = bool
def FDwfAnalogOutNodeAmplitudeGet(hdwf, idxChannel, node):
value = ctypes.c_double()
return (_FDwfAnalogOutNodeAmplitudeGet(hdwf, idxChannel, node, ctypes.byref(value)), value.value)
_FDwfAnalogOutNodeOffsetInfo = _dwf.FDwfAnalogOutNodeOffsetInfo
_FDwfAnalogOutNodeOffsetInfo.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p, _types.c_double_p]
_FDwfAnalogOutNodeOffsetInfo.restype = bool
def FDwfAnalogOutNodeOffsetInfo(hdwf, idxChannel, node):
min = ctypes.c_double()
max = ctypes.c_double()
return (_FDwfAnalogOutNodeOffsetInfo(hdwf, idxChannel, node, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogOutNodeOffsetSet = _dwf.FDwfAnalogOutNodeOffsetSet
FDwfAnalogOutNodeOffsetSet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, ctypes.c_double]
FDwfAnalogOutNodeOffsetSet.restype = bool
_FDwfAnalogOutNodeOffsetGet = _dwf.FDwfAnalogOutNodeOffsetGet
_FDwfAnalogOutNodeOffsetGet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p]
_FDwfAnalogOutNodeOffsetGet.restype = bool
def FDwfAnalogOutNodeOffsetGet(hdwf, idxChannel, node):
value = ctypes.c_double()
return (_FDwfAnalogOutNodeOffsetGet(hdwf, idxChannel, node, ctypes.byref(value)), value.value)
_FDwfAnalogOutNodeSymmetryInfo = _dwf.FDwfAnalogOutNodeSymmetryInfo
_FDwfAnalogOutNodeSymmetryInfo.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p, _types.c_double_p]
_FDwfAnalogOutNodeSymmetryInfo.restype = bool
def FDwfAnalogOutNodeSymmetryInfo(hdwf, idxChannel, node):
min = ctypes.c_double()
max = ctypes.c_double()
return (_FDwfAnalogOutNodeSymmetryInfo(hdwf, idxChannel, node, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogOutNodeSymmetrySet = _dwf.FDwfAnalogOutNodeSymmetrySet
FDwfAnalogOutNodeSymmetrySet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, ctypes.c_double]
FDwfAnalogOutNodeSymmetrySet.restype = bool
_FDwfAnalogOutNodeSymmetryGet = _dwf.FDwfAnalogOutNodeSymmetryGet
_FDwfAnalogOutNodeSymmetryGet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p]
_FDwfAnalogOutNodeSymmetryGet.restype = bool
def FDwfAnalogOutNodeSymmetryGet(hdwf, idxChannel, node):
value = ctypes.c_double()
return (_FDwfAnalogOutNodeSymmetryGet(hdwf, idxChannel, node, ctypes.byref(value)), value.value)
_FDwfAnalogOutNodePhaseInfo = _dwf.FDwfAnalogOutNodePhaseInfo
_FDwfAnalogOutNodePhaseInfo.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p, _types.c_double_p]
_FDwfAnalogOutNodePhaseInfo.restype = bool
def FDwfAnalogOutNodePhaseInfo(hdwf, idxChannel, node):
min = ctypes.c_double()
max = ctypes.c_double()
return (_FDwfAnalogOutNodePhaseInfo(hdwf, idxChannel, node, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogOutNodePhaseSet = _dwf.FDwfAnalogOutNodePhaseSet
FDwfAnalogOutNodePhaseSet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, ctypes.c_double]
FDwfAnalogOutNodePhaseSet.restype = bool
_FDwfAnalogOutNodePhaseGet = _dwf.FDwfAnalogOutNodePhaseGet
_FDwfAnalogOutNodePhaseGet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p]
_FDwfAnalogOutNodePhaseGet.restype = bool
def FDwfAnalogOutNodePhaseGet(hdwf, idxChannel, node):
value = ctypes.c_double()
return (_FDwfAnalogOutNodePhaseGet(hdwf, idxChannel, node, ctypes.byref(value)), value.value)
_FDwfAnalogOutNodeDataInfo = _dwf.FDwfAnalogOutNodeDataInfo
_FDwfAnalogOutNodeDataInfo.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_int_p, _types.c_int_p]
_FDwfAnalogOutNodeDataInfo.restype = bool
def FDwfAnalogOutNodeDataInfo(hdwf, idxChannel, node):
min = ctypes.c_int()
max = ctypes.c_int()
return (_FDwfAnalogOutNodeDataInfo(hdwf, idxChannel, node, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfAnalogOutNodeDataSet = _dwf.FDwfAnalogOutNodeDataSet
FDwfAnalogOutNodeDataSet.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p, ctypes.c_int]
FDwfAnalogOutNodeDataSet.restype = bool
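# Illustrative sketch (not from the original source): the data pointer is a
# ctypes double array built from the Python samples, e.g.:
#   samples = [0.0, 0.5, 1.0, 0.5]                      # hypothetical waveform
#   buf = (ctypes.c_double * len(samples))(*samples)
#   FDwfAnalogOutNodeDataSet(hdwf, 0, AnalogOutNode(0), buf, len(samples))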
# needed for the Electronics Explorer; not applicable to the Analog Discovery
FDwfAnalogOutCustomAMFMEnableSet = _dwf.FDwfAnalogOutCustomAMFMEnableSet
FDwfAnalogOutCustomAMFMEnableSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_byte]
FDwfAnalogOutCustomAMFMEnableSet.restype = bool
_FDwfAnalogOutCustomAMFMEnableGet = _dwf.FDwfAnalogOutCustomAMFMEnableGet
_FDwfAnalogOutCustomAMFMEnableGet.argtypes = [HDWF, ctypes.c_int, _types.c_byte_p]
_FDwfAnalogOutCustomAMFMEnableGet.restype = bool
def FDwfAnalogOutCustomAMFMEnableGet(hdwf, idxChannel):
value = ctypes.c_byte()
return (_FDwfAnalogOutCustomAMFMEnableGet(hdwf, idxChannel, ctypes.byref(value)), bool(value.value))
# Control:
FDwfAnalogOutReset = _dwf.FDwfAnalogOutReset
FDwfAnalogOutReset.argtypes = [HDWF, ctypes.c_int]
FDwfAnalogOutReset.restype = bool
FDwfAnalogOutConfigure = _dwf.FDwfAnalogOutConfigure
FDwfAnalogOutConfigure.argtypes = [HDWF, ctypes.c_int, ctypes.c_byte]
FDwfAnalogOutConfigure.restype = bool
_FDwfAnalogOutStatus = _dwf.FDwfAnalogOutStatus
_FDwfAnalogOutStatus.argtypes = [HDWF, ctypes.c_int, ctypes.POINTER(DwfState)]
_FDwfAnalogOutStatus.restype = bool
def FDwfAnalogOutStatus(hdwf, idxChannel):
value = DwfState()
return (_FDwfAnalogOutStatus(hdwf, idxChannel, ctypes.byref(value)), value)
_FDwfAnalogOutNodePlayStatus = _dwf.FDwfAnalogOutNodePlayStatus
_FDwfAnalogOutNodePlayStatus.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_int_p, _types.c_int_p, _types.c_int_p]
_FDwfAnalogOutNodePlayStatus.restype = bool
def FDwfAnalogOutNodePlayStatus(hdwf, idxChannel, node):
val_a = ctypes.c_int()
val_b = ctypes.c_int()
val_c = ctypes.c_int()
return (_FDwfAnalogOutNodePlayStatus(hdwf, idxChannel, node, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
FDwfAnalogOutNodePlayData = _dwf.FDwfAnalogOutNodePlayData
FDwfAnalogOutNodePlayData.argtypes = [HDWF, ctypes.c_int, AnalogOutNode, _types.c_double_p, ctypes.c_int]
FDwfAnalogOutNodePlayData.restype = bool
# ANALOG IO INSTRUMENT FUNCTIONS
# Control:
FDwfAnalogIOReset = _dwf.FDwfAnalogIOReset
FDwfAnalogIOReset.argtypes = [HDWF]
FDwfAnalogIOReset.restype = bool
FDwfAnalogIOConfigure = _dwf.FDwfAnalogIOConfigure
FDwfAnalogIOConfigure.argtypes = [HDWF]
FDwfAnalogIOConfigure.restype = bool
FDwfAnalogIOStatus = _dwf.FDwfAnalogIOStatus
FDwfAnalogIOStatus.argtypes = [HDWF]
FDwfAnalogIOStatus.restype = bool
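# Typical polling flow (hedged sketch): latch the readings with
# FDwfAnalogIOStatus, then query individual channel nodes, e.g.:
#   if FDwfAnalogIOStatus(hdwf):
#       ok, volts = FDwfAnalogIOChannelNodeStatus(hdwf, 0, 0)  # indices are hypothetical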
# Configure:
_FDwfAnalogIOEnableInfo = _dwf.FDwfAnalogIOEnableInfo
_FDwfAnalogIOEnableInfo.argtypes = [HDWF, _types.c_byte_p, _types.c_byte_p]
_FDwfAnalogIOEnableInfo.restype = bool
def FDwfAnalogIOEnableInfo(hdwf):
val_a = ctypes.c_byte()
val_b = ctypes.c_byte()
return (_FDwfAnalogIOEnableInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b)), bool(val_a.value), bool(val_b.value))
FDwfAnalogIOEnableSet = _dwf.FDwfAnalogIOEnableSet
FDwfAnalogIOEnableSet.argtypes = [HDWF, ctypes.c_byte]
FDwfAnalogIOEnableSet.restype = bool
_FDwfAnalogIOEnableGet = _dwf.FDwfAnalogIOEnableGet
_FDwfAnalogIOEnableGet.argtypes = [HDWF, _types.c_byte_p]
_FDwfAnalogIOEnableGet.restype = bool
def FDwfAnalogIOEnableGet(hdwf):
value = ctypes.c_byte()
return (_FDwfAnalogIOEnableGet(hdwf, ctypes.byref(value)), bool(value.value))
_FDwfAnalogIOEnableStatus = _dwf.FDwfAnalogIOEnableStatus
_FDwfAnalogIOEnableStatus.argtypes = [HDWF, _types.c_byte_p]
_FDwfAnalogIOEnableStatus.restype = bool
def FDwfAnalogIOEnableStatus(hdwf):
value = ctypes.c_byte()
return (_FDwfAnalogIOEnableStatus(hdwf, ctypes.byref(value)), bool(value.value))
_FDwfAnalogIOChannelCount = _dwf.FDwfAnalogIOChannelCount
_FDwfAnalogIOChannelCount.argtypes = [HDWF, _types.c_int_p]
_FDwfAnalogIOChannelCount.restype = bool
def FDwfAnalogIOChannelCount(hdwf):
value = ctypes.c_int()
return (_FDwfAnalogIOChannelCount(hdwf, ctypes.byref(value)), value.value)
_FDwfAnalogIOChannelName = _dwf.FDwfAnalogIOChannelName
_FDwfAnalogIOChannelName.argtypes = [HDWF, ctypes.c_int, ctypes.POINTER(ctypes.c_char * 32), ctypes.POINTER(ctypes.c_char * 16)]
_FDwfAnalogIOChannelName.restype = bool
def FDwfAnalogIOChannelName(hdwf, idxChannel):
val_a = ctypes.create_string_buffer(32)
val_b = ctypes.create_string_buffer(16)
return (_FDwfAnalogIOChannelName(hdwf, idxChannel, ctypes.byref(val_a), ctypes.byref(val_b)), val_a.value, val_b.value)
_FDwfAnalogIOChannelInfo = _dwf.FDwfAnalogIOChannelInfo
_FDwfAnalogIOChannelInfo.argtypes = [HDWF, ctypes.c_int, _types.c_int_p]
_FDwfAnalogIOChannelInfo.restype = bool
def FDwfAnalogIOChannelInfo(hdwf, idxChannel):
value = ctypes.c_int()
return (_FDwfAnalogIOChannelInfo(hdwf, idxChannel, ctypes.byref(value)), value.value)
_FDwfAnalogIOChannelNodeName = _dwf.FDwfAnalogIOChannelNodeName
_FDwfAnalogIOChannelNodeName.argtypes = [HDWF, ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_char * 32), ctypes.POINTER(ctypes.c_char * 16)]
_FDwfAnalogIOChannelNodeName.restype = bool
def FDwfAnalogIOChannelNodeName(hdwf, idxChannel, idxNode):
val_a = ctypes.create_string_buffer(32)
val_b = ctypes.create_string_buffer(16)
return (_FDwfAnalogIOChannelNodeName(hdwf, idxChannel, idxNode, ctypes.byref(val_a), ctypes.byref(val_b)), val_a.value, val_b.value)
_FDwfAnalogIOChannelNodeInfo = _dwf.FDwfAnalogIOChannelNodeInfo
_FDwfAnalogIOChannelNodeInfo.argtypes = [HDWF, ctypes.c_int, ctypes.c_int, ctypes.POINTER(ANALOGIO)]
_FDwfAnalogIOChannelNodeInfo.restype = bool
def FDwfAnalogIOChannelNodeInfo(hdwf, idxChannel, idxNode):
value = ANALOGIO()
return (_FDwfAnalogIOChannelNodeInfo(hdwf, idxChannel, idxNode, ctypes.byref(value)), value)
_FDwfAnalogIOChannelNodeSetInfo = _dwf.FDwfAnalogIOChannelNodeSetInfo
_FDwfAnalogIOChannelNodeSetInfo.argtypes = [HDWF, ctypes.c_int, ctypes.c_int, _types.c_double_p, _types.c_double_p, _types.c_int_p]
_FDwfAnalogIOChannelNodeSetInfo.restype = bool
def FDwfAnalogIOChannelNodeSetInfo(hdwf, idxChannel, idxNode):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_int()
return (_FDwfAnalogIOChannelNodeSetInfo(hdwf, idxChannel, idxNode, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
FDwfAnalogIOChannelNodeSet = _dwf.FDwfAnalogIOChannelNodeSet
FDwfAnalogIOChannelNodeSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_int, ctypes.c_double]
FDwfAnalogIOChannelNodeSet.restype = bool
_FDwfAnalogIOChannelNodeGet = _dwf.FDwfAnalogIOChannelNodeGet
_FDwfAnalogIOChannelNodeGet.argtypes = [HDWF, ctypes.c_int, ctypes.c_int, _types.c_double_p]
_FDwfAnalogIOChannelNodeGet.restype = bool
def FDwfAnalogIOChannelNodeGet(hdwf, idxChannel, idxNode):
value = ctypes.c_double()
return (_FDwfAnalogIOChannelNodeGet(hdwf, idxChannel, idxNode, ctypes.byref(value)), value.value)
_FDwfAnalogIOChannelNodeStatusInfo = _dwf.FDwfAnalogIOChannelNodeStatusInfo
_FDwfAnalogIOChannelNodeStatusInfo.argtypes = [HDWF, ctypes.c_int, ctypes.c_int, _types.c_double_p, _types.c_double_p, _types.c_int_p]
_FDwfAnalogIOChannelNodeStatusInfo.restype = bool
def FDwfAnalogIOChannelNodeStatusInfo(hdwf, idxChannel, idxNode):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_int()
return (_FDwfAnalogIOChannelNodeStatusInfo(hdwf, idxChannel, idxNode, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
_FDwfAnalogIOChannelNodeStatus = _dwf.FDwfAnalogIOChannelNodeStatus
_FDwfAnalogIOChannelNodeStatus.argtypes = [HDWF, ctypes.c_int, ctypes.c_int, _types.c_double_p]
_FDwfAnalogIOChannelNodeStatus.restype = bool
def FDwfAnalogIOChannelNodeStatus(hdwf, idxChannel, idxNode):
value = ctypes.c_double()
return (_FDwfAnalogIOChannelNodeStatus(hdwf, idxChannel, idxNode, ctypes.byref(value)), value.value)
# DIGITAL IO INSTRUMENT FUNCTIONS
# Control:
FDwfDigitalIOReset = _dwf.FDwfDigitalIOReset
FDwfDigitalIOReset.argtypes = [HDWF]
FDwfDigitalIOReset.restype = bool
FDwfDigitalIOConfigure = _dwf.FDwfDigitalIOConfigure
FDwfDigitalIOConfigure.argtypes = [HDWF]
FDwfDigitalIOConfigure.restype = bool
FDwfDigitalIOStatus = _dwf.FDwfDigitalIOStatus
FDwfDigitalIOStatus.argtypes = [HDWF]
FDwfDigitalIOStatus.restype = bool
# Configure:
_FDwfDigitalIOOutputEnableInfo = _dwf.FDwfDigitalIOOutputEnableInfo
_FDwfDigitalIOOutputEnableInfo.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalIOOutputEnableInfo.restype = bool
def FDwfDigitalIOOutputEnableInfo(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalIOOutputEnableInfo(hdwf, ctypes.byref(value)), value.value)
FDwfDigitalIOOutputEnableSet = _dwf.FDwfDigitalIOOutputEnableSet
FDwfDigitalIOOutputEnableSet.argtypes = [HDWF, ctypes.c_uint]
FDwfDigitalIOOutputEnableSet.restype = bool
_FDwfDigitalIOOutputEnableGet = _dwf.FDwfDigitalIOOutputEnableGet
_FDwfDigitalIOOutputEnableGet.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalIOOutputEnableGet.restype = bool
def FDwfDigitalIOOutputEnableGet(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalIOOutputEnableGet(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalIOOutputInfo = _dwf.FDwfDigitalIOOutputInfo
_FDwfDigitalIOOutputInfo.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalIOOutputInfo.restype = bool
def FDwfDigitalIOOutputInfo(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalIOOutputInfo(hdwf, ctypes.byref(value)), value.value)
FDwfDigitalIOOutputSet = _dwf.FDwfDigitalIOOutputSet
FDwfDigitalIOOutputSet.argtypes = [HDWF, ctypes.c_uint]
FDwfDigitalIOOutputSet.restype = bool
_FDwfDigitalIOOutputGet = _dwf.FDwfDigitalIOOutputGet
_FDwfDigitalIOOutputGet.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalIOOutputGet.restype = bool
def FDwfDigitalIOOutputGet(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalIOOutputGet(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalIOInputInfo = _dwf.FDwfDigitalIOInputInfo
_FDwfDigitalIOInputInfo.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalIOInputInfo.restype = bool
def FDwfDigitalIOInputInfo(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalIOInputInfo(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalIOInputStatus = _dwf.FDwfDigitalIOInputStatus
_FDwfDigitalIOInputStatus.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalIOInputStatus.restype = bool
def FDwfDigitalIOInputStatus(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalIOInputStatus(hdwf, ctypes.byref(value)), value.value)
# DIGITAL IN INSTRUMENT FUNCTIONS
# Control and status:
FDwfDigitalInReset = _dwf.FDwfDigitalInReset
FDwfDigitalInReset.argtypes = [HDWF]
FDwfDigitalInReset.restype = bool
FDwfDigitalInConfigure = _dwf.FDwfDigitalInConfigure
FDwfDigitalInConfigure.argtypes = [HDWF, ctypes.c_byte, ctypes.c_byte]
FDwfDigitalInConfigure.restype = bool
_FDwfDigitalInStatus = _dwf.FDwfDigitalInStatus
_FDwfDigitalInStatus.argtypes = [HDWF, ctypes.c_byte, ctypes.POINTER(DwfState)]
_FDwfDigitalInStatus.restype = bool
def FDwfDigitalInStatus(hdwf, fReadData):
value = DwfState()
    return (_FDwfDigitalInStatus(hdwf, fReadData, ctypes.byref(value)), value)
_FDwfDigitalInStatusSamplesLeft = _dwf.FDwfDigitalInStatusSamplesLeft
_FDwfDigitalInStatusSamplesLeft.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalInStatusSamplesLeft.restype = bool
def FDwfDigitalInStatusSamplesLeft(hdwf):
value = ctypes.c_int()
return (_FDwfDigitalInStatusSamplesLeft(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalInStatusSamplesValid = _dwf.FDwfDigitalInStatusSamplesValid
_FDwfDigitalInStatusSamplesValid.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalInStatusSamplesValid.restype = bool
def FDwfDigitalInStatusSamplesValid(hdwf):
value = ctypes.c_int()
return (_FDwfDigitalInStatusSamplesValid(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalInStatusIndexWrite = _dwf.FDwfDigitalInStatusIndexWrite
_FDwfDigitalInStatusIndexWrite.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalInStatusIndexWrite.restype = bool
def FDwfDigitalInStatusIndexWrite(hdwf):
value = ctypes.c_int()
return (_FDwfDigitalInStatusIndexWrite(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalInStatusAutoTriggered = _dwf.FDwfDigitalInStatusAutoTriggered
_FDwfDigitalInStatusAutoTriggered.argtypes = [HDWF, _types.c_byte_p]
_FDwfDigitalInStatusAutoTriggered.restype = bool
def FDwfDigitalInStatusAutoTriggered(hdwf):
value = ctypes.c_byte()
    return (_FDwfDigitalInStatusAutoTriggered(hdwf, ctypes.byref(value)), bool(value.value))
FDwfDigitalInStatusData = _dwf.FDwfDigitalInStatusData
FDwfDigitalInStatusData.argtypes = [HDWF, ctypes.c_void_p, ctypes.c_int]
FDwfDigitalInStatusData.restype = bool
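# Illustrative sketch (assumptions noted): the caller allocates a buffer whose
# element width matches the configured sample format (8/16/32 bits) and passes
# its size in bytes, e.g. for 16-bit samples:
#   n = 4096                                  # hypothetical sample count
#   buf = (ctypes.c_uint16 * n)()
#   FDwfDigitalInStatusData(hdwf, buf, ctypes.sizeof(buf))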
# Acquisition configuration:
_FDwfDigitalInInternalClockInfo = _dwf.FDwfDigitalInInternalClockInfo
_FDwfDigitalInInternalClockInfo.argtypes = [HDWF, _types.c_double_p]
_FDwfDigitalInInternalClockInfo.restype = bool
def FDwfDigitalInInternalClockInfo(hdwf):
value = ctypes.c_double()
return (_FDwfDigitalInInternalClockInfo(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalInClockSourceInfo = _dwf.FDwfDigitalInClockSourceInfo
_FDwfDigitalInClockSourceInfo.argtypes = [HDWF, _types.c_int_p] # use IsBitSet
_FDwfDigitalInClockSourceInfo.restype = bool
def FDwfDigitalInClockSourceInfo(hdwf):
info = ctypes.c_int()
if not _FDwfDigitalInClockSourceInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(DwfDigitalInClockSource(i))
return (True, supported)
FDwfDigitalInClockSourceSet = _dwf.FDwfDigitalInClockSourceSet
FDwfDigitalInClockSourceSet.argtypes = [HDWF, DwfDigitalInClockSource]
FDwfDigitalInClockSourceSet.restype = bool
_FDwfDigitalInClockSourceGet = _dwf.FDwfDigitalInClockSourceGet
_FDwfDigitalInClockSourceGet.argtypes = [HDWF, ctypes.POINTER(DwfDigitalInClockSource)]
_FDwfDigitalInClockSourceGet.restype = bool
def FDwfDigitalInClockSourceGet(hdwf):
value = DwfDigitalInClockSource()
return (_FDwfDigitalInClockSourceGet(hdwf, ctypes.byref(value)), value)
_FDwfDigitalInDividerInfo = _dwf.FDwfDigitalInDividerInfo
_FDwfDigitalInDividerInfo.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalInDividerInfo.restype = bool
def FDwfDigitalInDividerInfo(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalInDividerInfo(hdwf, ctypes.byref(value)), value.value)
FDwfDigitalInDividerSet = _dwf.FDwfDigitalInDividerSet
FDwfDigitalInDividerSet.argtypes = [HDWF, ctypes.c_uint]
FDwfDigitalInDividerSet.restype = bool
_FDwfDigitalInDividerGet = _dwf.FDwfDigitalInDividerGet
_FDwfDigitalInDividerGet.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalInDividerGet.restype = bool
def FDwfDigitalInDividerGet(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalInDividerGet(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalInBitsInfo = _dwf.FDwfDigitalInBitsInfo # Returns the number of Digital In bits
_FDwfDigitalInBitsInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalInBitsInfo.restype = bool
def FDwfDigitalInBitsInfo(hdwf):
value = ctypes.c_int()
return (_FDwfDigitalInBitsInfo(hdwf, ctypes.byref(value)), value.value)
FDwfDigitalInSampleFormatSet = _dwf.FDwfDigitalInSampleFormatSet # valid options 8/16/32
FDwfDigitalInSampleFormatSet.argtypes = [HDWF, ctypes.c_int]
FDwfDigitalInSampleFormatSet.restype = bool
_FDwfDigitalInSampleFormatGet = _dwf.FDwfDigitalInSampleFormatGet
_FDwfDigitalInSampleFormatGet.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalInSampleFormatGet.restype = bool
def FDwfDigitalInSampleFormatGet(hdwf):
value = ctypes.c_int()
return (_FDwfDigitalInSampleFormatGet(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalInBufferSizeInfo = _dwf.FDwfDigitalInBufferSizeInfo
_FDwfDigitalInBufferSizeInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalInBufferSizeInfo.restype = bool
def FDwfDigitalInBufferSizeInfo(hdwf):
value = ctypes.c_int()
return (_FDwfDigitalInBufferSizeInfo(hdwf, ctypes.byref(value)), value.value)
FDwfDigitalInBufferSizeSet = _dwf.FDwfDigitalInBufferSizeSet
FDwfDigitalInBufferSizeSet.argtypes = [HDWF, ctypes.c_int]
FDwfDigitalInBufferSizeSet.restype = bool
_FDwfDigitalInBufferSizeGet = _dwf.FDwfDigitalInBufferSizeGet
_FDwfDigitalInBufferSizeGet.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalInBufferSizeGet.restype = bool
def FDwfDigitalInBufferSizeGet(hdwf):
value = ctypes.c_int()
return (_FDwfDigitalInBufferSizeGet(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalInSampleModeInfo = _dwf.FDwfDigitalInSampleModeInfo # use IsBitSet
_FDwfDigitalInSampleModeInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalInSampleModeInfo.restype = bool
def FDwfDigitalInSampleModeInfo(hdwf):
info = ctypes.c_int()
if not _FDwfDigitalInSampleModeInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(DwfDigitalInSampleMode(i))
return (True, supported)
FDwfDigitalInSampleModeSet = _dwf.FDwfDigitalInSampleModeSet
FDwfDigitalInSampleModeSet.argtypes = [HDWF, DwfDigitalInSampleMode]
FDwfDigitalInSampleModeSet.restype = bool
_FDwfDigitalInSampleModeGet = _dwf.FDwfDigitalInSampleModeGet
_FDwfDigitalInSampleModeGet.argtypes = [HDWF, ctypes.POINTER(DwfDigitalInSampleMode)]
_FDwfDigitalInSampleModeGet.restype = bool
def FDwfDigitalInSampleModeGet(hdwf):
value = DwfDigitalInSampleMode()
return (_FDwfDigitalInSampleModeGet(hdwf, ctypes.byref(value)), value)
_FDwfDigitalInAcquisitionModeInfo = _dwf.FDwfDigitalInAcquisitionModeInfo # use IsBitSet
_FDwfDigitalInAcquisitionModeInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalInAcquisitionModeInfo.restype = bool
def FDwfDigitalInAcquisitionModeInfo(hdwf):
info = ctypes.c_int()
if not _FDwfDigitalInAcquisitionModeInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(ACQMODE(i))
return (True, supported)
FDwfDigitalInAcquisitionModeSet = _dwf.FDwfDigitalInAcquisitionModeSet
FDwfDigitalInAcquisitionModeSet.argtypes = [HDWF, ACQMODE]
FDwfDigitalInAcquisitionModeSet.restype = bool
_FDwfDigitalInAcquisitionModeGet = _dwf.FDwfDigitalInAcquisitionModeGet
_FDwfDigitalInAcquisitionModeGet.argtypes = [HDWF, ctypes.POINTER(ACQMODE)]
_FDwfDigitalInAcquisitionModeGet.restype = bool
def FDwfDigitalInAcquisitionModeGet(hdwf):
value = ACQMODE()
return (_FDwfDigitalInAcquisitionModeGet(hdwf, ctypes.byref(value)), value)
# Trigger configuration:
_FDwfDigitalInTriggerSourceInfo = _dwf.FDwfDigitalInTriggerSourceInfo # use IsBitSet
_FDwfDigitalInTriggerSourceInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalInTriggerSourceInfo.restype = bool
def FDwfDigitalInTriggerSourceInfo(hdwf):
info = ctypes.c_int()
if not _FDwfDigitalInTriggerSourceInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(TRIGSRC(i))
return (True, supported)
FDwfDigitalInTriggerSourceSet = _dwf.FDwfDigitalInTriggerSourceSet
FDwfDigitalInTriggerSourceSet.argtypes = [HDWF, TRIGSRC]
FDwfDigitalInTriggerSourceSet.restype = bool
_FDwfDigitalInTriggerSourceGet = _dwf.FDwfDigitalInTriggerSourceGet
_FDwfDigitalInTriggerSourceGet.argtypes = [HDWF, ctypes.POINTER(TRIGSRC)]
_FDwfDigitalInTriggerSourceGet.restype = bool
def FDwfDigitalInTriggerSourceGet(hdwf):
value = TRIGSRC()
return (_FDwfDigitalInTriggerSourceGet(hdwf, ctypes.byref(value)), value)
_FDwfDigitalInTriggerPositionInfo = _dwf.FDwfDigitalInTriggerPositionInfo
_FDwfDigitalInTriggerPositionInfo.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalInTriggerPositionInfo.restype = bool
def FDwfDigitalInTriggerPositionInfo(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalInTriggerPositionInfo(hdwf, ctypes.byref(value)), value.value)
FDwfDigitalInTriggerPositionSet = _dwf.FDwfDigitalInTriggerPositionSet
FDwfDigitalInTriggerPositionSet.argtypes = [HDWF, ctypes.c_uint]
FDwfDigitalInTriggerPositionSet.restype = bool
_FDwfDigitalInTriggerPositionGet = _dwf.FDwfDigitalInTriggerPositionGet
_FDwfDigitalInTriggerPositionGet.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalInTriggerPositionGet.restype = bool
def FDwfDigitalInTriggerPositionGet(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalInTriggerPositionGet(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalInTriggerAutoTimeoutInfo = _dwf.FDwfDigitalInTriggerAutoTimeoutInfo
_FDwfDigitalInTriggerAutoTimeoutInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p, _types.c_double_p]
_FDwfDigitalInTriggerAutoTimeoutInfo.restype = bool
def FDwfDigitalInTriggerAutoTimeoutInfo(hdwf):
val_a = ctypes.c_double()
val_b = ctypes.c_double()
val_c = ctypes.c_double()
return (_FDwfDigitalInTriggerAutoTimeoutInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c)), val_a.value, val_b.value, val_c.value)
FDwfDigitalInTriggerAutoTimeoutSet = _dwf.FDwfDigitalInTriggerAutoTimeoutSet
FDwfDigitalInTriggerAutoTimeoutSet.argtypes = [HDWF, ctypes.c_double]
FDwfDigitalInTriggerAutoTimeoutSet.restype = bool
_FDwfDigitalInTriggerAutoTimeoutGet = _dwf.FDwfDigitalInTriggerAutoTimeoutGet
_FDwfDigitalInTriggerAutoTimeoutGet.argtypes = [HDWF, _types.c_double_p]
_FDwfDigitalInTriggerAutoTimeoutGet.restype = bool
def FDwfDigitalInTriggerAutoTimeoutGet(hdwf):
value = ctypes.c_double()
return (_FDwfDigitalInTriggerAutoTimeoutGet(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalInTriggerInfo = _dwf.FDwfDigitalInTriggerInfo
_FDwfDigitalInTriggerInfo.argtypes = [HDWF, _types.c_uint_p, _types.c_uint_p, _types.c_uint_p, _types.c_uint_p]
_FDwfDigitalInTriggerInfo.restype = bool
def FDwfDigitalInTriggerInfo(hdwf):
val_a = ctypes.c_uint()
val_b = ctypes.c_uint()
val_c = ctypes.c_uint()
val_d = ctypes.c_uint()
return (_FDwfDigitalInTriggerInfo(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c), ctypes.byref(val_d)), val_a.value, val_b.value, val_c.value, val_d.value)
FDwfDigitalInTriggerSet = _dwf.FDwfDigitalInTriggerSet
FDwfDigitalInTriggerSet.argtypes = [HDWF, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint]
FDwfDigitalInTriggerSet.restype = bool
_FDwfDigitalInTriggerGet = _dwf.FDwfDigitalInTriggerGet
_FDwfDigitalInTriggerGet.argtypes = [HDWF, _types.c_uint_p, _types.c_uint_p, _types.c_uint_p, _types.c_uint_p]
_FDwfDigitalInTriggerGet.restype = bool
def FDwfDigitalInTriggerGet(hdwf):
val_a = ctypes.c_uint()
val_b = ctypes.c_uint()
val_c = ctypes.c_uint()
val_d = ctypes.c_uint()
return (_FDwfDigitalInTriggerGet(hdwf, ctypes.byref(val_a), ctypes.byref(val_b), ctypes.byref(val_c), ctypes.byref(val_d)), val_a.value, val_b.value, val_c.value, val_d.value)
# The trigger logic for the bit masks is: Low AND High AND (Rise OR Fall).
# Setting the same bit in both the Rise and Fall masks triggers on any edge.
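# Illustrative sketch: to trigger when pin 0 is low, pin 1 is high, and pin 2
# sees any edge (same bit set in both edge masks):
#   FDwfDigitalInTriggerSet(hdwf, 1 << 0, 1 << 1, 1 << 2, 1 << 2)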
# DIGITAL OUT INSTRUMENT FUNCTIONS
# Control:
FDwfDigitalOutReset = _dwf.FDwfDigitalOutReset
FDwfDigitalOutReset.argtypes = [HDWF]
FDwfDigitalOutReset.restype = bool
FDwfDigitalOutConfigure = _dwf.FDwfDigitalOutConfigure
FDwfDigitalOutConfigure.argtypes = [HDWF, ctypes.c_byte]
FDwfDigitalOutConfigure.restype = bool
_FDwfDigitalOutStatus = _dwf.FDwfDigitalOutStatus
_FDwfDigitalOutStatus.argtypes = [HDWF, ctypes.POINTER(DwfState)]
_FDwfDigitalOutStatus.restype = bool
def FDwfDigitalOutStatus(hdwf):
value = DwfState()
return (_FDwfDigitalOutStatus(hdwf, ctypes.byref(value)), value)
# Configuration:
_FDwfDigitalOutInternalClockInfo = _dwf.FDwfDigitalOutInternalClockInfo
_FDwfDigitalOutInternalClockInfo.argtypes = [HDWF, _types.c_double_p]
_FDwfDigitalOutInternalClockInfo.restype = bool
def FDwfDigitalOutInternalClockInfo(hdwf):
value = ctypes.c_double()
return (_FDwfDigitalOutInternalClockInfo(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalOutTriggerSourceInfo = _dwf.FDwfDigitalOutTriggerSourceInfo # use IsBitSet
_FDwfDigitalOutTriggerSourceInfo.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalOutTriggerSourceInfo.restype = bool
def FDwfDigitalOutTriggerSourceInfo(hdwf):
info = ctypes.c_int()
if not _FDwfDigitalOutTriggerSourceInfo(hdwf, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(TRIGSRC(i))
return (True, supported)
FDwfDigitalOutTriggerSourceSet = _dwf.FDwfDigitalOutTriggerSourceSet
FDwfDigitalOutTriggerSourceSet.argtypes = [HDWF, TRIGSRC]
FDwfDigitalOutTriggerSourceSet.restype = bool
_FDwfDigitalOutTriggerSourceGet = _dwf.FDwfDigitalOutTriggerSourceGet
_FDwfDigitalOutTriggerSourceGet.argtypes = [HDWF, ctypes.POINTER(TRIGSRC)]
_FDwfDigitalOutTriggerSourceGet.restype = bool
def FDwfDigitalOutTriggerSourceGet(hdwf):
value = TRIGSRC()
return (_FDwfDigitalOutTriggerSourceGet(hdwf, ctypes.byref(value)), value)
_FDwfDigitalOutRunInfo = _dwf.FDwfDigitalOutRunInfo
_FDwfDigitalOutRunInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p]
_FDwfDigitalOutRunInfo.restype = bool
def FDwfDigitalOutRunInfo(hdwf):
min = ctypes.c_double()
max = ctypes.c_double()
return (_FDwfDigitalOutRunInfo(hdwf, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfDigitalOutRunSet = _dwf.FDwfDigitalOutRunSet
FDwfDigitalOutRunSet.argtypes = [HDWF, ctypes.c_double]
FDwfDigitalOutRunSet.restype = bool
_FDwfDigitalOutRunGet = _dwf.FDwfDigitalOutRunGet
_FDwfDigitalOutRunGet.argtypes = [HDWF, _types.c_double_p]
_FDwfDigitalOutRunGet.restype = bool
def FDwfDigitalOutRunGet(hdwf):
value = ctypes.c_double()
return (_FDwfDigitalOutRunGet(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalOutRunStatus = _dwf.FDwfDigitalOutRunStatus
_FDwfDigitalOutRunStatus.argtypes = [HDWF, _types.c_double_p]
_FDwfDigitalOutRunStatus.restype = bool
def FDwfDigitalOutRunStatus(hdwf):
value = ctypes.c_double()
return (_FDwfDigitalOutRunStatus(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalOutWaitInfo = _dwf.FDwfDigitalOutWaitInfo
_FDwfDigitalOutWaitInfo.argtypes = [HDWF, _types.c_double_p, _types.c_double_p]
_FDwfDigitalOutWaitInfo.restype = bool
def FDwfDigitalOutWaitInfo(hdwf):
min = ctypes.c_double()
max = ctypes.c_double()
return (_FDwfDigitalOutWaitInfo(hdwf, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfDigitalOutWaitSet = _dwf.FDwfDigitalOutWaitSet
FDwfDigitalOutWaitSet.argtypes = [HDWF, ctypes.c_double]
FDwfDigitalOutWaitSet.restype = bool
_FDwfDigitalOutWaitGet = _dwf.FDwfDigitalOutWaitGet
_FDwfDigitalOutWaitGet.argtypes = [HDWF, _types.c_double_p]
_FDwfDigitalOutWaitGet.restype = bool
def FDwfDigitalOutWaitGet(hdwf):
value = ctypes.c_double()
return (_FDwfDigitalOutWaitGet(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalOutRepeatInfo = _dwf.FDwfDigitalOutRepeatInfo
_FDwfDigitalOutRepeatInfo.argtypes = [HDWF, _types.c_uint_p, _types.c_uint_p]
_FDwfDigitalOutRepeatInfo.restype = bool
def FDwfDigitalOutRepeatInfo(hdwf):
min = ctypes.c_uint()
max = ctypes.c_uint()
return (_FDwfDigitalOutRepeatInfo(hdwf, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfDigitalOutRepeatSet = _dwf.FDwfDigitalOutRepeatSet
FDwfDigitalOutRepeatSet.argtypes = [HDWF, ctypes.c_uint]
FDwfDigitalOutRepeatSet.restype = bool
_FDwfDigitalOutRepeatGet = _dwf.FDwfDigitalOutRepeatGet
_FDwfDigitalOutRepeatGet.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalOutRepeatGet.restype = bool
def FDwfDigitalOutRepeatGet(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalOutRepeatGet(hdwf, ctypes.byref(value)), value.value)
_FDwfDigitalOutRepeatStatus = _dwf.FDwfDigitalOutRepeatStatus
_FDwfDigitalOutRepeatStatus.argtypes = [HDWF, _types.c_uint_p]
_FDwfDigitalOutRepeatStatus.restype = bool
def FDwfDigitalOutRepeatStatus(hdwf):
value = ctypes.c_uint()
return (_FDwfDigitalOutRepeatStatus(hdwf, ctypes.byref(value)), value.value)
FDwfDigitalOutRepeatTriggerSet = _dwf.FDwfDigitalOutRepeatTriggerSet
FDwfDigitalOutRepeatTriggerSet.argtypes = [HDWF, ctypes.c_byte]
FDwfDigitalOutRepeatTriggerSet.restype = bool
_FDwfDigitalOutRepeatTriggerGet = _dwf.FDwfDigitalOutRepeatTriggerGet
_FDwfDigitalOutRepeatTriggerGet.argtypes = [HDWF, _types.c_byte_p]
_FDwfDigitalOutRepeatTriggerGet.restype = bool
def FDwfDigitalOutRepeatTriggerGet(hdwf):
value = ctypes.c_byte()
return (_FDwfDigitalOutRepeatTriggerGet(hdwf, ctypes.byref(value)), bool(value.value))
_FDwfDigitalOutCount = _dwf.FDwfDigitalOutCount
_FDwfDigitalOutCount.argtypes = [HDWF, _types.c_int_p]
_FDwfDigitalOutCount.restype = bool
def FDwfDigitalOutCount(hdwf):
value = ctypes.c_int()
return (_FDwfDigitalOutCount(hdwf, ctypes.byref(value)), value.value)
FDwfDigitalOutEnableSet = _dwf.FDwfDigitalOutEnableSet
FDwfDigitalOutEnableSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_byte]
FDwfDigitalOutEnableSet.restype = bool
_FDwfDigitalOutEnableGet = _dwf.FDwfDigitalOutEnableGet
_FDwfDigitalOutEnableGet.argtypes = [HDWF, ctypes.c_int, _types.c_byte_p]
_FDwfDigitalOutEnableGet.restype = bool
def FDwfDigitalOutEnableGet(hdwf, idxChannel):
    value = ctypes.c_byte()
    return (_FDwfDigitalOutEnableGet(hdwf, idxChannel, ctypes.byref(value)), bool(value.value))
_FDwfDigitalOutOutputInfo = _dwf.FDwfDigitalOutOutputInfo # use IsBitSet
_FDwfDigitalOutOutputInfo.argtypes = [HDWF, ctypes.c_int, _types.c_int_p]
_FDwfDigitalOutOutputInfo.restype = bool
def FDwfDigitalOutOutputInfo(hdwf, idxChannel):
    info = ctypes.c_int()
    if not _FDwfDigitalOutOutputInfo(hdwf, idxChannel, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(DwfDigitalOutOutput(i))
return (True, supported)
FDwfDigitalOutOutputSet = _dwf.FDwfDigitalOutOutputSet
FDwfDigitalOutOutputSet.argtypes = [HDWF, ctypes.c_int, DwfDigitalOutOutput]
FDwfDigitalOutOutputSet.restype = bool
_FDwfDigitalOutOutputGet = _dwf.FDwfDigitalOutOutputGet
_FDwfDigitalOutOutputGet.argtypes = [HDWF, ctypes.c_int, ctypes.POINTER(DwfDigitalOutOutput)]
_FDwfDigitalOutOutputGet.restype = bool
def FDwfDigitalOutOutputGet(hdwf, idxChannel):
    value = DwfDigitalOutOutput()
    return (_FDwfDigitalOutOutputGet(hdwf, idxChannel, ctypes.byref(value)), value)
_FDwfDigitalOutTypeInfo = _dwf.FDwfDigitalOutTypeInfo # use IsBitSet
_FDwfDigitalOutTypeInfo.argtypes = [HDWF, ctypes.c_int, _types.c_int_p]
_FDwfDigitalOutTypeInfo.restype = bool
def FDwfDigitalOutTypeInfo(hdwf, idxChannel):
    info = ctypes.c_int()
    if not _FDwfDigitalOutTypeInfo(hdwf, idxChannel, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(DwfDigitalOutType(i))
return (True, supported)
FDwfDigitalOutTypeSet = _dwf.FDwfDigitalOutTypeSet
FDwfDigitalOutTypeSet.argtypes = [HDWF, ctypes.c_int, DwfDigitalOutType]
FDwfDigitalOutTypeSet.restype = bool
_FDwfDigitalOutTypeGet = _dwf.FDwfDigitalOutTypeGet
_FDwfDigitalOutTypeGet.argtypes = [HDWF, ctypes.c_int, ctypes.POINTER(DwfDigitalOutType)]
_FDwfDigitalOutTypeGet.restype = bool
def FDwfDigitalOutTypeGet(hdwf, idxChannel):
    value = DwfDigitalOutType()
    return (_FDwfDigitalOutTypeGet(hdwf, idxChannel, ctypes.byref(value)), value)
_FDwfDigitalOutIdleInfo = _dwf.FDwfDigitalOutIdleInfo # use IsBitSet
_FDwfDigitalOutIdleInfo.argtypes = [HDWF, ctypes.c_int, _types.c_int_p]
_FDwfDigitalOutIdleInfo.restype = bool
def FDwfDigitalOutIdleInfo(hdwf, idxChannel):
    info = ctypes.c_int()
    if not _FDwfDigitalOutIdleInfo(hdwf, idxChannel, ctypes.byref(info)):
return (False, [])
supported = []
for i in range(8 * ctypes.sizeof(ctypes.c_int)):
if info.value & (1 << i) != 0:
supported.append(DwfDigitalOutIdle(i))
return (True, supported)
FDwfDigitalOutIdleSet = _dwf.FDwfDigitalOutIdleSet
FDwfDigitalOutIdleSet.argtypes = [HDWF, ctypes.c_int, DwfDigitalOutIdle]
FDwfDigitalOutIdleSet.restype = bool
_FDwfDigitalOutIdleGet = _dwf.FDwfDigitalOutIdleGet
_FDwfDigitalOutIdleGet.argtypes = [HDWF, ctypes.c_int, ctypes.POINTER(DwfDigitalOutIdle)]
_FDwfDigitalOutIdleGet.restype = bool
def FDwfDigitalOutIdleGet(hdwf, idxChannel):
    value = DwfDigitalOutIdle()
    return (_FDwfDigitalOutIdleGet(hdwf, idxChannel, ctypes.byref(value)), value)
_FDwfDigitalOutDividerInfo = _dwf.FDwfDigitalOutDividerInfo
_FDwfDigitalOutDividerInfo.argtypes = [HDWF, ctypes.c_int, _types.c_uint_p, _types.c_uint_p]
_FDwfDigitalOutDividerInfo.restype = bool
def FDwfDigitalOutDividerInfo(hdwf, idxChannel):
min = ctypes.c_uint()
max = ctypes.c_uint()
return (_FDwfDigitalOutDividerInfo(hdwf, idxChannel, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfDigitalOutDividerInitSet = _dwf.FDwfDigitalOutDividerInitSet
FDwfDigitalOutDividerInitSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_uint]
FDwfDigitalOutDividerInitSet.restype = bool
_FDwfDigitalOutDividerInitGet = _dwf.FDwfDigitalOutDividerInitGet
_FDwfDigitalOutDividerInitGet.argtypes = [HDWF, ctypes.c_int, _types.c_uint_p]
_FDwfDigitalOutDividerInitGet.restype = bool
def FDwfDigitalOutDividerInitGet(hdwf, idxChannel):
value = ctypes.c_uint()
return (_FDwfDigitalOutDividerInitGet(hdwf, idxChannel, ctypes.byref(value)), value.value)
FDwfDigitalOutDividerSet = _dwf.FDwfDigitalOutDividerSet
FDwfDigitalOutDividerSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_uint]
FDwfDigitalOutDividerSet.restype = bool
_FDwfDigitalOutDividerGet = _dwf.FDwfDigitalOutDividerGet
_FDwfDigitalOutDividerGet.argtypes = [HDWF, ctypes.c_int, _types.c_uint_p]
_FDwfDigitalOutDividerGet.restype = bool
def FDwfDigitalOutDividerGet(hdwf, idxChannel):
value = ctypes.c_uint()
return (_FDwfDigitalOutDividerGet(hdwf, idxChannel, ctypes.byref(value)), value.value)
_FDwfDigitalOutCounterInfo = _dwf.FDwfDigitalOutCounterInfo
_FDwfDigitalOutCounterInfo.argtypes = [HDWF, ctypes.c_int, _types.c_uint_p, _types.c_uint_p]
_FDwfDigitalOutCounterInfo.restype = bool
def FDwfDigitalOutCounterInfo(hdwf, idxChannel):
min = ctypes.c_uint()
max = ctypes.c_uint()
return (_FDwfDigitalOutCounterInfo(hdwf, idxChannel, ctypes.byref(min), ctypes.byref(max)), min.value, max.value)
FDwfDigitalOutCounterInitSet = _dwf.FDwfDigitalOutCounterInitSet
FDwfDigitalOutCounterInitSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_byte, ctypes.c_uint]
FDwfDigitalOutCounterInitSet.restype = bool
_FDwfDigitalOutCounterInitGet = _dwf.FDwfDigitalOutCounterInitGet
_FDwfDigitalOutCounterInitGet.argtypes = [HDWF, ctypes.c_int, _types.c_int_p, _types.c_uint_p]
_FDwfDigitalOutCounterInitGet.restype = bool
def FDwfDigitalOutCounterInitGet(hdwf, idxChannel):
val_a = ctypes.c_int()
val_b = ctypes.c_uint()
return (_FDwfDigitalOutCounterInitGet(hdwf, idxChannel, ctypes.byref(val_a), ctypes.byref(val_b)), val_a.value, val_b.value)
FDwfDigitalOutCounterSet = _dwf.FDwfDigitalOutCounterSet
FDwfDigitalOutCounterSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_uint, ctypes.c_uint]
FDwfDigitalOutCounterSet.restype = bool
_FDwfDigitalOutCounterGet = _dwf.FDwfDigitalOutCounterGet
_FDwfDigitalOutCounterGet.argtypes = [HDWF, ctypes.c_int, _types.c_uint_p, _types.c_uint_p]
_FDwfDigitalOutCounterGet.restype = bool
def FDwfDigitalOutCounterGet(hdwf, idxChannel):
val_a = ctypes.c_uint()
val_b = ctypes.c_uint()
return (_FDwfDigitalOutCounterGet(hdwf, idxChannel, ctypes.byref(val_a), ctypes.byref(val_b)), val_a.value, val_b.value)
_FDwfDigitalOutDataInfo = _dwf.FDwfDigitalOutDataInfo
_FDwfDigitalOutDataInfo.argtypes = [HDWF, ctypes.c_int, _types.c_uint_p]
_FDwfDigitalOutDataInfo.restype = bool
def FDwfDigitalOutDataInfo(hdwf, idxChannel):
value = ctypes.c_uint()
return (_FDwfDigitalOutDataInfo(hdwf, idxChannel, ctypes.byref(value)), value.value)
FDwfDigitalOutDataSet = _dwf.FDwfDigitalOutDataSet
FDwfDigitalOutDataSet.argtypes = [HDWF, ctypes.c_int, ctypes.c_void_p, ctypes.c_uint]
FDwfDigitalOutDataSet.restype = bool
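# Illustrative sketch (assuming LSB-first bit packing): a custom pattern is
# passed as a byte buffer plus the count of valid bits, e.g.:
#   pattern = (ctypes.c_ubyte * 1)(0b10110010)           # hypothetical pattern
#   FDwfDigitalOutDataSet(hdwf, 0, pattern, 8)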
| asgeir/pydigilent | pydigilent/lowlevel/dwf.py | Python | mit | 82,320 | 0.01318 |
"""Fixed the id and reference for article self referencing with foreign key
Revision ID: 551f78f9d8a5
Revises: 8e01032c9c5e
Create Date: 2018-11-17 19:13:52.491349
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '551f78f9d8a5'
down_revision = '8e01032c9c5e'
branch_labels = None
depends_on = None
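# To apply or revert this revision (assuming the standard Alembic CLI workflow):
#   alembic upgrade 551f78f9d8a5
#   alembic downgrade 8e01032c9c5e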
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('articles', sa.Column('parent_id', sa.Integer(), nullable=True))
op.drop_constraint('articles_parent_fkey', 'articles', type_='foreignkey')
op.create_foreign_key(None, 'articles', 'articles', ['parent_id'], ['id'])
op.drop_column('articles', 'parent')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('articles', sa.Column('parent', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'articles', type_='foreignkey')
op.create_foreign_key('articles_parent_fkey', 'articles', 'articles', ['parent'], ['id'])
op.drop_column('articles', 'parent_id')
# ### end Alembic commands ###
| dougmiller/theMetaCity | migrations/versions/551f78f9d8a5_fixed_the_id_and_reference_for_article_.py | Python | mit | 1,176 | 0.002551 |
# @author: Milinda Fernando
# School of Computing, University of Utah.
# Converts tab-separated measurement files (SC16 poster, energy measurements)
# to comma-separated CSV files.
import argparse
from subprocess import call
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='slurm_pbs')
parser.add_argument('-p','--prefix', help='file prefix that you need to merge')
parser.add_argument('-s','--suffix',help='suffix of the file')
    parser.add_argument('-n','--n',help='number of files that you need to merge')
args=parser.parse_args()
tol_list=['0.000010','0.000100','0.001000','0.010000','0.100000','0.200000','0.300000','0.400000','0.500000']
#sendCommMap_M_tol_0.010000_npes_4096_pts_100000_ps_4096mat.csv
for tol in tol_list:
inFName=args.prefix+tol+args.suffix+'_'+args.n+'mat'+'.csv'
outFName=args.prefix+tol+args.suffix+'_'+args.n+'mat_comma'+'.csv'
fin=open(inFName,'r')
fout=open(outFName,'w')
for line in fin:
line=line.strip()
line=line.replace('\t',',')
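            # e.g. "1.0\t2.0\t3.0" becomes "1.0,2.0,3.0" (tabs -> commas)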
fout.write(line+'\n')
fin.close()
fout.close()
    print('OK')
| paralab/Dendro4 | python_scripts_sc16/csv_mat.py | Python | gpl-2.0 | 1,046 | 0.048757 |
# stdlib
import copy
import mock
import unittest
# project
from utils.service_discovery.config_stores import get_config_store
from utils.service_discovery.consul_config_store import ConsulStore
from utils.service_discovery.etcd_config_store import EtcdStore
from utils.service_discovery.abstract_config_store import AbstractConfigStore
from utils.service_discovery.sd_backend import get_sd_backend
from utils.service_discovery.sd_docker_backend import SDDockerBackend
def clear_singletons(agentConfig):
get_config_store(agentConfig)._drop()
get_sd_backend(agentConfig)._drop()
class Response(object):
"""Dummy response class for mocking purpose"""
def __init__(self, content):
self.content = content
def json(self):
return self.content
def raise_for_status(self):
pass
def _get_container_inspect(c_id):
"""Return a mocked container inspect dict from self.container_inspects."""
for co, _, _ in TestServiceDiscovery.container_inspects:
if co.get('Id') == c_id:
return co
return None
def _get_conf_tpls(image_name, trace_config=False):
"""Return a mocked configuration template from self.mock_templates."""
return copy.deepcopy(TestServiceDiscovery.mock_templates.get(image_name)[0])
def _get_check_tpls(image_name, **kwargs):
if image_name in TestServiceDiscovery.mock_templates:
return [copy.deepcopy(TestServiceDiscovery.mock_templates.get(image_name)[0][0][0:3])]
elif image_name in TestServiceDiscovery.bad_mock_templates:
try:
return [copy.deepcopy(TestServiceDiscovery.bad_mock_templates.get(image_name))]
except Exception:
return None
def client_read(path):
"""Return a mocked string that would normally be read from a config store (etcd, consul...)."""
parts = path.split('/')
config_parts = ['check_names', 'init_configs', 'instances']
image, config_part = parts[-2], parts[-1]
return TestServiceDiscovery.mock_tpls.get(image)[0][config_parts.index(config_part)]
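# For example (hypothetical path), client_read('/datadog/check_configs/image_0/check_names')
# returns '["check_0"]' from the mock_tpls table above.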
class TestServiceDiscovery(unittest.TestCase):
docker_container_inspect = {
u'Id': u'69ff25598b2314d1cdb7752cc3a659fb1c1352b32546af4f1454321550e842c0',
u'Image': u'6ffc02088cb870652eca9ccd4c4fb582f75b29af2879792ed09bb46fd1c898ef',
u'Name': u'/nginx',
u'NetworkSettings': {u'IPAddress': u'172.17.0.21', u'Ports': {u'443/tcp': None, u'80/tcp': None}}
}
kubernetes_container_inspect = {
u'Id': u'389dc8a4361f3d6c866e9e9a7b6972b26a31c589c4e2f097375d55656a070bc9',
u'Image': u'de309495e6c7b2071bc60c0b7e4405b0d65e33e3a4b732ad77615d90452dd827',
u'Name': u'/k8s_sentinel.38057ab9_redis-master_default_27b84e1e-a81c-11e5-8347-42010af00002_f70875a1',
u'Config': {u'ExposedPorts': {u'6379/tcp': {}}},
u'NetworkSettings': {u'IPAddress': u'', u'Ports': None}
}
malformed_container_inspect = {
u'Id': u'69ff25598b2314d1cdb7752cc3a659fb1c1352b32546af4f1454321550e842c0',
u'Image': u'6ffc02088cb870652eca9ccd4c4fb582f75b29af2879792ed09bb46fd1c898ef',
u'Name': u'/nginx'
}
container_inspects = [
# (inspect_dict, expected_ip, expected_port)
(docker_container_inspect, '172.17.0.21', ['80', '443']),
(kubernetes_container_inspect, None, ['6379']), # arbitrarily defined in the mocked pod_list
(malformed_container_inspect, None, KeyError)
]
# templates with variables already extracted
mock_templates = {
# image_name: ([(check_name, init_tpl, instance_tpl, variables)], (expected_config_template))
'image_0': (
[('check_0', {}, {'host': '%%host%%'}, ['host'])],
('check_0', {}, {'host': '127.0.0.1'})),
'image_1': (
[('check_1', {}, {'port': '%%port%%'}, ['port'])],
('check_1', {}, {'port': '1337'})),
'image_2': (
[('check_2', {}, {'host': '%%host%%', 'port': '%%port%%'}, ['host', 'port'])],
('check_2', {}, {'host': '127.0.0.1', 'port': '1337'})),
}
# raw templates coming straight from the config store
mock_tpls = {
# image_name: ('[check_name]', '[init_tpl]', '[instance_tpl]', expected_python_tpl_list)
'image_0': (
('["check_0"]', '[{}]', '[{"host": "%%host%%"}]'),
[('check_0', {}, {"host": "%%host%%"})]),
'image_1': (
('["check_1"]', '[{}]', '[{"port": "%%port%%"}]'),
[('check_1', {}, {"port": "%%port%%"})]),
'image_2': (
('["check_2"]', '[{}]', '[{"host": "%%host%%", "port": "%%port%%"}]'),
[('check_2', {}, {"host": "%%host%%", "port": "%%port%%"})]),
'bad_image_0': ((['invalid template']), []),
'bad_image_1': (('invalid template'), []),
'bad_image_2': (None, [])
}
bad_mock_templates = {
'bad_image_0': ('invalid template'),
'bad_image_1': [('invalid template')],
'bad_image_2': None
}
def setUp(self):
self.etcd_agentConfig = {
'service_discovery': True,
'service_discovery_backend': 'docker',
'sd_template_dir': '/datadog/check_configs',
'sd_config_backend': 'etcd',
'sd_backend_host': '127.0.0.1',
'sd_backend_port': '2380'
}
self.consul_agentConfig = {
'service_discovery': True,
'service_discovery_backend': 'docker',
'sd_template_dir': '/datadog/check_configs',
'sd_config_backend': 'consul',
'sd_backend_host': '127.0.0.1',
'sd_backend_port': '8500'
}
self.auto_conf_agentConfig = {
'service_discovery': True,
'service_discovery_backend': 'docker',
'sd_template_dir': '/datadog/check_configs',
'additional_checksd': '/etc/dd-agent/checks.d/',
}
self.agentConfigs = [self.etcd_agentConfig, self.consul_agentConfig, self.auto_conf_agentConfig]
# sd_backend tests
@mock.patch('utils.http.requests.get')
@mock.patch('utils.kubeutil.check_yaml')
def test_get_host(self, mock_check_yaml, mock_get):
kubernetes_config = {'instances': [{'kubelet_port': 1337}]}
pod_list = {
'items': [{
'status': {
'podIP': '127.0.0.1',
'containerStatuses': [
{'containerID': 'docker://389dc8a4361f3d6c866e9e9a7b6972b26a31c589c4e2f097375d55656a070bc9'}
]
}
}]
}
mock_check_yaml.return_value = kubernetes_config
mock_get.return_value = Response(pod_list)
for c_ins, expected_ip, _ in self.container_inspects:
with mock.patch.object(AbstractConfigStore, '__init__', return_value=None):
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
with mock.patch('utils.kubeutil.get_conf_path', return_value=None):
sd_backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
self.assertEqual(sd_backend._get_host(c_ins), expected_ip)
clear_singletons(self.auto_conf_agentConfig)
def test_get_ports(self):
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
for c_ins, _, expected_ports in self.container_inspects:
sd_backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
if isinstance(expected_ports, list):
self.assertEqual(sd_backend._get_ports(c_ins), expected_ports)
else:
self.assertRaises(expected_ports, sd_backend._get_ports, c_ins)
clear_singletons(self.auto_conf_agentConfig)
@mock.patch('docker.Client.inspect_container', side_effect=_get_container_inspect)
@mock.patch.object(SDDockerBackend, '_get_config_templates', side_effect=_get_conf_tpls)
    def test_get_check_configs(self, mock_get_conf_tpls, mock_inspect_container):
"""Test get_check_config with mocked container inspect and config template"""
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
with mock.patch.object(SDDockerBackend, '_get_host', return_value='127.0.0.1'):
with mock.patch.object(SDDockerBackend, '_get_ports', return_value=['1337']):
c_id = self.docker_container_inspect.get('Id')
for image in self.mock_templates.keys():
sd_backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
self.assertEquals(
sd_backend._get_check_configs(c_id, image)[0],
self.mock_templates[image][1])
clear_singletons(self.auto_conf_agentConfig)
@mock.patch.object(AbstractConfigStore, 'get_check_tpls', side_effect=_get_check_tpls)
def test_get_config_templates(self, mock_get_check_tpls):
"""Test _get_config_templates with mocked get_check_tpls"""
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
with mock.patch.object(EtcdStore, 'get_client', return_value=None):
with mock.patch.object(ConsulStore, 'get_client', return_value=None):
for agentConfig in self.agentConfigs:
sd_backend = get_sd_backend(agentConfig=agentConfig)
# normal cases
for image in self.mock_templates.keys():
template = sd_backend._get_config_templates(image)
expected_template = self.mock_templates.get(image)[0]
self.assertEquals(template, expected_template)
# error cases
for image in self.bad_mock_templates.keys():
self.assertEquals(sd_backend._get_config_templates(image), None)
clear_singletons(agentConfig)
def test_render_template(self):
"""Test _render_template"""
valid_configs = [
(({}, {'host': '%%host%%'}, {'host': 'foo'}),
({}, {'host': 'foo'})),
(({}, {'host': '%%host%%', 'port': '%%port%%'}, {'host': 'foo', 'port': '1337'}),
({}, {'host': 'foo', 'port': '1337'})),
(({'foo': '%%bar%%'}, {}, {'bar': 'w00t'}),
({'foo': 'w00t'}, {})),
(({'foo': '%%bar%%'}, {'host': '%%host%%'}, {'bar': 'w00t', 'host': 'localhost'}),
({'foo': 'w00t'}, {'host': 'localhost'}))
]
invalid_configs = [
({}, {'host': '%%host%%'}, {}), # no value to use
({}, {'host': '%%host%%'}, {'port': 42}), # the variable name doesn't match
({'foo': '%%bar%%'}, {'host': '%%host%%'}, {'host': 'foo'}) # not enough value/no matching var name
]
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
for agentConfig in self.agentConfigs:
sd_backend = get_sd_backend(agentConfig=agentConfig)
for tpl, res in valid_configs:
init, instance, variables = tpl
config = sd_backend._render_template(init, instance, variables)
                    self.assertEqual(config, res)
for init, instance, variables in invalid_configs:
config = sd_backend._render_template(init, instance, variables)
                    self.assertEqual(config, None)
clear_singletons(agentConfig)
def test_fill_tpl(self):
"""Test _fill_tpl with mock _get_ports"""
valid_configs = [
# ((inspect, instance_tpl, variables, tags), (expected_instance_tpl, expected_var_values))
(
({}, {'host': 'localhost'}, [], None),
({'host': 'localhost'}, {})
),
(
({'NetworkSettings': {'IPAddress': '127.0.0.1'}},
{'host': '%%host%%', 'port': 1337}, ['host'], ['foo', 'bar:baz']),
({'host': '%%host%%', 'port': 1337, 'tags': ['foo', 'bar:baz']}, {'host': '127.0.0.1'})
),
(
({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Ports': {'42/tcp': None, '22/tcp': None}}},
{'host': '%%host%%', 'port': '%%port_1%%', 'tags': ['env:test']},
['host', 'port_1'], ['foo', 'bar:baz']),
({'host': '%%host%%', 'port': '%%port_1%%', 'tags': ['env:test', 'foo', 'bar:baz']},
{'host': '127.0.0.1', 'port_1': '42'})
)
]
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
for ac in self.agentConfigs:
sd_backend = get_sd_backend(agentConfig=ac)
try:
for co in valid_configs:
inspect, tpl, variables, tags = co[0]
instance_tpl, var_values = sd_backend._fill_tpl(inspect, tpl, variables, tags)
for key in instance_tpl.keys():
if isinstance(instance_tpl[key], list):
                                self.assertEqual(len(instance_tpl[key]), len(co[1][0].get(key)))
for elem in instance_tpl[key]:
                                    self.assertIn(elem, co[1][0].get(key))
else:
                                self.assertEqual(instance_tpl[key], co[1][0].get(key))
                            self.assertEqual(var_values, co[1][1])
clear_singletons(ac)
except Exception:
clear_singletons(ac)
raise
# config_stores tests
def test_get_auto_config(self):
"""Test _get_auto_config"""
expected_tpl = {
'redis': ('redisdb', None, {"host": "%%host%%", "port": "%%port%%"}),
'consul': ('consul', None, {"url": "http://%%host%%:%%port%%", "catalog_checks": True, "new_leader_checks": True}),
'foobar': None
}
config_store = get_config_store(self.auto_conf_agentConfig)
for image in expected_tpl.keys():
config = config_store._get_auto_config(image)
            self.assertEqual(config, expected_tpl.get(image))
@mock.patch.object(AbstractConfigStore, 'client_read', side_effect=client_read)
def test_get_check_tpls(self, mock_client_read):
"""Test get_check_tpls"""
valid_config = ['image_0', 'image_1', 'image_2']
invalid_config = ['bad_image_0', 'bad_image_1']
config_store = get_config_store(self.auto_conf_agentConfig)
for image in valid_config:
tpl = self.mock_tpls.get(image)[1]
            self.assertEqual(tpl, config_store.get_check_tpls(image))
for image in invalid_config:
tpl = self.mock_tpls.get(image)[1]
            self.assertEqual(tpl, config_store.get_check_tpls(image))
| tebriel/dd-agent | tests/core/test_service_discovery.py | Python | bsd-3-clause | 15,146 | 0.004093 |
# -*- coding: utf-8 -*-
"""Tests for the Tk UI."""
#
# (C) Pywikibot team, 2008-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import os
import pywikibot
from pywikibot.tools import PY2
from tests.aspects import unittest, TestCase, DefaultSiteTestCase
if os.environ.get('PYWIKIBOT_TEST_GUI', '0') == '1':
if not PY2:
import tkinter
else:
import Tkinter as tkinter # noqa: N813
from pywikibot.userinterfaces.gui import EditBoxWindow, Tkdialog
class TestTkdialog(TestCase):
"""Test Tkdialog."""
net = True
def testTkdialog(self):
"""Test Tk dialog."""
try:
box = Tkdialog('foo', 'tests/data/MP_sounds.png', 'MP_sounds.png')
box.show_dialog()
except ImportError as e:
pywikibot.warning(e)
class TestTkinter(DefaultSiteTestCase):
"""Test Tkinter."""
net = True
def testTkinter(self):
"""Test Tkinter window."""
root = tkinter.Tk()
root.resizable(width=tkinter.FALSE, height=tkinter.FALSE)
root.title('pywikibot GUI')
page = pywikibot.Page(pywikibot.Site(), 'Main Page')
content = page.get()
myapp = EditBoxWindow(root)
myapp.bind('<Control-d>', myapp.debug)
v = myapp.edit(content, highlight=page.title())
assert v is None
def setUpModule(): # noqa: N802
"""Skip Travis tests if PYWIKIBOT_TEST_GUI variable is not set."""
if os.environ.get('PYWIKIBOT_TEST_GUI', '0') != '1':
raise unittest.SkipTest('Tkinter tests are disabled on Travis-CI')
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
| PersianWikipedia/pywikibot-core | tests/tk_tests.py | Python | mit | 1,761 | 0 |
import os
import smtplib
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
class Email:
    emailCount = 0
def __init__(self, address, password):
self.address = address
self.password = password
        Email.emailCount += 1
def initSMTP(self, emailserver, port):
self.smtpconnection = smtplib.SMTP(emailserver, port) #returns an SMTP object
self.smtpconnection.ehlo() #says "hello" to smtp server
self.smtpconnection.starttls() #enable TLS encryption
self.smtpconnection.login(self.address, self.password)
def sendEmail(self, recipient, subject, message, imgPath):
msg = MIMEMultipart()
msg["Subject"] = subject
msg["From"] = self.address
msg["To"] = recipient
msg.attach(MIMEText(message))
imgfp = open(imgPath, "rb")
img = MIMEImage(imgfp.read())
imgfp.close()
msg.attach(img)
self.smtpconnection.sendmail(self.address, recipient, msg.as_string())
def closeSMTP(self):
self.smtpconnection.close()
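# Illustrative usage sketch (not part of the original file): the SMTP host,
# port, addresses, and image path below are placeholders. Port 587 is the
# usual STARTTLS port, matching the starttls() call in initSMTP().
#
#   mailer = Email("sender@example.com", "app-password")
#   mailer.initSMTP("smtp.example.com", 587)
#   mailer.sendEmail("recipient@example.com", "Motion detected",
#                    "Snapshot attached.", "/tmp/snapshot.jpg")
#   mailer.closeSMTP()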
| andykhov/got-my-pi-on-you | src/mail.py | Python | mit | 1,141 | 0.008764 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Encuesta.altitud'
db.alter_column(u'encuesta_encuesta', 'altitud', self.gf('django.db.models.fields.IntegerField')(null=True))
def backwards(self, orm):
# Changing field 'Encuesta.altitud'
db.alter_column(u'encuesta_encuesta', 'altitud', self.gf('django.db.models.fields.CharField')(max_length=50, null=True))
models = {
u'encuesta.aguafinca': {
'Meta': {'object_name': 'AguaFinca'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.beneficios': {
'Meta': {'object_name': 'Beneficios'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.combustible': {
'Meta': {'object_name': 'Combustible'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.composicion': {
'Meta': {'object_name': 'Composicion'},
'adultas': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'adultos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'educacion_dueno': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'educacion_maxima_hombre': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'educacion_maxima_mujeres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jovenes_mujeres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'jovenes_varones': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ninas': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ninos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permanente_hombres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permanente_mujeres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'relacion_finca_vivienda': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.ViveFamilia']", 'null': 'True', 'blank': 'True'}),
'tecnico_hombres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tecnico_mujeres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'temporales_hombres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'temporales_mujeres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'encuesta.creditoe': {
'Meta': {'object_name': 'CreditoE'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.dequien': {
'Meta': {'object_name': 'DeQuien'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.duenofinca': {
'Meta': {'object_name': 'DuenoFinca'},
'fecha_nacimiento': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.encuesta': {
'Meta': {'object_name': 'Encuesta'},
'altitud': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'beneficiarios': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['encuesta.Organizacion']", 'null': 'True', 'blank': 'True'}),
'cedula': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'comunidad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'dueno': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.DuenoFinca']"}),
'fecha': ('django.db.models.fields.DateField', [], {}),
'finca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Entrevistado']"}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'position': ('geoposition.fields.GeopositionField', [], {'max_length': '42', 'null': 'True', 'blank': 'True'}),
'recolector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Recolector']"}),
'sexo': ('django.db.models.fields.IntegerField', [], {})
},
u'encuesta.energiafinca': {
'Meta': {'object_name': 'EnergiaFinca'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.entrevistado': {
'Meta': {'object_name': 'Entrevistado'},
'fecha_nacimiento': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.meses': {
'Meta': {'object_name': 'Meses'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.necesidadalimento': {
'Meta': {'object_name': 'NecesidadAlimento'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.organizacion': {
'Meta': {'object_name': 'Organizacion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.quienfinancia': {
'Meta': {'object_name': 'QuienFinancia'},
'beneficio_ser_socio': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'beneficiario_socio'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['encuesta.Beneficios']"}),
'de_quien': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'quien'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['encuesta.DeQuien']"}),
'desde': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'socio': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'socios'", 'symmetrical': 'False', 'to': u"orm['encuesta.SocioOrganizacion']"}),
'tiene_credito': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'credito'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['encuesta.CreditoE']"})
},
u'encuesta.recolector': {
'Meta': {'object_name': 'Recolector'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.seguridad': {
'Meta': {'object_name': 'Seguridad'},
'compra_alimento': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cubrir_necesidades': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meses_dificiles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'dificiles'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['encuesta.Meses']"}),
'porque_no_cubre': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'cubre'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['encuesta.NecesidadAlimento']"}),
'soluciones_crisis': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'crisis'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['encuesta.TiemposCrisis']"})
},
u'encuesta.serviciosbasicos': {
'Meta': {'object_name': 'ServiciosBasicos'},
'agua_consumo_humano': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'consumo'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['encuesta.AguaFinca']"}),
'agua_trabajo_finca': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'trabaja'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['encuesta.AguaFinca']"}),
'combustible': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'combustible'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['encuesta.Combustible']"}),
'electricidad': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'electricidad'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['encuesta.EnergiaFinca']"}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'encuesta.socioorganizacion': {
'Meta': {'object_name': 'SocioOrganizacion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.tenecia': {
'Meta': {'object_name': 'Tenecia'},
'documento': ('django.db.models.fields.IntegerField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tipo': ('django.db.models.fields.IntegerField', [], {})
},
u'encuesta.tiemposcrisis': {
'Meta': {'object_name': 'TiemposCrisis'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.vivefamilia': {
'Meta': {'object_name': 'ViveFamilia'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'lugar.comunidad': {
'Meta': {'object_name': 'Comunidad'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'lugar.departamento': {
'Meta': {'object_name': 'Departamento'},
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.municipio': {
'Meta': {'ordering': "['departamento__nombre']", 'object_name': 'Municipio'},
'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.pais': {
'Meta': {'object_name': 'Pais'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
    complete_apps = ['encuesta']
| CARocha/estudiocafe | encuesta/migrations/0003_auto__chg_field_encuesta_altitud.py | Python | mit | 14,972 | 0.006946 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A simple trampoline to generate_profile.py in the src/ directory.
generate_profile.py generates a synthetic user profile.
"""
import optparse
import os
import sys
from slave import build_directory
from common import chromium_utils
def main():
parser = optparse.OptionParser()
parser.add_option('--build-dir', help='ignored')
parser.add_option('--target', help='Release or Debug')
parser.add_option('--profile-type-to-generate')
options, args = parser.parse_args()
output_dir = os.path.join(build_directory.GetBuildOutputDirectory(),
options.target,
'generated_profile')
cmd = [
sys.executable,
os.path.join('src', 'tools', 'perf', 'generate_profile'),
'-v',
'--browser=' + options.target.lower(),
'--profile-type-to-generate=' + options.profile_type_to_generate,
'--output-dir=' + output_dir,
'--output-format=buildbot',
] + args
return chromium_utils.RunCommand(cmd)
if '__main__' == __name__:
sys.exit(main())
| eunchong/build | scripts/slave/generate_profile_shim.py | Python | bsd-3-clause | 1,231 | 0.007311 |
#! /usr/bin/env python
#
# whoosh_test.py ---
#
# Filename: whoosh_test.py
# Description:
# Author: Werther Zhang
# Maintainer:
# Created: Mon Oct 23 19:29:49 2017 (+0800)
#
# Change Log:
#
#
import os
from whoosh.index import create_in
from whoosh.index import exists_in
from whoosh.index import open_dir
from whoosh.fields import *
schema = Schema(title=TEXT(stored=True), path=ID(stored=True), author=TEXT, content=TEXT) # stored=True will show the result in results[0]
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(title=u"First document", path=u"/a", author=u"Werther", content=u"This is the first document we've added!")
writer.add_document(title=u"Second document", path=u"/b", content=u"The second one is even more interesting!")
writer.commit()
from whoosh.qparser import QueryParser
with ix.searcher() as searcher:
query = QueryParser("author", ix.schema).parse("werther")
results = searcher.search(query)
print results[0]
from whoosh.index import create_in
from whoosh.fields import *
from whoosh.qparser import QueryParser
class BookshelfDatabase(object):
"""BookshelfDatabase API"""
_DATABASE_DIR = '/mnt/mmc/database/bookshelf'
def __init__(self):
        self.ix = None
# title (filename or title in db)
# path (relative path in /mnt/mmc/mi)
# author (author of the file)
# content (basename of file ; title; author)
# fileid (hash of path)
# date (file update time in string)
#
        # When indexing, check whether the file has been updated: look the
        # item up by date AND fileid; if it does not exist, update the entry
        # for that fileid with the new info.
        # When searching, query the content field by default, merge results
        # by path, and show title and path to the user.
schema = Schema(title=TEXT(stored=True), path=ID(stored=True), author=TEXT, content=TEXT, fileid=TEXT(unique=True), date=TEXT)
if not os.path.exists(BookshelfDatabase._DATABASE_DIR):
os.mkdir(BookshelfDatabase._DATABASE_DIR)
if not exists_in(BookshelfDatabase._DATABASE_DIR):
            self.ix = create_in(BookshelfDatabase._DATABASE_DIR, schema)
        else:
            self.ix = open_dir(BookshelfDatabase._DATABASE_DIR)
def add(self, title, path, content, fileid, date, author=None):
pass
def update(self, title, path, content, fileid, date, author=None):
pass
# check fileid AND date exists
def exists(self, fileid, date):
pass
def search(self, content=None, author=None):
pass
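    # A minimal sketch of how the stubs above could be filled in with the
    # schema from __init__ (illustrative and untested; assumes the opened
    # index is kept on self.ix):
    #
    #   def add(self, title, path, content, fileid, date, author=None):
    #       writer = self.ix.writer()
    #       writer.update_document(title=title, path=path, content=content,
    #                              fileid=fileid, date=date)
    #       writer.commit()
    #
    #   def exists(self, fileid, date):
    #       with self.ix.searcher() as searcher:
    #           return searcher.document(fileid=fileid, date=date) is not None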
| pengzhangdev/slackbot | slackbot/plugins/component/database/BookshelfDatabase.py | Python | mit | 2,538 | 0.005516 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Cloud Storage client.
This library evolved from the Google App Engine GCS client available at
https://github.com/GoogleCloudPlatform/appengine-gcs-client.
"""
# pytype: skip-file
from __future__ import absolute_import
import errno
import io
import logging
import multiprocessing
import re
import sys
import threading
import time
import traceback
from builtins import object
from apache_beam.internal.http_client import get_new_http
from apache_beam.io.filesystemio import Downloader
from apache_beam.io.filesystemio import DownloaderStream
from apache_beam.io.filesystemio import PipeStream
from apache_beam.io.filesystemio import Uploader
from apache_beam.io.filesystemio import UploaderStream
from apache_beam.utils import retry
__all__ = ['GcsIO']
_LOGGER = logging.getLogger(__name__)
# Issue a friendlier error message if the storage library is not available.
# TODO(silviuc): Remove this guard when storage is available everywhere.
try:
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=ungrouped-imports
import apitools.base.py.transfer as transfer
from apitools.base.py.batch import BatchApiRequest
from apitools.base.py.exceptions import HttpError
from apache_beam.internal.gcp import auth
from apache_beam.io.gcp.internal.clients import storage
except ImportError:
raise ImportError(
'Google Cloud Storage I/O not supported for this execution environment '
'(could not import storage API client).')
# This is the size of each partial-file read operation from GCS. This
# parameter was chosen to give good throughput while keeping memory usage at
# a reasonable level; the following table shows throughput reached when
# reading files of a given size with a chosen buffer size and informed the
# choice of the value, as of 11/2016:
#
# +---------------+------------+-------------+-------------+-------------+
# | | 50 MB file | 100 MB file | 200 MB file | 400 MB file |
# +---------------+------------+-------------+-------------+-------------+
# | 8 MB buffer | 17.12 MB/s | 22.67 MB/s | 23.81 MB/s | 26.05 MB/s |
# | 16 MB buffer | 24.21 MB/s | 42.70 MB/s | 42.89 MB/s | 46.92 MB/s |
# | 32 MB buffer | 28.53 MB/s | 48.08 MB/s | 54.30 MB/s | 54.65 MB/s |
# | 400 MB buffer | 34.72 MB/s | 71.13 MB/s | 79.13 MB/s | 85.39 MB/s |
# +---------------+------------+-------------+-------------+-------------+
DEFAULT_READ_BUFFER_SIZE = 16 * 1024 * 1024
# This is the number of seconds the library will wait for a partial-file read
# operation from GCS to complete before retrying.
DEFAULT_READ_SEGMENT_TIMEOUT_SECONDS = 60
# This is the size of chunks used when writing to GCS.
WRITE_CHUNK_SIZE = 8 * 1024 * 1024
# Maximum number of operations permitted in GcsIO.copy_batch() and
# GcsIO.delete_batch().
MAX_BATCH_OPERATION_SIZE = 100
# Batch endpoint URL for GCS.
# We have to specify an API specific endpoint here since Google APIs global
# batch endpoints will be deprecated on 03/25/2019.
# See https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html. # pylint: disable=line-too-long
# Currently apitools library uses a global batch endpoint by default:
# https://github.com/google/apitools/blob/master/apitools/base/py/batch.py#L152
# TODO: remove this constant and its usage after apitools moves to using an
# API-specific batch endpoint or after the Beam gcsio module starts using a
# GCS client library that does not use global batch endpoints.
GCS_BATCH_ENDPOINT = 'https://www.googleapis.com/batch/storage/v1'
def parse_gcs_path(gcs_path, object_optional=False):
"""Return the bucket and object names of the given gs:// path."""
match = re.match('^gs://([^/]+)/(.*)$', gcs_path)
if match is None or (match.group(2) == '' and not object_optional):
raise ValueError('GCS path must be in the form gs://<bucket>/<object>.')
return match.group(1), match.group(2)
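# Examples of the parsing above (illustrative):
#   parse_gcs_path('gs://my-bucket/path/to/obj') -> ('my-bucket', 'path/to/obj')
#   parse_gcs_path('gs://my-bucket/', object_optional=True) -> ('my-bucket', '')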
class GcsIOError(IOError, retry.PermanentException):
"""GCS IO error that should not be retried."""
pass
class GcsIO(object):
"""Google Cloud Storage I/O client."""
def __init__(self, storage_client=None):
if storage_client is None:
storage_client = storage.StorageV1(
credentials=auth.get_service_credentials(),
get_credentials=False,
http=get_new_http(),
response_encoding=None if sys.version_info[0] < 3 else 'utf8')
self.client = storage_client
self._rewrite_cb = None
def _set_rewrite_response_callback(self, callback):
"""For testing purposes only. No backward compatibility guarantees.
Args:
callback: A function that receives ``storage.RewriteResponse``.
"""
self._rewrite_cb = callback
def open(
self,
filename,
mode='r',
read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
mime_type='application/octet-stream'):
"""Open a GCS file path for reading or writing.
Args:
filename (str): GCS file path in the form ``gs://<bucket>/<object>``.
mode (str): ``'r'`` for reading or ``'w'`` for writing.
read_buffer_size (int): Buffer size to use during read operations.
mime_type (str): Mime type to set for write operations.
Returns:
GCS file object.
Raises:
ValueError: Invalid open file mode.
"""
if mode == 'r' or mode == 'rb':
downloader = GcsDownloader(
self.client, filename, buffer_size=read_buffer_size)
return io.BufferedReader(
DownloaderStream(
downloader, read_buffer_size=read_buffer_size, mode=mode),
buffer_size=read_buffer_size)
elif mode == 'w' or mode == 'wb':
uploader = GcsUploader(self.client, filename, mime_type)
return io.BufferedWriter(
UploaderStream(uploader, mode=mode), buffer_size=128 * 1024)
else:
raise ValueError('Invalid file open mode: %s.' % mode)
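  # Illustrative usage (assumes default credentials; names are placeholders):
  #
  #   gcs = GcsIO()
  #   with gcs.open('gs://my-bucket/my-object.txt') as f:
  #       data = f.read()
  #   with gcs.open('gs://my-bucket/new-object.txt', 'w') as f:
  #       f.write(b'hello')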
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def delete(self, path):
"""Deletes the object at the given GCS path.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
try:
self.client.objects.Delete(request)
except HttpError as http_error:
if http_error.status_code == 404:
# Return success when the file doesn't exist anymore for idempotency.
return
raise
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def delete_batch(self, paths):
"""Deletes the objects at the given GCS paths.
Args:
paths: List of GCS file path patterns in the form gs://<bucket>/<name>,
not to exceed MAX_BATCH_OPERATION_SIZE in length.
Returns: List of tuples of (path, exception) in the same order as the paths
argument, where exception is None if the operation succeeded or
the relevant exception if the operation failed.
"""
if not paths:
return []
batch_request = BatchApiRequest(
batch_url=GCS_BATCH_ENDPOINT,
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
response_encoding='utf-8')
for path in paths:
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
batch_request.Add(self.client.objects, 'Delete', request)
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
result_statuses = []
for i, api_call in enumerate(api_calls):
path = paths[i]
exception = None
if api_call.is_error:
exception = api_call.exception
# Return success when the file doesn't exist anymore for idempotency.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = None
result_statuses.append((path, exception))
return result_statuses
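  # Illustrative: callers typically inspect the per-path results, e.g.
  #
  #   statuses = gcs.delete_batch(['gs://my-bucket/a', 'gs://my-bucket/b'])
  #   failures = [(path, err) for path, err in statuses if err is not None]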
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def copy(
self,
src,
dest,
dest_kms_key_name=None,
max_bytes_rewritten_per_call=None):
"""Copies the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
dest_kms_key_name: Experimental. No backwards compatibility guarantees.
Encrypt dest with this Cloud KMS key. If None, will use dest bucket
encryption defaults.
max_bytes_rewritten_per_call: Experimental. No backwards compatibility
guarantees. Each rewrite API call will return after these many bytes.
Used for testing.
Raises:
TimeoutError: on timeout.
"""
src_bucket, src_path = parse_gcs_path(src)
dest_bucket, dest_path = parse_gcs_path(dest)
request = storage.StorageObjectsRewriteRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path,
destinationKmsKeyName=dest_kms_key_name,
maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
response = self.client.objects.Rewrite(request)
while not response.done:
_LOGGER.debug(
'Rewrite progress: %d of %d bytes, %s to %s',
response.totalBytesRewritten,
response.objectSize,
src,
dest)
request.rewriteToken = response.rewriteToken
response = self.client.objects.Rewrite(request)
if self._rewrite_cb is not None:
self._rewrite_cb(response)
_LOGGER.debug('Rewrite done: %s to %s', src, dest)
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def copy_batch(
self,
src_dest_pairs,
dest_kms_key_name=None,
max_bytes_rewritten_per_call=None):
"""Copies the given GCS object from src to dest.
Args:
src_dest_pairs: list of (src, dest) tuples of gs://<bucket>/<name> files
paths to copy from src to dest, not to exceed
MAX_BATCH_OPERATION_SIZE in length.
dest_kms_key_name: Experimental. No backwards compatibility guarantees.
Encrypt dest with this Cloud KMS key. If None, will use dest bucket
encryption defaults.
max_bytes_rewritten_per_call: Experimental. No backwards compatibility
guarantees. Each rewrite call will return after these many bytes. Used
primarily for testing.
Returns: List of tuples of (src, dest, exception) in the same order as the
src_dest_pairs argument, where exception is None if the operation
succeeded or the relevant exception if the operation failed.
"""
if not src_dest_pairs:
return []
pair_to_request = {}
for pair in src_dest_pairs:
src_bucket, src_path = parse_gcs_path(pair[0])
dest_bucket, dest_path = parse_gcs_path(pair[1])
request = storage.StorageObjectsRewriteRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path,
destinationKmsKeyName=dest_kms_key_name,
maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
pair_to_request[pair] = request
pair_to_status = {}
while True:
pairs_in_batch = list(set(src_dest_pairs) - set(pair_to_status))
if not pairs_in_batch:
break
batch_request = BatchApiRequest(
batch_url=GCS_BATCH_ENDPOINT,
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
response_encoding='utf-8')
for pair in pairs_in_batch:
batch_request.Add(self.client.objects, 'Rewrite', pair_to_request[pair])
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
for pair, api_call in zip(pairs_in_batch, api_calls):
src, dest = pair
response = api_call.response
if self._rewrite_cb is not None:
self._rewrite_cb(response)
if api_call.is_error:
exception = api_call.exception
# Translate 404 to the appropriate not found exception.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = (
GcsIOError(errno.ENOENT, 'Source file not found: %s' % src))
pair_to_status[pair] = exception
elif not response.done:
_LOGGER.debug(
'Rewrite progress: %d of %d bytes, %s to %s',
response.totalBytesRewritten,
response.objectSize,
src,
dest)
pair_to_request[pair].rewriteToken = response.rewriteToken
else:
_LOGGER.debug('Rewrite done: %s to %s', src, dest)
pair_to_status[pair] = None
return [(pair[0], pair[1], pair_to_status[pair]) for pair in src_dest_pairs]
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def copytree(self, src, dest):
"""Renames the given GCS "directory" recursively from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>/.
dest: GCS file path pattern in the form gs://<bucket>/<name>/.
"""
assert src.endswith('/')
assert dest.endswith('/')
for entry in self.list_prefix(src):
rel_path = entry[len(src):]
self.copy(entry, dest + rel_path)
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def rename(self, src, dest):
"""Renames the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
"""
self.copy(src, dest)
self.delete(src)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def exists(self, path):
"""Returns whether the given GCS object exists.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
try:
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
self.client.objects.Get(request) # metadata
return True
except HttpError as http_error:
if http_error.status_code == 404:
# HTTP 404 indicates that the file did not exist
return False
else:
# We re-raise all other exceptions
raise
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def checksum(self, path):
"""Looks up the checksum of a GCS object.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).crc32c
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def size(self, path):
"""Returns the size of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: size of the GCS object in bytes.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).size
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def kms_key(self, path):
"""Returns the KMS key of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: KMS key name of the GCS object as a string, or None if it doesn't
have one.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).kmsKeyName
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def last_updated(self, path):
"""Returns the last updated epoch time of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: last updated time of the GCS object in second.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
datetime = self.client.objects.Get(request).updated
return (
time.mktime(datetime.timetuple()) - time.timezone +
datetime.microsecond / 1000000.0)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def list_prefix(self, path):
"""Lists files matching the prefix.
Args:
path: GCS file path pattern in the form gs://<bucket>/[name].
Returns:
Dictionary of file name -> size.
"""
bucket, prefix = parse_gcs_path(path, object_optional=True)
request = storage.StorageObjectsListRequest(bucket=bucket, prefix=prefix)
file_sizes = {}
counter = 0
start_time = time.time()
_LOGGER.info("Starting the size estimation of the input")
while True:
response = self.client.objects.List(request)
for item in response.items:
file_name = 'gs://%s/%s' % (item.bucket, item.name)
file_sizes[file_name] = item.size
counter += 1
if counter % 10000 == 0:
_LOGGER.info("Finished computing size of: %s files", len(file_sizes))
if response.nextPageToken:
request.pageToken = response.nextPageToken
else:
break
_LOGGER.info(
"Finished listing %s files in %s seconds.",
counter,
time.time() - start_time)
return file_sizes
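  # Illustrative: the returned mapping can be used to size an input, e.g.
  #
  #   sizes = gcs.list_prefix('gs://my-bucket/logs/')
  #   total_bytes = sum(sizes.values())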
class GcsDownloader(Downloader):
def __init__(self, client, path, buffer_size):
self._client = client
self._path = path
self._bucket, self._name = parse_gcs_path(path)
self._buffer_size = buffer_size
# Get object state.
self._get_request = (
storage.StorageObjectsGetRequest(
bucket=self._bucket, object=self._name))
try:
metadata = self._get_object_metadata(self._get_request)
except HttpError as http_error:
if http_error.status_code == 404:
raise IOError(errno.ENOENT, 'Not found: %s' % self._path)
else:
_LOGGER.error(
'HTTP error while requesting file %s: %s', self._path, http_error)
raise
self._size = metadata.size
# Ensure read is from file of the correct generation.
self._get_request.generation = metadata.generation
# Initialize read buffer state.
self._download_stream = io.BytesIO()
self._downloader = transfer.Download(
self._download_stream,
auto_transfer=False,
chunksize=self._buffer_size,
num_retries=20)
self._client.objects.Get(self._get_request, download=self._downloader)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_object_metadata(self, get_request):
return self._client.objects.Get(get_request)
@property
def size(self):
return self._size
def get_range(self, start, end):
self._download_stream.seek(0)
self._download_stream.truncate(0)
self._downloader.GetRange(start, end - 1)
return self._download_stream.getvalue()
class GcsUploader(Uploader):
def __init__(self, client, path, mime_type):
self._client = client
self._path = path
self._bucket, self._name = parse_gcs_path(path)
self._mime_type = mime_type
# Set up communication with child thread.
parent_conn, child_conn = multiprocessing.Pipe()
self._child_conn = child_conn
self._conn = parent_conn
# Set up uploader.
self._insert_request = (
storage.StorageObjectsInsertRequest(
bucket=self._bucket, name=self._name))
self._upload = transfer.Upload(
PipeStream(self._child_conn),
self._mime_type,
chunksize=WRITE_CHUNK_SIZE)
self._upload.strategy = transfer.RESUMABLE_UPLOAD
# Start uploading thread.
self._upload_thread = threading.Thread(target=self._start_upload)
self._upload_thread.daemon = True
self._upload_thread.last_error = None
self._upload_thread.start()
# TODO(silviuc): Refactor so that retry logic can be applied.
# There is retry logic in the underlying transfer library but we should make
# it more explicit so we can control the retry parameters.
@retry.no_retries # Using no_retries marks this as an integration point.
def _start_upload(self):
# This starts the uploader thread. We are forced to run the uploader in
# another thread because the apitools uploader insists on taking a stream
# as input. Happily, this also means we get asynchronous I/O to GCS.
#
# The uploader by default transfers data in chunks of 1024 * 1024 bytes at
# a time, buffering writes until that size is reached.
try:
self._client.objects.Insert(self._insert_request, upload=self._upload)
except Exception as e: # pylint: disable=broad-except
_LOGGER.error(
'Error in _start_upload while inserting file %s: %s',
self._path,
traceback.format_exc())
self._upload_thread.last_error = e
finally:
self._child_conn.close()
def put(self, data):
try:
self._conn.send_bytes(data.tobytes())
except EOFError:
if self._upload_thread.last_error is not None:
raise self._upload_thread.last_error # pylint: disable=raising-bad-type
raise
def finish(self):
self._conn.close()
# TODO(udim): Add timeout=DEFAULT_HTTP_TIMEOUT_SECONDS * 2 and raise if
# isAlive is True.
self._upload_thread.join()
# Check for exception since the last put() call.
if self._upload_thread.last_error is not None:
raise self._upload_thread.last_error # pylint: disable=raising-bad-type
| iemejia/incubator-beam | sdks/python/apache_beam/io/gcp/gcsio.py | Python | apache-2.0 | 23,315 | 0.005833 |
from __future__ import print_function
import os
import pytest
from os.path import join
import sys
import unittest
import subprocess
if sys.platform == "win32":
GULP = "gulp.cmd"
else:
GULP = "gulp"
@pytest.mark.js
class TestBokehJS(unittest.TestCase):
def test_bokehjs(self):
os.chdir('bokehjs')
proc = subprocess.Popen([join('node_modules', '.bin', GULP), "test"],
stdout=subprocess.PIPE)
out, errs = proc.communicate()
msg = out.decode('utf-8', errors='ignore')
print(msg)
        self.assertEqual(proc.returncode, 0, msg)
if __name__ == "__main__":
unittest.main()
| phobson/bokeh | tests/test_bokehjs.py | Python | bsd-3-clause | 669 | 0.00299 |
def yield_function(n):
for i in range(n):
print "pre", i
yield i
print "post", i
for x in yield_function(10):
print x
print
| MichaelReiter/ProgrammingPractice | yield.py | Python | mit | 142 | 0.035211 |
""" @Imports """
from cuescience_shop.tests.support.support import ClientTestSupport
from django.test.testcases import TestCase
class _NatSpecTemplate(TestCase):
def setUp(self):
self.client_test_support = ClientTestSupport(self)
def test(self):
""" @MethodBody """ | cuescience/cuescience-shop | shop/tests/models/_NatSpecTemplate.py | Python | mit | 293 | 0.003413 |
# Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
        # Walk the parent's `next` chain (already linked for this level) to
        # find the first child to the right; link `child` to it, if any.
        def find_next(parent, child):
parent = parent.next
while parent:
if parent.left:
child.next = parent.left
return
elif parent.right:
child.next = parent.right
return
else:
parent = parent.next
if not root: return
q = [root]
while q:
nxt = []
for node in q:
if node.left:
if node.right:
node.left.next = node.right
else:
find_next(node, node.left)
nxt.append(node.left)
if node.right:
find_next(node, node.right)
nxt.append(node.right)
            q = nxt
| YiqunPeng/Leetcode-pyq | solutions/117PopulatingNextRightPointersInEachNodeII.py | Python | gpl-3.0 | 1,205 | 0.005809 |
from karld.loadump import dump_dicts_to_json_file
from karld.loadump import ensure_dir
from karld.loadump import ensure_file_path_dir
from karld.loadump import i_get_csv_data
from karld.loadump import is_file_csv
from karld.loadump import i_get_json_data
from karld.loadump import is_file_json
from karld.loadump import raw_line_reader
from karld.loadump import split_csv_file
from karld.loadump import split_file
from karld.loadump import split_file_output
from karld.loadump import split_file_output_csv
from karld.loadump import split_file_output_json
from karld.loadump import write_as_csv
from karld.loadump import write_as_json
| johnwlockwood/karl_data | karld/io.py | Python | apache-2.0 | 641 | 0 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class Test__sink_name_from_path(unittest2.TestCase):
def _callFUT(self, path, project):
from gcloud.logging.sink import _sink_name_from_path
return _sink_name_from_path(path, project)
def test_invalid_path_length(self):
PATH = 'projects/foo'
PROJECT = None
self.assertRaises(ValueError, self._callFUT, PATH, PROJECT)
def test_invalid_path_format(self):
SINK_NAME = 'SINK_NAME'
PROJECT = 'PROJECT'
PATH = 'foo/%s/bar/%s' % (PROJECT, SINK_NAME)
self.assertRaises(ValueError, self._callFUT, PATH, PROJECT)
def test_invalid_project(self):
SINK_NAME = 'SINK_NAME'
PROJECT1 = 'PROJECT1'
PROJECT2 = 'PROJECT2'
PATH = 'projects/%s/sinks/%s' % (PROJECT1, SINK_NAME)
self.assertRaises(ValueError, self._callFUT, PATH, PROJECT2)
def test_valid_data(self):
SINK_NAME = 'SINK_NAME'
PROJECT = 'PROJECT'
PATH = 'projects/%s/sinks/%s' % (PROJECT, SINK_NAME)
sink_name = self._callFUT(PATH, PROJECT)
self.assertEqual(sink_name, SINK_NAME)
class TestSink(unittest2.TestCase):
PROJECT = 'test-project'
SINK_NAME = 'sink-name'
FILTER = 'logName:syslog AND severity>=INFO'
DESTINATION_URI = 'faux.googleapis.com/destination'
def _getTargetClass(self):
from gcloud.logging.sink import Sink
return Sink
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor(self):
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
conn = _Connection()
client = _Client(self.PROJECT, conn)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=client)
self.assertEqual(sink.name, self.SINK_NAME)
self.assertEqual(sink.filter_, self.FILTER)
self.assertEqual(sink.destination, self.DESTINATION_URI)
self.assertTrue(sink.client is client)
self.assertEqual(sink.project, self.PROJECT)
self.assertEqual(sink.full_name, FULL)
self.assertEqual(sink.path, '/%s' % (FULL,))
def test_from_api_repr_minimal(self):
CLIENT = _Client(project=self.PROJECT)
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
RESOURCE = {
'name': FULL,
'filter': self.FILTER,
'destination': self.DESTINATION_URI,
}
klass = self._getTargetClass()
sink = klass.from_api_repr(RESOURCE, client=CLIENT)
self.assertEqual(sink.name, self.SINK_NAME)
self.assertEqual(sink.filter_, self.FILTER)
self.assertEqual(sink.destination, self.DESTINATION_URI)
self.assertTrue(sink._client is CLIENT)
self.assertEqual(sink.project, self.PROJECT)
self.assertEqual(sink.full_name, FULL)
def test_from_api_repr_w_description(self):
CLIENT = _Client(project=self.PROJECT)
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
RESOURCE = {
'name': FULL,
'filter': self.FILTER,
'destination': self.DESTINATION_URI,
}
klass = self._getTargetClass()
sink = klass.from_api_repr(RESOURCE, client=CLIENT)
self.assertEqual(sink.name, self.SINK_NAME)
self.assertEqual(sink.filter_, self.FILTER)
self.assertEqual(sink.destination, self.DESTINATION_URI)
self.assertTrue(sink._client is CLIENT)
self.assertEqual(sink.project, self.PROJECT)
self.assertEqual(sink.full_name, FULL)
def test_from_api_repr_with_mismatched_project(self):
PROJECT1 = 'PROJECT1'
PROJECT2 = 'PROJECT2'
CLIENT = _Client(project=PROJECT1)
FULL = 'projects/%s/sinks/%s' % (PROJECT2, self.SINK_NAME)
RESOURCE = {
'name': FULL,
'filter': self.FILTER,
'destination': self.DESTINATION_URI,
}
klass = self._getTargetClass()
self.assertRaises(ValueError, klass.from_api_repr,
RESOURCE, client=CLIENT)
def test_create_w_bound_client(self):
TARGET = 'projects/%s/sinks' % (self.PROJECT,)
RESOURCE = {
'name': self.SINK_NAME,
'filter': self.FILTER,
'destination': self.DESTINATION_URI,
}
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=client)
sink.create()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % TARGET)
self.assertEqual(req['data'], RESOURCE)
def test_create_w_alternate_client(self):
TARGET = 'projects/%s/sinks' % (self.PROJECT,)
RESOURCE = {
'name': self.SINK_NAME,
'filter': self.FILTER,
'destination': self.DESTINATION_URI,
}
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=client1)
sink.create(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % TARGET)
self.assertEqual(req['data'], RESOURCE)
def test_exists_miss_w_bound_client(self):
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
conn = _Connection()
CLIENT = _Client(project=self.PROJECT, connection=conn)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=CLIENT)
self.assertFalse(sink.exists())
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % FULL)
def test_exists_hit_w_alternate_client(self):
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection({'name': FULL})
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=CLIENT1)
self.assertTrue(sink.exists(client=CLIENT2))
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % FULL)
def test_reload_w_bound_client(self):
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
NEW_FILTER = 'logName:syslog AND severity>=INFO'
NEW_DESTINATION_URI = 'faux.googleapis.com/other'
RESOURCE = {
'name': self.SINK_NAME,
'filter': NEW_FILTER,
'destination': NEW_DESTINATION_URI,
}
conn = _Connection(RESOURCE)
CLIENT = _Client(project=self.PROJECT, connection=conn)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=CLIENT)
sink.reload()
self.assertEqual(sink.filter_, NEW_FILTER)
self.assertEqual(sink.destination, NEW_DESTINATION_URI)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % FULL)
def test_reload_w_alternate_client(self):
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
NEW_FILTER = 'logName:syslog AND severity>=INFO'
NEW_DESTINATION_URI = 'faux.googleapis.com/other'
RESOURCE = {
'name': self.SINK_NAME,
'filter': NEW_FILTER,
'destination': NEW_DESTINATION_URI,
}
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=CLIENT1)
sink.reload(client=CLIENT2)
self.assertEqual(sink.filter_, NEW_FILTER)
self.assertEqual(sink.destination, NEW_DESTINATION_URI)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % FULL)
def test_update_w_bound_client(self):
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
RESOURCE = {
'name': self.SINK_NAME,
'filter': self.FILTER,
'destination': self.DESTINATION_URI,
}
conn = _Connection(RESOURCE)
CLIENT = _Client(project=self.PROJECT, connection=conn)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=CLIENT)
sink.update()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'PUT')
self.assertEqual(req['path'], '/%s' % FULL)
self.assertEqual(req['data'], RESOURCE)
def test_update_w_alternate_client(self):
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
RESOURCE = {
'name': self.SINK_NAME,
'filter': self.FILTER,
'destination': self.DESTINATION_URI,
}
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=CLIENT1)
sink.update(client=CLIENT2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'PUT')
self.assertEqual(req['path'], '/%s' % FULL)
self.assertEqual(req['data'], RESOURCE)
def test_delete_w_bound_client(self):
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
conn = _Connection({})
CLIENT = _Client(project=self.PROJECT, connection=conn)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=CLIENT)
sink.delete()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'DELETE')
self.assertEqual(req['path'], '/%s' % FULL)
def test_delete_w_alternate_client(self):
FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME)
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection({})
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI,
client=CLIENT1)
sink.delete(client=CLIENT2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'DELETE')
self.assertEqual(req['path'], '/%s' % FULL)
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
from gcloud.exceptions import NotFound
self._requested.append(kw)
try:
response, self._responses = self._responses[0], self._responses[1:]
        except IndexError:
raise NotFound('miss')
else:
return response
class _Client(object):
def __init__(self, project, connection=None):
self.project = project
self.connection = connection
| huangkuan/hack | lib/gcloud/logging/test_sink.py | Python | apache-2.0 | 13,154 | 0.000076 |
""" Test script for the unicodedata module.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import sha
encoding = 'utf-8'
def test_methods():
h = sha.sha()
for i in range(65536):
char = unichr(i)
data = [
# Predicates (single char)
char.isalnum() and u'1' or u'0',
char.isalpha() and u'1' or u'0',
char.isdecimal() and u'1' or u'0',
char.isdigit() and u'1' or u'0',
char.islower() and u'1' or u'0',
char.isnumeric() and u'1' or u'0',
char.isspace() and u'1' or u'0',
char.istitle() and u'1' or u'0',
char.isupper() and u'1' or u'0',
# Predicates (multiple chars)
(char + u'abc').isalnum() and u'1' or u'0',
(char + u'abc').isalpha() and u'1' or u'0',
(char + u'123').isdecimal() and u'1' or u'0',
(char + u'123').isdigit() and u'1' or u'0',
(char + u'abc').islower() and u'1' or u'0',
(char + u'123').isnumeric() and u'1' or u'0',
(char + u' \t').isspace() and u'1' or u'0',
(char + u'abc').istitle() and u'1' or u'0',
(char + u'ABC').isupper() and u'1' or u'0',
# Mappings (single char)
char.lower(),
char.upper(),
char.title(),
# Mappings (multiple chars)
(char + u'abc').lower(),
(char + u'ABC').upper(),
(char + u'abc').title(),
(char + u'ABC').title(),
]
h.update(u''.join(data).encode(encoding))
return h.hexdigest()
def test_unicodedata():
h = sha.sha()
for i in range(65536):
char = unichr(i)
data = [
# Properties
str(unicodedata.digit(char, -1)),
str(unicodedata.numeric(char, -1)),
str(unicodedata.decimal(char, -1)),
unicodedata.category(char),
unicodedata.bidirectional(char),
unicodedata.decomposition(char),
str(unicodedata.mirrored(char)),
str(unicodedata.combining(char)),
]
h.update(''.join(data))
return h.hexdigest()
### Run tests
print 'Testing Unicode Database...'
print 'Methods:',
print test_methods()
# In case unicodedata is not available, this will raise an ImportError,
# but still test the above cases...
import unicodedata
print 'Functions:',
print test_unicodedata()
# Some additional checks of the API:
print 'API:',
assert unicodedata.digit(u'A',None) is None
assert unicodedata.digit(u'9') == 9
assert unicodedata.digit(u'\u215b',None) is None
assert unicodedata.digit(u'\u2468') == 9
assert unicodedata.numeric(u'A',None) is None
assert unicodedata.numeric(u'9') == 9
assert unicodedata.numeric(u'\u215b') == 0.125
assert unicodedata.numeric(u'\u2468') == 9.0
assert unicodedata.decimal(u'A',None) is None
assert unicodedata.decimal(u'9') == 9
assert unicodedata.decimal(u'\u215b',None) is None
assert unicodedata.decimal(u'\u2468',None) is None
assert unicodedata.category(u'\uFFFE') == 'Cn'
assert unicodedata.category(u'a') == 'Ll'
assert unicodedata.category(u'A') == 'Lu'
assert unicodedata.bidirectional(u'\uFFFE') == ''
assert unicodedata.bidirectional(u' ') == 'WS'
assert unicodedata.bidirectional(u'A') == 'L'
assert unicodedata.decomposition(u'\uFFFE') == ''
assert unicodedata.decomposition(u'\u00bc') == '<fraction> 0031 2044 0034'
assert unicodedata.mirrored(u'\uFFFE') == 0
assert unicodedata.mirrored(u'a') == 0
assert unicodedata.mirrored(u'\u2201') == 1
assert unicodedata.combining(u'\uFFFE') == 0
assert unicodedata.combining(u'a') == 0
assert unicodedata.combining(u'\u20e1') == 230
print 'ok'
| atmark-techno/atmark-dist | user/python/Lib/test/test_unicodedata.py | Python | gpl-2.0 | 3,846 | 0.00468 |
#!/usr/bin/env python
# trialcoverage -- plugin to integrate Twisted trial with Ned Batchelder's coverage.py
#
# Author: Brian Warner
# Packaged by: Zooko Wilcox-O'Hearn
# Thanks to: Jonathan Lange
#
# See README.txt for licensing information.
import os, re, sys
try:
from ez_setup import use_setuptools
except ImportError:
pass
else:
use_setuptools(download_delay=0)
from setuptools import find_packages, setup
trove_classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU General Public License (GPL)",
"License :: DFSG approved",
"License :: OSI Approved :: BSD License",
"License :: Other/Proprietary License",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.4",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Topic :: Software Development :: Libraries",
]
PKG='trialcoverage'
VERSIONFILE = os.path.join(PKG, "_version.py")
verstr = "unknown"
try:
verstrline = open(VERSIONFILE, "rt").read()
except EnvironmentError:
pass # Okay, there is no version file.
else:
VSRE = r"^verstr = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
print "unable to find version in %s" % (VERSIONFILE,)
raise RuntimeError("if %s.py exists, it must be well-formed" % (VERSIONFILE,))
setup_requires = []
data_fnames=[ 'COPYING.SPL.txt', 'COPYING.GPL', 'COPYING.TGPPL.html', 'README.txt' ]
# In case we are building for a .deb with stdeb's sdist_dsc command, we put the
# docs in "share/doc/python-$PKG".
doc_loc = "share/doc/" + PKG
data_files = [(doc_loc, data_fnames)]
setup(name=PKG,
version=verstr,
description="a plugin to integrate Twisted trial with Ned Batchelder's coverage.py",
author='Brian Warner',
author_email='zooko@zooko.com',
url='http://tahoe-lafs.org/trac/' + PKG,
license='BSD', # see README.txt for details -- there are also alternative licences
packages=find_packages() + ['twisted'],
include_package_data=True,
setup_requires=setup_requires,
classifiers=trove_classifiers,
zip_safe=False, # I prefer unzipped for easier access.
install_requires=['coverage>=3.4a1', 'pyutil>=1.6.0', 'setuptools'],
tests_require=['mock', 'setuptools_trial >= 0.5'],
data_files=data_files,
test_suite='trialcoverage.test',
)
| simplegeo/trialcoverage | setup.py | Python | gpl-2.0 | 2,609 | 0.005366 |
# ---------------------------------------------------------------------------
# OrmapLayersConfig.py
# Created by: Shad Campbell
# Date: 3/11/2011
# Updated by:
# Description: This is a configuration file to be customized by each county.
# Do not delete any of the items in this file. If an item is not in use, then
# set its value and/or definition query to "".
# ---------------------------------------------------------------------------
LOTSANNO_LAYER="LotsAnno"
LOTSANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
PLATSANNO_LAYER="PlatsAnno"
PLATSANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
TAXCODEANNO_LAYER="TaxCodeAnno"
TAXCODEANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
TAXNUMANNO_LAYER="TaxlotNumberAnno"
TAXNUMANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ACRESANNO_LAYER="TaxlotAcresAnno"
ACRESANNO_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO10_LAYER="Anno0010scale"
ANNO10_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO20_LAYER="Anno0020scale"
ANNO20_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO30_LAYER="Anno0030scale"
ANNO30_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO40_LAYER="Anno0040scale"
ANNO40_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO50_LAYER="Anno0050scale"
ANNO50_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO60_LAYER="Anno0060scale"
ANNO60_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO100_LAYER="Anno0100scale"
ANNO100_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO200_LAYER="Anno0200scale"
ANNO200_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO400_LAYER="Anno0400scale"
ANNO400_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO800_LAYER="Anno0800scale"
ANNO800_QD="\"MapNumber\" = '*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
ANNO2000_LAYER="Anno2000scale"
ANNO2000_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
CORNER_ABOVE_LAYER="Corner"
CORNER_ABOVE_QD="\"MapNumber\"='*MapNumber*'"
TAXCODELINES_ABOVE_LAYER="TaxCodeLines - Above"
TAXCODELINES_ABOVE_QD=""
TAXLOTLINES_ABOVE_LAYER="TaxlotLines - Above"
TAXLOTLINES_ABOVE_QD="\"LineType\" <> 32"
REFLINES_ABOVE_LAYER="ReferenceLines - Above"
REFLINES_ABOVE_QD="\"MAPNUMBER\" = '*MapNumber*'"
CARTOLINES_ABOVE_LAYER="CartographicLines - Above"
CARTOLINES_ABOVE_QD=""
WATERLINES_ABOVE_LAYER="WaterLines - Above"
WATERLINES_ABOVE_QD=""
WATER_ABOVE_LAYER="Water - Above"
WATER_ABOVE_QD=""
MAPINDEXSEEMAP_LAYER=""
MAPINDEXSEEMAP_QD=""
MAPINDEX_LAYER="SeeMaps"
MAPINDEX_QD="\"IndexMap\" = '*MapNumber*'"
CORNER_BELOW_LAYER="Corner - Below"
CORNER_BELOW_QD=""
TAXCODELINES_BELOW_LAYER="TaxCodeLines - Below"
TAXCODELINES_BELOW_QD=""
TAXLOTLINES_BELOW_LAYER="TaxlotLines - Below"
TAXLOTLINES_BELOW_QD=""
REFLINES_BELOW_LAYER="ReferenceLines - Below"
REFLINES_BELOW_QD=""
CARTOLINES_BELOW_LAYER="CartographicLines - Below"
CARTOLINES_BELOW_QD=""
WATERLINES_BELOW_LAYER="WaterLines - Below"
WATERLINES_BELOW_QD=""
WATER_BELOW_LAYER="Water - Below"
WATER_BELOW_QD=""
PAGELAYOUT_TABLE="giscarto.CREATOR_ASR.PAGELAYOUTELEMENTS"
CANCELLEDNUMBERS_TABLE="giscarto.CREATOR_ASR.CANCELLEDNUMBERS"
CUSTOMDEFINITIONQUERIES_TABLE="CustomDefinitionQueries"
EXTRA1_LAYER="Arrow0010scale"
EXTRA1_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA2_LAYER="Arrow0020scale"
EXTRA2_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA3_LAYER="Arrow0030scale"
EXTRA3_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA4_LAYER="Arrow0040scale"
EXTRA4_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA5_LAYER="Arrow0050scale"
EXTRA5_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA6_LAYER="Arrow0100scale"
EXTRA6_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA7_LAYER="Arrow0200scale"
EXTRA7_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA8_LAYER="Arrow0400scale"
EXTRA8_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA9_LAYER="Arrow2000scale"
EXTRA9_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA10_LAYER="MapSecLines - Below"
EXTRA10_QD="\"MapNumber\"='*MapNumber*'"
EXTRA11_LAYER="Railroad"
EXTRA11_QD="CL <> 'Y'"
EXTRA12_LAYER="MapArea"
EXTRA12_QD="\"MapNumber\"='*MapNumber*'"
EXTRA13_LAYER=""
EXTRA13_QD=""
EXTRA14_LAYER="Taxlots - Above"
EXTRA14_QD="\"MapNumber\"='*MapNumber*'"
EXTRA15_LAYER="Arrow0060scale"
EXTRA15_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA16_LAYER="Landmarks"
EXTRA16_QD="\"MapNumber\"='*MapNumber*' OR \"MapNumber\" is NULL OR \"MapNumber\" = ''"
EXTRA17_LAYER=""
EXTRA17_QD=""
EXTRA18_LAYER=""
EXTRA18_QD=""
EXTRA19_LAYER=""
EXTRA19_QD=""
EXTRA20_LAYER=""
EXTRA20_QD=""
| ORMAPtools/MapProduction | Config File Templates/ORMAP_LayersConfig.py | Python | gpl-3.0 | 5,573 | 0.024224 |
import asyncio
from server import Server
#def main(arguments):
def main():
loop = asyncio.get_event_loop()
server = Server()
asyncio.async(server.run_server())
try:
loop.run_forever()
except KeyboardInterrupt:
print('Received interrupt, closing')
server.close()
finally:
loop.stop()
loop.close()
if __name__ == '__main__':
#arguments = docopt(__doc__, version='evolver_server 0.1')
#main(arguments)
main()
| r4mp/evolver_server | evolver_server/app.py | Python | agpl-3.0 | 486 | 0.012346 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
def execute():
import webnotes
entries = webnotes.conn.sql("""select voucher_type, voucher_no
from `tabGL Entry` group by voucher_type, voucher_no""", as_dict=1)
for entry in entries:
try:
cancelled_voucher = webnotes.conn.sql("""select name from `tab%s` where name = %s
and docstatus=2""" % (entry['voucher_type'], "%s"), entry['voucher_no'])
if cancelled_voucher:
webnotes.conn.sql("""delete from `tabGL Entry` where voucher_type = %s and
voucher_no = %s""", (entry['voucher_type'], entry['voucher_no']))
		except Exception:
			pass
| saurabh6790/test-med-app | patches/october_2013/p05_delete_gl_entries_for_cancelled_vouchers.py | Python | agpl-3.0 | 683 | 0.02489 |
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
class TreeError(Exception):
"""General tree error"""
pass
class NoLengthError(TreeError):
"""Missing length when expected"""
pass
class DuplicateNodeError(TreeError):
"""Duplicate nodes with identical names"""
pass
class MissingNodeError(TreeError):
"""Expecting a node"""
pass
class NoParentError(MissingNodeError):
"""Missing a parent"""
pass
| Kleptobismol/scikit-bio | skbio/tree/_exception.py | Python | bsd-3-clause | 814 | 0 |
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import datetime
import json
import sys
import time
from flask import current_app
class DateTimeEncoder(json.JSONEncoder):
"""Custom JSON Encoder to handle datetime objects
from:
`http://stackoverflow.com/questions/12122007/python-json-encoder-to-support-datetime`_
also consider:
`http://hg.tryton.org/2.4/trytond/file/ade5432ac476/trytond/protocols/jsonrpc.py#l53`_
"""
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
elif isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, datetime.timedelta):
return (datetime.datetime.min + obj).time().isoformat()
else:
return super(DateTimeEncoder, self).default(obj)
class UnicodeSafeJsonWrapper(dict):
"""JSON received via POST has keys as unicode. This makes get work with plain
`str` keys.
"""
def __getitem__(self, key):
ret = self.get(key)
if ret is None:
raise KeyError(key)
return ret
def get(self, key, default=None):
return super(UnicodeSafeJsonWrapper, self).get(unicode(key), default) # noqa
def as_json(obj, **kwargs):
return json.dumps(obj, cls=DateTimeEncoder, **kwargs)
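# Hedged usage sketch (added for illustration, not part of the original
# module): DateTimeEncoder lets as_json() serialize values that plain
# json.dumps() rejects, e.g.
#
#   as_json({'at': datetime.datetime(2015, 1, 2, 3, 4, 5)})
#   # -> '{"at": "2015-01-02T03:04:05"}'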
def service_for(obj):
module = sys.modules['ggrc.services']
if type(obj) is str or type(obj) is unicode: # noqa
model_type = obj
else:
model_type = obj.__class__.__name__
return getattr(module, model_type, None)
def url_for(obj, id=None):
service = service_for(obj)
if service is None:
return None
if id is not None:
return service.url_for(id=id)
return service.url_for(obj)
def view_service_for(obj):
module = sys.modules['ggrc.views']
if type(obj) is str or type(obj) is unicode: # noqa
model_type = obj
else:
model_type = obj.__class__.__name__
return getattr(module, model_type, None)
def view_url_for(obj, id=None):
service = view_service_for(obj)
if service is None:
return None
if id is not None:
return service.url_for(id=id)
return service.url_for(obj)
def encoded_dict(in_dict):
# http://stackoverflow.com/questions/6480723/urllib-urlencode-doesnt-like-unicode-values-how-about-this-workaround
out_dict = {}
for k, v in in_dict.iteritems():
if isinstance(v, unicode): # noqa
v = v.encode('utf8')
elif isinstance(v, str):
# Must be encoded in UTF-8
v.decode('utf8')
out_dict[k] = v
return out_dict
def merge_dict(destination, source, path=None):
"""merges source into destination"""
if path is None:
path = []
for key in source:
if key in destination:
if isinstance(destination[key], dict) and isinstance(source[key], dict):
merge_dict(destination[key], source[key], path + [str(key)])
elif destination[key] == source[key]:
pass # same leaf value
else:
raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
else:
destination[key] = source[key]
return destination
def merge_dicts(*args):
result = {}
for arg in args:
result = merge_dict(result, arg)
return result
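# Illustrative examples (not in the original source) of merge_dict()'s
# recursive behavior:
#
#   merge_dicts({'a': {'x': 1}}, {'a': {'y': 2}})  # -> {'a': {'x': 1, 'y': 2}}
#   merge_dicts({'a': 1}, {'a': 2})                # raises Exception('Conflict at a')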
class BenchmarkContextManager(object):
def __init__(self, message):
self.message = message
def __enter__(self):
self.start = time.time()
def __exit__(self, exc_type, exc_value, exc_trace):
end = time.time()
current_app.logger.info("{:.4f} {}".format(end - self.start, self.message))
benchmark = BenchmarkContextManager
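# Minimal usage sketch (added for illustration): the context manager logs the
# wall-clock time spent inside the block via the Flask application logger.
#
#   with benchmark("rebuild index"):
#       rebuild_index()  # hypothetical workload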
| vladan-m/ggrc-core | src/ggrc/utils.py | Python | apache-2.0 | 3,658 | 0.013395 |
import xmlrpclib
def listEngineTypes(address):
client = xmlrpclib.ServerProxy(address + '/admin/')
engine_types = client.listEngineTypes()
return engine_types
def allocateEngine(address, engine_type):
client = xmlrpclib.ServerProxy(str(address) + '/admin/')
access_id = client.allocateEngine(str(engine_type))
return access_id
def interruptInstance(address, instance_id):
client = xmlrpclib.ServerProxy(address + '/admin/')
client.interruptInstance(instance_id)
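# Example usage (illustrative only; the server address below is hypothetical):
#
#   engine_types = listEngineTypes('http://localhost:8000')
#   access_id = allocateEngine('http://localhost:8000', engine_types[0])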
| ccordoba12/codenode | codenode/frontend/backend/rpc.py | Python | bsd-3-clause | 499 | 0.008016 |
#
# Pathological subscriber
# Subscribes to one random topic and prints received messages
#
import sys
import time
from random import randint
import zmq
def main(url=None):
ctx = zmq.Context.instance()
subscriber = ctx.socket(zmq.SUB)
if url is None:
url = "tcp://localhost:5556"
subscriber.connect(url)
subscription = b"%03d" % randint(0,999)
subscriber.setsockopt(zmq.SUBSCRIBE, subscription)
while True:
topic, data = subscriber.recv_multipart()
assert topic == subscription
print data
if __name__ == '__main__':
main(sys.argv[1] if len(sys.argv) > 1 else None)
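# A matching publisher for testing could send random 3-digit topics (sketch,
# not part of the original example):
#
#   publisher = ctx.socket(zmq.PUB)
#   publisher.bind("tcp://*:5556")
#   publisher.send_multipart([b"%03d" % randint(0, 999), b"some payload"])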
| soscpd/bee | root/tests/zguide/examples/Python/pathosub.py | Python | mit | 636 | 0.004717 |
# -*- coding: utf-8 -*-
# Still in the design/development stage
from PyQt5 import QtCore
class datcomEvent(QtCore.QEvent):
"""
    Custom Datcom event system.
"""
dtTypeID = QtCore.QEvent.registerEventType()
def __init__(self, type = dtTypeID):
"""
        Constructor.
"""
super(datcomEvent, self).__init__(type)
        self.eventLabel = ''       # rule name, e.g. 'NMACHLinkTable', 'RuleNumToCount', 'RuleIndexToCombo'
        self.controlVariables = {} # names/values of the variables that caused the change; some rules fire on several triggers, hence a dict, e.g. {'FLTCON/NMACH': '1'}
class datcomEventWarpper(QtCore.QObject):
"""
    Used by datcomModel as the event registration center.
"""
def __init__(self):
"""
        Model of the registration center:
        receiver        | registrant instance         | receiver of the event
        eventLabel      | event label                 | the event category to monitor
        controlVariable | event parameters and values | control variables and their values
"""
super(datcomEventWarpper, self).__init__()
        # registration center
        # single-entry template: {'receiver': None, 'eventLabel': '', 'controlVariables': []}
self.registerCenter = []
def registerObject(self, receiver, eventLabel, controlVariables):
"""
        Register an event receiver with the registration center.
@param receiver reference to the widget to receive the event
@type QObject
        @param eventLabel the event label
@type str
        @param controlVariables the variables that trigger the event
@type str
        Note: call the matching unregister function explicitly where the object is destroyed.
"""
if controlVariables is None or type(controlVariables) != list or\
eventLabel is None or eventLabel =='' or \
receiver is None:
            self.logger.warning("Invalid arguments passed to registerObject!")  # NOTE: assumes self.logger is assigned elsewhere; __init__ does not set it
return
        # begin the registration process (not yet implemented)
def isRegistered(self, iDict):
"""
        Check whether iDict is already registered in the store; return True if it is, False otherwise.
"""
def simluateSendEvent(self, eventLabel, eventStr):
"""
"""
        if eventLabel in self.doc:  # NOTE: self.doc is assumed to be populated elsewhere; __init__ only creates registerCenter
for iR in self.doc[eventLabel]['Receiver']:
tEvent = datcomEvent()
tEvent.Url = eventLabel
tEvent.Value = {eventLabel:eventStr}
tApp = QtCore.QCoreApplication.instance()
tApp.notify(iR, tEvent)
| darkspring2015/PyDatcomLab | PyDatcomLab/Core/datcomEventManager.py | Python | mit | 2,614 | 0.013364 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Less Frequently Used (LFU) Windows Registry plugin."""
import unittest
from dfdatetime import filetime as dfdatetime_filetime
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from plaso.formatters import winreg # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers.winreg_plugins import lfu
from tests.parsers.winreg_plugins import test_lib
class TestBootExecutePlugin(test_lib.RegistryPluginTestCase):
"""Tests for the LFU BootExecute Windows Registry plugin."""
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path: the Windows Registry key path.
time_string: string containing the key last written date and time.
Returns:
A Windows Registry key (instance of dfwinreg.WinRegistryKey).
"""
filetime = dfdatetime_filetime.Filetime()
filetime.CopyFromString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
u'Session Manager', key_path=key_path,
last_written_time=filetime.timestamp, offset=153)
value_data = u'autocheck autochk *\x00'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'BootExecute', data=value_data,
data_type=dfwinreg_definitions.REG_MULTI_SZ, offset=123)
registry_key.AddValue(registry_value)
value_data = u'2592000'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'CriticalSectionTimeout', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=153)
registry_key.AddValue(registry_value)
value_data = u'\x00'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'ExcludeFromKnownDlls', data=value_data,
data_type=dfwinreg_definitions.REG_MULTI_SZ, offset=163)
registry_key.AddValue(registry_value)
value_data = u'0'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'GlobalFlag', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=173)
registry_key.AddValue(registry_value)
value_data = u'0'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'HeapDeCommitFreeBlockThreshold', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=183)
registry_key.AddValue(registry_value)
value_data = u'0'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'HeapDeCommitTotalFreeThreshold', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=203)
registry_key.AddValue(registry_value)
value_data = u'0'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'HeapSegmentCommit', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=213)
registry_key.AddValue(registry_value)
value_data = u'0'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'HeapSegmentReserve', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=223)
registry_key.AddValue(registry_value)
value_data = u'2'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'NumberOfInitialSessions', data=value_data,
data_type=dfwinreg_definitions.REG_SZ, offset=243)
registry_key.AddValue(registry_value)
return registry_key
def testProcess(self):
"""Tests the Process function."""
key_path = (
u'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Control\\Session Manager')
time_string = u'2012-08-31 20:45:29'
registry_key = self._CreateTestKey(key_path, time_string)
plugin_object = lfu.BootExecutePlugin()
storage_writer = self._ParseKeyWithPlugin(registry_key, plugin_object)
self.assertEqual(len(storage_writer.events), 2)
event_object = storage_writer.events[0]
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, plugin_object.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(time_string)
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_message = (
u'[{0:s}] BootExecute: autocheck autochk *').format(key_path)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
event_object = storage_writer.events[1]
expected_message = (
u'[{0:s}] '
u'CriticalSectionTimeout: 2592000 '
u'ExcludeFromKnownDlls: [] '
u'GlobalFlag: 0 '
u'HeapDeCommitFreeBlockThreshold: 0 '
u'HeapDeCommitTotalFreeThreshold: 0 '
u'HeapSegmentCommit: 0 '
u'HeapSegmentReserve: 0 '
u'NumberOfInitialSessions: 2').format(key_path)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
class TestBootVerificationRegistry(test_lib.RegistryPluginTestCase):
"""Tests for the LFU BootVerification Windows Registry plugin."""
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path: the Windows Registry key path.
time_string: string containing the key last written date and time.
Returns:
A Windows Registry key (instance of dfwinreg.WinRegistryKey).
"""
filetime = dfdatetime_filetime.Filetime()
filetime.CopyFromString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
u'BootVerificationProgram', key_path=key_path,
last_written_time=filetime.timestamp, offset=153)
value_data = u'C:\\WINDOWS\\system32\\googleupdater.exe'.encode(
u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'ImagePath', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=123)
registry_key.AddValue(registry_value)
return registry_key
def testProcess(self):
"""Tests the Process function."""
key_path = u'\\ControlSet001\\Control\\BootVerificationProgram'
time_string = u'2012-08-31 20:45:29'
registry_key = self._CreateTestKey(key_path, time_string)
plugin_object = lfu.BootVerificationPlugin()
storage_writer = self._ParseKeyWithPlugin(registry_key, plugin_object)
self.assertEqual(len(storage_writer.events), 1)
event_object = storage_writer.events[0]
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, plugin_object.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(time_string)
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_message = (
u'[{0:s}] '
u'ImagePath: C:\\WINDOWS\\system32\\googleupdater.exe').format(
key_path)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| dc3-plaso/plaso | tests/parsers/winreg_plugins/lfu.py | Python | apache-2.0 | 7,326 | 0.000956 |
version_info = (0, 6, 16)
__version__ = version = '.'.join(map(str, version_info))
__project__ = PROJECT = 'django-summernote'
__author__ = AUTHOR = "Park Hyunwoo <ez.amiryo@gmail.com>"
default_app_config = 'django_summernote.apps.DjangoSummernoteConfig'
| WQuanfeng/django-summernote | django_summernote/__init__.py | Python | mit | 258 | 0 |
#encoding: utf-8
from random import choice
from .helper import gen_bids
class CustomCookieMiddleware(object):
def __init__(self):
self.bids = gen_bids()
def process_request(self, request, spider):
request.headers["Cookie"] = 'bid="%s"' % choice(self.bids)
class CustomUserAgentMiddleware(object):
def process_request(self, request, spider):
ug = "Baiduspider"
request.headers["User-Agent"] = ug
class CustomHeadersMiddleware(object):
def process_request(self, request, spider):
request.headers["Accept-Language"] = "zh-CN,zh" | neozhangthe1/scraper | douban/photo/photo/misc/middlewares.py | Python | gpl-2.0 | 587 | 0.005111 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
from oslo_log import log as logging
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
LOG = logging.getLogger(__name__)
SNAT_NS_PREFIX = 'snat-'
SNAT_INT_DEV_PREFIX = constants.SNAT_INT_DEV_PREFIX
class SnatNamespace(namespaces.Namespace):
def __init__(self, router_id, agent_conf, driver, use_ipv6):
self.router_id = router_id
name = self.get_snat_ns_name(router_id)
super(SnatNamespace, self).__init__(
name, agent_conf, driver, use_ipv6)
@classmethod
def get_snat_ns_name(cls, router_id):
return namespaces.build_ns_name(SNAT_NS_PREFIX, router_id)
@namespaces.check_ns_existence
def delete(self):
ns_ip = ip_lib.IPWrapper(namespace=self.name)
for d in ns_ip.get_devices(exclude_loopback=True):
if d.name.startswith(SNAT_INT_DEV_PREFIX):
LOG.debug('Unplugging DVR device %s', d.name)
self.driver.unplug(d.name, namespace=self.name,
prefix=SNAT_INT_DEV_PREFIX)
# TODO(mrsmith): delete ext-gw-port
LOG.debug('DVR: destroy snat ns: %s', self.name)
super(SnatNamespace, self).delete()
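# Illustrative note (not in the original module): the namespace name is the
# router id with the 'snat-' prefix, so, assuming build_ns_name concatenates
# prefix and id,
#
#   SnatNamespace.get_snat_ns_name('3c7cbabf')  # -> 'snat-3c7cbabf' (hypothetical id)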
| sebrandon1/neutron | neutron/agent/l3/dvr_snat_ns.py | Python | apache-2.0 | 1,810 | 0 |
# _ GraphGrammar.py __________________________________________________
# This class implements a graph grammar, that is basically an ordered
# collecttion of GGrule's
# ____________________________________________________________________
from GGrule import *
class GraphGrammar:
def __init__(self, GGrules = None):
"Constructor, it receives GGrules, that is a list of GGrule elements"
self.GGrules = [] # We'll insert rules by order of execution
self.rewritingSystem = None # No rewriting system assigned yet
while len(self.GGrules) < len(GGrules): # iterate until each rule is inserted
            min = 30000 # set minimum number to a very high number
minRule = None # pointer to rule to be inserted
for rule in GGrules: # search for the minimum execution order that is not inserted
if rule.executionOrder < min and not rule in self.GGrules:
min = rule.executionOrder
minRule = rule
self.GGrules.append(minRule)
def setGraphRewritingSystem(self, rs):
"Sets the attribute rewritingSystem to rs and also calls the same method for each rule"
self.rewritingSystem = rs
for rule in self.GGrules:
rule.setGraphGrammar(self)
rule.setGraphRewritingSystem(rs)
def initialAction(self, graph): # , atom3i = None):
"action to be performed before the graph grammar starts its execution (must be overriden)"
pass
def finalAction(self, graph): #, atom3i = None):
"action to be performed after the graph grammar starts its execution (must be overriden)"
pass
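# Note (illustrative, not part of the original file): the selection loop in
# __init__ above is equivalent to the more idiomatic
#
#   self.GGrules = sorted(GGrules, key=lambda rule: rule.executionOrder)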
| Balannen/LSMASOMM | atom3/Kernel/GraphGrammar/GraphGrammar.py | Python | gpl-3.0 | 1,680 | 0.021429 |
RES = 1
TEMP = 0
OUT_OF_RANGE = -999
# Ni1000 relationship between resistances and temperatures [(temp0,resistance0), (temp1,resistance1), (tempN,resistanceN)]
ni1000_5000ppm_values = [(-80, 672.0), (-75, 692.0), (-70, 712.0), (-60, 751.8), (-50, 790.9), (-40, 830.8),
(-30, 871.7), (-20, 913.5), (-10, 956.2), (0, 1000.0),
(10, 1044.8), (20, 1090.7), (30, 1137.6), (40, 1185.7), (50, 1235.0), (60, 1285.4),
(70, 1337.1), (80, 1390.1), (90, 1444.4),
(100, 1500.0), (110, 1557.0), (120, 1615.4), (130, 1675.2), (140, 1736.5), (150, 1799.3),
(160, 1863.6), (170, 1929.5),
(180, 1997.0), (190, 2066.1), (200, 2137.0), (210, 2209.5), (220, 2283.7), (230, 2359.8),
(240, 2437.6), (250, 2517.3)]
ni1000_6180ppm_values = [(-70, 647.8), (-60, 695.2), (-50, 742.6), (-40, 791.3), (-30, 841.5), (-20, 893), (-10, 945.8),
(0, 1000.0),
(10, 1055.5), (20, 1112.4), (30, 1170.6), (40, 1230.1), (50, 1291.1), (60, 1353.4),
(70, 1417.2), (80, 1482.5), (90, 1549.3),
(100, 1617.8), (110, 1687.9), (120, 1759.7), (130, 1833.3), (140, 1908.9), (150, 1986.3),
(160, 2065.9), (170, 2147.6),
(180, 2231.5), (190, 2317.8), (200, 2406.6), (210, 2498), (220, 2592), (230, 2688.9),
(240, 2788.7), (250, 2891.6)]
pt1000_values = [(-70, 723.35), (-60, 763.28), (-50, 803.06), (-40, 842.71), (-30, 882.22), (-20, 921.6), (-10, 960.86),
(0, 1000),
(10, 1039), (20, 1077.9), (30, 1116.7), (40, 1155.4), (50, 1194), (60, 1232.4), (70, 1270.8),
(80, 1309), (90, 1347.1),
(100, 1385.1), (110, 1422.9), (120, 1460.7), (130, 1498.3), (140, 1535.8), (150, 1573.9),
                 (160, 1610.5), (170, 1647.7),
(180, 1684.8), (190, 1721.7), (200, 1758.6), (210, 1795.3), (220, 1831.9), (230, 1868.4),
(240, 1904.7), (250, 1941)]
# Public functions
def ni1000_5000ppm_res_to_temp(ni1000_resistance):
"""
This function converts an Ni1000 5000ppm sensor resistance to temperature
Parameters:
===========
ni1000_resistance: Ni1000 5000ppm resistance in Ohms
Return:
===========
Ni1000 5000ppm resistance converted to temperature
"""
return res_to_temp(ni1000_5000ppm_values, ni1000_resistance)
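# Usage sketch (illustrative): 1000 ohms maps to 0 deg C in all three tables,
# and readings outside a table's range return OUT_OF_RANGE (-999):
#
#   ni1000_5000ppm_res_to_temp(1000.0)  # -> 0
#   pt1000_res_to_temp(500.0)           # -> -999 (below the table minimum)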
def pt1000_res_to_temp(pt1000_resistance):
"""
This function converts an PT1000 sensor resistance to temperature
Parameters:
===========
pt1000_resistance: PT1000 resistance in Ohms
Return:
===========
PT1000 resistance converted to temperature
"""
return res_to_temp(pt1000_values, pt1000_resistance)
# Public functions
def ni1000_6180ppm_res_to_temp(ni1000_resistance):
"""
This function converts an Ni1000 6180ppm sensor resistance to temperature
Parameters:
===========
ni1000_resistance: Ni1000 6180ppm resistance in Ohms
Return:
===========
Ni1000 6180ppm resistance converted to temperature
"""
return res_to_temp(ni1000_6180ppm_values, ni1000_resistance)
# Private functions
def res_to_temp(values_list, resistance):
"""
This function converts a sensor resistance to temperature
Parameters:
===========
values_list: relationship between resistances and temperatures [(temp0,resistance0), (temp1,resistance1), (tempN,resistanceN)]
resistance: a sensor resistance in Ohms
Return:
===========
Sensor resistance converted to temperature
"""
first_resistance = values_list[0][RES]
last_resistance = values_list[-1][RES]
start_index = 0
end_index = -1
calculated_temp = OUT_OF_RANGE
if (resistance >= first_resistance) and (resistance <= last_resistance):
while values_list[start_index][RES] < resistance:
start_index += 1
while values_list[end_index][RES] > resistance:
end_index -= 1
delta_res = abs(values_list[start_index][RES] - values_list[end_index][RES])
delta_temp = abs(values_list[start_index][TEMP] - values_list[end_index][TEMP])
if delta_temp == 0:
return values_list[start_index][TEMP]
temp_coefficient = delta_res / delta_temp
calculated_temp = ((resistance - values_list[end_index][RES]) / temp_coefficient) + values_list[end_index][TEMP]
    return calculated_temp
| PW-Sat2/PWSat2OBC | integration_tests/emulator/beacon_parser/resistance_sensors.py | Python | agpl-3.0 | 4,701 | 0.003829 |
from blogengine.models import Post
from django.contrib import admin
admin.site.register(Post)
| rickhurst/Django-non-rel-blog | blogengine/admin.py | Python | bsd-3-clause | 95 | 0 |
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the TakeiCoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "TakeiCoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created" | takeicoin/takeicoin | share/qt/clean_mac_info_plist.py | Python | mit | 898 | 0.017817 |
from template.test import TestCase, main
class StringTest(TestCase):
def testString(self):
self.Expect(DATA)
DATA = r"""
-- test --
[% USE String -%]
string: [[% String.text %]]
-- expect --
string: []
-- test --
[% USE String 'hello world' -%]
string: [[% String.text %]]
-- expect --
string: [hello world]
-- test --
[% USE String text='hello world' -%]
string: [[% String.text %]]
-- expect --
string: [hello world]
-- test --
[% USE String -%]
string: [[% String %]]
-- expect --
string: []
-- test --
[% USE String 'hello world' -%]
string: [[% String %]]
-- expect --
string: [hello world]
-- test --
[% USE String text='hello world' -%]
string: [[% String %]]
-- expect --
string: [hello world]
-- test --
[% USE String text='hello' -%]
string: [[% String.append(' world') %]]
string: [[% String %]]
-- expect --
string: [hello world]
string: [hello world]
-- test --
[% USE String text='hello' -%]
[% copy = String.copy -%]
string: [[% String %]]
string: [[% copy %]]
-- expect --
string: [hello]
string: [hello]
-- test --
[% USE String -%]
[% hi = String.new('hello') -%]
[% lo = String.new('world') -%]
[% hw = String.new(text="$hi $lo") -%]
hi: [[% hi %]]
lo: [[% lo %]]
hw: [[% hw %]]
-- expect --
hi: [hello]
lo: [world]
hw: [hello world]
-- test --
[% USE hi = String 'hello' -%]
[% lo = hi.new('world') -%]
hi: [[% hi %]]
lo: [[% lo %]]
-- expect --
hi: [hello]
lo: [world]
-- test --
[% USE hi = String 'hello' -%]
[% lo = hi.copy -%]
hi: [[% hi %]]
lo: [[% lo %]]
-- expect --
hi: [hello]
lo: [hello]
-- test --
[% USE hi = String 'hello' -%]
[% lo = hi.copy.append(' world') -%]
hi: [[% hi %]]
lo: [[% lo %]]
-- expect --
hi: [hello]
lo: [hello world]
-- test --
[% USE hi = String 'hello' -%]
[% lo = hi.new('hey').append(' world') -%]
hi: [[% hi %]]
lo: [[% lo %]]
-- expect --
hi: [hello]
lo: [hey world]
-- test --
[% USE hi=String "hello world\n" -%]
hi: [[% hi %]]
[% lo = hi.chomp -%]
hi: [[% hi %]]
lo: [[% lo %]]
-- expect --
hi: [hello world
]
hi: [hello world]
lo: [hello world]
-- test --
[% USE foo=String "foop" -%]
[[% foo.chop %]]
[[% foo.chop %]]
-- expect --
[foo]
[fo]
-- test --
[% USE hi=String "hello" -%]
left: [[% hi.copy.left(11) %]]
right: [[% hi.copy.right(11) %]]
center: [[% hi.copy.center(11) %]]
centre: [[% hi.copy.centre(12) %]]
-- expect --
left: [hello ]
right: [ hello]
center: [ hello ]
centre: [ hello ]
-- test --
[% USE str=String('hello world') -%]
hi: [[% str.upper %]]
hi: [[% str %]]
lo: [[% str.lower %]]
cap: [[% str.capital %]]
-- expect --
hi: [HELLO WORLD]
hi: [HELLO WORLD]
lo: [hello world]
cap: [Hello world]
-- test --
[% USE str=String('hello world') -%]
len: [[% str.length %]]
-- expect --
len: [11]
-- test --
[% USE str=String(" \n\n\t\r hello\nworld\n\r \n \r") -%]
[[% str.trim %]]
-- expect --
[hello
world]
-- test --
[% USE str=String(" \n\n\t\r hello \n \n\r world\n\r \n \r") -%]
[[% str.collapse %]]
-- expect --
[hello world]
-- test --
[% USE str=String("hello") -%]
[[% str.append(' world') %]]
[[% str.prepend('well, ') %]]
-- expect --
[hello world]
[well, hello world]
-- test --
[% USE str=String("hello") -%]
[[% str.push(' world') %]]
[[% str.unshift('well, ') %]]
-- expect --
[hello world]
[well, hello world]
-- test --
[% USE str=String('foo bar') -%]
[[% str.copy.pop(' bar') %]]
[[% str.copy.shift('foo ') %]]
-- expect --
[foo]
[bar]
-- test --
[% USE str=String('Hello World') -%]
[[% str.copy.truncate(5) %]]
[[% str.copy.truncate(8, '...') %]]
[[% str.copy.truncate(20, '...') %]]
-- expect --
[Hello]
[Hello...]
[Hello World]
-- test --
[% USE String('foo') -%]
[[% String.append(' ').repeat(4) %]]
-- expect --
[foo foo foo foo ]
-- test --
[% USE String('foo') -%]
[% String.format("[%s]") %]
-- expect --
[foo]
-- test --
[% USE String('foo bar foo baz') -%]
[[% String.replace('foo', 'oof') %]]
-- expect --
[oof bar oof baz]
-- test --
[% USE String('foo bar foo baz') -%]
[[% String.copy.remove('foo\s*') %]]
[[% String.copy.remove('ba[rz]\s*') %]]
-- expect --
[bar baz]
[foo foo ]
-- test --
[% USE String('foo bar foo baz') -%]
[[% String.split.join(', ') %]]
-- expect --
[foo, bar, foo, baz]
-- test --
[% USE String('foo bar foo baz') -%]
[[% String.split(' bar ').join(', ') %]]
-- expect --
[foo, foo baz]
-- test --
[% USE String('foo bar foo baz') -%]
[[% String.split(' bar ').join(', ') %]]
-- expect --
[foo, foo baz]
-- test --
[% USE String('foo bar foo baz') -%]
[[% String.split('\s+').join(', ') %]]
-- expect --
[foo, bar, foo, baz]
-- test --
[% USE String('foo bar foo baz') -%]
[[% String.split('\s+', 2).join(', ') %]]
-- expect --
[foo, bar foo baz]
-- test --
[% USE String('foo bar foo baz') -%]
[% String.search('foo') ? 'ok' : 'not ok' %]
[% String.search('fooz') ? 'not ok' : 'ok' %]
[% String.search('^foo') ? 'ok' : 'not ok' %]
[% String.search('^bar') ? 'not ok' : 'ok' %]
-- expect --
ok
ok
ok
ok
-- test --
[% USE String 'foo < bar' filter='html' -%]
[% String %]
-- expect --
foo < bar
-- test --
[% USE String 'foo bar' filter='uri' -%]
[% String %]
-- expect --
foo%20bar
-- test --
[% USE String 'foo bar' filters='uri' -%]
[% String %]
-- expect --
foo%20bar
-- test --
[% USE String ' foo bar ' filters=['trim' 'uri'] -%]
[[% String %]]
-- expect --
[foo%20bar]
-- test --
[% USE String ' foo bar ' filter='trim, uri' -%]
[[% String %]]
-- expect --
[foo%20bar]
-- test --
[% USE String ' foo bar ' filters='trim, uri' -%]
[[% String %]]
-- expect --
[foo%20bar]
-- test --
[% USE String 'foo bar' filters={ replace=['bar', 'baz'],
trim='', uri='' } -%]
[[% String %]]
-- expect --
[foo%20baz]
-- test --
[% USE String 'foo bar' filters=[ 'replace', ['bar', 'baz'],
'trim', 'uri' ] -%]
[[% String %]]
-- expect --
[foo%20baz]
-- test --
[% USE String 'foo bar' -%]
[% String %]
[% String.filter('uri') %]
[% String.filter('replace', 'bar', 'baz') %]
[% String.output_filter('uri') -%]
[% String %]
[% String.output_filter({ repeat => [3] }) -%]
[% String %]
-- expect --
foo bar
foo%20bar
foo baz
foo%20bar
foo%20barfoo%20barfoo%20bar
-- test --
[% USE String;
a = 'HeLLo';
b = 'hEllO';
a == b ? "not ok 0\n" : "ok 0\n";
String.new(a) == String.new(b) ? "not ok 1\n" : "ok 1\n";
String.new(a).lower == String.new(b).lower ? "ok 2\n" : "not ok 2\n";
String.new(a).lower.equals(String.new(b).lower) ? "ok 3\n" : "not ok 3\n";
a.search("(?i)^$b\$") ? "ok 4\n" : "not ok 4\n";
-%]
-- expect --
ok 0
ok 1
ok 2
ok 3
ok 4
-- test --
[% USE String('Hello World') -%]
a: [% String.substr(6) %]!
b: [% String.substr(0, 5) %]!
c: [% String.substr(0, 5, 'Goodbye') %]!
d: [% String %]!
-- expect --
a: World!
b: Hello!
c: Hello!
d: Goodbye World!
-- test --
[% USE str = String('foo bar baz wiz waz woz') -%]
a: [% str.substr(4, 3) %]
b: [% str.substr(12) %]
c: [% str.substr(0, 11, 'FOO') %]
d: [% str %]
-- expect --
a: bar
b: wiz waz woz
c: foo bar baz
d: FOO wiz waz woz
"""
| gsnedders/Template-Python | t/string_test.py | Python | artistic-2.0 | 6,966 | 0.001005 |
#!/usr/bin/python
import csv
import json
import datetime
import time
from utils import dateToISOString
#############################
#############################
# This file normalizes incoming
# data from the morph.io API
# to conform with the Mongo
# data model.
#############################
#############################
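# Illustrative output shape (an assumption inferred from the code below, not
# documented in the original): each document appended to data_out looks
# roughly like
#   {'cont_date': <ISO date string>, 'tcc_country_id': 'all' or '<ISO3>',
#    'mission': 'all' or '<mission name>', 'total': N, 'total_m': N,
#    'total_f': N, plus per-type fields such as 'troops', 'ip', 'fpu', 'eom'}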
# Normalize data function
def normalize(data, update_date):
# Load in mission object to add mission location data elements
missions_in = open('../python/json/missions.json','rb')
missions = json.load(missions_in)
# Load in country object to add country location data elements
countries_in = open('../python/json/countries.json','rb')
countries = json.load(countries_in)
# Output data array of objects to load into mongo
data_out = []
    # Lookup dicts to keep track of what has been entered
dates = {}
country_date = {}
country_date_mission = {}
# Dictionary to convert string type input to data base type conventions
type_dict = {'Individual Police':'ip', 'Experts on Mission':'eom', 'Contingent Troop':'troops', 'Formed Police Units':'fpu'}
    # loop through incoming data
for entry in data:
# Check to see if all mission all country object has been created for that date
if str(entry['date']) not in dates:
            # create the all-mission/all-country object; don't include numeric fields
data_out.append({
'cont_date':dateToISOString(datetime.datetime.strptime(str(entry['date']), '%Y%m%d').date()),
'tcc_country_id': 'all',
'mission': 'all',
'total': 0,
'total_m': 0,
'total_f': 0
})
# Add key (date) value (data_out index number) pair to dates object
dates[str(entry['date'])] = len(data_out)-1
# Check to see if all mission object has been created for that date country
if (entry['tcc'] + '-' + str(entry['date'])) not in country_date:
            # Create the all-mission object for the country/date combo; don't include numeric fields
data_out.append({
'cont_date':dateToISOString(datetime.datetime.strptime(str(entry['date']), '%Y%m%d').date()),
'tcc_country_id': entry['tccIso3Alpha'],
'tcc_country_string': entry['tcc'],
'tcc_au': countries[entry['tccIso3Alpha']]['au'],
'tcc_eu': countries[entry['tccIso3Alpha']]['eu'],
'tcc_ecowas': countries[entry['tccIso3Alpha']]['ecowas'],
'tcc_cis': countries[entry['tccIso3Alpha']]['cis'],
'tcc_gcc': countries[entry['tccIso3Alpha']]['gcc'],
'tcc_g20': countries[entry['tccIso3Alpha']]['g20'],
'tcc_eccas': countries[entry['tccIso3Alpha']]['eccas'],
'tcc_shanghai': countries[entry['tccIso3Alpha']]['shanghai'],
'tcc_nam': countries[entry['tccIso3Alpha']]['nam'],
'tcc_oecd': countries[entry['tccIso3Alpha']]['oecd'],
'tcc_uma': countries[entry['tccIso3Alpha']]['uma'],
'tcc_nato': countries[entry['tccIso3Alpha']]['nato'],
'tcc_igad': countries[entry['tccIso3Alpha']]['igad'],
'tcc_sadc': countries[entry['tccIso3Alpha']]['sadc'],
'tcc_eac': countries[entry['tccIso3Alpha']]['eac'],
'tcc_oic': countries[entry['tccIso3Alpha']]['oic'],
'tcc_g8': countries[entry['tccIso3Alpha']]['g8'],
'tcc_comesa': countries[entry['tccIso3Alpha']]['comesa'],
'tcc_p5g4a3': countries[entry['tccIso3Alpha']]['p5g4a3'],
'tcc_oas': countries[entry['tccIso3Alpha']]['oas'],
'tcc_censad': countries[entry['tccIso3Alpha']]['cen_sad'],
'tcc_asean': countries[entry['tccIso3Alpha']]['asean'],
'tcc_g77': countries[entry['tccIso3Alpha']]['g77'],
'tcc_arabLeague': countries[entry['tccIso3Alpha']]['arab_league'],
'tcc_capital': countries[entry['tccIso3Alpha']]['capital'],
'tcc_capital_loc': countries[entry['tccIso3Alpha']]['capital_loc'],
'tcc_continent': countries[entry['tccIso3Alpha']]['continent'],
'tcc_un_region': countries[entry['tccIso3Alpha']]['un_region'],
'tcc_un_bloc': countries[entry['tccIso3Alpha']]['un_bloc'],
'mission': 'all',
'total': 0,
'total_m': 0,
'total_f': 0
})
# Add key (country-date) value (data_out index number) pair to dates object
country_date[(entry['tcc'] + '-' + str(entry['date']))] = len(data_out)-1
if (entry['tcc'] + '-' + str(entry['date']) + '-' + entry['mission']) not in country_date_mission:
# create new country-mission-date object
data_out.append({
'cont_date':dateToISOString(datetime.datetime.strptime(str(entry['date']), '%Y%m%d').date()),
'tcc_country_id': entry['tccIso3Alpha'],
'tcc_country_string': entry['tcc'],
'tcc_au': countries[entry['tccIso3Alpha']]['au'],
'tcc_eu': countries[entry['tccIso3Alpha']]['eu'],
'tcc_ecowas': countries[entry['tccIso3Alpha']]['ecowas'],
'tcc_cis': countries[entry['tccIso3Alpha']]['cis'],
'tcc_gcc': countries[entry['tccIso3Alpha']]['gcc'],
'tcc_g20': countries[entry['tccIso3Alpha']]['g20'],
'tcc_eccas': countries[entry['tccIso3Alpha']]['eccas'],
'tcc_shanghai': countries[entry['tccIso3Alpha']]['shanghai'],
'tcc_nam': countries[entry['tccIso3Alpha']]['nam'],
'tcc_oecd': countries[entry['tccIso3Alpha']]['oecd'],
'tcc_uma': countries[entry['tccIso3Alpha']]['uma'],
'tcc_nato': countries[entry['tccIso3Alpha']]['nato'],
'tcc_igad': countries[entry['tccIso3Alpha']]['igad'],
'tcc_sadc': countries[entry['tccIso3Alpha']]['sadc'],
'tcc_eac': countries[entry['tccIso3Alpha']]['eac'],
'tcc_oic': countries[entry['tccIso3Alpha']]['oic'],
'tcc_g8': countries[entry['tccIso3Alpha']]['g8'],
'tcc_comesa': countries[entry['tccIso3Alpha']]['comesa'],
'tcc_p5g4a3': countries[entry['tccIso3Alpha']]['p5g4a3'],
'tcc_oas': countries[entry['tccIso3Alpha']]['oas'],
'tcc_censad': countries[entry['tccIso3Alpha']]['cen_sad'],
'tcc_asean': countries[entry['tccIso3Alpha']]['asean'],
'tcc_g77': countries[entry['tccIso3Alpha']]['g77'],
'tcc_arabLeague': countries[entry['tccIso3Alpha']]['arab_league'],
'tcc_capital': countries[entry['tccIso3Alpha']]['capital'],
'tcc_capital_loc': countries[entry['tccIso3Alpha']]['capital_loc'],
'tcc_continent': countries[entry['tccIso3Alpha']]['continent'],
'tcc_un_region': countries[entry['tccIso3Alpha']]['un_region'],
'tcc_un_bloc': countries[entry['tccIso3Alpha']]['un_bloc'],
'mission': entry['mission'],
'mission_country_id': missions[entry['mission']]['country_id'],
'mission_country': missions[entry['mission']]['country'],
'mission_hq': missions[entry['mission']]['hq'],
'mission_hq_loc': missions[entry['mission']]['mission_loc'],
'mission_continent': countries[missions[entry['mission']]['country_id']]['continent'],
'mission_un_region': countries[missions[entry['mission']]['country_id']]['un_region'],
'mission_un_bloc': countries[missions[entry['mission']]['country_id']]['un_bloc'],
'mission_au': countries[missions[entry['mission']]['country_id']]['au'],
'mission_eu': countries[missions[entry['mission']]['country_id']]['eu'],
'mission_ecowas': countries[missions[entry['mission']]['country_id']]['ecowas'],
'mission_cis': countries[missions[entry['mission']]['country_id']]['cis'],
'mission_gcc': countries[missions[entry['mission']]['country_id']]['gcc'],
'mission_g20': countries[missions[entry['mission']]['country_id']]['g20'],
'mission_eccas': countries[missions[entry['mission']]['country_id']]['eccas'],
'mission_shanghai': countries[missions[entry['mission']]['country_id']]['shanghai'],
'mission_nam': countries[missions[entry['mission']]['country_id']]['nam'],
'mission_oecd': countries[missions[entry['mission']]['country_id']]['oecd'],
'mission_uma': countries[missions[entry['mission']]['country_id']]['uma'],
'mission_nato': countries[missions[entry['mission']]['country_id']]['nato'],
'mission_igad': countries[missions[entry['mission']]['country_id']]['igad'],
'mission_sadc': countries[missions[entry['mission']]['country_id']]['sadc'],
'mission_eac': countries[missions[entry['mission']]['country_id']]['eac'],
'mission_oic': countries[missions[entry['mission']]['country_id']]['oic'],
'mission_g8': countries[missions[entry['mission']]['country_id']]['g8'],
'mission_comesa': countries[missions[entry['mission']]['country_id']]['comesa'],
'mission_p5g4a3': countries[missions[entry['mission']]['country_id']]['p5g4a3'],
'mission_oas': countries[missions[entry['mission']]['country_id']]['oas'],
'mission_censad': countries[missions[entry['mission']]['country_id']]['cen_sad'],
'mission_asean': countries[missions[entry['mission']]['country_id']]['asean'],
'mission_g77': countries[missions[entry['mission']]['country_id']]['g77'],
'mission_arabLeague': countries[missions[entry['mission']]['country_id']]['arab_league'],
'total': 0,
'total_m': 0,
'total_f': 0
})
# Add key (country-date-mission) value (data_out index number) pair to dates object
country_date_mission[(entry['tcc'] + '-' + str(entry['date']) + '-' + entry['mission'])] = len(data_out)-1
# Get insertion indexes for current entry
country_date_mission_index = country_date_mission[(entry['tcc'] + '-' + str(entry['date']) + '-' + entry['mission'])]
country_date_index = country_date[(entry['tcc'] + '-' + str(entry['date']))]
dates_index = dates[str(entry['date'])]
# Convert type to correct convention
type_abbr = type_dict[entry['type']]
type_abbr_m = type_dict[entry['type']] + '_m'
type_abbr_f = type_dict[entry['type']] + '_f'
# Insert country_date_mission data
data_out[country_date_mission_index][type_abbr] = entry['T']
data_out[country_date_mission_index][type_abbr_m] = entry['M']
data_out[country_date_mission_index][type_abbr_f] = entry['F']
data_out[country_date_mission_index]['total'] += entry['T']
data_out[country_date_mission_index]['total_m'] += entry['M']
data_out[country_date_mission_index]['total_f'] += entry['F']
# Check to see if there is an entry in country_date entry and add accordingly
if type_abbr in data_out[country_date_index]:
data_out[country_date_index][type_abbr] += entry['T']
data_out[country_date_index][type_abbr_m] += entry['M']
data_out[country_date_index][type_abbr_f] += entry['F']
else:
data_out[country_date_index][type_abbr] = entry['T']
data_out[country_date_index][type_abbr_m] = entry['M']
data_out[country_date_index][type_abbr_f] = entry['F']
# Check to see if there is an entry in dates entry and add accordingly
if type_abbr in data_out[dates_index]:
data_out[dates_index][type_abbr] += entry['T']
data_out[dates_index][type_abbr_m] += entry['M']
data_out[dates_index][type_abbr_f] += entry['F']
else:
data_out[dates_index][type_abbr] = entry['T']
data_out[dates_index][type_abbr_m] = entry['M']
data_out[dates_index][type_abbr_f] = entry['F']
# Add to totals for tcc and total aggregates
data_out[country_date_index]['total'] += entry['T']
data_out[country_date_index]['total_m'] += entry['M']
data_out[country_date_index]['total_f'] += entry['F']
data_out[dates_index]['total'] += entry['T']
data_out[dates_index]['total_m'] += entry['M']
data_out[dates_index]['total_f'] += entry['F']
# Observer corner case
if type_abbr == 'eom':
data_out[country_date_mission_index]['observers'] = entry['T']
if 'observers' in data_out[country_date_index]:
data_out[country_date_index]['observers'] += entry['T']
else:
data_out[country_date_index]['observers'] = entry['T']
if 'observers' in data_out[dates_index]:
data_out[dates_index]['observers'] += entry['T']
else:
data_out[dates_index]['observers'] = entry['T']
else:
pass
# civpol corner case
if type_abbr == 'ip' or type_abbr == 'fpu':
if 'civpol' in data_out[country_date_mission_index]:
data_out[country_date_mission_index]['civpol'] += entry['T']
else:
data_out[country_date_mission_index]['civpol'] = entry['T']
if 'civpol' in data_out[country_date_index]:
data_out[country_date_index]['civpol'] += entry['T']
else:
data_out[country_date_index]['civpol'] = entry['T']
if 'civpol' in data_out[dates_index]:
data_out[dates_index]['civpol'] += entry['T']
else:
data_out[dates_index]['civpol'] = entry['T']
print "Converted " + str(len(data)) + " records into " + str(len(data_out)) + " documents."
# test for double entries in normalization process
test_array = []
double_array = []
for point in data_out:
test_param = point['cont_date'] + ' ' + point['tcc_country_id'] + ' ' + point['mission']
if test_param in test_array:
double_array.append(test_param)
else:
test_array.append(test_param)
if double_array == []:
print 'No double entries!'
else:
for param in double_array:
print param
# write out archive of update into archive folder
print_out = open('../ppp_files/update_archive/json/' + update_date + '.json', 'w')
print_out.write(json.dumps(data_out, indent=4, separators=(',', ':')))
print_out.close()
    return data_out
| IPIDataLab/PPP_Loader | python/data_norm.py | Python | gpl-2.0 | 12,915 | 0.022145 |
'''
A condition
'''
from base import Base
from compares import const
class ComparisonMixin(object):
'''
Compare two values with a comparison utility
to denote if a change has validated.
'''
def compare(self, a, b, ctype=None):
'''
compare 'a' against 'b' for a comparison of `ctype`
by defauly ctype will compare for an exact match
'''
if ctype is None:
ctype = const.EXACT
# internal importer for core.compares.simple.
Comp = self.get_comparison_class(ctype)
        # new instance of the comparison class
comp = Comp(self)
# perform comparison
return comp.match(a,b)
def get_comparison_class(self, compare):
'''
Return the compare class by string
'''
m = __import__('core.compares.simple', fromlist=[compare])
# print 'module', m
# print 'compare', compare
k = getattr(m, compare)
return k
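# Usage sketch (illustrative; assumes core.compares.simple defines the
# comparison classes named by const, e.g. const.EXACT):
#
#   ok = obj.compare(3, 3)      # True with the default EXACT compare
#   ok = obj.compare('a', 'b')  # False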
class Condition(Base, ComparisonMixin):
'''
    A condition perpetuates changes of an object based upon
rules applied at configuration.
'''
def __init__(self, node, attr, value=None, valid=None):
'''
A condition requires
a node (Node|String|iterable),
the attribute to monitor (String),
        a value to validate the condition against.
        Optionally, a `valid` callback invoked when the condition is met.
'''
self.watch = node
self.field = attr
self.target = value
self._valid_cb = valid
def valid(self):
'''
Is this condition valid
'''
vs = self._validate()
for node in vs:
val = vs[node]
if val == False: return False
return True
def get_nodes(self):
'''
return a list of Nodes retrieved from the machine using the
`watch` attr. Each item in the `watch` iterable will be
parsed into a Node type.
'''
if isinstance(self.watch, (tuple, list,) ) is not True:
# create iterable
return [self.watch]
# is iterable
return self.watch
def _validate(self, nodes=None, field=None, ctype=None):
'''
        validate the condition against the assigned node(s).
        Returns a dict mapping each inspected node to a boolean result.
        Provide nodes as a node, a list of nodes or a string for
        network acquisition.
        ctype defines the compare utility to use for validation
'''
nodes = nodes or self.get_nodes()
# attr of the node to inspect
field = field or self.field
# the value to target.
value = self.target
if len(nodes) == 0:
return (False, 'no machine node %s' % self.watch)
r = {};
# print 'nodes', nodes
for node in nodes:
# current value
v = node.get(field)
# print 'node:', v, 'cache', cv, 'ctype', ctype
c = self.compare(v, value, ctype)
r.update({ node: c })
return r
| Strangemother/python-state-machine | scratch/machine_2/core/conditions.py | Python | mit | 3,066 | 0.005545 |
#!/usr/bin/python
import sys
class StatisticalTest(object):
    """Statistical tests for pseudorandomness of an n-bit string x.

    A string passes when all three conditions hold:
        maxRunOfLength(x) <= 10 * log2(n)
        |count0(x) - count1(x)| <= 10 * sqrt(n)
        |count00(x) - n/4| <= 10 * sqrt(n)
    """

    def __init__(self):
        pass
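# Editor's sketch implementing the three conditions above; assumes x is a
# non-empty str of '0'/'1' characters and n = len(x). Helper names are
# illustrative, not part of the original assignment.
import math


def max_run_length(x):
    # length of the longest run of identical consecutive bits
    best = run = 1
    for prev, cur in zip(x, x[1:]):
        run = run + 1 if cur == prev else 1
        best = max(best, run)
    return best


def passes_statistical_tests(x):
    n = len(x)
    count0, count1 = x.count('0'), x.count('1')
    count00 = sum(1 for i in range(n - 1) if x[i:i + 2] == '00')
    return (max_run_length(x) <= 10 * math.log(n, 2)
            and abs(count0 - count1) <= 10 * math.sqrt(n)
            and abs(count00 - n / 4.0) <= 10 * math.sqrt(n))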
| cpantel/gravityFalls | Coursera Crypto 1/StatisticalTests.py | Python | gpl-2.0 | 217 | 0.013825 |
#!/Users/kerem/github-stuff/demo-gui-python/py3env/bin/python3
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
# bitmap image
self.image = ImageTk.BitmapImage(im, foreground="white")
tkinter.Label.__init__(self, master, image=self.image, bd=0,
bg="black")
else:
# photo image
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python viewer.py imagefile")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
| keremgocen/demo-gui-python | py3env/bin/viewer.py | Python | apache-2.0 | 1,064 | 0.00094 |
#!/usr/bin/env python
"""
<Program Name>
test_formats.py
<Author>
Vladimir Diaz <vladimir.v.diaz@gmail.com>
<Started>
January 2017 (modified from TUF's original formats.py)
<Copyright>
See LICENSE for licensing information.
<Purpose>
Unit test for 'formats.py'
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import unittest
import datetime
import securesystemslib.exceptions
import securesystemslib.formats
import securesystemslib.schema
import six
class TestFormats(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_schemas(self):
# Test conditions for valid schemas.
valid_schemas = {
'ISO8601_DATETIME_SCHEMA': (securesystemslib.formats.ISO8601_DATETIME_SCHEMA,
'1985-10-21T13:20:00Z'),
'UNIX_TIMESTAMP_SCHEMA': (securesystemslib.formats.UNIX_TIMESTAMP_SCHEMA, 499137720),
'HASH_SCHEMA': (securesystemslib.formats.HASH_SCHEMA, 'A4582BCF323BCEF'),
'HASHDICT_SCHEMA': (securesystemslib.formats.HASHDICT_SCHEMA,
{'sha256': 'A4582BCF323BCEF'}),
'HEX_SCHEMA': (securesystemslib.formats.HEX_SCHEMA, 'A4582BCF323BCEF'),
'KEYID_SCHEMA': (securesystemslib.formats.KEYID_SCHEMA, '123456789abcdef'),
'KEYIDS_SCHEMA': (securesystemslib.formats.KEYIDS_SCHEMA,
['123456789abcdef', '123456789abcdef']),
'SIG_SCHEME_SCHEMA': (securesystemslib.formats.SIG_SCHEME_SCHEMA, 'ecdsa-sha2-nistp256'),
'RELPATH_SCHEMA': (securesystemslib.formats.RELPATH_SCHEMA, 'metadata/root/'),
'RELPATHS_SCHEMA': (securesystemslib.formats.RELPATHS_SCHEMA,
['targets/role1/', 'targets/role2/']),
'PATH_SCHEMA': (securesystemslib.formats.PATH_SCHEMA, '/home/someuser/'),
'PATHS_SCHEMA': (securesystemslib.formats.PATHS_SCHEMA,
['/home/McFly/', '/home/Tannen/']),
'URL_SCHEMA': (securesystemslib.formats.URL_SCHEMA,
'https://www.updateframework.com/'),
'VERSION_SCHEMA': (securesystemslib.formats.VERSION_SCHEMA,
{'major': 1, 'minor': 0, 'fix': 8}),
'LENGTH_SCHEMA': (securesystemslib.formats.LENGTH_SCHEMA, 8),
'NAME_SCHEMA': (securesystemslib.formats.NAME_SCHEMA, 'Marty McFly'),
'BOOLEAN_SCHEMA': (securesystemslib.formats.BOOLEAN_SCHEMA, True),
'THRESHOLD_SCHEMA': (securesystemslib.formats.THRESHOLD_SCHEMA, 1),
'ROLENAME_SCHEMA': (securesystemslib.formats.ROLENAME_SCHEMA, 'Root'),
'RSAKEYBITS_SCHEMA': (securesystemslib.formats.RSAKEYBITS_SCHEMA, 4096),
'PASSWORD_SCHEMA': (securesystemslib.formats.PASSWORD_SCHEMA, 'secret'),
'PASSWORDS_SCHEMA': (securesystemslib.formats.PASSWORDS_SCHEMA, ['pass1', 'pass2']),
'KEYVAL_SCHEMA': (securesystemslib.formats.KEYVAL_SCHEMA,
{'public': 'pubkey', 'private': 'privkey'}),
'PUBLIC_KEYVAL_SCHEMA': (securesystemslib.formats.PUBLIC_KEYVAL_SCHEMA,
{'public': 'pubkey'}),
'PUBLIC_KEYVAL_SCHEMA2': (securesystemslib.formats.PUBLIC_KEYVAL_SCHEMA,
{'public': 'pubkey', 'private': ''}),
'KEY_SCHEMA': (securesystemslib.formats.KEY_SCHEMA,
{'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyval': {'public': 'pubkey',
'private': 'privkey'}}),
'PUBLIC_KEY_SCHEMA': (securesystemslib.formats.KEY_SCHEMA,
{'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyval': {'public': 'pubkey'}}),
'PUBLIC_KEY_SCHEMA2': (securesystemslib.formats.KEY_SCHEMA,
{'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyval': {'public': 'pubkey',
'private': ''}}),
'RSAKEY_SCHEMA': (securesystemslib.formats.RSAKEY_SCHEMA,
{'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyid': '123456789abcdef',
'keyval': {'public': 'pubkey',
'private': 'privkey'}}),
'FILEINFO_SCHEMA': (securesystemslib.formats.FILEINFO_SCHEMA,
{'length': 1024,
'hashes': {'sha256': 'A4582BCF323BCEF'},
'custom': {'type': 'paintjob'}}),
'FILEDICT_SCHEMA': (securesystemslib.formats.FILEDICT_SCHEMA,
{'metadata/root.json': {'length': 1024,
'hashes': {'sha256': 'ABCD123'},
'custom': {'type': 'metadata'}}}),
'SIGNATURE_SCHEMA': (securesystemslib.formats.SIGNATURE_SCHEMA,
{'keyid': '123abc',
'method': 'evp',
'sig': 'A4582BCF323BCEF'}),
'SIGNATURESTATUS_SCHEMA': (securesystemslib.formats.SIGNATURESTATUS_SCHEMA,
{'threshold': 1,
'good_sigs': ['123abc'],
'bad_sigs': ['123abc'],
'unknown_sigs': ['123abc'],
'untrusted_sigs': ['123abc'],
'unknown_method_sigs': ['123abc']}),
'SIGNABLE_SCHEMA': (securesystemslib.formats.SIGNABLE_SCHEMA,
{'signed': 'signer',
'signatures': [{'keyid': '123abc',
'method': 'evp',
'sig': 'A4582BCF323BCEF'}]}),
'KEYDICT_SCHEMA': (securesystemslib.formats.KEYDICT_SCHEMA,
{'123abc': {'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyval': {'public': 'pubkey',
'private': 'privkey'}}}),
'KEYDB_SCHEMA': (securesystemslib.formats.KEYDB_SCHEMA,
{'123abc': {'keytype': 'rsa',
'keyid': '123456789abcdef',
'keyval': {'public': 'pubkey',
'private': 'privkey'}}}),
'ROLE_SCHEMA': (securesystemslib.formats.ROLE_SCHEMA,
{'keyids': ['123abc'],
'threshold': 1,
'paths': ['path1/', 'path2']}),
'ROLEDICT_SCHEMA': (securesystemslib.formats.ROLEDICT_SCHEMA,
{'root': {'keyids': ['123abc'],
'threshold': 1,
'paths': ['path1/', 'path2']}}),
'ROOT_SCHEMA': (securesystemslib.formats.ROOT_SCHEMA,
{'_type': 'root',
'version': 8,
'consistent_snapshot': False,
'compression_algorithms': ['gz'],
'expires': '1985-10-21T13:20:00Z',
'keys': {'123abc': {'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyval': {'public': 'pubkey',
'private': 'privkey'}}},
'roles': {'root': {'keyids': ['123abc'],
'threshold': 1,
'paths': ['path1/', 'path2']}}}),
'TARGETS_SCHEMA': (securesystemslib.formats.TARGETS_SCHEMA,
{'_type': 'targets',
'version': 8,
'expires': '1985-10-21T13:20:00Z',
'targets': {'metadata/targets.json': {'length': 1024,
'hashes': {'sha256': 'ABCD123'},
'custom': {'type': 'metadata'}}},
'delegations': {'keys': {'123abc': {'keytype':'rsa',
'scheme': 'rsassa-pss-sha256',
'keyval': {'public': 'pubkey',
'private': 'privkey'}}},
'roles': [{'name': 'root', 'keyids': ['123abc'],
'threshold': 1,
'paths': ['path1/', 'path2']}]}}),
'SNAPSHOT_SCHEMA': (securesystemslib.formats.SNAPSHOT_SCHEMA,
{'_type': 'snapshot',
'version': 8,
'expires': '1985-10-21T13:20:00Z',
'meta': {'snapshot.json': {'version': 1024}}}),
'TIMESTAMP_SCHEMA': (securesystemslib.formats.TIMESTAMP_SCHEMA,
{'_type': 'timestamp',
'version': 8,
'expires': '1985-10-21T13:20:00Z',
'meta': {'metadattimestamp.json': {'length': 1024,
'hashes': {'sha256': 'AB1245'}}}}),
'MIRROR_SCHEMA': (securesystemslib.formats.MIRROR_SCHEMA,
{'url_prefix': 'http://localhost:8001',
'metadata_path': 'metadata/',
'targets_path': 'targets/',
'confined_target_dirs': ['path1/', 'path2/'],
'custom': {'type': 'mirror'}}),
'MIRRORDICT_SCHEMA': (securesystemslib.formats.MIRRORDICT_SCHEMA,
{'mirror1': {'url_prefix': 'http://localhost:8001',
'metadata_path': 'metadata/',
'targets_path': 'targets/',
'confined_target_dirs': ['path1/', 'path2/'],
'custom': {'type': 'mirror'}}}),
'MIRRORLIST_SCHEMA': (securesystemslib.formats.MIRRORLIST_SCHEMA,
{'_type': 'mirrors',
'version': 8,
'expires': '1985-10-21T13:20:00Z',
'mirrors': [{'url_prefix': 'http://localhost:8001',
'metadata_path': 'metadata/',
'targets_path': 'targets/',
'confined_target_dirs': ['path1/', 'path2/'],
'custom': {'type': 'mirror'}}]})}
# Iterate 'valid_schemas', ensuring each 'valid_schema' correctly matches
# its respective 'schema_type'.
for schema_name, (schema_type, valid_schema) in six.iteritems(valid_schemas):
if not schema_type.matches(valid_schema):
print('bad schema: ' + repr(valid_schema))
self.assertEqual(True, schema_type.matches(valid_schema))
# Test conditions for invalid schemas.
# Set the 'valid_schema' of 'valid_schemas' to an invalid
# value and test that it does not match 'schema_type'.
for schema_name, (schema_type, valid_schema) in six.iteritems(valid_schemas):
invalid_schema = 0xBAD
if isinstance(schema_type, securesystemslib.schema.Integer):
invalid_schema = 'BAD'
self.assertEqual(False, schema_type.matches(invalid_schema))
def test_unix_timestamp_to_datetime(self):
# Test conditions for valid arguments.
UNIX_TIMESTAMP_SCHEMA = securesystemslib.formats.UNIX_TIMESTAMP_SCHEMA
self.assertTrue(isinstance(securesystemslib.formats.unix_timestamp_to_datetime(499137720), datetime.datetime))
datetime_object = datetime.datetime(1985, 10, 26, 1, 22)
self.assertEqual(datetime_object, securesystemslib.formats.unix_timestamp_to_datetime(499137720))
# Test conditions for invalid arguments.
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.unix_timestamp_to_datetime, 'bad')
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.unix_timestamp_to_datetime, 1000000000000000000000)
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.unix_timestamp_to_datetime, -1)
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.unix_timestamp_to_datetime, ['5'])
def test_datetime_to_unix_timestamp(self):
# Test conditions for valid arguments.
datetime_object = datetime.datetime(2015, 10, 21, 19, 28)
self.assertEqual(1445455680, securesystemslib.formats.datetime_to_unix_timestamp(datetime_object))
# Test conditions for invalid arguments.
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.datetime_to_unix_timestamp, 'bad')
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.datetime_to_unix_timestamp, 1000000000000000000000)
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.datetime_to_unix_timestamp, ['1'])
def test_format_base64(self):
# Test conditions for valid arguments.
data = 'updateframework'.encode('utf-8')
self.assertEqual('dXBkYXRlZnJhbWV3b3Jr', securesystemslib.formats.format_base64(data))
self.assertTrue(isinstance(securesystemslib.formats.format_base64(data), six.string_types))
# Test conditions for invalid arguments.
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.format_base64, 123)
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.format_base64, True)
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.format_base64, ['123'])
def test_parse_base64(self):
# Test conditions for valid arguments.
base64 = 'dXBkYXRlZnJhbWV3b3Jr'
self.assertEqual(b'updateframework', securesystemslib.formats.parse_base64(base64))
self.assertTrue(isinstance(securesystemslib.formats.parse_base64(base64), six.binary_type))
# Test conditions for invalid arguments.
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.parse_base64, 123)
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.parse_base64, True)
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.parse_base64, ['123'])
self.assertRaises(securesystemslib.exceptions.FormatError, securesystemslib.formats.parse_base64, '/')
def test_encode_canonical(self):
# Test conditions for valid arguments.
encode = securesystemslib.formats.encode_canonical
result = []
output = result.append
bad_output = 123
self.assertEqual('""', encode(""))
self.assertEqual('[1,2,3]', encode([1, 2, 3]))
self.assertEqual('[1,2,3]', encode([1,2,3]))
self.assertEqual('[]', encode([]))
self.assertEqual('{}', encode({}))
self.assertEqual('{"A":[99]}', encode({"A": [99]}))
self.assertEqual('{"A":true}', encode({"A": True}))
self.assertEqual('{"B":false}', encode({"B": False}))
self.assertEqual('{"x":3,"y":2}', encode({"x": 3, "y": 2}))
self.assertEqual('{"x":3,"y":null}', encode({"x": 3, "y": None}))
# Condition where 'encode()' sends the result to the callable
# 'output'.
self.assertEqual(None, encode([1, 2, 3], output))
self.assertEqual('[1,2,3]', ''.join(result))
# Test conditions for invalid arguments.
self.assertRaises(securesystemslib.exceptions.FormatError, encode, securesystemslib.exceptions.FormatError)
self.assertRaises(securesystemslib.exceptions.FormatError, encode, 8.0)
self.assertRaises(securesystemslib.exceptions.FormatError, encode, {"x": 8.0})
self.assertRaises(securesystemslib.exceptions.FormatError, encode, 8.0, output)
self.assertRaises(securesystemslib.exceptions.FormatError, encode, {"x": securesystemslib.exceptions.FormatError})
# Run unit test.
if __name__ == '__main__':
unittest.main()
| vladimir-v-diaz/securesystemslib | tests/test_formats.py | Python | mit | 15,792 | 0.005256 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Marcus Uneson <marcus.uneson@gmail.com>
# based on previous (nltk2) version by
# Christopher Maloof, Edward Loper, Steven Bird
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, absolute_import, division
import os
import pickle
import random
import time
from nltk.corpus import treebank
from nltk.tbl import error_list, Template
from nltk.tag.brill import Word, Pos
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
def demo():
"""
Run a demo with defaults. See source comments for details,
or docstrings of any of the more specific demo_* functions.
"""
postag()
def demo_repr_rule_format():
"""
Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose"))
"""
postag(ruleformat="repr")
def demo_str_rule_format():
"""
Exemplify str(Rule) (see also repr(Rule) and Rule.format("verbose"))
"""
postag(ruleformat="str")
def demo_verbose_rule_format():
"""
Exemplify Rule.format("verbose")
"""
postag(ruleformat="verbose")
def demo_multiposition_feature():
"""
The feature/s of a template takes a list of positions
relative to the current word where the feature should be
looked for, conceptually joined by logical OR. For instance,
Pos([-1, 1]), given a value V, will hold whenever V is found
one step to the left and/or one step to the right.
For contiguous ranges, a 2-arg form giving inclusive end
points can also be used: Pos(-3, -1) is the same as the arg
below.
"""
postag(templates=[Template(Pos([-3,-2,-1]))])
def demo_multifeature_template():
"""
Templates can have more than a single feature.
"""
postag(templates=[Template(Word([0]), Pos([-2,-1]))])
def demo_template_statistics():
"""
Show aggregate statistics per template. Little used templates are
candidates for deletion, much used templates may possibly be refined.
Deleting unused templates is mostly about saving time and/or space:
training is basically O(T) in the number of templates T
(also in terms of memory usage, which often will be the limiting factor).
"""
postag(incremental_stats=True, template_stats=True)
def demo_generated_templates():
"""
Template.expand and Feature.expand are class methods facilitating
generating large amounts of templates. See their documentation for
details.
Note: training with 500 templates can easily fill all available
memory, even on relatively small corpora.
"""
wordtpls = Word.expand([-1,0,1], [1,2], excludezero=False)
tagtpls = Pos.expand([-2,-1,0,1], [1,2], excludezero=True)
templates = list(Template.expand([wordtpls, tagtpls], combinations=(1,3)))
print("Generated {0} templates for transformation-based learning".format(len(templates)))
postag(templates=templates, incremental_stats=True, template_stats=True)
def demo_learning_curve():
"""
Plot a learning curve -- the contribution on tagging accuracy of
the individual rules.
Note: requires matplotlib
"""
postag(incremental_stats=True, separate_baseline_data=True, learning_curve_output="learningcurve.png")
def demo_error_analysis():
"""
Writes a file with context for each erroneous word after tagging testing data
"""
postag(error_output="errors.txt")
def demo_serialize_tagger():
"""
Serializes the learned tagger to a file in pickle format; reloads it
and validates the process.
"""
postag(serialize_output="tagger.pcl")
def demo_high_accuracy_rules():
"""
Discard rules with low accuracy. This may hurt performance a bit,
but will often produce rules which are more interesting read to a human.
"""
postag(num_sents=3000, min_acc=0.96, min_score=10)
def postag(
templates=None,
tagged_data=None,
num_sents=1000,
max_rules=300,
min_score=3,
min_acc=None,
train=0.8,
trace=3,
randomize=False,
ruleformat="str",
incremental_stats=False,
template_stats=False,
error_output=None,
serialize_output=None,
learning_curve_output=None,
learning_curve_take=300,
baseline_backoff_tagger=None,
separate_baseline_data=False,
cache_baseline_tagger=None):
"""
Brill Tagger Demonstration
:param templates: templates to be used in training
:type templates: list of Template
:param tagged_data: tagged training and testing corpus (defaults to treebank)
:type tagged_data: C{list} of C{list} of tuples
:param num_sents: how many sentences of training and testing data to use
:type num_sents: C{int}
:param max_rules: maximum number of rule instances to create
:type max_rules: C{int}
:param min_score: the minimum score for a rule in order for it to be considered
:type min_score: C{int}
:param min_acc: the minimum accuracy for a rule in order for it to be considered
:type min_acc: C{float}
:param train: the fraction of the corpus to be used for training (1=all)
:type train: C{float}
:param trace: the level of diagnostic tracing output to produce (0-4)
:type trace: C{int}
:param randomize: whether the training data should be a random subset of the corpus
:type randomize: C{bool}
:param ruleformat: rule output format, one of "str", "repr", "verbose"
:type ruleformat: C{str}
:param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
:type incremental_stats: C{bool}
:param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
:type template_stats: C{bool}
:param error_output: the file where errors will be saved
:type error_output: C{string}
:param serialize_output: the file where the learned tbl tagger will be saved
:type serialize_output: C{string}
:param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
:type learning_curve_output: C{string}
:param learning_curve_take: how many rules plotted
:type learning_curve_take: C{int}
:param baseline_backoff_tagger: the backoff tagger used by the unigram baseline tagger
:type baseline_backoff_tagger: tagger
:param separate_baseline_data: use a fraction of the training data exclusively for training baseline
:type separate_baseline_data: C{bool}
:param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround to get
deterministic output from the baseline unigram tagger between python versions)
:type cache_baseline_tagger: C{string}
Note on separate_baseline_data: if False, reuse training data both for baseline and rule learner. This
is fast and fine for a demo, but is likely to generalize worse on unseen data.
Also cannot be sensibly used for learning curves on training data (the baseline will be artificially high).
"""
# defaults
baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
if templates is None:
from nltk.tag.brill import describe_template_sets, brill24
# some pre-built template sets taken from typical systems or publications are
# available. Print a list with describe_template_sets()
# for instance:
templates = brill24()
(training_data, baseline_data, gold_data, testing_data) = \
_demo_prepare_data(tagged_data, train, num_sents, randomize, separate_baseline_data)
# creating (or reloading from cache) a baseline tagger (unigram tagger)
# this is just a mechanism for getting deterministic output from the baseline between
# python versions
if cache_baseline_tagger:
if not os.path.exists(cache_baseline_tagger):
baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
with open(cache_baseline_tagger, 'w') as print_rules:
pickle.dump(baseline_tagger, print_rules)
print("Trained baseline tagger, pickled it to {0}".format(cache_baseline_tagger))
with open(cache_baseline_tagger, "r") as print_rules:
baseline_tagger= pickle.load(print_rules)
print("Reloaded pickled tagger from {0}".format(cache_baseline_tagger))
else:
baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
print("Trained baseline tagger")
if gold_data:
print(" Accuracy on test set: {0:0.4f}".format(baseline_tagger.evaluate(gold_data)))
# creating a Brill tagger
tbrill = time.time()
trainer = BrillTaggerTrainer(baseline_tagger, templates, trace, ruleformat=ruleformat)
print("Training tbl tagger...")
brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
print("Trained tbl tagger in {0:0.2f} seconds".format(time.time() - tbrill))
if gold_data:
print(" Accuracy on test set: %.4f" % brill_tagger.evaluate(gold_data))
# printing the learned rules, if learned silently
if trace == 1:
print("\nLearned rules: ")
for (ruleno, rule) in enumerate(brill_tagger.rules(),1):
print("{0:4d} {1:s}".format(ruleno, rule.format(ruleformat)))
# printing template statistics (optionally including comparison with the training data)
# note: if not separate_baseline_data, then baseline accuracy will be artificially high
if incremental_stats:
print("Incrementally tagging the test data, collecting individual rule statistics")
(taggedtest, teststats) = brill_tagger.batch_tag_incremental(testing_data, gold_data)
print(" Rule statistics collected")
if not separate_baseline_data:
print("WARNING: train_stats asked for separate_baseline_data=True; the baseline "
"will be artificially high")
trainstats = brill_tagger.train_stats()
if template_stats:
brill_tagger.print_template_statistics(teststats)
if learning_curve_output:
_demo_plot(learning_curve_output, teststats, trainstats, take=learning_curve_take)
print("Wrote plot of learning curve to {0}".format(learning_curve_output))
else:
print("Tagging the test data")
taggedtest = brill_tagger.tag_sents(testing_data)
if template_stats:
brill_tagger.print_template_statistics()
# writing error analysis to file
if error_output is not None:
with open(error_output, 'w') as f:
f.write('Errors for Brill Tagger %r\n\n' % serialize_output)
f.write(u'\n'.join(error_list(gold_data, taggedtest)).encode('utf-8') + '\n')
print("Wrote tagger errors including context to {0}".format(error_output))
# serializing the tagger to a pickle file and reloading (just to see it works)
if serialize_output is not None:
taggedtest = brill_tagger.tag_sents(testing_data)
with open(serialize_output, 'w') as print_rules:
pickle.dump(brill_tagger, print_rules)
print("Wrote pickled tagger to {0}".format(serialize_output))
with open(serialize_output, "r") as print_rules:
brill_tagger_reloaded = pickle.load(print_rules)
print("Reloaded pickled tagger from {0}".format(serialize_output))
taggedtest_reloaded = brill_tagger.tag_sents(testing_data)
if taggedtest == taggedtest_reloaded:
print("Reloaded tagger tried on test set, results identical")
else:
print("PROBLEM: Reloaded tagger gave different results on test set")
def _demo_prepare_data(tagged_data, train, num_sents, randomize, separate_baseline_data):
# train is the proportion of data used in training; the rest is reserved
# for testing.
if tagged_data is None:
print("Loading tagged data from treebank... ")
tagged_data = treebank.tagged_sents()
if num_sents is None or len(tagged_data) <= num_sents:
num_sents = len(tagged_data)
if randomize:
random.seed(len(tagged_data))
random.shuffle(tagged_data)
cutoff = int(num_sents * train)
training_data = tagged_data[:cutoff]
gold_data = tagged_data[cutoff:num_sents]
testing_data = [[t[0] for t in sent] for sent in gold_data]
if not separate_baseline_data:
baseline_data = training_data
else:
bl_cutoff = len(training_data) // 3
(baseline_data, training_data) = (training_data[:bl_cutoff], training_data[bl_cutoff:])
(trainseqs, traintokens) = corpus_size(training_data)
(testseqs, testtokens) = corpus_size(testing_data)
(bltrainseqs, bltraintokens) = corpus_size(baseline_data)
print("Read testing data ({0:d} sents/{1:d} wds)".format(testseqs, testtokens))
print("Read training data ({0:d} sents/{1:d} wds)".format(trainseqs, traintokens))
print("Read baseline data ({0:d} sents/{1:d} wds) {2:s}".format(
bltrainseqs, bltraintokens, "" if separate_baseline_data else "[reused the training set]"))
return (training_data, baseline_data, gold_data, testing_data)
def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None):
testcurve = [teststats['initialerrors']]
for rulescore in teststats['rulescores']:
testcurve.append(testcurve[-1] - rulescore)
testcurve = [1 - x/teststats['tokencount'] for x in testcurve[:take]]
traincurve = [trainstats['initialerrors']]
for rulescore in trainstats['rulescores']:
traincurve.append(traincurve[-1] - rulescore)
traincurve = [1 - x/trainstats['tokencount'] for x in traincurve[:take]]
import matplotlib.pyplot as plt
r = list(range(len(testcurve)))
plt.plot(r, testcurve, r, traincurve)
plt.axis([None, None, None, 1.0])
plt.savefig(learning_curve_output)
NN_CD_TAGGER = RegexpTagger(
[(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),
(r'.*', 'NN')])
REGEXP_TAGGER = RegexpTagger(
[(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers
(r'(The|the|A|a|An|an)$', 'AT'), # articles
(r'.*able$', 'JJ'), # adjectives
(r'.*ness$', 'NN'), # nouns formed from adjectives
(r'.*ly$', 'RB'), # adverbs
(r'.*s$', 'NNS'), # plural nouns
(r'.*ing$', 'VBG'), # gerunds
(r'.*ed$', 'VBD'), # past tense verbs
(r'.*', 'NN') # nouns (default)
])
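def demo_minimal_sketch():
    """Editor's sketch (not one of the original demos): the smallest
    end-to-end Brill training run, reusing REGEXP_TAGGER above as the
    backoff. The corpus slice and rule counts are illustrative only."""
    from nltk.tag.brill import brill24
    sents = treebank.tagged_sents()[:100]
    baseline = UnigramTagger(sents, backoff=REGEXP_TAGGER)
    trainer = BrillTaggerTrainer(baseline, brill24(), trace=0)
    tagger = trainer.train(sents, max_rules=10, min_score=2)
    for rule in tagger.rules():
        print(rule)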
def corpus_size(seqs):
return (len(seqs), sum(len(x) for x in seqs))
if __name__ == '__main__':
demo_learning_curve()
| Reagankm/KnockKnock | venv/lib/python3.4/site-packages/nltk/tbl/demo.py | Python | gpl-2.0 | 14,715 | 0.006116 |
#coding: utf-8
#This file is part of Ficlatté.
#Copyright © 2015-2017 Paul Robertson, Jim Stitzel and Shu Sam Chen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of version 3 of the GNU Affero General Public
# License as published by the Free Software Foundation
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^submit/$', 'comment.views.submit_comment', name='submit_comment'),
url(r'^(?P<comment_id>\d+)/like/$', 'comment.views.like_comment', name='like_comment'),
url(r'^(?P<comment_id>\d+)/unlike/$', 'comment.views.unlike_comment', name='unlike_comment'),
]
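# Editor's note: the named patterns above are meant to be reversed in
# templates/views, e.g. (relative to wherever this urlconf is included):
#   reverse('like_comment', kwargs={'comment_id': 42}) -> '42/like/'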
| HSAR/Ficlatte | comment/urls.py | Python | agpl-3.0 | 1,083 | 0.004625 |
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, class (GUICG2)
import GemRB
from GUIDefines import *
import CommonTables
ClassWindow = 0
TextAreaControl = 0
DoneButton = 0
BackButton = 0
ClassCount = 0
HasSubClass = 0
ClassID = 0
def AdjustTextArea():
global HasSubClass, ClassID
Class = GemRB.GetVar("Class")-1
TextAreaControl.SetText(CommonTables.Classes.GetValue(Class,1) )
ClassName = CommonTables.Classes.GetRowName(Class)
ClassID = CommonTables.Classes.GetValue(ClassName, "ID")
#determining if this class has any subclasses
HasSubClass = 0
for i in range(1, ClassCount):
ClassName = CommonTables.Classes.GetRowName(i-1)
#determining if this is a kit or class
Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
if Allowed != ClassID:
continue
HasSubClass = 1
break
if HasSubClass == 0:
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
else:
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
return
def OnLoad():
global ClassWindow, TextAreaControl, DoneButton, BackButton
global ClassCount
GemRB.LoadWindowPack("GUICG", 800, 600)
#this replaces help02.2da for class restrictions
ClassCount = CommonTables.Classes.GetRowCount()+1
ClassWindow = GemRB.LoadWindow(2)
rid = CommonTables.Races.FindValue(3, GemRB.GetVar('BaseRace'))
RaceName = CommonTables.Races.GetRowName(rid)
#radiobutton groups must be set up before doing anything else to them
j = 0
for i in range(1,ClassCount):
ClassName = CommonTables.Classes.GetRowName(i-1)
Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
if Allowed > 0:
continue
Button = ClassWindow.GetControl(j+2)
j = j+1
Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON, OP_SET)
Button.SetState(IE_GUI_BUTTON_DISABLED)
j = 0
for i in range(1,ClassCount):
ClassName = CommonTables.Classes.GetRowName(i-1)
#determining if this is a kit or class
Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
if Allowed > 0:
continue
Allowed = CommonTables.Classes.GetValue(ClassName, RaceName)
Button = ClassWindow.GetControl(j+2)
j = j+1
t = CommonTables.Classes.GetValue(ClassName, "NAME_REF")
Button.SetText(t )
if Allowed==0:
continue
Button.SetState(IE_GUI_BUTTON_ENABLED)
Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, ClassPress)
Button.SetVarAssoc("Class", i)
BackButton = ClassWindow.GetControl(17)
BackButton.SetText(15416)
BackButton.SetFlags(IE_GUI_BUTTON_CANCEL,OP_OR)
DoneButton = ClassWindow.GetControl(0)
DoneButton.SetText(36789)
DoneButton.SetFlags(IE_GUI_BUTTON_DEFAULT,OP_OR)
ScrollBarControl = ClassWindow.GetControl(15)
TextAreaControl = ClassWindow.GetControl(16)
Class = GemRB.GetVar("Class")-1
if Class<0:
TextAreaControl.SetText(17242)
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
else:
AdjustTextArea()
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress)
BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress)
ClassWindow.SetVisible(WINDOW_VISIBLE)
return
def ClassPress():
global HasSubClass
AdjustTextArea()
if HasSubClass == 0:
return
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
j = 0
for i in range(1,ClassCount):
ClassName = CommonTables.Classes.GetRowName(i-1)
Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
if Allowed > 0:
continue
Button = ClassWindow.GetControl(j+2)
j = j+1
Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON, OP_SET)
Button.SetState(IE_GUI_BUTTON_DISABLED)
Button.SetText("")
j=0
for i in range(1, ClassCount):
ClassName = CommonTables.Classes.GetRowName(i-1)
#determining if this is a kit or class
Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
if Allowed != ClassID:
continue
Button = ClassWindow.GetControl(j+2)
j = j+1
t = CommonTables.Classes.GetValue(ClassName, "NAME_REF")
Button.SetText(t )
Button.SetState(IE_GUI_BUTTON_ENABLED)
Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, ClassPress2)
Button.SetVarAssoc("Class", i)
BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress2)
return
def ClassPress2():
Class = GemRB.GetVar("Class")-1
TextAreaControl.SetText(CommonTables.Classes.GetValue(Class,1) )
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
return
def BackPress2():
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
if ClassWindow:
ClassWindow.Unload()
OnLoad()
return
def BackPress():
if ClassWindow:
ClassWindow.Unload()
GemRB.SetNextScript("CharGen3")
GemRB.SetVar("Class",0) #scrapping the class value
MyChar = GemRB.GetVar("Slot")
GemRB.SetPlayerStat (MyChar, IE_CLASS, 0)
return
def NextPress():
#classcolumn is base class
Class = GemRB.GetVar("Class")
ClassColumn = CommonTables.Classes.GetValue(Class - 1, 3)
if ClassColumn <= 0: #it was already a base class
ClassColumn = Class
GemRB.SetVar("BaseClass", ClassColumn)
if ClassWindow:
ClassWindow.Unload()
GemRB.SetNextScript("CharGen4") #alignment
return
| tomprince/gemrb | gemrb/GUIScripts/iwd2/Class.py | Python | gpl-2.0 | 5,585 | 0.03026 |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common utility library."""
__author__ = ['rafek@google.com (Rafe Kaplan)',
'guido@google.com (Guido van Rossum)',
]
__all__ = [
'positional',
]
import gflags
import inspect
import logging
import types
import urllib
import urlparse
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)
FLAGS = gflags.FLAGS
gflags.DEFINE_enum('positional_parameters_enforcement', 'WARNING',
['EXCEPTION', 'WARNING', 'IGNORE'],
'The action when an oauth2client.util.positional declaration is violated.')
def positional(max_positional_args):
"""A decorator to declare that only the first N arguments my be positional.
This decorator makes it easy to support Python 3 style key-word only
parameters. For example, in Python 3 it is possible to write:
def fn(pos1, *, kwonly1=None, kwonly2=None):
...
All named parameters after * must be a keyword:
fn(10, 'kw1', 'kw2') # Raises exception.
fn(10, kwonly1='kw1') # Ok.
Example:
To define a function like above, do:
@positional(1)
def fn(pos1, kwonly1=None, kwonly2=None):
...
If no default value is provided to a keyword argument, it becomes a required
keyword argument:
@positional(0)
def fn(required_kw):
...
This must be called with the keyword parameter:
fn() # Raises exception.
fn(10) # Raises exception.
fn(required_kw=10) # Ok.
When defining instance or class methods always remember to account for
'self' and 'cls':
class MyClass(object):
@positional(2)
def my_method(self, pos1, kwonly1=None):
...
@classmethod
@positional(2)
def my_method(cls, pos1, kwonly1=None):
...
The positional decorator behavior is controlled by the
--positional_parameters_enforcement flag. The flag may be set to 'EXCEPTION',
'WARNING' or 'IGNORE' to raise an exception, log a warning, or do nothing,
respectively, if a declaration is violated.
Args:
max_positional_args: Maximum number of positional arguments. All
  parameters after this index must be keyword only.
Returns:
A decorator that prevents using arguments after max_positional_args from
being used as positional parameters.
Raises:
TypeError if a key-word only argument is provided as a positional parameter,
but only if the --positional_parameters_enforcement flag is set to
'EXCEPTION'.
"""
def positional_decorator(wrapped):
def positional_wrapper(*args, **kwargs):
if len(args) > max_positional_args:
plural_s = ''
if max_positional_args != 1:
plural_s = 's'
message = '%s() takes at most %d positional argument%s (%d given)' % (
wrapped.__name__, max_positional_args, plural_s, len(args))
if FLAGS.positional_parameters_enforcement == 'EXCEPTION':
raise TypeError(message)
elif FLAGS.positional_parameters_enforcement == 'WARNING':
logger.warning(message)
else: # IGNORE
pass
return wrapped(*args, **kwargs)
return positional_wrapper
if isinstance(max_positional_args, (int, long)):
return positional_decorator
else:
args, _, _, defaults = inspect.getargspec(max_positional_args)
return positional(len(args) - len(defaults))(max_positional_args)
def scopes_to_string(scopes):
"""Converts scope value to a string.
If scopes is a string then it is simply passed through. If scopes is an
iterable then a string is returned that is all the individual scopes
concatenated with spaces.
Args:
scopes: string or iterable of strings, the scopes.
Returns:
The scopes formatted as a single string.
"""
if isinstance(scopes, types.StringTypes):
return scopes
else:
return ' '.join(scopes)
def dict_to_tuple_key(dictionary):
"""Converts a dictionary to a tuple that can be used as an immutable key.
The resulting key is always sorted so that logically equivalent dictionaries
always produce an identical tuple for a key.
Args:
dictionary: the dictionary to use as the key.
Returns:
A tuple representing the dictionary in it's naturally sorted ordering.
"""
return tuple(sorted(dictionary.items()))
def _add_query_parameter(url, name, value):
"""Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
Updated query parameter. Does not update the url if value is None.
"""
if value is None:
return url
else:
parsed = list(urlparse.urlparse(url))
q = dict(parse_qsl(parsed[4]))
q[name] = value
parsed[4] = urllib.urlencode(q)
return urlparse.urlunparse(parsed)
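if __name__ == '__main__':
  # Editor's sketch (not part of the original module): exercise the helpers.
  # Query-parameter ordering in the last line depends on dict ordering.
  print(scopes_to_string(['a', 'b']))          # a b
  print(dict_to_tuple_key({'y': 2, 'x': 1}))   # (('x', 1), ('y', 2))
  print(_add_query_parameter('http://example.com/p?a=1', 'b', '2'))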
| harshilasu/LinkurApp | y/google-cloud-sdk/platform/gcutil/lib/google_api_python_client/oauth2client/util.py | Python | gpl-3.0 | 5,523 | 0.004527 |
XXXXXXXXXXXXXXX
| steventimberman/masterDebater | venv/lib/python2.7/site-packages/django_fluent_comments-1.4.3.dist-info/top_level.txt.py | Python | mit | 16 | 0 |
# -*- coding: utf-8 -*-
_counties = [
("Aberdeenshire", "Aberdeenshire"),
("Anglesey", "Anglesey"),
("Angus", "Angus"),
("Argyll", "Argyll"),
("Ayrshire", "Ayrshire"),
("Banffshire", "Banffshire"),
("Bedfordshire", "Bedfordshire"),
("Berwickshire", "Berwickshire"),
("Breconshire", "Breconshire"),
("Buckinghamshire", "Buckinghamshire"),
("Bute", "Bute"),
("Caernarvonshire", "Caernarvonshire"),
("Caithness", "Caithness"),
("Cambridgeshire", "Cambridgeshire"),
("Cardiganshire", "Cardiganshire"),
("Carmarthenshire", "Carmarthenshire"),
("Cheshire", "Cheshire"),
("Clackmannanshire", "Clackmannanshire"),
("Cornwall and Isles of Scilly", "Cornwall and Isles of Scilly"),
("Cumbria", "Cumbria"),
("Denbighshire", "Denbighshire"),
("Derbyshire", "Derbyshire"),
("Devon", "Devon"),
("Dorset", "Dorset"),
("Dumbartonshire", "Dumbartonshire"),
("Dumfriesshire", "Dumfriesshire"),
("Durham", "Durham"),
("East Lothian", "East Lothian"),
("East Sussex", "East Sussex"),
("Essex", "Essex"),
("Fife", "Fife"),
("Flintshire", "Flintshire"),
("Glamorgan", "Glamorgan"),
("Gloucestershire", "Gloucestershire"),
("Greater London", "Greater London"),
("Greater Manchester", "Greater Manchester"),
("Hampshire", "Hampshire"),
("Hertfordshire", "Hertfordshire"),
("Herefordshire", "Herefordshire"),
("Inverness", "Inverness"),
("Kent", "Kent"),
("Kincardineshire", "Kincardineshire"),
("Kinross-shire", "Kinross-shire"),
("Kirkcudbrightshire", "Kirkcudbrightshire"),
("Lanarkshire", "Lanarkshire"),
("Lancashire", "Lancashire"),
("Leicestershire", "Leicestershire"),
("Lincolnshire", "Lincolnshire"),
("London", "London"),
("Merionethshire", "Merionethshire"),
("Merseyside", "Merseyside"),
("Midlothian", "Midlothian"),
("Monmouthshire", "Monmouthshire"),
("Montgomeryshire", "Montgomeryshire"),
("Moray", "Moray"),
("Nairnshire", "Nairnshire"),
("Norfolk", "Norfolk"),
("North Yorkshire", "North Yorkshire"),
("Northamptonshire", "Northamptonshire"),
("Northumberland", "Northumberland"),
("Nottinghamshire", "Nottinghamshire"),
("Orkney", "Orkney"),
("Oxfordshire", "Oxfordshire"),
("Peebleshire", "Peebleshire"),
("Pembrokeshire", "Pembrokeshire"),
("Perthshire", "Perthshire"),
("Radnorshire", "Radnorshire"),
("Renfrewshire", "Renfrewshire"),
("Ross & Cromarty", "Ross & Cromarty"),
("Roxburghshire", "Roxburghshire"),
("Selkirkshire", "Selkirkshire"),
("Shetland", "Shetland"),
("Shropshire", "Shropshire"),
("Somerset", "Somerset"),
("South Yorkshire", "South Yorkshire"),
("Staffordshire", "Staffordshire"),
("Stirlingshire", "Stirlingshire"),
("Suffolk", "Suffolk"),
("Surrey", "Surrey"),
("Sutherland", "Sutherland"),
("Tyne and Wear", "Tyne and Wear"),
("Warwickshire", "Warwickshire"),
("West Lothian", "West Lothian"),
("West Midlands", "West Midlands"),
("West Sussex", "West Sussex"),
("West Yorkshire", "West Yorkshire"),
("Wigtownshire", "Wigtownshire"),
("Wiltshire", "Wiltshire"),
("Worcestershire", "Worcestershire"),
]
def get_counties():
return _counties
| willprice/mentor-finder | mentor_finder/models/county.py | Python | gpl-3.0 | 3,324 | 0.000301 |
# -*- coding: utf-8 -*-
from hashlib import sha1
from phpserialize import dumps
from calendar import timegm
from time import strptime
import zlib
#calendar.timegm(time.strptime('01/12/2011', '%d/%m/%Y'))
def create(data):
assert isinstance(data, dict)
assert 'request_data' in data
assert 'contract_data' in data['request_data']
assert 'product_data' in data['request_data']
assert isinstance(data['request_data']['product_data'], list)
mod_identifiers = {
'viva': 'viva',
'rfc': 'rfc',
'relocate_ci': 'CI-Umzug',
'swapci': 'Geräteaustausch',
}
contract_data = data['request_data']['contract_data']
product_data = data['request_data']['product_data']
license_data = {
'C__LICENCE__OBJECT_COUNT': 0,
'C__LICENCE__DB_NAME': contract_data['db_name'] or '',
'C__LICENCE__CUSTOMER_NAME': contract_data['customer_name'],
'C__LICENCE__REG_DATE': timegm(strptime(contract_data['date_start'], '%d/%m/%Y')),
'C__LICENCE__RUNTIME': timegm(strptime(contract_data['end_date'], '%d/%m/%Y')) - timegm(strptime(contract_data['date_start'], '%d/%m/%Y')),
'C__LICENCE__EMAIL': 'i-doit@' + contract_data['customer_name'],
'C__LICENCE__TYPE': 'Einzellizenz Subskription',
'C__LICENCE__DATA': {},
}
for product in product_data:
if 'Objektanzahl' in product:
license_data['C__LICENCE__OBJECT_COUNT'] += product[
'Objektanzahl'].isdigit() and int(product['Objektanzahl']) or 0
if 'Multitenancy' in product:
if product['Multitenancy'] == 'Single':
license_data['C__LICENCE__TYPE'] = 'Einzellizenz Subskription'
elif product['Multitenancy'] == 'Multi':
license_data['C__LICENCE__TYPE'] = 'Hosting'
if 'Lizenztyp' in product and product['Lizenztyp'] == 'Kaufversion':
license_data['C__LICENCE__TYPE'] = 'Kauflizenz'
if 'Produkttyp' in product and product['Produkttyp'] == 'Modul':
if 'identifier' in product:
license_data['C__LICENCE__DATA'][
product['identifier']] = True
for key in mod_identifiers:
if mod_identifiers[key] in product['name'].lower():
license_data['C__LICENCE__DATA'][key] = True
if license_data['C__LICENCE__TYPE'] == 'Hosting':
    license_data['C__LICENCE__DB_NAME'] = ''
elif license_data['C__LICENCE__TYPE'] == 'Kauflizenz':
    license_data['C__LICENCE__DB_NAME'] = ''
    del license_data['C__LICENCE__RUNTIME']
license_key = sha1(dumps(license_data))
#sort
#serialize with phpserialize.dumps
#gzip with zlib.compress
#reverse:
# f = open('license.key','rb')
# f_unzipped = zlib.decompress(f.read())
# license_dict = phpserialize.loads(f_unzipped)
# return license encoded in base_64
return True
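# Editor's sketch of the finalize steps described in the comments above
# (sort -> phpserialize -> zlib). The function name and byte layout are
# assumptions, not the project's actual file format.
def _pack_license(license_data):
    from collections import OrderedDict
    ordered = OrderedDict(sorted(license_data.items()))
    return zlib.compress(dumps(ordered))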
| openbig/odoo-contract | sale_contractmanagement/idoit_license_gen.py | Python | agpl-3.0 | 2,980 | 0.003357 |
#
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln, xlogy, entr
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
from ._discrete_distns import binom
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'multinomial',
'special_ortho_group',
'ortho_group',
'random_correlation',
'unitary_group']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
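# Editor's note: only the non-negligible entries are inverted, e.g.
#   _pinv_1d(np.array([4.0, 0.0, 2.0])) -> array([0.25, 0.  , 0.5 ])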
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _process_parameters(self, dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
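    # Editor's note: with U U^T equal to the pseudo-inverse of the covariance,
    # maha above is (x - mean)^T Sigma^+ (x - mean), so the returned value is
    # the log of the density in the class docstring (pseudo-determinant form).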
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
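        # Differential entropy of a Gaussian: h = 0.5 * log(det(2*pi*e*cov)).
        # np.linalg.slogdet returns (sign, log|det|); only the log-determinant
        # is needed for a positive definite covariance.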
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
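# A minimal usage sketch (values are illustrative; written as a comment so
# nothing executes at import time):
#
#     >>> from scipy.stats import multivariate_normal
#     >>> rv = multivariate_normal(mean=[0., 0.], cov=[[1., 0.], [0., 1.]])
#     >>> round(float(rv.pdf([0., 0.])), 6)   # peak density is 1/(2*pi)
#     0.159155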
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance.
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov = self._dist._process_parameters(
None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
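        # Equivalent to 0.5 * log(det(2*pi*e*cov)) when cov has full rank,
        # but written with the rank and pseudo-log-determinant so the value
        # is also defined when allow_singular=True.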
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = \
"""If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""
A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
    among-row covariance matrix. The `colcov` keyword specifies the
among-column covariance matrix.
Methods
-------
``pdf(X, mean=None, rowcov=1, colcov=1)``
Probability density function.
``logpdf(X, mean=None, rowcov=1, colcov=1)``
Log of the probability density function.
``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
Draw random samples.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
rv = matrix_normal(mean=None, rowcov=1, colcov=1)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
"""
def __init__(self, seed=None):
super(matrix_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""
Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
            if 0 in meanshape:
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the"
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the"
"same number of columns.")
else:
mean = np.zeros((numrows,numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
        Adjust quantiles array so that last two axes label the components of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
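        # Evaluate Tr[U^{-1}(X-M) V^{-1}(X-M)^T] by whitening the deviations
        # on both sides with the row and column precision factors, then
        # summing the squared entries of the result.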
roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""
Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""
Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""
Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
std_norm = random_state.standard_normal(size=(dims[1],size,dims[0]))
roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis,:,:]
if size == 1:
            # Return a single matrix rather than a stack of one matrix.
out = out.reshape(mean.shape)
return out
matrix_normal = matrix_normal_gen()
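# A hedged sampling sketch (dimensions are arbitrary; np is numpy):
#
#     >>> from scipy.stats import matrix_normal
#     >>> X = matrix_normal.rvs(mean=np.zeros((3, 2)), rowcov=np.eye(3),
#     ...                       colcov=np.eye(2), size=5)
#     >>> X.shape
#     (5, 3, 2)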
class matrix_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""
Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
Examples
--------
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) <= 0:
raise ValueError("Each entry in 'x' must be greater than zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
    Internal helper function to compute the log of the normalizing
    constant (the multivariate beta function)
    .. math::
        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
    lnB : scalar
        Log of the helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
    Each :math:`\alpha` entry must be positive. The distribution has support
    only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)` is the vector of
    concentration parameters, while :math:`K` is the dimension of the space
    where :math:`x` takes values.
    Note that the `dirichlet` interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
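    Examples
    --------
    A small illustrative example (the `alpha` and `x` values are arbitrary):
    >>> from scipy.stats import dirichlet
    >>> alpha = np.array([2.0, 3.0])
    >>> x = np.array([0.3, 0.7])
    >>> round(float(dirichlet.pdf(x, alpha)), 3)
    1.764
    >>> dirichlet.mean(alpha)
    array([ 0.4,  0.6])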
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
        Compute the mean of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
        mu : ndarray
Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
        Compute the variance of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
        v : ndarray
Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
        Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# dirichlet_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
    Degrees of freedom, must be greater than or equal to the dimension of
    the scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
                             ' Got size = %s.' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
# components alone the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
        mean : float or ndarray
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
            The mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
        var : float or ndarray
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
        # This appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
        Compute the Cholesky decomposition and log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
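# A hedged sampling sketch (df, scale and size are arbitrary; np is numpy):
#
#     >>> from scipy.stats import wishart
#     >>> S = wishart.rvs(df=5, scale=np.eye(3), size=10)
#     >>> S.shape
#     (10, 3, 3)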
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
        If integer, it is used to seed the local RandomState instance.
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
            raise ValueError('illegal value in %d-th argument of internal'
                             ' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
            The mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
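# A hedged sampling sketch (df and scale are arbitrary; np is numpy). A
# single draw is squeezed down to one matrix:
#
#     >>> from scipy.stats import invwishart
#     >>> S = invwishart.rvs(df=6, scale=np.eye(2))
#     >>> S.shape
#     (2, 2)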
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance.
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = \
"""`n` should be a positive integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multinomial_gen(multi_rv_generic):
r"""
A multinomial random variable.
Methods
-------
``pmf(x, n, p)``
Probability mass function.
``logpmf(x, n, p)``
Log of the probability mass function.
``rvs(n, p, size=1, random_state=None)``
Draw random samples from a multinomial distribution.
``entropy(n, p)``
Compute the entropy of the multinomial distribution.
``cov(n, p)``
Compute the covariance matrix of the multinomial distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
Alternatively, the object may be called (as a function) to fix the `n` and
`p` parameters, returning a "frozen" multinomial random variable:
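For example (illustrative; the value matches the broadcasting example
below):
>>> rv = multinomial(n=7, p=[.3, .7])
>>> rv.pmf([3, 4])
0.2268945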
The probability mass function for `multinomial` is
.. math::
f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
nonnegative integer and their sum is :math:`n`.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.stats import multinomial
>>> rv = multinomial(8, [0.3, 0.2, 0.5])
>>> rv.pmf([1, 3, 4])
0.042000000000000072
The multinomial distribution for :math:`k=2` is identical to the
corresponding binomial distribution (tiny numerical differences
notwithstanding):
>>> from scipy.stats import binom
>>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
0.29030399999999973
>>> binom.pmf(3, 7, 0.4)
0.29030400000000012
The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
broadcasting, under the convention that the vector parameters (``x`` and
``p``) are interpreted as if each row along the last axis is a single
object. For instance:
>>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
array([0.2268945, 0.25412184])
Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
but following the rules mentioned above they behave as if the rows
``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
``p.shape = ()``. To obtain the individual elements without broadcasting,
we would do this:
>>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
0.2268945
>>> multinomial.pmf([3, 5], 8, p=[.3, .7])
0.25412184
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``p.shape[-1]``. For example:
>>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
array([[[ 0.84, -0.84],
[-0.84, 0.84]],
[[ 1.2 , -1.2 ],
[-1.2 , 1.2 ]]])
In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
following the rules above, these broadcast as if ``p.shape == (2,)``.
Thus the result should also be of shape ``(2,)``, but since each output is
a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
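The ``mean`` method broadcasts in the same way; as a small illustrative
check (``mean`` returns ``n * p``):
>>> multinomial.mean(n=7, p=[.3, .7])
array([ 2.1,  4.9])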
See also
--------
scipy.stats.binom : The binomial distribution.
numpy.random.multinomial : Sampling from the multinomial distribution.
"""
def __init__(self, seed=None):
super(multinomial_gen, self).__init__(seed)
self.__doc__ = \
doccer.docformat(self.__doc__, multinomial_docdict_params)
def __call__(self, n, p, seed=None):
"""
Create a frozen multinomial distribution.
See `multinomial_frozen` for more information.
"""
return multinomial_frozen(n, p, seed)
def _process_parameters(self, n, p):
"""
Return: n_, p_, npcond.
n_ and p_ are arrays of the correct shape; npcond is a boolean array
flagging values out of the domain.
"""
p = np.array(p, dtype=np.float64, copy=True)
p[...,-1] = 1. - p[...,:-1].sum(axis=-1)
# true for bad p
pcond = np.any(p <= 0, axis=-1)
pcond |= np.any(p > 1, axis=-1)
n = np.array(n, dtype=np.int, copy=True)
# true for bad n
ncond = n <= 0
return n, p, ncond | pcond
def _process_quantiles(self, x, n, p):
"""
Return: x_, xcond.
x_ is an int array; xcond is a boolean array flagging values out of the
domain.
"""
xx = np.asarray(x, dtype=np.int)
if xx.ndim == 0:
raise ValueError("x must be an array.")
if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
raise ValueError("Size of each quantile should be size of p: "
"received %d, but expected %d." % (xx.shape[-1], p.shape[-1]))
# true for x out of the domain
cond = np.any(xx != x, axis=-1)
cond |= np.any(xx < 0, axis=-1)
cond = cond | (np.sum(xx, axis=-1) != n)
return xx, cond
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
if result.ndim == 0:
return bad_value
result[...] = bad_value
return result
def _logpmf(self, x, n, p):
return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
def logpmf(self, x, n, p):
"""
Log of the Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x, xcond = self._process_quantiles(x, n, p)
result = self._logpmf(x, n, p)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or p; broadcast npcond to the right shape
npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
return self._checkresult(result, npcond_, np.NAN)
def pmf(self, x, n, p):
"""
Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
Probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpmf(x, n, p))
def mean(self, n, p):
"""
Mean of the Multinomial distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : ndarray
The mean of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
result = n[..., np.newaxis]*p
return self._checkresult(result, npcond, np.NAN)
def cov(self, n, p):
"""
Covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
nn = n[..., np.newaxis, np.newaxis]
result = nn * np.einsum('...j,...k->...jk', -p, p)
# change the diagonal
for i in range(p.shape[-1]):
result[...,i, i] += n*p[..., i]
return self._checkresult(result, npcond, np.nan)
def entropy(self, n, p):
r"""
Compute the entropy of the multinomial distribution.
The entropy is computed using this expression:
.. math::
h = - \log n! - n\sum_{i=1}^k p_i \log p_i +
\sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Multinomial distribution
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x = np.r_[1:np.max(n)+1]
term1 = n*np.sum(entr(p), axis=-1)
term1 -= gammaln(n+1)
n = n[..., np.newaxis]
new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
x.shape += (1,)*new_axes_needed
term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
axis=(-1, -1-new_axes_needed))
return self._checkresult(term1 + term2, npcond, np.nan)
def rvs(self, n, p, size=None, random_state=None):
"""
Draw random samples from a Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `len(p)`)
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
random_state = self._get_random_state(random_state)
return random_state.multinomial(n, p, size)
multinomial = multinomial_gen()
class multinomial_frozen(multi_rv_frozen):
r"""
Create a frozen Multinomial distribution.
Parameters
----------
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, n, p, seed=None):
self._dist = multinomial_gen(seed)
self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
# monkey patch self._dist
def _process_parameters(n, p):
return self.n, self.p, self.npcond
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.n, self.p)
def pmf(self, x):
return self._dist.pmf(x, self.n, self.p)
def mean(self):
return self._dist.mean(self.n, self.p)
def cov(self):
return self._dist.cov(self.n, self.p)
def entropy(self):
return self._dist.entropy(self.n, self.p)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
method = multinomial_gen.__dict__[name]
method_frozen = multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, multinomial_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
multinomial_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
r"""
A matrix-valued SO(N) random variable.
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from SO(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is wrapping the random_rot code from the MDP Toolkit,
https://github.com/mdp-toolkit/mdp-toolkit
The algorithm is described in the paper
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
See also the similar `ortho_group`.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> x = special_ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> scipy.linalg.det(x)
1.0
This generates one random matrix from SO(3). It is orthogonal and
has a determinant of 1.
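Multiple samples are stacked along the first axis; an illustrative
check of the output shape:
>>> xs = special_ortho_group.rvs(3, size=2)
>>> xs.shape
(2, 3, 3)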
"""
def __init__(self, seed=None):
super(special_ortho_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""
Create a frozen SO(N) distribution.
See `special_ortho_group_frozen` for more information.
"""
return special_ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("""Dimension of rotation must be specified,
and must be a scalar greater than 1.""")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
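# Stewart's construction: accumulate Householder reflections of
# Gaussian random vectors into H, recording in D the signs needed
# to fix the determinant to +1 afterwards.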
H = np.eye(dim)
D = np.ones((dim,))
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
D[n-1] = np.sign(x[0])
x[0] -= D[n-1]*np.sqrt((x*x).sum())
# Householder transformation
Hx = (np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1)**(1-(dim % 2))*D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster
H = (D*H.T).T
return H
special_ortho_group = special_ortho_group_gen()
class special_ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""
Create a frozen SO(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> g = special_ortho_group(5)
>>> x = g.rvs()
"""
self._dist = special_ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
r"""
A matrix-valued O(N) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
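Unlike `special_ortho_group`, the determinant is +1 or -1 (each with
probability 1/2 under the Haar measure). Multiple samples are stacked
along the first axis (illustrative):
>>> xs = ortho_group.rvs(3, size=4)
>>> xs.shape
(4, 3, 3)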
"""
def __init__(self, seed=None):
super(ortho_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
H = np.eye(dim)
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
# random sign, 50/50, but chosen carefully to avoid roundoff error
D = np.sign(x[0])
x[0] += D*np.sqrt((x*x).sum())
# Householder transformation
Hx = -D*(np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
return H
ortho_group = ortho_group_gen()
class random_correlation_gen(multi_rv_generic):
r"""
A random correlation matrix.
Return a random correlation matrix, given a vector of eigenvalues.
The `eigs` keyword specifies the eigenvalues of the correlation matrix,
and implies the dimension.
Methods
-------
``rvs(eigs=None, random_state=None)``
Draw random correlation matrices, all with eigenvalues eigs.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix.
Notes
-----
Generates a random correlation matrix following a numerically stable
algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
similarity transformation to construct a symmetric positive semi-definite
matrix, and applies a series of Givens rotations to scale it to have ones
on the diagonal.
References
----------
.. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
of correlation matrices and their factors", BIT 2000, Vol. 40,
No. 4, pp. 640-651
Examples
--------
>>> from scipy.stats import random_correlation
>>> np.random.seed(514)
>>> x = random_correlation.rvs((.5, .8, 1.2, 1.5))
>>> x
array([[ 1. , -0.20387311, 0.18366501, -0.04953711],
[-0.20387311, 1. , -0.24351129, 0.06703474],
[ 0.18366501, -0.24351129, 1. , 0.38530195],
[-0.04953711, 0.06703474, 0.38530195, 1. ]])
>>> import scipy.linalg
>>> e, v = scipy.linalg.eigh(x)
>>> e
array([ 0.5, 0.8, 1.2, 1.5])
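The diagonal is ones by construction (within ``diag_tol``):
>>> x.diagonal()
array([ 1.,  1.,  1.,  1.])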
"""
def __init__(self, seed=None):
super(random_correlation_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, eigs, tol):
eigs = np.asarray(eigs, dtype=float)
dim = eigs.size
if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
raise ValueError("Array 'eigs' must be a vector of length greater than 1.")
if np.fabs(np.sum(eigs) - dim) > tol:
raise ValueError("Sum of eigenvalues must equal dimensionality.")
for x in eigs:
if x < -tol:
raise ValueError("All eigenvalues must be non-negative.")
return dim, eigs
def _givens_to_1(self, aii, ajj, aij):
"""Computes a 2x2 Givens matrix to put 1's on the diagonal for the input matrix.
The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
The output matrix g is a 2x2 Givens rotation matrix of the form [ c s ; -s c ];
the elements c and s are returned.
Applying the output matrix to the input matrix (as b=g.T M g)
results in a matrix with bii=1, provided tr(M) - det(M) >= 1
and floating point issues do not occur. Otherwise, some other
valid rotation is returned. When tr(M)==2, also bjj=1.
"""
aiid = aii - 1.
ajjd = ajj - 1.
if ajjd == 0:
# ajj==1, so swap aii and ajj to avoid division by zero
return 0., 1.
dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
# t is chosen to avoid cancellation [1]
t = (aij + math.copysign(dd, aij)) / ajjd
c = 1. / math.sqrt(1. + t*t)
if c == 0:
# Underflow
s = 1.0
else:
s = c*t
return c, s
def _to_corr(self, m):
"""
Given a psd matrix m, rotate to put ones on the diagonal, turning it
into a correlation matrix. This also requires that the trace equals the
dimensionality. Note: modifies the input matrix in place.
"""
# Check requirements for in-place Givens
if not (m.flags.c_contiguous and m.dtype == np.float64 and m.shape[0] == m.shape[1]):
raise ValueError("Matrix must be a square, C-contiguous float64 array.")
d = m.shape[0]
for i in range(d-1):
if m[i,i] == 1:
continue
elif m[i, i] > 1:
for j in range(i+1, d):
if m[j, j] < 1:
break
else:
for j in range(i+1, d):
if m[j, j] > 1:
break
c, s = self._givens_to_1(m[i,i], m[j,j], m[i,j])
# Use BLAS to apply Givens rotations in-place. Equivalent to:
# g = np.eye(d)
# g[i, i] = g[j,j] = c
# g[j, i] = -s; g[i, j] = s
# m = np.dot(g.T, np.dot(m, g))
mv = m.ravel()
drot(mv, mv, c, -s, n=d,
offx=i*d, incx=1, offy=j*d, incy=1,
overwrite_x=True, overwrite_y=True)
drot(mv, mv, c, -s, n=d,
offx=i, incx=d, offy=j, incy=d,
overwrite_x=True, overwrite_y=True)
return m
def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
"""
Draw a random correlation matrix with the given eigenvalues
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
rvs : ndarray
Random correlation matrix of dimension (dim, dim) with eigenvalues
`eigs`.
"""
dim, eigs = self._process_parameters(eigs, tol=tol)
random_state = self._get_random_state(random_state)
m = ortho_group.rvs(dim, random_state=random_state)
m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
m = self._to_corr(m) # Carefully rotate to unit diagonal
# Check diagonal
if abs(m.diagonal() - 1).max() > diag_tol:
raise RuntimeError("Failed to generate a valid correlation matrix")
return m
random_correlation = random_correlation_gen()
class unitary_group_gen(multi_rv_generic):
r"""
A matrix-valued U(N) random variable.
Return a random unitary matrix.
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from U(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is similar to `ortho_group`.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", arXiv:math-ph/0609050v2.
Examples
--------
>>> from scipy.stats import unitary_group
>>> x = unitary_group.rvs(3)
>>> np.dot(x, x.conj().T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
This generates one random matrix from U(3). The dot product confirms
that it is unitary up to machine precision.
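A unitary matrix also has a determinant of unit modulus; an
illustrative check:
>>> import scipy.linalg
>>> np.allclose(abs(scipy.linalg.det(x)), 1)
True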
"""
def __init__(self, seed=None):
super(unitary_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from U(N).
Parameters
----------
dim : integer
Dimension of space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
z = 1/math.sqrt(2)*(random_state.normal(size=(dim,dim)) +
1j*random_state.normal(size=(dim,dim)))
q, r = scipy.linalg.qr(z)
d = r.diagonal()
q *= d/abs(d)
return q
unitary_group = unitary_group_gen()
| jjhelmus/scipy | scipy/stats/_multivariate.py | Python | bsd-3-clause | 114,780 | 0.000741 |
__author__ = 'Evan Murawski'
import unittest
import backend
from backend.interactions import *
from backend.beam import Beam
import backend.solver as solver
from backend.solver import SolverError
import backend.shearmomentgenerator as shearmomentgenerator
from backend.shearmomentgenerator import Shear_Moment_Error
import matplotlib.pyplot as plt
import numpy as np
class TestBeamAnalyzer(unittest.TestCase):
"""Unit tests for the backend."""
ALMOST = 0.01
beams = []
STEP_SIZE = 0.001
def setUp(self):
"""Setup the tests. Creates various beams with known solutions and force moment plots."""
self.beams = []
self.beams.append(Beam(10))
self.beams[0].add_interaction(Force(5, -10))
self.beams[0].add_interaction(Force(0, 0, False))
self.beams[0].add_interaction(Force(10, 0, False))
self.beams.append(Beam(5.5))
self.beams[1].add_interaction(Force(0, 0, False))
self.beams[1].add_interaction(Moment(0, 0, False))
self.beams[1].add_interaction(Force(5.5, 10))
self.beams[1].add_interaction(Moment(4, 40))
self.beams.append(Beam(30))
self.beams[2].add_interaction(Force(0, 0, False))
self.beams[2].add_interaction(Force(20, 0, False))
self.beams[2].add_interaction(Dist_Force(0, -1, 10))
self.beams[2].add_interaction(Force(15, -20))
self.beams[2].add_interaction(Force(30, -10))
self.beams.append(Beam(10))
self.beams[3].add_interaction(Force(1, 7))
self.beams[3].add_interaction(Dist_Force(2, -5, 7))
self.beams[3].add_interaction(Moment(8, 10))
self.beams[3].add_interaction(Force(8, 0, False))
self.beams[3].add_interaction(Moment(0, 0, False))
#A very simple beam with one known force and two unknown forces
def test_beam0(self):
solver.solve(self.beams[0])
#Test solution
self.assertEqual(5, self.beams[0].interactions[0].magnitude)
self.assertEqual(5, self.beams[0].interactions[2].magnitude)
shear_moment = shearmomentgenerator.generate_numerical(self.beams[0], self.STEP_SIZE)
#Test moment
assert abs(shear_moment[0][1] - 0 ) < self.ALMOST
assert abs(shear_moment[int(10/self.STEP_SIZE/2)][1] - 25) < self.ALMOST
assert abs(shear_moment[int(10/self.STEP_SIZE/4)][1] - 25/2) < self.ALMOST
#Test shear
assert abs(shear_moment[1][0] - 5) < self.ALMOST
assert abs(shear_moment[int(10/self.STEP_SIZE/2) -1][0] - 5 ) < self.ALMOST
assert abs(shear_moment[int(10/self.STEP_SIZE/2) +2][0] - (-5)) < self.ALMOST
assert abs(shear_moment[int(10/self.STEP_SIZE) -1][0] - (-5)) < self.ALMOST
def test_beam1(self):
solver.solve(self.beams[1])
#Test solution
self.assertEqual(-10, self.beams[1].interactions[0].magnitude)
self.assertEqual(-95, self.beams[1].interactions[1].magnitude)
shear_moment = shearmomentgenerator.generate_numerical(self.beams[1], self.STEP_SIZE)
#Test shear
for item in shear_moment:
assert abs(item[0] - (-10)) < self.ALMOST
#Test moment
assert abs(shear_moment[0][1] - 95) < self.ALMOST
assert abs(shear_moment[int(4/self.STEP_SIZE - 1)][1] - 55 ) < self.ALMOST
assert abs(shear_moment[int(5.5/self.STEP_SIZE) - 1][1] - 0) < self.ALMOST
def test_beam2(self):
solver.solve(self.beams[2])
#Test the solution
self.assertEqual(7.5, self.beams[2].interactions[0].magnitude)
self.assertEqual(32.5, self.beams[2].interactions[3].magnitude)
shear_moment = shearmomentgenerator.generate_numerical(self.beams[2], self.STEP_SIZE)
#Test shear
assert abs(shear_moment[0][0] - 7.5) < self.ALMOST
assert abs(shear_moment[int(10/self.STEP_SIZE)][0] - (-2.5)) < self.ALMOST
assert abs(shear_moment[int(15/self.STEP_SIZE) - 1][0] - (-2.5)) < self.ALMOST
assert abs(shear_moment[int(15/self.STEP_SIZE) + 1][0] - (-22.5)) < self.ALMOST
assert abs(shear_moment[int(20/self.STEP_SIZE) - 1][0] - (-22.5)) < self.ALMOST
assert abs(shear_moment[int(20/self.STEP_SIZE) + 1][0] - (10)) < self.ALMOST
#Test moment
assert abs(shear_moment[0][1] - 0) < self.ALMOST
assert abs(shear_moment[int(10/self.STEP_SIZE)][1] - 25) < self.ALMOST
assert abs(shear_moment[int(15/self.STEP_SIZE)][1] - 12.5) < self.ALMOST
assert abs(shear_moment[int(20/self.STEP_SIZE)][1] - (-100)) < self.ALMOST
assert abs(shear_moment[int(30/self.STEP_SIZE) -1][1] - 0) < self.ALMOST
def test_beam3(self):
solver.solve(self.beams[3])
#Test the solution
self.assertEqual(-48.5, self.beams[3].interactions[0].magnitude)
self.assertEqual(18, self.beams[3].interactions[4].magnitude)
shear_moment = shearmomentgenerator.generate_numerical(self.beams[3], self.STEP_SIZE)
#Test shear
assert abs(shear_moment[0][0] - 0) < self.ALMOST
assert abs(shear_moment[int(1/self.STEP_SIZE) -1][0] - 0) < self.ALMOST
assert abs(shear_moment[int(1/self.STEP_SIZE) + 1][0] - 7) < self.ALMOST
assert abs(shear_moment[int(2/self.STEP_SIZE) -1][0] - 7) < self.ALMOST
assert abs(shear_moment[int(7/self.STEP_SIZE) +1][0] - (-18)) < self.ALMOST
assert abs(shear_moment[int(8/self.STEP_SIZE) -1][0] - (-18)) < self.ALMOST
assert abs(shear_moment[int(8/self.STEP_SIZE) +1][0] - (0)) < self.ALMOST
assert abs(shear_moment[int(10/self.STEP_SIZE) -1][0] - (0)) < self.ALMOST
#Test moment
assert abs(shear_moment[0][1] - 48.5) < self.ALMOST
assert abs(shear_moment[int(1/self.STEP_SIZE) - 1][1] - 48.5) < self.ALMOST
#Had to decrease criteria due to steep slope
assert abs(shear_moment[int(8/self.STEP_SIZE) - 1][1] - 10) < 0.02
assert abs(shear_moment[int(8/self.STEP_SIZE) +1][1] - 0) < self.ALMOST
assert abs(shear_moment[int(10/self.STEP_SIZE) -1][1] - 0) < self.ALMOST
def test_interaction_location_error(self):
with self.assertRaises(InteractionLocationError):
Force(-1, 3)
with self.assertRaises(InteractionLocationError):
self.beams[0].add_interaction(Force(13, 3))
def test_solver_error(self):
self.beams[0].add_interaction(Force(3, 0, False))
with self.assertRaises(SolverError):
solver.solve(self.beams[0])
def test_shear_moment_error(self):
with self.assertRaises(Shear_Moment_Error):
shearmomentgenerator.generate_numerical(self.beams[0], self.STEP_SIZE) | EvanMurawski/BeamAnalyzer | beamanalyzer/test/test.py | Python | mit | 6,877 | 0.009597 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import date
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
from odoo.osv import expression
class Contract(models.Model):
_name = 'hr.contract'
_description = 'Contract'
_inherit = ['mail.thread', 'mail.activity.mixin']
name = fields.Char('Contract Reference', required=True)
active = fields.Boolean(default=True)
structure_type_id = fields.Many2one('hr.payroll.structure.type', string="Salary Structure Type")
employee_id = fields.Many2one('hr.employee', string='Employee', tracking=True, domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
department_id = fields.Many2one('hr.department', compute='_compute_employee_contract', store=True, readonly=False,
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]", string="Department")
job_id = fields.Many2one('hr.job', compute='_compute_employee_contract', store=True, readonly=False,
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]", string='Job Position')
date_start = fields.Date('Start Date', required=True, default=fields.Date.today, tracking=True,
help="Start date of the contract.")
date_end = fields.Date('End Date', tracking=True,
help="End date of the contract (if it's a fixed-term contract).")
trial_date_end = fields.Date('End of Trial Period',
help="End date of the trial period (if there is one).")
resource_calendar_id = fields.Many2one(
'resource.calendar', 'Working Schedule', compute='_compute_employee_contract', store=True, readonly=False,
default=lambda self: self.env.company.resource_calendar_id.id, copy=False, index=True,
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
wage = fields.Monetary('Wage', required=True, tracking=True, help="Employee's monthly gross wage.")
notes = fields.Text('Notes')
state = fields.Selection([
('draft', 'New'),
('open', 'Running'),
('close', 'Expired'),
('cancel', 'Cancelled')
], string='Status', group_expand='_expand_states', copy=False,
tracking=True, help='Status of the contract', default='draft')
company_id = fields.Many2one('res.company', compute='_compute_employee_contract', store=True, readonly=False,
default=lambda self: self.env.company, required=True)
company_country_id = fields.Many2one('res.country', string="Company country", related='company_id.country_id', readonly=True)
"""
kanban_state:
* draft + green = "Incoming" state (will be set as Open once the contract has started)
* open + red = "Pending" state (will be set as Closed once the contract has ended)
* red = Shows a warning on the employees kanban view
"""
kanban_state = fields.Selection([
('normal', 'Grey'),
('done', 'Green'),
('blocked', 'Red')
], string='Kanban State', default='normal', tracking=True, copy=False)
currency_id = fields.Many2one(string="Currency", related='company_id.currency_id', readonly=True)
permit_no = fields.Char('Work Permit No', related="employee_id.permit_no", readonly=False)
visa_no = fields.Char('Visa No', related="employee_id.visa_no", readonly=False)
visa_expire = fields.Date('Visa Expire Date', related="employee_id.visa_expire", readonly=False)
hr_responsible_id = fields.Many2one('res.users', 'HR Responsible', tracking=True,
help='Person responsible for validating the employee\'s contracts.')
calendar_mismatch = fields.Boolean(compute='_compute_calendar_mismatch')
first_contract_date = fields.Date(related='employee_id.first_contract_date')
@api.depends('employee_id.resource_calendar_id', 'resource_calendar_id')
def _compute_calendar_mismatch(self):
for contract in self:
contract.calendar_mismatch = contract.resource_calendar_id != contract.employee_id.resource_calendar_id
def _expand_states(self, states, domain, order):
return [key for key, val in type(self).state.selection]
@api.depends('employee_id')
def _compute_employee_contract(self):
for contract in self.filtered('employee_id'):
contract.job_id = contract.employee_id.job_id
contract.department_id = contract.employee_id.department_id
contract.resource_calendar_id = contract.employee_id.resource_calendar_id
contract.company_id = contract.employee_id.company_id
@api.onchange('company_id')
def _onchange_company_id(self):
if self.company_id:
structure_types = self.env['hr.payroll.structure.type'].search([
'|',
('country_id', '=', self.company_id.country_id.id),
('country_id', '=', False)])
if structure_types:
self.structure_type_id = structure_types[0]
elif self.structure_type_id not in structure_types:
self.structure_type_id = False
@api.onchange('structure_type_id')
def _onchange_structure_type_id(self):
if self.structure_type_id.default_resource_calendar_id:
self.resource_calendar_id = self.structure_type_id.default_resource_calendar_id
@api.constrains('employee_id', 'state', 'kanban_state', 'date_start', 'date_end')
def _check_current_contract(self):
""" Two contracts in state [incoming | open | close] cannot overlap """
for contract in self.filtered(lambda c: (c.state not in ['draft', 'cancel'] or c.state == 'draft' and c.kanban_state == 'done') and c.employee_id):
domain = [
('id', '!=', contract.id),
('employee_id', '=', contract.employee_id.id),
'|',
('state', 'in', ['open', 'close']),
'&',
('state', '=', 'draft'),
('kanban_state', '=', 'done') # replaces incoming
]
if not contract.date_end:
start_domain = []
end_domain = ['|', ('date_end', '>=', contract.date_start), ('date_end', '=', False)]
else:
start_domain = [('date_start', '<=', contract.date_end)]
end_domain = ['|', ('date_end', '>', contract.date_start), ('date_end', '=', False)]
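# Two contracts overlap iff each one starts on or before the other
# ends; a missing date_end is treated as an open-ended contract.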
domain = expression.AND([domain, start_domain, end_domain])
if self.search_count(domain):
raise ValidationError(_('An employee can only have one contract at the same time. (Excluding Draft and Cancelled contracts)'))
@api.constrains('date_start', 'date_end')
def _check_dates(self):
if self.filtered(lambda c: c.date_end and c.date_start > c.date_end):
raise ValidationError(_('Contract start date must be earlier than contract end date.'))
@api.model
def update_state(self):
contracts = self.search([
('state', '=', 'open'), ('kanban_state', '!=', 'blocked'),
'|',
'&',
('date_end', '<=', fields.Date.to_string(date.today() + relativedelta(days=7))),
('date_end', '>=', fields.Date.to_string(date.today() + relativedelta(days=1))),
'&',
('visa_expire', '<=', fields.Date.to_string(date.today() + relativedelta(days=60))),
('visa_expire', '>=', fields.Date.to_string(date.today() + relativedelta(days=1))),
])
for contract in contracts:
contract.activity_schedule(
'mail.mail_activity_data_todo', contract.date_end,
_("The contract of %s is about to expire.", contract.employee_id.name),
user_id=contract.hr_responsible_id.id or self.env.uid)
contracts.write({'kanban_state': 'blocked'})
self.search([
('state', '=', 'open'),
'|',
('date_end', '<=', fields.Date.to_string(date.today() + relativedelta(days=1))),
('visa_expire', '<=', fields.Date.to_string(date.today() + relativedelta(days=1))),
]).write({
'state': 'close'
})
self.search([('state', '=', 'draft'), ('kanban_state', '=', 'done'), ('date_start', '<=', fields.Date.to_string(date.today())),]).write({
'state': 'open'
})
contract_ids = self.search([('date_end', '=', False), ('state', '=', 'close'), ('employee_id', '!=', False)])
# Ensure every closed contract that is followed by a new contract has an end date.
# If a closed contract has no end date, its work entries would be generated for an unlimited period.
for contract in contract_ids:
next_contract = self.search([
('employee_id', '=', contract.employee_id.id),
('state', 'not in', ['cancel', 'new']),
('date_start', '>', contract.date_start)
], order="date_start asc", limit=1)
if next_contract:
contract.date_end = next_contract.date_start - relativedelta(days=1)
continue
next_contract = self.search([
('employee_id', '=', contract.employee_id.id),
('date_start', '>', contract.date_start)
], order="date_start asc", limit=1)
if next_contract:
contract.date_end = next_contract.date_start - relativedelta(days=1)
return True
def _assign_open_contract(self):
for contract in self:
contract.employee_id.sudo().write({'contract_id': contract.id})
def _get_contract_wage(self):
self.ensure_one()
return self[self._get_contract_wage_field()]
def _get_contract_wage_field(self):
self.ensure_one()
return 'wage'
def write(self, vals):
res = super(Contract, self).write(vals)
if vals.get('state') == 'open':
self._assign_open_contract()
if vals.get('state') == 'close':
for contract in self.filtered(lambda c: not c.date_end):
contract.date_end = max(date.today(), contract.date_start)
calendar = vals.get('resource_calendar_id')
if calendar:
self.filtered(lambda c: c.state == 'open' or (c.state == 'draft' and c.kanban_state == 'done')).mapped('employee_id').write({'resource_calendar_id': calendar})
if 'state' in vals and 'kanban_state' not in vals:
self.write({'kanban_state': 'normal'})
return res
@api.model
def create(self, vals):
contracts = super(Contract, self).create(vals)
if vals.get('state') == 'open':
contracts._assign_open_contract()
open_contracts = contracts.filtered(lambda c: c.state == 'open' or c.state == 'draft' and c.kanban_state == 'done')
# sync contract calendar -> calendar employee
for contract in open_contracts.filtered(lambda c: c.employee_id and c.resource_calendar_id):
contract.employee_id.resource_calendar_id = contract.resource_calendar_id
return contracts
def _track_subtype(self, init_values):
self.ensure_one()
if 'state' in init_values and self.state == 'open' and 'kanban_state' in init_values and self.kanban_state == 'blocked':
return self.env.ref('hr_contract.mt_contract_pending')
elif 'state' in init_values and self.state == 'close':
return self.env.ref('hr_contract.mt_contract_close')
return super(Contract, self)._track_subtype(init_values)
| rven/odoo | addons/hr_contract/models/hr_contract.py | Python | agpl-3.0 | 11,699 | 0.005214 |
# Copyright (C) 2013 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import bottle
from bottle import get, post, route, static_file, view, HTTPError
import shared_state
import logging
logger = logging.getLogger(__name__)
@route('/unauth')
def unauth():
return HTTPError(401, 'Unauthorized')
@post('/login')
def login():
"""Authenticate users"""
username = post_get('username')
password = post_get('password')
logger.info("Authentication attempt with username: [{0}]".format(username))
if shared_state.auth.login(username, password):
return "You provided valid credentials"
else:
return HTTPError(401, 'Invalid credentials')
@route('/logout')
def logout():
shared_state.auth.logout(success_redirect='/unauth')
@route('/admin')
@view('admin_page')
def admin():
"""Only admin users can see this"""
shared_state.auth.require(role='admin', fail_redirect='/unauth')
return dict(
current_user=shared_state.auth.current_user,
users=shared_state.auth.list_users(),
roles=shared_state.auth.list_roles()
)
@post('/create_user')
def create_user():
try:
shared_state.auth.create_user(postd().username, postd().role, postd().password)
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
@post('/delete_user')
def delete_user():
try:
shared_state.auth.delete_user(post_get('username'))
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
@post('/create_role')
def create_role():
try:
shared_state.auth.create_role(post_get('role'), post_get('level'))
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
@post('/delete_role')
def delete_role():
try:
shared_state.auth.delete_role(post_get('role'))
return dict(ok=True, msg='')
except Exception, e:
return dict(ok=False, msg=e.message)
def postd():
return bottle.request.forms
def post_get(name, default=''):
return bottle.request.POST.get(name, default).strip()
| threatstream/mnemosyne | webapi/admin.py | Python | gpl-3.0 | 2,846 | 0.001054 |
#!/usr/bin/env python
try:
from collections import OrderedDict
import json
except ImportError:
from ordereddict import OrderedDict
import simplejson as json
import itertools
import six
from csvkit import CSVKitWriter
def parse_object(obj, path=''):
"""
Recursively parse JSON objects and a dictionary of paths/keys and values.
Inspired by JSONPipe (https://github.com/dvxhouse/jsonpipe).
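For example (illustrative; key order may vary)::
    parse_object({'a': {'b': 1}, 'c': [2, 3]})
    # -> {'a/b': 1, 'c/0': 2, 'c/1': 3}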
"""
if isinstance(obj, dict):
iterator = obj.items()
elif isinstance(obj, (list, tuple)):
iterator = enumerate(obj)
else:
return { path.strip('/'): obj }
d = {}
for key, value in iterator:
key = six.text_type(key)
d.update(parse_object(value, path + key + '/'))
return d
def ndjson2csv(f, key=None, **kwargs):
"""
Convert a newline-delimited JSON document into CSV format.
Each line of the input must be a JSON object; the union of the keys
across all lines becomes the CSV header.
"""
first_line = f.readline()
first_row = json.loads(first_line, object_pairs_hook=OrderedDict)
js = itertools.chain((first_row, ), (json.loads(l, object_pairs_hook=OrderedDict) for l in f))
fields = []
flat = []
for obj in js:
flat.append(parse_object(obj))
for field in obj.keys():
if field not in fields:
fields.append(field)
o = six.StringIO()
writer = CSVKitWriter(o)
writer.writerow(fields)
for i in flat:
row = []
for field in fields:
row.append(i.get(field, None))
writer.writerow(row)
output = o.getvalue()
o.close()
return output
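# Example usage (a minimal sketch; the sample data is hypothetical):
#
#     import six
#     f = six.StringIO('{"a": 1, "b": 2}\n{"a": 3, "b": 4}\n')
#     print(ndjson2csv(f))
#     # a,b
#     # 1,2
#     # 3,4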
| unpingco/csvkit | csvkit/convert/ndjs.py | Python | mit | 1,769 | 0.005088 |
"""
API implemented using Google Cloud Endpoints on :class:`.Anno` model
.. http:get:: /anno/1.0/anno/(id)
``anno.anno.get`` - Get the details for a specific anno
:param int id: id of the anno
:returns: details of the anno :class:`.AnnoResponseMessage`
.. http:get:: /anno/1.0/anno
``anno.anno.list`` - Get list of annos
:param str cursor: resumption point in a query
:param int limit: number of annos to be returned
:param str select: fields that you want to retrieve
:param str app: name of app for which we need annos
:param str query_type: one of the :class:`.AnnoQueryType`
:param int community: id of the community for which annos are to be returned,
required only when query by **COMMUNITY** of :class:`.AnnoQueryType`
:returns: a list of annos :class:`.AnnoListMessage`
.. http:post:: /anno/1.0/anno
``anno.anno.insert`` - Insert an anno
:param: :class:`.AnnoMessage`
:returns: details of the anno :class:`.AnnoResponseMessage`
.. http:post:: /anno/1.0/anno/(id)
``anno.anno.merge`` - Edit a specific anno
:param int id: id of the anno
:param: :class:`.AnnoMergeMessage`
:returns: details of the anno :class:`.AnnoResponseMessage`
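An illustrative request against a deployed Cloud Endpoints instance
(host and anno id are placeholders)::
    GET https://<app-id>.appspot.com/_ah/api/anno/1.0/anno/12345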
"""
__author__ = 'topcircler'
import datetime
import logging
import re
import endpoints
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext.db import BadValueError
from protorpc import message_types
from protorpc import messages
from protorpc import remote
from message.anno_api_messages import AnnoMessage
from message.anno_api_messages import AnnoMergeMessage
from message.anno_api_messages import AnnoListMessage
from message.anno_api_messages import AnnoDashboardListMessage
from message.anno_api_messages import AnnoResponseMessage
from message.anno_api_messages import UserUnreadMessage
from message.anno_api_messages import AnnoTeamNotesMetadataMessage
from message.anno_api_messages import AnnoMentionsResponseMessage
from message.user_message import UserMessage
from message.user_message import UserListMessage
from model.anno import Anno
from model.vote import Vote
from model.flag import Flag
from model.community import Community
from model.follow_up import FollowUp
from model.userannostate import UserAnnoState
from model.tags import Tag
from model.appinfo import AppInfo
from model.user import User
from helper.settings import anno_js_client_id
from helper.utils import auth_user
from helper.utils import put_search_document
from helper.utils import extract_tags_from_text
from helper.utils import parseTeamNotesForHashtags
from helper.activity_push_notifications import ActivityPushNotifications
from helper.utils_enum import AnnoQueryType, AnnoActionType
from helper.utils_enum import SearchIndexName
@endpoints.api(name='anno', version='1.0', description='Anno API',
allowed_client_ids=[endpoints.API_EXPLORER_CLIENT_ID, anno_js_client_id])
class AnnoApi(remote.Service):
"""
Class which defines Anno API v1.
"""
anno_with_id_resource_container = endpoints.ResourceContainer(
message_types.VoidMessage,
id=messages.IntegerField(2, required=True),
team_key=messages.StringField(3),
team_notes=messages.StringField(4),
tagged_users=messages.StringField(5, repeated=True)
)
anno_list_resource_container = endpoints.ResourceContainer(
message_types.VoidMessage,
cursor=messages.StringField(2),
limit=messages.IntegerField(3),
select=messages.StringField(4),
app=messages.StringField(5),
query_type=messages.StringField(6),
community=messages.IntegerField(7),
is_plugin=messages.BooleanField(8),
team_key=messages.StringField(9),
anno_id=messages.IntegerField(10)
)
anno_update_resource_container = endpoints.ResourceContainer(
AnnoMergeMessage,
id=messages.IntegerField(2, required=True),
app_name=messages.StringField(3),
community_name=messages.StringField(4),
platform_type=messages.StringField(5)
)
anno_search_resource_container = endpoints.ResourceContainer(
search_string=messages.StringField(1, required=False),
app_name=messages.StringField(2, required=False),
order_type=messages.StringField(3, required=True),
cursor=messages.StringField(4), # can't make it work, not sure why. may check it in the future.
limit=messages.IntegerField(5),
offset=messages.IntegerField(6),
only_my_apps=messages.BooleanField(7)
)
anno_user_email_resource_container = endpoints.ResourceContainer(
user_email=messages.StringField(1),
team_key=messages.StringField(2)
)
@endpoints.method(anno_with_id_resource_container, AnnoResponseMessage, path='anno/{id}',
http_method='GET', name='anno.get')
def anno_get(self, request):
"""
Exposes an API endpoint to get an anno detail by the specified id.
"""
try:
user = auth_user(self.request_state.headers)
except Exception:
user = None
if request.id is None:
raise endpoints.BadRequestException('id field is required.')
anno = Anno.get_by_id(request.id)
if anno is None:
raise endpoints.NotFoundException('No anno entity with the id "%s" exists.' % request.id)
# set anno basic properties
anno_resp_message = anno.to_response_message(user, list_message=False)
# set anno association with followups
followups = FollowUp.find_by_anno(anno)
followup_messages = [ entity.to_message(team_key=request.team_key) for entity in followups ]
anno_resp_message.followup_list = followup_messages
# set anno association with votes/flags
# if current user exists, then fetch vote/flag.
if user is not None:
anno_resp_message.is_my_vote = Vote.is_belongs_user(anno, user)
anno_resp_message.is_my_flag = Flag.is_belongs_user(anno, user)
# update last_read of UserAnnoState
UserAnnoState.update_last_read(user=user, anno=anno)
return anno_resp_message
@endpoints.method(anno_list_resource_container, AnnoListMessage, path='anno',
http_method='GET', name='anno.list')
def anno_list(self, request):
"""
Exposes an API endpoint to retrieve a list of anno.
"""
user = auth_user(self.request_state.headers)
limit = 10
if request.limit is not None:
limit = request.limit
is_plugin = request.is_plugin or False
curs = None
if request.cursor is not None:
try:
curs = Cursor(urlsafe=request.cursor)
except BadValueError:
raise endpoints.BadRequestException('Invalid cursor %s.' % request.cursor)
select_projection = None
if request.select is not None:
select_projection = request.select.split(',')
if request.query_type == AnnoQueryType.CREATED:
return Anno.query_by_app_by_created(request.app, limit, select_projection, curs, user)
elif request.query_type == AnnoQueryType.VOTE_COUNT:
return Anno.query_by_vote_count(request.app, user)
elif request.query_type == AnnoQueryType.FLAG_COUNT:
return Anno.query_by_flag_count(request.app, user)
elif request.query_type == AnnoQueryType.ACTIVITY_COUNT:
return Anno.query_by_activity_count(request.app, user)
elif request.query_type == AnnoQueryType.LAST_ACTIVITY:
return Anno.query_by_last_activity(request.app, user)
elif request.query_type == AnnoQueryType.COUNTRY:
return Anno.query_by_country(request.app, user)
elif request.query_type == AnnoQueryType.COMMUNITY:
community = Community.get_by_id(request.community)
return Anno.query_by_community(community, limit, select_projection, curs, user)
elif request.query_type == AnnoQueryType.APP:
app = AppInfo.get(request.app)
return Anno.query_by_app(app, limit, select_projection, curs, user)
else:
return Anno.query_by_page(limit, select_projection, curs, user, is_plugin)
@endpoints.method(anno_list_resource_container, AnnoDashboardListMessage, path='anno/dashboard',
http_method='GET', name='anno.dashboard.list')
def anno_dashboard_list(self, request):
user = auth_user(self.request_state.headers)
limit = 10
if request.limit is not None:
limit = request.limit
curs = None
if request.cursor is not None:
try:
curs = Cursor(urlsafe=request.cursor)
except BadValueError:
raise endpoints.BadRequestException('Invalid cursor %s.' % request.cursor)
if request.query_type == AnnoQueryType.MY_MENTIONS:
return Anno.query_by_my_mentions_for_dashboard(limit, curs, user)
elif request.query_type == AnnoQueryType.ACTIVITY_COUNT:
return Anno.query_by_count_for_dashboard(limit, curs, user, request.query_type)
elif request.query_type == AnnoQueryType.VOTE_COUNT:
return Anno.query_by_count_for_dashboard(limit, curs, user, request.query_type)
elif request.query_type == AnnoQueryType.FLAG_COUNT:
return Anno.query_by_count_for_dashboard(limit, curs, user, request.query_type)
elif request.query_type == AnnoQueryType.ARCHIVED:
return Anno.query_by_page_for_dashboard(limit, curs, user, query_by_archived=True)
elif request.anno_id:
return Anno.query_by_anno_for_dashboard(user, request.anno_id)
else:
return Anno.query_by_page_for_dashboard(limit, curs, user)
@endpoints.method(AnnoMessage, AnnoResponseMessage, path='anno',
http_method='POST', name="anno.insert")
def anno_insert(self, request):
"""
Exposes an API endpoint to insert an anno for the current user.
If the current user doesn't exist, the user will be created first.
"""
user = auth_user(self.request_state.headers)
# checking if same anno exists
exist_anno = Anno.is_anno_exists(user, request)
if exist_anno is not None:
raise endpoints.BadRequestException("Duplicate anno(%s) already exists." % exist_anno.key.id())
entity = Anno.insert_anno(request, user)
# find all hashtags
tags = extract_tags_from_text(entity.anno_text.lower())
for tag, count in tags.iteritems():
# Write the cumulative amount per tag
Tag.add_tag_total(tag, total=count)
# index this document. strange exception here.
put_search_document(entity.generate_search_document(), SearchIndexName.ANNO)
# send push notifications
ActivityPushNotifications.send_push_notification(first_user=user, anno=entity, action_type=AnnoActionType.CREATED)
return entity.to_response_message(user)
@endpoints.method(anno_update_resource_container, AnnoResponseMessage, path='anno/{id}',
http_method='POST', name="anno.merge")
def anno_merge(self, request):
"""
Exposes an API endpoint to merge(update only the specified properties) an anno.
"""
user = auth_user(self.request_state.headers)
if request.id is None:
raise endpoints.BadRequestException('id field is required.')
anno = Anno.get_by_id(request.id)
if anno is None:
raise endpoints.NotFoundException('No anno entity with the id "%s" exists.' % request.id)
anno.merge_from_message(request, user)
# set last update time & activity
anno.last_update_time = datetime.datetime.now()
anno.last_activity = 'anno'
anno.put()
# update search document.
put_search_document(anno.generate_search_document(), SearchIndexName.ANNO)
# send notifications
ActivityPushNotifications.send_push_notification(first_user=user, anno=anno, action_type=AnnoActionType.EDITED)
# update last_read of UserAnnoState
from model.userannostate import UserAnnoState
UserAnnoState.update_last_read(user=user, anno=anno)
return anno.to_response_message(user)
@endpoints.method(anno_with_id_resource_container, message_types.VoidMessage, path='anno/{id}',
http_method='DELETE', name="anno.delete")
def anno_delete(self, request):
"""
Exposes an API endpoint to delete an existing anno.
"""
user = auth_user(self.request_state.headers)
if request.id is None:
raise endpoints.BadRequestException('id field is required.')
anno = Anno.get_by_id(request.id)
if anno is None:
raise endpoints.NotFoundException('No anno entity with the id "%s" exists.' % request.id)
# send notifications
ActivityPushNotifications.send_push_notification(first_user=user, anno=anno, action_type=AnnoActionType.DELETED)
Anno.delete(anno)
return message_types.VoidMessage()
@endpoints.method(anno_list_resource_container, AnnoListMessage, path='anno_my_stuff',
http_method='GET', name='anno.mystuff')
def anno_my_stuff(self, request):
"""
Exposes an API endpoint to return the current user's anno list.
"""
user = auth_user(self.request_state.headers)
limit = request.limit or 10
curs = None
if request.cursor is not None:
try:
curs = Cursor(urlsafe=request.cursor)
except BadValueError:
raise endpoints.BadRequestException('Invalid cursor %s.' % request.cursor)
return Anno.query_my_anno(limit, curs, user)
@endpoints.method(anno_search_resource_container, AnnoListMessage, path='anno_search', http_method='GET',
name='anno.search')
def anno_search(self, request):
"""
Exposes an API endpoint to search the anno list.
"""
user = auth_user(self.request_state.headers)
if request.order_type is None:
raise endpoints.BadRequestException('order_type field is required.')
if request.order_type != 'recent' and request.order_type != 'active' and request.order_type != 'popular':
raise endpoints.BadRequestException(
'Invalid order_type field value, valid values are "recent", "active" and "popular"')
app_set = None
logging.info("only_my_apps=%s" % request.only_my_apps)
if request.only_my_apps:
app_set = set()
for anno in Anno.query_anno_by_author(user):
app_set.add(anno.app_name)
for vote in Vote.query_vote_by_author(user):
anno = Anno.get_by_id(vote.anno_key.id())
if anno is not None:
app_set.add(anno.app_name)
for flag in Flag.query_flag_by_author(user):
anno = Anno.get_by_id(flag.anno_key.id())
if anno is not None:
app_set.add(anno.app_name)
for followup in FollowUp.query_followup_by_author(user):
anno = Anno.get_by_id(followup.anno_key.id())
if anno is not None:
app_set.add(anno.app_name)
if request.order_type == 'popular':
return Anno.query_by_popular(request.limit, request.offset,
request.search_string, request.app_name, app_set, user)
elif request.order_type == 'active':
return Anno.query_by_active(request.limit, request.offset, request.search_string, request.app_name, app_set, user)
else:
return Anno.query_by_recent(request.limit, request.offset, request.search_string, request.app_name, app_set, user)
@endpoints.method(anno_user_email_resource_container, UserUnreadMessage,
path="user/unread", http_method="GET", name="user.unread")
def get_unread_count(self, request):
return UserUnreadMessage(unread_count=UserAnnoState.get_unread_count(request))
@endpoints.method(anno_with_id_resource_container, UserListMessage,
path="anno/users/{id}", http_method="GET", name="anno.anno.users")
def getEngagedUsers(self, request):
user = auth_user(self.request_state.headers)
return UserListMessage(user_list=Anno.getEngagedUsers(anno_id=request.id, auth_user=user))
@endpoints.method(anno_with_id_resource_container, AnnoTeamNotesMetadataMessage, path='anno/teamnotes',
http_method='POST', name='anno.teamnotes.insert')
def anno_teamnotes_insert(self, request):
anno = Anno.get_by_id(request.id)
user = auth_user(self.request_state.headers)
if anno:
anno.team_notes = request.team_notes
UserAnnoState.tag_users(anno, anno.tagged_users, request.tagged_users)
anno.tagged_users = request.tagged_users
anno.put()
mentions = []
for tagged_user in request.tagged_users:
user_info = User.get_by_id(int(tagged_user))
is_auth_user = user_info.user_email == user.user_email
mentions.append(AnnoMentionsResponseMessage(id=user_info.key.id(),
display_name=user_info.display_name,
user_email=user_info.user_email,
image_url=user_info.image_url,
is_auth_user=is_auth_user))
return AnnoTeamNotesMetadataMessage(tags=parseTeamNotesForHashtags(request.team_notes),
mentions=mentions)
@endpoints.method(anno_with_id_resource_container, message_types.VoidMessage, path='anno/archive',
http_method='POST', name='anno.archive')
def anno_archive(self, request):
Anno.archive(request.id)
return message_types.VoidMessage()
| usersource/anno | anno_gec_server/api/anno_api.py | Python | mpl-2.0 | 18,371 | 0.003212 |
"""Dose GUI for TDD: colored terminal."""
from __future__ import print_function
import os, sys, subprocess, signal, colorama
from .misc import attr_item_call_auto_cache
DEFAULT_TERMINAL_WIDTH = 80
class TerminalSize(object):
r"""
Console/terminal width information getter.
There should be only one instance for this class, and it's the
``terminal_size`` object in this module, whose ``width``
attribute has the desired terminal width. The ``usable_width``
read-only property has the width that can be safely used with a
``"\n"`` at the end without skipping a line.
The ``retrieve_width`` method can be called to update the ``width``
attribute, but there's also a SIGWINCH (SIGnal: WINdow CHanged)
signal handler updating the width if that's a valid signal in the
operating system.
Several strategies for getting the terminal width are combined
in this class, all of them are tried until a width is found. When
a strategy returns ``0`` or ``None``, it means it wasn't able to
collect the console width.
Note: The ``terminal_size`` object should have been created in the
main thread of execution.
"""
width = DEFAULT_TERMINAL_WIDTH # Fallback
# Strategies are (method name without the "from_" prefix, arguments list)
if sys.platform == "win32":
strategies = [
("windows_handle", [subprocess.STD_INPUT_HANDLE]),
("windows_handle", [subprocess.STD_OUTPUT_HANDLE]),
("windows_handle", [subprocess.STD_ERROR_HANDLE]),
]
@property
def usable_width(self):
return self.width - 1
else: # Linux, OS X and Cygwin
strategies = [
("io_control", [sys.stdin]),
("io_control", [sys.stdout]),
("io_control", [sys.stderr]),
("tty_io_control", []),
]
@property
def usable_width(self):
return self.width
strategies.extend([
("tput_subprocess", []), # Cygwin "tput" works on other Windows consoles
("environment_variable", []),
])
def __init__(self):
try:
signal.signal(signal.SIGWINCH, self.retrieve_width)
except (AttributeError, ValueError): # There's no SIGWINCH in Windows
pass
self.retrieve_width()
def retrieve_width(self, signum=None, frame=None):
"""
Stores the terminal width into ``self.width``, if possible.
This function is also the SIGWINCH event handler.
"""
for method_name, args in self.strategies:
method = getattr(self, "from_" + method_name)
width = method(*args)
if width and width > 0:
self.width = width
break # Found!
os.environ["COLUMNS"] = str(self.width) # A hint for the next test job
@staticmethod
def from_environment_variable():
"""Gets the width from the ``COLUMNS`` environment variable."""
return int(os.environ.get("COLUMNS", "0"))
@staticmethod
def from_io_control(fobj):
"""
Call TIOCGWINSZ (Terminal I/O Control to Get the WINdow SiZe)
where ``fobj`` is a file object (e.g. ``sys.stdout``),
returning the terminal width assigned to that file.
        See the ``ioctl``, ``ioctl_list`` and ``tty_ioctl`` man pages
for more information.
"""
import fcntl, termios, array
winsize = array.array("H", [0] * 4) # row, col, xpixel, ypixel
if not fcntl.ioctl(fobj.fileno(), termios.TIOCGWINSZ, winsize, True):
return winsize[1]
@classmethod
def from_tty_io_control(cls):
"""Calls cls.from_io_control for the tty file descriptor."""
with open(os.ctermid(), "rb") as fobj:
return cls.from_io_control(fobj)
@staticmethod
def from_tput_subprocess():
"""
Gets the terminal width from the ``tput`` shell command,
usually available in Linux, OS X and Cygwin (Windows).
"""
try:
            # Windows requires shell=True to avoid the tput extension
return int(subprocess.check_output("tput cols", shell=True))
except (OSError, # tput not found
subprocess.CalledProcessError): # tput didn't return 0
return 0
@staticmethod
def from_windows_handle(std_handle):
"""
Use the Windows Console Handles API to get the console width,
where ``std_handle`` is the WINAPI ``GetStdHandle`` input
(e.g. STD_INPUT_HANDLE).
https://msdn.microsoft.com/library/windows/desktop/ms682075
"""
from ctypes import windll, c_ushort
# https://msdn.microsoft.com/library/windows/desktop/ms683231
handle = windll.kernel32.GetStdHandle(std_handle)
# https://msdn.microsoft.com/library/windows/desktop/ms682093
info = (c_ushort * 11)() # It's a CONSOLE_SCREEN_BUFFER_INFO:
# xsize, ysize, | COORD dwSize
# xcursor, ycursor, | COORD dwCursorPosition
# attributes, | WORD wAttributes
# left, top, right, bottom, | SMALL_RECT srWindow
# xmax, ymax | COORD dwMaximumWindowSize
# https://msdn.microsoft.com/library/windows/desktop/ms683171
if windll.kernel32.GetConsoleScreenBufferInfo(handle, info):
return info[7] - info[5] + 1
terminal_size = TerminalSize()
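# Illustrative usage sketch (not executed here): ``terminal_size`` above is
# the module-level singleton described in the class docstring. Assuming an
# 80-column console:
#
#     from dose.terminal import terminal_size
#     terminal_size.width         # e.g. 80 (kept up to date via SIGWINCH)
#     terminal_size.usable_width  # 79 on win32, 80 on POSIX terminals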
@attr_item_call_auto_cache
def fg(color):
"""
Foreground color formatter function factory.
Each function casts from a unicode string to a colored bytestring
with the respective foreground color and foreground reset ANSI
escape codes. You can also use the ``fg.color`` or ``fg[color]``
directly as attributes/items.
The colors are the names of the ``colorama.Fore`` attributes
(case insensitive). For more information, see:
https://pypi.python.org/pypi/colorama
https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
"""
ansi_code = [getattr(colorama.Fore, color.upper()), colorama.Fore.RESET]
return lambda msg: msg.join(ansi_code)
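# Hedged example: per the docstring above, ``fg`` accepts any colorama.Fore
# color name through attribute, item or call syntax, so these are equivalent:
#
#     fg.red(u"error")       # attribute access
#     fg["red"](u"error")    # item access
#     fg("red")(u"error")    # plain call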
@attr_item_call_auto_cache
def log(color):
"""
Function factory for foreground-colored loggers (printers).
The ``log.color(msg)`` and ``print(fg.color(msg))`` are the
same. On Windows, the ANSI escape codes for colors are mapped to
``SetConsoleTextAttribute`` Windows Console Handles API function
calls by the ``colorama`` package.
https://msdn.microsoft.com/library/windows/desktop/ms686047
    The colorama initialization is in the ``dose.__main__`` module.
See ``fg`` for more information.
"""
foreground = fg(color)
return lambda msg: print(foreground(msg))
@attr_item_call_auto_cache
def hr(color):
"""
Colored horizontal rule printer/logger factory.
The resulting function prints an entire terminal row with the given
symbol repeated. It's a terminal version of the HTML ``<hr/>``.
"""
logger = log(color)
return lambda symbol: logger(symbol * terminal_size.usable_width)
def centralize(msg):
"""Add spaces to centralize the string in the terminal."""
return msg.center(terminal_size.usable_width)
@attr_item_call_auto_cache
def clog(color):
"""Same to ``log``, but this one centralizes the message first."""
logger = log(color)
return lambda msg: logger(centralize(msg).rstrip())
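# Illustrative combined usage of the factories above, assuming colorama has
# already been initialized (per the ``log`` docstring, in dose.__main__):
#
#     hr.blue("=")                 # full-width blue "=" rule
#     clog.yellow(u"Section")      # centered yellow heading
#     log.green(u"All tests OK")   # plain colored line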
| danilobellini/dose | dose/terminal.py | Python | gpl-3.0 | 7,486 | 0.002404 |
# -*- coding: utf-8 -*-
########################################################################
# #
# python-OBD: A python OBD-II serial module derived from pyobd #
# #
# Copyright 2004 Donour Sizemore (donour@uchicago.edu) #
# Copyright 2009 Secons Ltd. (www.obdtester.com) #
# Copyright 2009 Peter J. Creath #
# Copyright 2015 Brendan Whitfield (bcw7044@rit.edu) #
# #
########################################################################
# #
# utils.py #
# #
# This file is part of python-OBD (a derivative of pyOBD) #
# #
# python-OBD is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 2 of the License, or #
# (at your option) any later version. #
# #
# python-OBD is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with python-OBD. If not, see <http://www.gnu.org/licenses/>. #
# #
########################################################################
import errno
import glob
import logging
import string
import sys
import serial
logger = logging.getLogger(__name__)
class OBDStatus:
""" Values for the connection status flags """
NOT_CONNECTED = "Not Connected"
ELM_CONNECTED = "ELM Connected"
OBD_CONNECTED = "OBD Connected"
CAR_CONNECTED = "Car Connected"
class BitArray:
"""
Class for representing bitarrays (inefficiently)
There's a nice C-optimized lib for this: https://github.com/ilanschnell/bitarray
but python-OBD doesn't use it enough to be worth adding the dependency.
But, if this class starts getting used too much, we should switch to that lib.
"""
def __init__(self, _bytearray):
self.bits = ""
for b in _bytearray:
v = bin(b)[2:]
self.bits += ("0" * (8 - len(v))) + v # pad it with zeros
def __getitem__(self, key):
if isinstance(key, int):
if key >= 0 and key < len(self.bits):
return self.bits[key] == "1"
else:
return False
elif isinstance(key, slice):
bits = self.bits[key]
if bits:
return [b == "1" for b in bits]
else:
return []
def num_set(self):
return self.bits.count("1")
def num_cleared(self):
return self.bits.count("0")
def value(self, start, stop):
bits = self.bits[start:stop]
if bits:
return int(bits, 2)
else:
return 0
def __len__(self):
return len(self.bits)
def __str__(self):
return self.bits
def __iter__(self):
return [b == "1" for b in self.bits].__iter__()
def bytes_to_int(bs):
""" converts a big-endian byte array into a single integer """
v = 0
p = 0
for b in reversed(bs):
v += b * (2 ** p)
p += 8
return v
def bytes_to_hex(bs):
h = ""
for b in bs:
bh = hex(b)[2:]
h += ("0" * (2 - len(bh))) + bh
return h
def twos_comp(val, num_bits):
"""compute the 2's compliment of int value val"""
if ((val & (1 << (num_bits - 1))) != 0):
val = val - (1 << num_bits)
return val
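# Quick sanity checks for the helpers above (illustrative):
#
#     bytes_to_int(bytearray([0x12, 0x34]))   # 0x1234 == 4660 (big-endian)
#     bytes_to_hex(bytearray([0x0F, 0xA0]))   # "0fa0" (zero-padded)
#     twos_comp(0xFF, 8)                      # -1
#     twos_comp(0x7F, 8)                      # 127 (sign bit clear)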
def isHex(_hex):
return all([c in string.hexdigits for c in _hex])
def contiguous(l, start, end):
""" checks that a list of integers are consequtive """
if not l:
return False
if l[0] != start:
return False
if l[-1] != end:
return False
    # for consecutiveness, look at the integers in pairs
pairs = zip(l, l[1:])
if not all([p[0] + 1 == p[1] for p in pairs]):
return False
return True
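# Illustrative behaviour of the two predicates above:
#
#     isHex("48656c6c6f")            # True
#     isHex("0x48")                  # False ("x" is not a hex digit)
#     contiguous([1, 2, 3], 1, 3)    # True
#     contiguous([1, 3], 1, 3)       # False (gap between 1 and 3)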
def try_port(portStr):
"""returns boolean for port availability"""
try:
s = serial.Serial(portStr)
s.close() # explicit close 'cause of delayed GC in java
return True
except serial.SerialException:
pass
except OSError as e:
if e.errno != errno.ENOENT: # permit "no such file or directory" errors
raise e
return False
def scan_serial():
"""scan for available ports. return a list of serial names"""
available = []
possible_ports = []
if sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
possible_ports += glob.glob("/dev/rfcomm[0-9]*")
possible_ports += glob.glob("/dev/ttyUSB[0-9]*")
elif sys.platform.startswith('win'):
possible_ports += ["\\.\COM%d" % i for i in range(256)]
elif sys.platform.startswith('darwin'):
exclude = [
'/dev/tty.Bluetooth-Incoming-Port',
'/dev/tty.Bluetooth-Modem'
]
possible_ports += [port for port in glob.glob('/dev/tty.*') if port not in exclude]
# possible_ports += glob.glob('/dev/pts/[0-9]*') # for obdsim
for port in possible_ports:
if try_port(port):
available.append(port)
return available
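# Typical use (illustrative; actual results depend on attached hardware):
#
#     ports = scan_serial()   # e.g. ["/dev/ttyUSB0", "/dev/rfcomm0"]
#     if ports:
#         connection = serial.Serial(ports[0])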
| brendan-w/python-OBD | obd/utils.py | Python | gpl-2.0 | 6,075 | 0.000988 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2013 Zuza Software Foundation
# Copyright 2013 Evernote Corporation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from django.conf.urls import patterns, url
from .views import (AdminTemplateView, PageCreateView, PageDeleteView,
PageUpdateView)
urlpatterns = patterns('',
url(r'^legal/agreement/$',
'staticpages.views.legal_agreement',
name='pootle-staticpages-legal-agreement'),
url(r'^(?P<virtual_path>.+)/$',
'staticpages.views.display_page',
name='pootle-staticpages-display'),
)
admin_patterns = patterns('',
url(r'^$',
AdminTemplateView.as_view(),
name='pootle-staticpages'),
url(r'^(?P<page_type>[^/]+)/add/?$',
PageCreateView.as_view(),
name='pootle-staticpages-create'),
url(r'^(?P<page_type>[^/]+)/(?P<pk>\d+)/?$',
PageUpdateView.as_view(),
name='pootle-staticpages-edit'),
url(r'^(?P<page_type>[^/]+)/(?P<pk>\d+)/delete/?$',
PageDeleteView.as_view(),
name='pootle-staticpages-delete'),
)
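# Illustrative reverse() lookups for the named patterns above (Django 1.x
# style; the 'about' and 'static' arguments are hypothetical, and the
# resulting paths depend on where these patterns are included):
#
#     from django.core.urlresolvers import reverse
#     reverse('pootle-staticpages-display', kwargs={'virtual_path': 'about'})
#     reverse('pootle-staticpages-edit', kwargs={'page_type': 'static', 'pk': 1})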
| evernote/pootle | pootle/apps/staticpages/urls.py | Python | gpl-2.0 | 1,786 | 0.00112 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-03 23:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20160403_2058'),
]
operations = [
migrations.AlterField(
model_name='usercompanyinfo',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| nborkowska/kpir2 | kpir2/users/migrations/0004_auto_20160403_2348.py | Python | gpl-3.0 | 581 | 0.001721 |
"""Test class for ISO downloads UI
:Requirement: Isodownload
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from robottelo.decorators import run_only_on, stubbed, tier1
from robottelo.test import UITestCase
class ISODownloadTestCase(UITestCase):
"""Test class for iso download feature"""
@stubbed()
@run_only_on('sat')
@tier1
def test_positive_download(self):
"""Downloading ISO from export
:id: 47f20df7-f6f3-422b-b57b-3a5ef9cf62ad
:Steps:
            1. find out the location where all ISOs are kept
            2. check whether a valid ISO can be downloaded
        :expectedresults: the ISO file is properly downloaded on your
            Satellite 6 system
:caseautomation: notautomated
:CaseImportance: Critical
"""
@stubbed()
@run_only_on('sat')
@tier1
def test_positive_upload(self):
"""Uploadng the iso successfully to the sat6 system
:id: daf87a68-7c61-46f1-b4cc-021476080b6b
:Steps:
            1. download the ISO
            2. upload it to the Satellite 6 system
        :expectedresults: uploading the ISO to Satellite 6 is successful
:caseautomation: notautomated
:CaseImportance: Critical
"""
@stubbed()
@run_only_on('sat')
@tier1
def test_positive_mount(self):
"""Mounting iso to directory accessible to satellite6 works
:id: 44d3c8fa-c01f-438c-b83e-8f6894befbbf
:Steps:
            1. download the ISO
            2. upload it to the Satellite 6 system
            3. mount it to a local Satellite 6 directory
        :expectedresults: the ISO is mounted to a local Satellite 6 directory
:caseautomation: notautomated
:CaseImportance: Critical
"""
@stubbed()
@run_only_on('sat')
@tier1
def test_positive_validate_cdn_url(self):
"""Validate that cdn url to file path works
:id: 00157f61-1557-48a7-b7c9-6dac726eff94
:Steps:
            1. after mounting the ISO locally, try to update the CDN URL
            2. the path should be validated
        :expectedresults: the CDN URL path is validated
:caseautomation: notautomated
:CaseImportance: Critical
"""
@stubbed()
@run_only_on('sat')
@tier1
def test_positive_check_message(self):
"""Check if proper message is displayed after successful upload
:id: 5ed31a26-b902-4352-900f-bb38eac95511
:Steps:
            1. mount the ISO to Satellite 6
            2. update the CDN URL with the file path
            3. check if a proper message is displayed
:expectedresults: Asserting the message after successful upload
:caseautomation: notautomated
:CaseImportance: Critical
"""
@stubbed()
@run_only_on('sat')
@tier1
def test_positive_enable_repo(self):
"""Enable the repositories
:id: e33e2796-0554-419f-b5a1-3e2c8e23e950
:Steps:
            1. mount the ISO to a directory
            2. update the CDN URL
            3. upload the manifest
            4. try to enable Red Hat repositories
        :expectedresults: Red Hat repositories are enabled
:caseautomation: notautomated
:CaseImportance: Critical
"""
@stubbed()
@run_only_on('sat')
@tier1
def test_positive_validate_checkboxes(self):
"""Check if enabling the checkbox works
:id: 10b19405-f82e-4f95-869d-28d91cac1e6f
:Steps:
            1. mount the ISO to a directory
            2. update the CDN URL
            3. upload the manifest
            4. click the checkbox to enable Red Hat repositories
            5. the Red Hat repository is enabled
:expectedresults: Checkbox functionality works
:caseautomation: notautomated
:CaseImportance: Critical
"""
@stubbed()
@run_only_on('sat')
@tier1
def test_positive_sync_repos(self):
"""Sync repos to local iso's
:id: 96266438-4a52-4222-b573-96bd7cde1700
:Steps:
            1. mount the ISO to a directory
            2. update the CDN URL
            3. upload the manifest
            4. try to enable Red Hat repositories
            5. sync the repos
:expectedresults: Repos are synced after upload
:caseautomation: notautomated
:CaseImportance: Critical
"""
@stubbed()
@run_only_on('sat')
@tier1
def test_positive_disable_repo(self):
"""Disabling the repo works
:id: 075700a7-fda0-41db-b9b7-3d6b29f63784
:Steps:
            1. mount the ISO to a directory
            2. update the CDN URL
            3. upload the manifest
            4. try to enable Red Hat repositories
            5. sync the contents
            6. try disabling the repository
:expectedresults: Assert disabling the repo
:caseautomation: notautomated
:CaseImportance: Critical
"""
| elyezer/robottelo | tests/foreman/ui/test_isodownload.py | Python | gpl-3.0 | 4,978 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = 'MIT'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
import sys
import os
import codecs
# Spell checker support
try:
import enchant
except ImportError:
enchant = None
# Syntax highlight support
try:
from highlight.SyntaxHighlighter import srchiliteqt
except ImportError:
srchiliteqt = None
import numberbar
from PyQt4.Qt import QAction
from PyQt4.Qt import QApplication
from PyQt4.Qt import QEvent
from PyQt4.Qt import QMenu
from PyQt4.Qt import QMouseEvent
from PyQt4.Qt import QTextEdit
from PyQt4.Qt import QSyntaxHighlighter
from PyQt4.Qt import QTextCharFormat
from PyQt4.Qt import QTextCursor
from PyQt4.Qt import Qt
from PyQt4.Qt import QColor
from PyQt4.Qt import QPalette
from PyQt4.QtCore import pyqtSignal
from PyQt4 import QtGui, QtCore
from widgets import SearchWidget
from widgets import SearchReplaceWidget
from widgets import GotoLineWidget
class Editor(QTextEdit):
'''A QTextEdit-based editor that supports syntax highlighting and
spellchecking out of the box'''
langChanged = QtCore.pyqtSignal(QtCore.QString)
def __init__(self, *args):
QTextEdit.__init__(self, *args)
self.lastFolder = QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.DocumentsLocation)
self.docName = None
self.initDict()
def gotoLineWidget(self):
return GotoLineWidget(self)
def searchWidget(self):
'''Creates a search widget hooked to this editor (parent is None)'''
return SearchWidget(self)
def searchReplaceWidget(self):
'''Creates a search/replace widget hooked to this editor (parent is None)'''
return SearchReplaceWidget(self)
def initDict(self, lang=None):
if enchant:
            if lang is None:
# Default dictionary based on the current locale.
try:
self.dict = enchant.Dict()
except enchant.DictNotFoundError:
self.dict=None
else:
self.dict = enchant.Dict(lang)
else:
self.dict=None
self.highlighter = SpellHighlighter(self.document())
if self.dict:
self.highlighter.setDict(self.dict)
self.highlighter.rehighlight()
def killDict(self):
print 'Disabling spellchecker'
self.highlighter.setDocument(None)
self.dict=None
def mousePressEvent(self, event):
if event.button() == Qt.RightButton:
# Rewrite the mouse event to a left button event so the cursor is
# moved to the location of the pointer.
event = QMouseEvent(QEvent.MouseButtonPress, event.pos(),
Qt.LeftButton, Qt.LeftButton, Qt.NoModifier)
QTextEdit.mousePressEvent(self, event)
def contextMenuEvent(self, event):
popup_menu = self.createStandardContextMenu()
pal=QApplication.instance().palette()
# This fixes Issue 20
menu_style=""" * { background-color: %s;
color: %s;}
"""%(unicode(pal.color(QPalette.Button).name()),
unicode(pal.color(QPalette.WindowText).name()))
popup_menu.setStyleSheet(menu_style)
# Select the word under the cursor.
cursor = self.textCursor()
cursor.select(QTextCursor.WordUnderCursor)
self.setTextCursor(cursor)
# Check if the selected word is misspelled and offer spelling
# suggestions if it is.
if enchant and self.dict:
if self.textCursor().hasSelection():
text = unicode(self.textCursor().selectedText())
if not self.dict.check(text):
spell_menu = QMenu(QtCore.QCoreApplication.translate('app','Spelling Suggestions'), self)
spell_menu.setStyleSheet(menu_style)
for word in self.dict.suggest(text):
action = SpellAction(word, spell_menu)
action.correct.connect(self.correctWord)
spell_menu.addAction(action)
# Only add the spelling suggests to the menu if there are
# suggestions.
if len(spell_menu.actions()) != 0:
popup_menu.insertSeparator(popup_menu.actions()[0])
popup_menu.insertMenu(popup_menu.actions()[0], spell_menu)
# FIXME: add change dict and disable spellcheck options
popup_menu.exec_(event.globalPos())
def correctWord(self, word):
'''
Replaces the selected text with word.
'''
cursor = self.textCursor()
cursor.beginEditBlock()
cursor.removeSelectedText()
cursor.insertText(word)
cursor.endEditBlock()
def save(self):
if not self.docName:
self.saveas()
else:
try:
f = QtCore.QFile(self.docName)
if not f.open(QtCore.QIODevice.WriteOnly | QtCore.QIODevice.Truncate):
QtGui.QMessageBox.information(self.parent(), "Error - Marave",
"Error saving %s."%self.docName)
else:
stream = QtCore.QTextStream(f)
encoded = stream.codec().fromUnicode(self.toPlainText())
f.write(encoded)
f.flush()
f.close()
#f=codecs.open(self.docName,'w+','utf-8')
#f.truncate()
#f.write(unicode(self.toPlainText()))
#f.close()
self.document().setModified(False)
# FIXME: doesn't belong in this class
try:
self.parent().notify(self.tr('Document saved'))
except:
pass
except Exception, e:
QtGui.QMessageBox.information(self.parent(), "Error - Marave",
"Error saving %s."%self.docName)
def saveas(self):
QtCore.QCoreApplication.instance().setOverrideCursor(QtCore.Qt.ArrowCursor)
fdialog = QtGui.QFileDialog(self.parent(), self.tr("Save as"), self.lastFolder)
fdialog.setFileMode(fdialog.AnyFile)
fdialog.setAcceptMode(fdialog.AcceptSave)
fname = None
if fdialog.exec_():
fname = unicode(fdialog.selectedFiles()[0])
print 'FNAME:', fname
#fname=unicode(QtGui.QFileDialog.getSaveFileName(self.parent(), self.tr("Save as"), self.lastFolder))
QtCore.QCoreApplication.instance().restoreOverrideCursor()
if fname:
self.docName=fname
self.save()
def new(self):
QtCore.QCoreApplication.instance().setOverrideCursor(QtCore.Qt.ArrowCursor)
try:
if self.document().isModified():
                r=QtGui.QMessageBox.question(self.parent(), self.tr("New Document"), self.tr("The document \"%s\" has been modified."\
                    "\nDo you want to save your changes or discard them?")%(self.docName or "UNNAMED"),
                    QtGui.QMessageBox.Save|QtGui.QMessageBox.Discard|QtGui.QMessageBox.Cancel,QtGui.QMessageBox.Cancel)
if r==QtGui.QMessageBox.Save:
self.save()
elif r==QtGui.QMessageBox.Discard:
self.docName=''
self.setPlainText('')
self.parent().setWindowFilePath('Untitled')
else:
self.docName=''
self.setPlainText('')
self.parent().setWindowFilePath('Untitled')
except:
pass
QtCore.QCoreApplication.instance().restoreOverrideCursor()
def open(self, fname=None):
self.new()
if self.docName:
return
if not fname:
QtCore.QCoreApplication.instance().setOverrideCursor(QtCore.Qt.ArrowCursor)
fdialog = QtGui.QFileDialog(self.parent(), self.tr("Open File"), self.lastFolder)
fdialog.setFileMode(fdialog.AnyFile)
fname = None
if fdialog.exec_():
fname = unicode(fdialog.selectedFiles()[0])
print 'FNAME:', fname
#fname=unicode(QtGui.QFileDialog.getOpenFileName(self.parent(),
#self.tr("Open file"), self.lastFolder))
QtCore.QCoreApplication.instance().restoreOverrideCursor()
if fname:
self.docName=fname
self.lastFolder = os.path.dirname(fname)
if os.path.exists(fname):
if os.path.isfile(fname):
# If spell checking is disabled, use syntax highlighter
if not self.dict and srchiliteqt:
self.highlighter.setDocument(None)
self.highlighter=srchiliteqt.Qt4SyntaxHighlighter(self.document())
                    self.highlighter.setDefaultToMonospace(False)
langName=self.highlighter.getLangDefFileFromFileName(fname)
if langName:
self.langChanged.emit(langName)
self.highlighter.init(langName)
else: # Can't figure the language
self.highlighter.setDocument(None)
# Open with QTextStream, which should use the right locale
# and try to autodetect encoding
f = QtCore.QFile(fname)
if not f.open(QtCore.QIODevice.ReadOnly | QtCore.QIODevice.Text):
# Error opening file
# FIXME: report error
QtGui.QMessageBox.information(self.parent(), "Error - Marave",
"Can't open %s."%fname)
else:
stream = QtCore.QTextStream(f)
text = unicode(stream.readAll())
self.setPlainText(text)
else:
QtGui.QMessageBox.information(self.parent(), "Error - Marave",
"%s is not a file."%fname)
# FIXME: this doesn't belong in this class
try:
self.parent().setWindowFilePath(self.docName)
except:
pass
def setHL(self, lang, style):
"""Disable spellchecking and enable syntax highlighting"""
if isinstance(self.highlighter, SpellHighlighter):
self.killDict()
self.highlighter=srchiliteqt.Qt4SyntaxHighlighter(self.document())
        self.highlighter.setDefaultToMonospace(False)
self.highlighter.setDocument(self.document())
self.highlighter.init(lang)
self.highlighter.setFormattingStyle(style)
t=self.document().toPlainText()
self.setPlainText(t)
def smaller(self):
f=self.font()
f.setPointSize(f.pointSize()-1)
self.setFont(f)
# FIXME: this doesn't belong in this class
try:
self.parent().settings.setValue('fontsize',self.font().pointSize())
self.parent().settings.sync()
except:
pass
def larger(self):
f=self.font()
f.setPointSize(f.pointSize()+1)
self.setFont(f)
# FIXME: this doesn't belong in this class
try:
self.parent().settings.setValue('fontsize',self.font().pointSize())
self.parent().settings.sync()
except:
pass
def default(self):
f=self.font()
f.setPointSize(self.defSize)
self.setFont(f)
# FIXME: this doesn't belong in this class
try:
self.parent().settings.setValue('fontsize',self.font().pointSize())
self.parent().settings.sync()
except:
pass
def mouseMoveEvent(self, ev):
# FIXME: this doesn't belong in this class
try:
self.parent().showButtons()
self.parent().showCursor()
except:
pass
return QtGui.QTextEdit.mouseMoveEvent(self, ev)
class SpellHighlighter(QSyntaxHighlighter):
WORDS = u'(?iu)[\w\']+'
def __init__(self, *args):
QSyntaxHighlighter.__init__(self, *args)
self.dict = None
def setDict(self, dict):
self.dict = dict
def highlightBlock(self, text):
if not self.dict:
return
text = unicode(text)
format = QTextCharFormat()
#format.setUnderlineColor(Qt.red)
format.setUnderlineStyle(QTextCharFormat.DotLine)
for word_object in re.finditer(self.WORDS, text):
if not self.dict.check(word_object.group()):
self.setFormat(word_object.start(),
word_object.end() - word_object.start(), format)
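# Illustrative wiring, mirroring Editor.initDict above (assumes an en_US
# enchant dictionary is installed on the system):
#
#     highlighter = SpellHighlighter(some_textedit.document())
#     highlighter.setDict(enchant.Dict("en_US"))
#     highlighter.rehighlight()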
class SpellAction(QAction):
'''
A special QAction that returns the text in a signal.
'''
correct = pyqtSignal(unicode)
def __init__(self, *args):
QAction.__init__(self, *args)
self.triggered.connect(lambda x: self.correct.emit(
unicode(self.text())))
def main(args=sys.argv):
app = QApplication(args)
container = QtGui.QFrame()
layout=QtGui.QHBoxLayout()
editor = Editor()
nb=numberbar.NumberBar(edit=editor)
layout.setMargin(0)
layout.setSpacing(0)
layout.addWidget(nb)
layout.addWidget(editor)
container.setLayout(layout)
container.show()
return app.exec_()
if __name__ == '__main__':
sys.exit(main())
| thegooglecodearchive/marave | marave/editor/spelltextedit.py | Python | gpl-2.0 | 13,856 | 0.009454 |
# Module containing non-deprecated functions borrowed from Numeric.
__docformat__ = "restructuredtext en"
# functions that are now methods
__all__ = ['take', 'reshape', 'choose', 'repeat', 'put',
'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin',
'searchsorted', 'alen',
'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape',
'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue',
'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim',
'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze',
'amax', 'amin',
]
import multiarray as mu
import umath as um
import numerictypes as nt
from numeric import asarray, array, asanyarray, concatenate
_dt_ = nt.sctype2char
import types
try:
_gentype = types.GeneratorType
except AttributeError:
_gentype = types.NoneType
# save away Python sum
_sum_ = sum
# functions that are now methods
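# _wrapit applies the named ndarray method to asarray(obj) and then passes
# the result back through obj.__array_wrap__ (when present), so that ndarray
# subclasses keep their own type instead of decaying to a plain ndarray.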
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj),method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
This function does the same thing as "fancy" indexing (indexing arrays
using arrays); however, it can be easier to use if you need elements
along a given axis.
Parameters
----------
a : array_like
The source array.
indices : array_like
The indices of the values to extract.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
See Also
--------
ndarray.take : equivalent method
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
"""
try:
take = a.take
except AttributeError:
return _wrapit(a, 'take', indices, axis, out, mode)
return take(indices, axis, out, mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is inferred
from the length of the array and remaining dimensions.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) order or FORTRAN (column-major) order.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
    copying the data. If you want an error to be raised if the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose make the array non-contiguous
>>> b = a.T
    # Taking a view makes it possible to modify the shape without modifying the
# initial object.
>>> c = b.view()
>>> c.shape = (20)
AttributeError: incompatible shape for a non-contiguous array
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
try:
reshape = a.reshape
except AttributeError:
return _wrapit(a, 'reshape', newshape, order=order)
return reshape(newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of `n` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each `i`. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode=raise`` (the default), then, first of all, each element of
`a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
`i` (in that range) is the value at the `(j0, j1, ..., jm)` position
in `Ba` - then the value at the same position in the new array is the
value in `Bchoices[i]` at that same position;
* if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
integer; negative integers are mapped to 0; values greater than `n-1`
are mapped to `n-1`; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in `[0, n-1]`, where `n` is the number
of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside `[0, n-1]` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod `n`
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
try:
choose = a.choose
except AttributeError:
return _wrapit(a, 'choose', choices, out=out, mode=mode)
return choose(choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : {int, array of ints}
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
try:
repeat = a.repeat
except AttributeError:
return _wrapit(a, 'repeat', repeats, axis)
return repeat(repeats, axis)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
See Also
--------
putmask, place
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
return a.put(ind, v, mode)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
If `a` is an ndarray, then a view of `a` is returned; otherwise
a new array is created.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
try:
swapaxes = a.swapaxes
except AttributeError:
return _wrapit(a, 'swapaxes', axis1, axis2)
return swapaxes(axis1, axis2)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
rollaxis
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
try:
transpose = a.transpose
except AttributeError:
return _wrapit(a, 'transpose', axes)
return transpose(axes)
def sort(a, axis=-1, kind='quicksort', order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The three available algorithms have the following
properties:
=========== ======= ============= ============ =======
kind speed worst case work space stable
=========== ======= ============= ============ =======
'quicksort' 1 O(n^2) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'heapsort' 3 O(n*log(n)) 0 no
=========== ======= ============= ============ =======
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy()
a.sort(axis, kind, order)
return a
def argsort(a, axis=-1, kind='quicksort', order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
try:
argsort = a.argsort
except AttributeError:
return _wrapit(a, 'argsort', axis, kind, order)
return argsort(axis, kind, order)
def argmax(a, axis=None):
"""
Indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
"""
try:
argmax = a.argmax
except AttributeError:
return _wrapit(a, 'argmax', axis)
return argmax(axis)
def argmin(a, axis=None):
"""
Return the indices of the minimum values along an axis.
See Also
--------
argmax : Similar function. Please refer to `numpy.argmax` for detailed
documentation.
"""
try:
argmin = a.argmin
except AttributeError:
return _wrapit(a, 'argmin', axis)
return argmin(axis)
def searchsorted(a, v, side='left'):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the corresponding
elements in `v` were inserted before the indices, the order of `a` would
be preserved.
Parameters
----------
a : 1-D array_like
Input array, sorted in ascending order.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given. If
'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of Numpy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
try:
searchsorted = a.searchsorted
except AttributeError:
return _wrapit(a, 'searchsorted', v, side)
return searchsorted(v, side)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new array
    is filled with repeated copies of `a`. Note that this behavior is different
from a.resize(new_shape) which fills with zeros instead of repeated
copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : {tuple, int}
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The data
are repeated in the order that the data are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
if not Na: return mu.zeros(new_shape, a.dtype.char)
total_size = um.multiply.reduce(new_shape)
n_copies = int(total_size / Na)
extra = total_size % Na
if total_size == 0:
return a[:0]
if extra != 0:
n_copies = n_copies+1
extra = Na-extra
a = concatenate( (a,)*n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape)
def squeeze(a):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
Returns
-------
squeezed : ndarray
        The input array, but with all dimensions of length 1
removed. Whenever possible, a view on `a` is returned.
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze')
return squeeze()
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form `a[i,i+offset]`.
If `a` has more than two dimensions, then the axes specified
by `axis1` and `axis2` are used to determine the 2-D subarray
whose diagonal is returned. The shape of the resulting array
can be determined by removing `axis1` and `axis2` and appending
an index to the right equal to the size of the resulting diagonals.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D subarrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D subarrays from which
the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D, a 1-D array containing the diagonal is
returned. If `a` has larger dimensions, then an array of
diagonals is returned.
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : Matlab workalike for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
>>> a = np.arange(8).reshape(2,2,2)
>>> a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0,-2,-1)
array([[0, 3],
[4, 7]])
"""
return asarray(a).diagonal(offset, axis1, axis2)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
return asarray(a).trace(offset, axis1, axis2, dtype, out)
def ravel(a, order='C'):
"""
Return a flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F'}, optional
The elements of `a` are read in this order. It can be either
        'C' for row-major order, or 'F' for column-major order.
By default, row-major order is used.
Returns
-------
1d_array : ndarray
Output of the same dtype as `a`, and of shape ``(a.size(),)``.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
Notes
-----
In row-major order, the row index varies the slowest, and the column
index the quickest. This can be generalized to multiple dimensions,
where row-major order implies that the index along the first axis
varies slowest, and the index along the last quickest. The opposite holds
for Fortran-, or column-major, mode.
Examples
--------
If an array is in C-order (default), then `ravel` is equivalent
to ``reshape(-1)``:
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print x.reshape(-1)
[1 2 3 4 5 6]
>>> print np.ravel(x)
[1 2 3 4 5 6]
When flattening using Fortran-order, however, we see
>>> print np.ravel(x, order='F')
[1 4 2 5 3 6]
"""
return asarray(a).ravel(order)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`, containing
the indices of the non-zero elements in that dimension. The
corresponding non-zero values can be obtained with::
a[nonzero(a)]
To group the indices by element, rather than dimension, use::
transpose(nonzero(a))
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
Examples
--------
>>> x = np.eye(3)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> np.nonzero(x)
(array([0, 1, 2]), array([0, 1, 2]))
>>> x[np.nonzero(x)]
array([ 1., 1., 1.])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the boolean array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
try:
nonzero = a.nonzero
except AttributeError:
res = _wrapit(a, 'nonzero')
else:
res = nonzero()
return res
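# The try/except dispatch above recurs throughout this module: use the
# argument's own method when it exists, otherwise fall back to _wrapit,
# which converts through asarray and wraps the result back. Illustrative
# calls (assuming numpy is imported as np):
#   np.nonzero([0, 1, 2])           # plain list -> _wrapit fallback
#   np.nonzero(np.array([0, 1]))    # ndarray -> its own method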
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
alen
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method.
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
try:
compress = a.compress
except AttributeError:
return _wrapit(a, 'compress', condition, axis, out)
return compress(condition, axis, out)
def clip(a, a_min, a_max, out=None):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : scalar or array_like
Minimum value.
a_max : scalar or array_like
Maximum value. If `a_min` or `a_max` are array_like, then they will
be broadcasted to the shape of `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
try:
clip = a.clip
except AttributeError:
return _wrapit(a, 'clip', a_min, a_max, out)
return clip(a_min, a_max, out)
def sum(a, axis=None, dtype=None, out=None):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : integer, optional
Axis over which the sum is taken. By default `axis` is None,
and all elements are summed.
dtype : dtype, optional
The type of the returned array and of the accumulator in which
the elements are summed. By default, the dtype of `a` is used.
An exception is when `a` has an integer type with less precision
than the default platform integer. In that case, the default
platform integer is used instead.
out : ndarray, optional
Array into which the output is placed. By default, a new array is
created. If `out` is given, it must be of the appropriate shape
(the shape of `a` with `axis` removed, i.e.,
``numpy.delete(a.shape, axis)``). Its type is preserved. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
"""
if isinstance(a, _gentype):
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
try:
sum = a.sum
except AttributeError:
return _wrapit(a, 'sum', axis, dtype, out)
return sum(axis, dtype, out)
def product(a, axis=None, dtype=None, out=None):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
try:
prod = a.prod
except AttributeError:
return _wrapit(a, 'prod', axis, dtype, out)
return prod(axis, dtype, out)
def sometrue(a, axis=None, out=None):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function
"""
try:
any = a.any
except AttributeError:
return _wrapit(a, 'any', axis, out)
return any(axis, out)
def alltrue(a, axis=None, out=None):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
try:
all = a.all
except AttributeError:
return _wrapit(a, 'all', axis, out)
return all(axis, out)
def any(a, axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
    Returns a single boolean unless `axis` is not ``None``.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which a logical OR is performed.
The default (`axis` = `None`) is to perform a logical OR
over a flattened input array. `axis` may be negative, in which
case it counts from the last to the first axis.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape as the expected output and
the type is preserved. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
any : bool, ndarray
A new boolean or `ndarray` is returned unless `out` is
specified, in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all array elements along a given axis evaluate
to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False], dtype=bool)
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array([False])
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array([ True], dtype=bool), array([ True], dtype=bool))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
try:
any = a.any
except AttributeError:
return _wrapit(a, 'any', axis, out)
return any(axis, out)
def all(a, axis=None, out=None):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which a logical AND is performed.
The default (`axis` = `None`) is to perform a logical AND
over a flattened input array. `axis` may be negative, in which
case it counts from the last to the first axis.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape as the expected output and
the type is preserved. See `doc.ufuncs` (Section "Output
arguments") for more details.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is
specified, in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any array element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False], dtype=bool)
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array([False])
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z # doctest: +SKIP
(28293632, 28293632, array([ True], dtype=bool))
"""
try:
all = a.all
except AttributeError:
return _wrapit(a, 'all', axis, out)
return all(axis, out)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
    cumsum_along_axis : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
try:
cumsum = a.cumsum
except AttributeError:
return _wrapit(a, 'cumsum', axis, dtype, out)
return cumsum(axis, dtype, out)
def cumproduct(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
try:
cumprod = a.cumprod
except AttributeError:
return _wrapit(a, 'cumprod', axis, dtype, out)
return cumprod(axis, dtype, out)
def ptp(a, axis=None, out=None):
"""
Range of values (maximum - minimum) along an axis.
    The name of the function comes from the abbreviation of 'peak to peak'.
Parameters
----------
a : array_like
Input values.
axis : int, optional
Axis along which to find the peaks. By default, flatten the
array.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.ptp(x, axis=0)
array([2, 2])
>>> np.ptp(x, axis=1)
array([1, 1])
"""
try:
ptp = a.ptp
except AttributeError:
return _wrapit(a, 'ptp', axis, out)
return ptp(axis, out)
def amax(a, axis=None, out=None):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
amax : ndarray
A new array or a scalar array with the result.
See Also
--------
nanmax : nan values are ignored instead of being propagated
fmax : same behavior as the C99 fmax function
argmax : Indices of the maximum values.
Notes
-----
    NaN values are propagated, that is, if at least one item is NaN, the
    corresponding max value will be NaN as well. To ignore NaN values
    (MATLAB behavior), please use nanmax.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a)
3
>>> np.amax(a, axis=0)
array([2, 3])
>>> np.amax(a, axis=1)
array([1, 3])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.nanmax(b)
4.0
"""
try:
amax = a.max
except AttributeError:
return _wrapit(a, 'max', axis, out)
return amax(axis, out)
def amin(a, axis=None, out=None):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default a flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
amin : ndarray
A new array or a scalar array with the result.
See Also
--------
nanmin: nan values are ignored instead of being propagated
fmin: same behavior as the C99 fmin function
argmin: Return the indices of the minimum values.
amax, nanmax, fmax
Notes
-----
    NaN values are propagated, that is, if at least one item is NaN, the
    corresponding min value will be NaN as well. To ignore NaN values
    (MATLAB behavior), please use nanmin.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.nanmin(b)
0.0
"""
try:
amin = a.min
except AttributeError:
return _wrapit(a, 'min', axis, out)
return amin(axis, out)
def alen(a):
"""
Return the length of the first dimension of the input array.
Parameters
----------
a : array_like
Input array.
Returns
-------
l : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
try:
return len(a)
except TypeError:
        return len(array(a, ndmin=1))
def prod(a, axis=None, dtype=None, out=None):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis over which the product is taken. By default, the product
of all elements is calculated.
dtype : data-type, optional
The data-type of the returned array, as well as of the accumulator
in which the elements are multiplied. By default, if `a` is of
integer type, `dtype` is the default platform integer. (Note: if
the type of `a` is unsigned, then so is `dtype`.) Otherwise,
the dtype is the same as that of `a`.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the
output values will be cast if necessary.
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x) #random
16
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == np.int
True
"""
try:
prod = a.prod
except AttributeError:
return _wrapit(a, 'prod', axis, dtype, out)
return prod(axis, dtype, out)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default the
input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If dtype is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a precision
less than that of the default platform integer. In that case, the
default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
    The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
    The cumulative product for each row (i.e., over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
try:
cumprod = a.cumprod
except AttributeError:
return _wrapit(a, 'cumprod', axis, dtype, out)
return cumprod(axis, dtype, out)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def rank(a):
"""
Return the number of dimensions of an array.
If `a` is not already an array, a conversion is attempted.
Scalars are zero dimensional.
Parameters
----------
a : array_like
Array whose number of dimensions is desired. If `a` is not an array,
a conversion is attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in the array.
See Also
--------
ndim : equivalent function
ndarray.ndim : equivalent property
shape : dimensions of array
ndarray.shape : dimensions of array
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in Numpy `ndim` is used instead.
Examples
--------
>>> np.rank([1,2,3])
1
>>> np.rank(np.array([[1,2,3],[4,5,6]]))
2
>>> np.rank(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, Numpy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
try:
round = a.round
except AttributeError:
return _wrapit(a, 'round', decimals, out)
return round(decimals, out)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
Refer to `around` for full documentation.
See Also
--------
around : equivalent function
"""
try:
round = a.round
except AttributeError:
return _wrapit(a, 'round', decimals, out)
return round(decimals, out)
def mean(a, axis=None, dtype=None, out=None):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken
over the flattened array by default, otherwise over the specified
axis. float64 intermediate and return values are used for integer
inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : dtype, optional
        Type to use in computing the mean. For integer inputs, the default
        is float64; for floating point inputs, it is the same as the input
        dtype.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary. See `doc.ufuncs` for details.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([ 2., 3.])
>>> np.mean(a, axis=1)
array([ 1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.546875
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806
"""
try:
mean = a.mean
except AttributeError:
return _wrapit(a, 'mean', axis, dtype, out)
return mean(axis, dtype, out)
def std(a, axis=None, dtype=None, out=None, ddof=0):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : int, optional
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as ``x.sum() / N``, where
``N = len(x)``. If, however, `ddof` is specified, the divisor ``N - ddof``
is used instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of the infinite population. ``ddof=0``
provides a maximum likelihood estimate of the variance for normally
distributed variables. The standard deviation computed in this function
is the square root of the estimated variance, so even with ``ddof=1``, it
will not be an unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949
>>> np.std(a, axis=0)
array([ 1., 1.])
>>> np.std(a, axis=1)
array([ 0.5, 0.5])
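    Using ``ddof=1`` divides by ``N - 1`` and gives the sample estimate:
    >>> np.std(a, ddof=1)
    1.2909944487358056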
In single precision, std() can be inaccurate:
>>> a = np.zeros((2,512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.std(a)
0.45172946707416706
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925552653
"""
try:
std = a.std
except AttributeError:
return _wrapit(a, 'std', axis, dtype, out, ddof)
return std(axis, dtype, out, ddof)
def var(a, axis=None, dtype=None, out=None, ddof=0):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by default,
otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the variance is computed. The default is to compute
the variance of the flattened array.
dtype : dtype, optional
        Type to use in computing the variance. For arrays of integer type
        the default is float64; for arrays of float types it is the same as
        the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
Returns
-------
variance : ndarray, see dtype parameter above
If out=None, returns a new array containing the variance; otherwise
a reference to the output array is returned.
See Also
--------
std : Standard deviation
mean : Average
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of the infinite population. ``ddof=0``
provides a maximum likelihood estimate of the variance for normally
distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1,2],[3,4]])
>>> np.var(a)
1.25
>>> np.var(a,0)
array([ 1., 1.])
>>> np.var(a,1)
array([ 0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2,512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.var(a)
0.20405951142311096
    Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932997387
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.20250000000000001
"""
try:
var = a.var
except AttributeError:
return _wrapit(a, 'var', axis, dtype, out, ddof)
return var(axis, dtype, out, ddof)
| Ademan/NumPy-GSoC | numpy/core/fromnumeric.py | Python | bsd-3-clause | 71,769 | 0.000195 |
import logging
import time
import gevent
import msgpack
import zmq.green as zmq
from lymph.core.components import Component
from lymph.utils.sockets import bind_zmq_socket
logger = logging.getLogger(__name__)
class MonitorPusher(Component):
def __init__(self, container, aggregator, endpoint='127.0.0.1', interval=2):
super(MonitorPusher, self).__init__()
self.container = container
self.interval = interval
ctx = zmq.Context.instance()
self.socket = ctx.socket(zmq.PUB)
self.endpoint, port = bind_zmq_socket(self.socket, endpoint)
logger.info('binding monitoring endpoint %s', self.endpoint)
self.aggregator = aggregator
def on_start(self):
self.loop_greenlet = self.container.spawn(self.loop)
def on_stop(self, **kwargs):
self.loop_greenlet.kill()
def loop(self):
last_stats = time.monotonic()
while True:
gevent.sleep(self.interval)
dt = time.monotonic() - last_stats
series = list(self.aggregator)
stats = {
'time': time.time(),
'series': series,
}
last_stats += dt
self.socket.send_multipart([b'stats', msgpack.dumps(stats)])
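# A minimal subscriber sketch for the stream published above (the port is
# whatever bind_zmq_socket picked, reported by the log line; the topic and
# msgpack framing mirror loop()):
#   import msgpack
#   import zmq
#   sub = zmq.Context.instance().socket(zmq.SUB)
#   sub.connect('tcp://127.0.0.1:<port>')       # <port> taken from the log
#   sub.setsockopt(zmq.SUBSCRIBE, b'stats')
#   topic, payload = sub.recv_multipart()
#   stats = msgpack.loads(payload)              # {'time': ..., 'series': [...]}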
| vpikulik/lymph | lymph/core/monitoring/pusher.py | Python | apache-2.0 | 1,268 | 0.000789 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# cloudstack_pluginlib for openvswitch on KVM hypervisor
import ConfigParser
import json
import logging
import os
import subprocess
from time import localtime, asctime
DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
DEFAULT_LOG_FILE = "/var/log/cloudstack_plugins.log"
PLUGIN_CONFIG_PATH = "/usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/cloudstack_plugins.conf"
OVSDB_PID_PATH = "/var/run/openvswitch/ovsdb-server.pid"
OVSDB_DAEMON_PATH = "ovsdb-server"
OVS_PID_PATH = "/var/run/openvswitch/ovs-vswitchd.pid"
OVS_DAEMON_PATH = "ovs-vswitchd"
VSCTL_PATH = "/usr/bin/ovs-vsctl"
OFCTL_PATH = "/usr/bin/ovs-ofctl"
# xe CLI used by the XenServer-derived helpers below (get_network_id_for_vif
# and friends); the standard XenServer location is an assumption.
XE_PATH = "/opt/xensource/bin/xe"
class PluginError(Exception):
"""Base Exception class for all plugin errors."""
def __init__(self, *args):
Exception.__init__(self, *args)
def setup_logging(log_file=None):
    debug = False
    verbose = False
    log_file_2 = None  # optional log file path read from the plugin config
    log_format = DEFAULT_LOG_FORMAT
    log_date_format = DEFAULT_LOG_DATE_FORMAT
# try to read plugin configuration file
if os.path.exists(PLUGIN_CONFIG_PATH):
config = ConfigParser.ConfigParser()
config.read(PLUGIN_CONFIG_PATH)
try:
options = config.options('LOGGING')
if 'debug' in options:
debug = config.getboolean('LOGGING', 'debug')
if 'verbose' in options:
verbose = config.getboolean('LOGGING', 'verbose')
if 'format' in options:
log_format = config.get('LOGGING', 'format')
if 'date_format' in options:
log_date_format = config.get('LOGGING', 'date_format')
if 'file' in options:
log_file_2 = config.get('LOGGING', 'file')
except ValueError:
# configuration file contained invalid attributes
# ignore them
pass
except ConfigParser.NoSectionError:
# Missing 'Logging' section in configuration file
pass
root_logger = logging.root
if debug:
root_logger.setLevel(logging.DEBUG)
elif verbose:
root_logger.setLevel(logging.INFO)
else:
root_logger.setLevel(logging.WARNING)
formatter = logging.Formatter(log_format, log_date_format)
log_filename = log_file or log_file_2 or DEFAULT_LOG_FILE
logfile_handler = logging.FileHandler(log_filename)
logfile_handler.setFormatter(formatter)
root_logger.addHandler(logfile_handler)
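# A config file in the shape setup_logging() expects (every key optional):
#   [LOGGING]
#   debug = false
#   verbose = true
#   format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
#   date_format = %Y-%m-%d %H:%M:%S
#   file = /var/log/cloudstack_plugins.log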
def do_cmd(cmd):
"""Abstracts out the basics of issuing system commands. If the command
returns anything in stderr, a PluginError is raised with that information.
Otherwise, the output from stdout is returned.
"""
pipe = subprocess.PIPE
logging.debug("Executing:%s", cmd)
proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
stderr=pipe, close_fds=True)
ret_code = proc.wait()
err = proc.stderr.read()
if ret_code:
logging.debug("The command exited with the error code: " +
"%s (stderr output:%s)" % (ret_code, err))
raise PluginError(err)
output = proc.stdout.read()
if output.endswith('\n'):
output = output[:-1]
return output
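# For example, do_cmd([VSCTL_PATH, "list-ports", "br0"]) returns the command's
# stdout with the trailing newline stripped, and raises PluginError if the
# command exits non-zero.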
def _is_process_run(pidFile, name):
try:
fpid = open(pidFile, "r")
pid = fpid.readline()
fpid.close()
except IOError, e:
return -1
pid = pid[:-1]
ps = os.popen("ps -ae")
for l in ps:
if pid in l and name in l:
ps.close()
return 0
ps.close()
return -2
def _is_tool_exist(name):
if os.path.exists(name):
return 0
return -1
def check_switch():
ret = _is_process_run(OVSDB_PID_PATH, OVSDB_DAEMON_PATH)
if ret < 0:
if ret == -1:
return "NO_DB_PID_FILE"
if ret == -2:
return "DB_NOT_RUN"
ret = _is_process_run(OVS_PID_PATH, OVS_DAEMON_PATH)
if ret < 0:
if ret == -1:
return "NO_SWITCH_PID_FILE"
if ret == -2:
return "SWITCH_NOT_RUN"
if _is_tool_exist(VSCTL_PATH) < 0:
return "NO_VSCTL"
if _is_tool_exist(OFCTL_PATH) < 0:
return "NO_OFCTL"
return "SUCCESS"
def _build_flow_expr(**kwargs):
is_delete_expr = kwargs.get('delete', False)
flow = ""
if not is_delete_expr:
flow = "hard_timeout=%s,idle_timeout=%s,priority=%s"\
% (kwargs.get('hard_timeout', '0'),
kwargs.get('idle_timeout', '0'),
kwargs.get('priority', '1'))
in_port = 'in_port' in kwargs and ",in_port=%s" % kwargs['in_port'] or ''
dl_type = 'dl_type' in kwargs and ",dl_type=%s" % kwargs['dl_type'] or ''
dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or ''
dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or ''
nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or ''
nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or ''
table = 'table' in kwargs and ",table=%s" % kwargs['table'] or ''
proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or ''
ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or ''
    flow = (flow + in_port + dl_type + dl_src + dl_dst +
            (ip or proto) + nw_src + nw_dst + table)
return flow
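# For instance, _build_flow_expr(priority=1100, in_port=5, table=1) produces
# "hard_timeout=0,idle_timeout=0,priority=1100,in_port=5,table=1"; with
# delete=True only the match fields are emitted.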
def add_flow(bridge, **kwargs):
"""
Builds a flow expression for **kwargs and adds the flow entry
to an Open vSwitch instance
"""
flow = _build_flow_expr(**kwargs)
actions = 'actions' in kwargs and ",actions=%s" % kwargs['actions'] or ''
flow = flow + actions
addflow = [OFCTL_PATH, "add-flow", bridge, flow]
do_cmd(addflow)
def del_flows(bridge, **kwargs):
"""
Removes flows according to criteria passed as keyword.
"""
flow = _build_flow_expr(delete=True, **kwargs)
# out_port condition does not exist for all flow commands
out_port = ("out_port" in kwargs and
",out_port=%s" % kwargs['out_port'] or '')
flow = flow + out_port
delFlow = [OFCTL_PATH, 'del-flows', bridge, flow]
do_cmd(delFlow)
def del_all_flows(bridge):
delFlow = [OFCTL_PATH, "del-flows", bridge]
do_cmd(delFlow)
normalFlow = "priority=0 idle_timeout=0 hard_timeout=0 actions=normal"
add_flow(bridge, normalFlow)
def del_port(bridge, port):
delPort = [VSCTL_PATH, "del-port", bridge, port]
do_cmd(delPort)
def get_network_id_for_vif(vif_name):
domain_id, device_id = vif_name[3:len(vif_name)].split(".")
dom_uuid = do_cmd([XE_PATH, "vm-list", "dom-id=%s" % domain_id, "--minimal"])
vif_uuid = do_cmd([XE_PATH, "vif-list", "vm-uuid=%s" % dom_uuid, "device=%s" % device_id, "--minimal"])
vnet = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=other-config",
"param-key=cloudstack-network-id"])
return vnet
def get_network_id_for_tunnel_port(tunnelif_name):
vnet = do_cmd([VSCTL_PATH, "get", "interface", tunnelif_name, "options:cloudstack-network-id"])
return vnet
def clear_flooding_rules_for_port(bridge, ofport):
del_flows(bridge, in_port=ofport, table=2)
def add_flooding_rules_for_port(bridge, in_ofport, out_ofports):
action = "".join("output:%s," %ofport for ofport in out_ofports)[:-1]
add_flow(bridge, priority=1100, in_port=in_ofport, table=1, actions=action)
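# e.g. out_ofports=[2, 3, 7] yields the action string
# "output:2,output:3,output:7" (the join's trailing comma is sliced off).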
def get_ofport_for_vif(vif_name):
return do_cmd([VSCTL_PATH, "get", "interface", vif_name, "ofport"])
def get_macaddress_of_vif(vif_name):
domain_id, device_id = vif_name[3:len(vif_name)].split(".")
dom_uuid = do_cmd([XE_PATH, "vm-list", "dom-id=%s" % domain_id, "--minimal"])
vif_uuid = do_cmd([XE_PATH, "vif-list", "vm-uuid=%s" % dom_uuid, "device=%s" % device_id, "--minimal"])
mac = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=MAC"])
return mac
def get_vif_name_from_macaddress(macaddress):
vif_uuid = do_cmd([XE_PATH, "vif-list", "MAC=%s" % macaddress, "--minimal"])
vif_device_id = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=device"])
vm_uuid = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=vm-uuid"])
vm_domain_id = do_cmd([XE_PATH, "vm-param-get", "uuid=%s" % vm_uuid, "param-name=dom-id"])
return "vif"+vm_domain_id+"."+vif_device_id
def add_mac_lookup_table_entry(bridge, mac_address, out_of_port):
add_flow(bridge, priority=1100, dl_dst=mac_address, table=1, actions="output:%s" % out_of_port)
def delete_mac_lookup_table_entry(bridge, mac_address):
del_flows(bridge, dl_dst=mac_address, table=1)
def add_ip_lookup_table_entry(bridge, ip, dst_tier_gateway_mac, dst_vm_mac):
    action_str = "mod_dl_src:%s" % dst_tier_gateway_mac + ",mod_dl_dst:%s" % dst_vm_mac + ",resubmit(,5)"
addflow = [OFCTL_PATH, "add-flow", bridge, "table=4", "nw_dst=%s" % ip, "actions=%s" %action_str]
do_cmd(addflow)
def get_vms_on_host(vpc, host_id):
all_vms = vpc.vms
vms_on_host = []
for vm in all_vms:
if vm.hostid == host_id:
vms_on_host.append(vm)
return vms_on_host
def get_network_details(vpc, network_uuid):
tiers = vpc.tiers
for tier in tiers:
if tier.networkuuid == network_uuid:
return tier
return None
class jsonLoader(object):
def __init__(self, obj):
for k in obj:
v = obj[k]
if isinstance(v, dict):
setattr(self, k, jsonLoader(v))
elif isinstance(v, (list, tuple)):
if len(v) > 0 and isinstance(v[0], dict):
setattr(self, k, [jsonLoader(elem) for elem in v])
else:
setattr(self, k, v)
else:
setattr(self, k, v)
def __getattr__(self, val):
if val in self.__dict__:
return self.__dict__[val]
else:
return None
def __repr__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v)
in self.__dict__.iteritems()))
def __str__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v)
in self.__dict__.iteritems()))
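# jsonLoader gives attribute-style access to parsed JSON, e.g.:
#   cfg = jsonLoader({'vpc': {'cidr': '10.0.0.0/16', 'tiers': [{'grekey': 7}]}})
#   cfg.vpc.cidr             # '10.0.0.0/16'
#   cfg.vpc.tiers[0].grekey  # 7
#   cfg.vpc.missing          # None rather than AttributeError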
def configure_bridge_for_network_topology(bridge, this_host_id, json_config):
vpconfig = jsonLoader(json.loads(json_config)).vpc
if vpconfig is None:
logging.debug("WARNING:Can't find VPC info in json config file")
return "FAILURE:IMPROPER_JSON_CONFG_FILE"
# get the list of Vm's in the VPC from the JSON config
this_host_vms = get_vms_on_host(vpconfig, this_host_id)
for vm in this_host_vms:
for nic in vm.nics:
mac_addr = nic.macaddress
ip = nic.ipaddress
vif_name = get_vif_name_from_macaddress(mac_addr)
of_port = get_ofport_for_vif(vif_name)
network = get_network_details(vpconfig, nic.networkuuid)
# Add flow rule in L2 look up table, if the destination mac = MAC of the nic send packet on the found OFPORT
add_mac_lookup_table_entry(bridge, mac_addr, of_port)
# Add flow rule in L3 look up table: if the destination IP = VM's IP then modify the packet
# to set DST MAC = VM's MAC, SRC MAC=tier gateway MAC and send to egress table
add_ip_lookup_table_entry(bridge, ip, network.gatewaymac, mac_addr)
            # Add flow entry to send intra-tier traffic from the NIC to the L2 lookup path
addflow = [OFCTL_PATH, "add-flow", bridge, "table=0", "in_port=%s" % of_port,
"nw_dst=%s" %network.cidr, "actions=resubmit(,1)"]
do_cmd(addflow)
            # Add flow entry to send inter-tier traffic from the NIC to the egress ACL table (L3 lookup path)
addflow = [OFCTL_PATH, "add-flow", bridge, "table=0", "in_port=%s" % of_port,
"dl_dst=%s" %network.gatewaymac, "nw_dst=%s" %vpconfig.cidr, "actions=resubmit(,3)"]
do_cmd(addflow)
# get the list of hosts on which VPC spans from the JSON config
vpc_spanning_hosts = vpconfig.hosts
for host in vpc_spanning_hosts:
if this_host_id == host.hostid:
continue
other_host_vms = get_vms_on_host(vpconfig, host.hostid)
for vm in other_host_vms:
for nic in vm.nics:
mac_addr = nic.macaddress
ip = nic.ipaddress
network = get_network_details(vpconfig, nic.networkuuid)
gre_key = network.grekey
# generate tunnel name from tunnel naming convention
tunnel_name = "t%s-%s-%s" % (gre_key, this_host_id, host.hostid)
of_port = get_ofport_for_vif(tunnel_name)
                # Add flow rule in L2 lookup table: if the destination mac = MAC of the nic, send packet to the tunnel port
add_mac_lookup_table_entry(bridge, mac_addr, of_port)
                # Add flow rule in L3 lookup table: if the destination IP = VM's IP then modify the packet to
                # set DST MAC = VM's MAC, SRC MAC = tier gateway MAC and send to egress table
add_ip_lookup_table_entry(bridge, ip, network.gatewaymac, mac_addr)
return "SUCCESS: successfully configured bridge as per the VPC topology"
def get_acl(vpcconfig, required_acl_id):
acls = vpcconfig.acls
for acl in acls:
if acl.id == required_acl_id:
return acl
return None
def configure_ovs_bridge_for_routing_policies(bridge, json_config):
vpconfig = jsonLoader(json.loads(json_config)).vpc
if vpconfig is None:
logging.debug("WARNING:Can't find VPC info in json config file")
return "FAILURE:IMPROPER_JSON_CONFG_FILE"
# First flush current egress ACL's before re-applying the ACL's
del_flows(bridge, table=3)
egress_rules_added = False
ingress_rules_added = False
tiers = vpconfig.tiers
for tier in tiers:
tier_cidr = tier.cidr
acl = get_acl(vpconfig, tier.aclid)
acl_items = acl.aclitems
for acl_item in acl_items:
number = acl_item.number
action = acl_item.action
direction = acl_item.direction
source_port_start = acl_item.sourceportstart
source_port_end = acl_item.sourceportend
protocol = acl_item.protocol
source_cidrs = acl_item.sourcecidrs
acl_priority = 1000 + number
for source_cidr in source_cidrs:
                if direction == "ingress":
ingress_rules_added = True
# add flow rule to do action (allow/deny) for flows where source IP of the packet is in
# source_cidr and destination ip is in tier_cidr
port = source_port_start
while (port < source_port_end):
if action is "deny":
add_flow(bridge, priority= acl_priority, table=5, nw_src=source_cidr, nw_dst=tier_cidr, tp_dst=port,
nw_proto=protocol, actions='drop')
if action is "allow":
add_flow(bridge, priority= acl_priority,table=5, nw_src=source_cidr, nw_dst=tier_cidr, tp_dst=port,
nw_proto=protocol, actions='resubmit(,1)')
port = port + 1
                elif direction == "egress":
egress_rules_added = True
# add flow rule to do action (allow/deny) for flows where destination IP of the packet is in
# source_cidr and source ip is in tier_cidr
port = source_port_start
while (port < source_port_end):
if action is "deny":
add_flow(bridge, priority= acl_priority, table=5, nw_src=tier_cidr, nw_dst=source_cidr, tp_dst=port,
nw_proto=protocol, actions='drop')
if action is "allow":
add_flow(bridge, priority= acl_priority, table=5, nw_src=tier_cidr, nw_dst=source_cidr, tp_dst=port,
nw_proto=protocol, actions='resubmit(,1)')
port = port + 1
if egress_rules_added is False:
# add a default rule in egress table to forward packet to L3 lookup table
add_flow(bridge, priority=0, table=3, actions='resubmit(,4)')
if ingress_rules_added is False:
        # add a default rule in the ingress table to drop packets
add_flow(bridge, priority=0, table=5, actions='drop')
| ikoula/cloudstack | scripts/vm/network/vnet/cloudstack_pluginlib.py | Python | gpl-2.0 | 17,502 | 0.004571 |
from __future__ import absolute_import
from django.shortcuts import render_to_response
from sentry.web.frontend.base import BaseView
class AuthCloseView(BaseView):
"""This is a view to handle when sentry log in has been opened from
another window. This view loads an html page with a script that sends a message
back to the window opener and closes the window"""
def handle(self, request):
logged_in = request.user.is_authenticated()
return render_to_response('sentry/auth_close.html',
{'logged_in': logged_in})
| JackDanger/sentry | src/sentry/web/frontend/auth_close.py | Python | bsd-3-clause | 561 | 0.003565 |
import Live
from _Framework.ModeSelectorComponent import ModeSelectorComponent
from _Framework.ButtonElement import ButtonElement
#from consts import * #see below (not used)
#MANUFACTURER_ID = 71
#ABLETON_MODE = 65
#NOTE_MODE = 65 #67 = APC20 Note Mode; 65 = APC40 Ableton Mode 1
class ShiftableSelectorComponent(ModeSelectorComponent):
__doc__ = ' SelectorComponent that assigns buttons to functions based on the shift button '
#def __init__(self, select_buttons, master_button, arm_buttons, matrix, session, zooming, mixer, transport, slider_modes, mode_callback):
def __init__(self, parent, select_buttons, master_button, arm_buttons, matrix, session, zooming, mixer, slider_modes, mode_callback):
if not len(select_buttons) == 8:
raise AssertionError
if not len(arm_buttons) == 8:
raise AssertionError
ModeSelectorComponent.__init__(self)
self._toggle_pressed = False
self._note_mode_active = False
self._invert_assignment = False
self._select_buttons = select_buttons
self._master_button = master_button
self._slider_modes = slider_modes
self._arm_buttons = arm_buttons
#self._transport = transport
self._session = session
self._zooming = zooming
self._matrix = matrix
self._mixer = mixer
self._mode_callback = mode_callback
self._master_button.add_value_listener(self._master_value)
self._parent = parent #use this to call methods of parent class (APC40plus20)
def disconnect(self):
ModeSelectorComponent.disconnect(self)
self._master_button.remove_value_listener(self._master_value)
self._select_buttons = None
self._master_button = None
self._slider_modes = None
self._arm_buttons = None
#self._transport = None
self._session = None
self._zooming = None
self._matrix = None
self._mixer = None
self._mode_callback = None
self._parent = None #added
return None
def set_mode_toggle(self, button):
ModeSelectorComponent.set_mode_toggle(self, button) #called from APC20: self._shift_modes.set_mode_toggle(self._shift_button)
self.set_mode(0)
def invert_assignment(self):
self._invert_assignment = True
self._recalculate_mode()
def number_of_modes(self):
return 2
def update(self):
if self.is_enabled():
if (self._mode_index == 0): # Non-Shifted Mode
#for index in range(len(self._select_buttons)):
#strip = self._mixer.channel_strip(index)
#strip.set_select_button(None)
self._mixer.master_strip().set_select_button(None)
#self._transport.set_play_button(self._select_buttons[0])
#self._transport.set_stop_button(self._select_buttons[1])
#self._transport.set_record_button(self._select_buttons[2])
#self._transport.set_overdub_button(self._select_buttons[3])
#self._session.set_track_bank_buttons(self._select_buttons[4], self._select_buttons[5])
#self._session.set_scene_bank_buttons(self._select_buttons[6], self._select_buttons[7])
#self._zooming.set_nav_buttons(self._select_buttons[6], self._select_buttons[7], self._select_buttons[4], self._select_buttons[5])
self._on_note_mode_changed()
elif (self._mode_index == 1): # Shifted Mode
#self._transport.set_play_button(None)
#self._transport.set_stop_button(None)
#self._transport.set_record_button(None)
#self._transport.set_overdub_button(None)
#self._session.set_track_bank_buttons(None, None)
#self._session.set_scene_bank_buttons(None, None)
#self._zooming.set_nav_buttons(None, None, None, None)
#for index in range(len(self._select_buttons)):
#strip = self._mixer.channel_strip(index)
#strip.set_select_button(self._select_buttons[index])
self._mixer.master_strip().set_select_button(self._master_button)
            else:
                assert False
if self._mode_index == int(self._invert_assignment):
self._slider_modes.set_mode_buttons(None)
for index in range(len(self._arm_buttons)): #was: for index in range(len(self._select_buttons)):
self._mixer.channel_strip(index).set_arm_button(self._arm_buttons[index])
else:
for index in range(len(self._arm_buttons)): #was: for index in range(len(self._select_buttons)):
self._mixer.channel_strip(index).set_arm_button(None)
self._slider_modes.set_mode_buttons(self._arm_buttons)
return None
def _toggle_value(self, value): #"toggle" is shift button
if not self._mode_toggle != None:
raise AssertionError
if not value in range(128):
raise AssertionError
self._toggle_pressed = value > 0
self._recalculate_mode()
self._parent._encoder_modes.update() #added to update track control encoders on shift
return None
def _recalculate_mode(self): #called if toggle (i.e. shift) is pressed
self.set_mode((int(self._toggle_pressed) + int(self._invert_assignment)) % self.number_of_modes())
def _master_value(self, value): #this is the master_button value_listener, i.e. called when the master_button is pressed
if not self._master_button != None:
raise AssertionError
if not value in range(128):
raise AssertionError
if self.is_enabled() and self._invert_assignment == self._toggle_pressed:
if not self._master_button.is_momentary() or value > 0: #if the master button is pressed:
#for button in self._select_buttons: #turn off track select buttons (only needed for APC20)
#button.turn_off()
self._matrix.reset() #turn off the clip launch grid LEDs
#mode_byte = NOTE_MODE #= 67 for APC20 Note Mode, send as part of sysex string to enable Note Mode
if self._note_mode_active: #if note mode is already on, turn it off:
#mode_byte = ABLETON_MODE #= 65 for APC40 Ableton Mode 1
for scene_index in range(5):
scene = self._session.scene(scene_index)
for track_index in range(8):
clip_slot = scene.clip_slot(track_index)
button = self._matrix.get_button(track_index, scene_index)
clip_slot.set_launch_button(button)
button.set_enabled(True)
button.turn_off()
self._rebuild_callback()
#self._mode_callback(mode_byte) #send sysex to set Mode (NOTE_MODE or ABLETON_MODE)
self._note_mode_active = not self._note_mode_active
self._zooming.set_ignore_buttons(self._note_mode_active) #turn off matrix, scene launch, and clip stop buttons when in Note Mode
#self._transport.update() #only needed for APC20
self._on_note_mode_changed()
return None
def _on_note_mode_changed(self):
if not self._master_button != None:
raise AssertionError
if self.is_enabled() and self._invert_assignment == self._toggle_pressed:
if self._note_mode_active:
self._master_button.turn_on()
for scene_index in range(5):
#TODO: re-map scene_launch buttons to note velocity...
scene = self._session.scene(scene_index)
for track_index in range(8):
clip_slot = scene.clip_slot(track_index)
button = self._matrix.get_button(track_index, scene_index)
clip_slot.set_launch_button(None)
button.set_enabled(False)
button.set_channel(9) #remap all Note Mode notes to channel 10
if track_index < 4:
button.set_identifier(52 - (4 * scene_index) + track_index) #top row of left group (first 4 columns) notes 52 to 55
if (track_index % 2 == 0 and scene_index % 2 != 0) or (track_index % 2 != 0 and scene_index % 2 == 0):
button.send_value(1) #0=off, 1=green, 2=green blink, 3=red, 4=red blink, 5=yellow, 6=yellow blink, 7-127=green
else:
button.send_value(5)
else:
button.set_identifier(72 - (4 * scene_index) + (track_index -4)) #top row of right group (next 4 columns) notes 72 to 75
if (track_index % 2 == 0 and scene_index % 2 != 0) or (track_index % 2 != 0 and scene_index % 2 == 0):
button.send_value(1) #0=off, 1=green, 2=green blink, 3=red, 4=red blink, 5=yellow, 6=yellow blink, 7-127=green
else:
button.send_value(3)
self._rebuild_callback()
else:
self._master_button.turn_off()
return None
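# Resulting Note Mode grid (channel 10), per the identifier math above: the
# left four columns map to notes 52-55 on the top scene row down to 36-39 on
# the bottom row; the right four columns map to 72-75 down to 56-59.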
| jim-cooley/abletonremotescripts | remote-scripts/samples/APC40_20/ShiftableSelectorComponent.py | Python | apache-2.0 | 9,563 | 0.013071 |
# coding=utf-8
import re
RE_WHITESPACE = re.compile(r"(\s)+", re.UNICODE)
def remove_postfix(s, postfix):
    if s.endswith(postfix):
        return s[:len(s) - len(postfix)]
    return s  # postfix absent: return the input unchanged
def remove_prefix(s, prefix):
    if s.startswith(prefix):
        return s[len(prefix):]
    return s  # prefix absent: return the input unchanged
def flatten2str(obj):
if obj is None:
return ''
if isinstance(obj, str):
return obj
if isinstance(obj, (list, tuple)):
return ' '.join(obj)
return str(obj)
def compress_whitespaces(s):
return RE_WHITESPACE.sub(' ', s)
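# Illustrative behaviour of the helpers above:
#   compress_whitespaces('a\t b\nc')  -> 'a b c'
#   flatten2str(['a', 'b'])           -> 'a b'
#   flatten2str(None)                 -> ''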
| anderscui/nails | nails/texts.py | Python | mit | 534 | 0 |
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import re
import shutil
from os.path import basename, getsize, isdir, isfile, islink, join, realpath
from tempfile import mkdtemp
import click
import requests
import semantic_version
from platformio import __version__, app, exception, fs, util
from platformio.compat import hashlib_encode_data
from platformio.downloader import FileDownloader
from platformio.lockfile import LockFile
from platformio.package.exception import ManifestException
from platformio.package.manifest.parser import ManifestParserFactory
from platformio.unpacker import FileUnpacker
from platformio.vcsclient import VCSClientFactory
# pylint: disable=too-many-arguments, too-many-return-statements
class PackageRepoIterator(object):
def __init__(self, package, repositories):
assert isinstance(repositories, list)
self.package = package
self.repositories = iter(repositories)
def __iter__(self):
return self
def __next__(self):
return self.next() # pylint: disable=not-callable
@staticmethod
@util.memoized(expire="60s")
def load_manifest(url):
r = None
try:
r = requests.get(url, headers={"User-Agent": app.get_user_agent()})
r.raise_for_status()
return r.json()
except: # pylint: disable=bare-except
pass
finally:
if r:
r.close()
return None
def next(self):
repo = next(self.repositories)
manifest = repo if isinstance(repo, dict) else self.load_manifest(repo)
if manifest and self.package in manifest:
return manifest[self.package]
return next(self)
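# Illustrative sketch (hypothetical in-memory repository list, not a real
# registry): iterating yields, for each repository that knows the package,
# that repository's list of version records:
#
#   repos = [{"tool-foo": [{"version": "1.0.0", "url": "http://example/x.tar.gz"}]}]
#   for versions in PackageRepoIterator("tool-foo", repos):
#       pass # versions == [{"version": "1.0.0", "url": "http://example/x.tar.gz"}]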
class PkgRepoMixin(object):
PIO_VERSION = semantic_version.Version(util.pepver_to_semver(__version__))
@staticmethod
def is_system_compatible(valid_systems):
if not valid_systems or "*" in valid_systems:
return True
        if not isinstance(valid_systems, list):
            valid_systems = [valid_systems]
return util.get_systype() in valid_systems
def max_satisfying_repo_version(self, versions, requirements=None):
item = None
reqspec = None
try:
reqspec = (
semantic_version.SimpleSpec(requirements) if requirements else None
)
except ValueError:
pass
for v in versions:
if not self.is_system_compatible(v.get("system")):
continue
# if "platformio" in v.get("engines", {}):
# if PkgRepoMixin.PIO_VERSION not in requirements.SimpleSpec(
# v['engines']['platformio']):
# continue
specver = semantic_version.Version(v["version"])
if reqspec and specver not in reqspec:
continue
if not item or semantic_version.Version(item["version"]) < specver:
item = v
return item
def get_latest_repo_version( # pylint: disable=unused-argument
self, name, requirements, silent=False
):
version = None
for versions in PackageRepoIterator(name, self.repositories):
pkgdata = self.max_satisfying_repo_version(versions, requirements)
if not pkgdata:
continue
if (
not version
or semantic_version.compare(pkgdata["version"], version) == 1
):
version = pkgdata["version"]
return version
def get_all_repo_versions(self, name):
result = []
for versions in PackageRepoIterator(name, self.repositories):
result.extend([semantic_version.Version(v["version"]) for v in versions])
return [str(v) for v in sorted(set(result))]
class PkgInstallerMixin(object):
SRC_MANIFEST_NAME = ".piopkgmanager.json"
TMP_FOLDER_PREFIX = "_tmp_installing-"
FILE_CACHE_VALID = None # for example, 1 week = "7d"
FILE_CACHE_MAX_SIZE = 1024 * 1024 * 50 # 50 Mb
MEMORY_CACHE = {} # cache for package manifests and read dirs
def cache_get(self, key, default=None):
return self.MEMORY_CACHE.get(key, default)
def cache_set(self, key, value):
self.MEMORY_CACHE[key] = value
def cache_reset(self):
self.MEMORY_CACHE.clear()
def read_dirs(self, src_dir):
cache_key = "read_dirs-%s" % src_dir
result = self.cache_get(cache_key)
if result:
return result
result = [
join(src_dir, name)
for name in sorted(os.listdir(src_dir))
if isdir(join(src_dir, name))
]
self.cache_set(cache_key, result)
return result
def download(self, url, dest_dir, sha1=None):
cache_key_fname = app.ContentCache.key_from_args(url, "fname")
cache_key_data = app.ContentCache.key_from_args(url, "data")
if self.FILE_CACHE_VALID:
with app.ContentCache() as cc:
fname = str(cc.get(cache_key_fname))
cache_path = cc.get_cache_path(cache_key_data)
if fname and isfile(cache_path):
dst_path = join(dest_dir, fname)
shutil.copy(cache_path, dst_path)
click.echo("Using cache: %s" % cache_path)
return dst_path
with_progress = not app.is_disabled_progressbar()
try:
fd = FileDownloader(url, dest_dir)
fd.start(with_progress=with_progress)
except IOError as e:
raise_error = not with_progress
if with_progress:
try:
fd = FileDownloader(url, dest_dir)
fd.start(with_progress=False)
except IOError:
raise_error = True
if raise_error:
click.secho(
"Error: Please read http://bit.ly/package-manager-ioerror",
fg="red",
err=True,
)
raise e
if sha1:
fd.verify(sha1)
dst_path = fd.get_filepath()
if (
not self.FILE_CACHE_VALID
or getsize(dst_path) > PkgInstallerMixin.FILE_CACHE_MAX_SIZE
):
return dst_path
with app.ContentCache() as cc:
cc.set(cache_key_fname, basename(dst_path), self.FILE_CACHE_VALID)
cc.set(cache_key_data, "DUMMY", self.FILE_CACHE_VALID)
shutil.copy(dst_path, cc.get_cache_path(cache_key_data))
return dst_path
@staticmethod
def unpack(source_path, dest_dir):
with_progress = not app.is_disabled_progressbar()
try:
with FileUnpacker(source_path) as fu:
return fu.unpack(dest_dir, with_progress=with_progress)
except IOError as e:
if not with_progress:
raise e
with FileUnpacker(source_path) as fu:
return fu.unpack(dest_dir, with_progress=False)
@staticmethod
def parse_semver_version(value, raise_exception=False):
try:
try:
return semantic_version.Version(value)
except ValueError:
if "." not in str(value) and not str(value).isdigit():
raise ValueError("Invalid SemVer version %s" % value)
return semantic_version.Version.coerce(value)
except ValueError as e:
if raise_exception:
raise e
return None
@staticmethod
def parse_pkg_uri(text, requirements=None): # pylint: disable=too-many-branches
text = str(text)
name, url = None, None
# Parse requirements
req_conditions = [
"@" in text,
not requirements,
":" not in text or text.rfind("/") < text.rfind("@"),
]
if all(req_conditions):
text, requirements = text.rsplit("@", 1)
# Handle PIO Library Registry ID
if text.isdigit():
text = "id=" + text
# Parse custom name
elif "=" in text and not text.startswith("id="):
name, text = text.split("=", 1)
# Parse URL
# if valid URL with scheme vcs+protocol://
if "+" in text and text.find("+") < text.find("://"):
url = text
elif "/" in text or "\\" in text:
git_conditions = [
# Handle GitHub URL (https://github.com/user/package)
text.startswith("https://github.com/")
and not text.endswith((".zip", ".tar.gz")),
(text.split("#", 1)[0] if "#" in text else text).endswith(".git"),
]
hg_conditions = [
# Handle Developer Mbed URL
# (https://developer.mbed.org/users/user/code/package/)
# (https://os.mbed.com/users/user/code/package/)
text.startswith("https://developer.mbed.org"),
text.startswith("https://os.mbed.com"),
]
if any(git_conditions):
url = "git+" + text
elif any(hg_conditions):
url = "hg+" + text
elif "://" not in text and (isfile(text) or isdir(text)):
url = "file://" + text
elif "://" in text:
url = text
# Handle short version of GitHub URL
elif text.count("/") == 1:
url = "git+https://github.com/" + text
# Parse name from URL
if url and not name:
_url = url.split("#", 1)[0] if "#" in url else url
if _url.endswith(("\\", "/")):
_url = _url[:-1]
name = basename(_url)
if "." in name and not name.startswith("."):
name = name.rsplit(".", 1)[0]
return (name or text, requirements, url)
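    # Illustrative parses (informal sketch, reasoned from the branches above;
    # the inputs are hypothetical):
    # parse_pkg_uri("foo@1.2.3") -> ("foo", "1.2.3", None)
    # parse_pkg_uri("user/package") -> ("package", None, "git+https://github.com/user/package")
    # parse_pkg_uri("id=13") -> ("id=13", None, None)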
@staticmethod
def get_install_dirname(manifest):
name = re.sub(r"[^\da-z\_\-\. ]", "_", manifest["name"], flags=re.I)
if "id" in manifest:
name += "_ID%d" % manifest["id"]
return str(name)
@classmethod
def get_src_manifest_path(cls, pkg_dir):
if not isdir(pkg_dir):
return None
for item in os.listdir(pkg_dir):
if not isdir(join(pkg_dir, item)):
continue
if isfile(join(pkg_dir, item, cls.SRC_MANIFEST_NAME)):
return join(pkg_dir, item, cls.SRC_MANIFEST_NAME)
return None
def get_manifest_path(self, pkg_dir):
if not isdir(pkg_dir):
return None
for name in self.manifest_names:
manifest_path = join(pkg_dir, name)
if isfile(manifest_path):
return manifest_path
return None
def manifest_exists(self, pkg_dir):
return self.get_manifest_path(pkg_dir) or self.get_src_manifest_path(pkg_dir)
def load_manifest(self, pkg_dir): # pylint: disable=too-many-branches
cache_key = "load_manifest-%s" % pkg_dir
result = self.cache_get(cache_key)
if result:
return result
manifest = {}
src_manifest = None
manifest_path = self.get_manifest_path(pkg_dir)
src_manifest_path = self.get_src_manifest_path(pkg_dir)
if src_manifest_path:
src_manifest = fs.load_json(src_manifest_path)
if not manifest_path and not src_manifest_path:
return None
try:
manifest = ManifestParserFactory.new_from_file(manifest_path).as_dict()
except ManifestException:
pass
if src_manifest:
if "version" in src_manifest:
manifest["version"] = src_manifest["version"]
manifest["__src_url"] = src_manifest["url"]
# handle a custom package name
autogen_name = self.parse_pkg_uri(manifest["__src_url"])[0]
if "name" not in manifest or autogen_name != src_manifest["name"]:
manifest["name"] = src_manifest["name"]
if "name" not in manifest:
manifest["name"] = basename(pkg_dir)
if "version" not in manifest:
manifest["version"] = "0.0.0"
manifest["__pkg_dir"] = realpath(pkg_dir)
self.cache_set(cache_key, manifest)
return manifest
def get_installed(self):
items = []
for pkg_dir in self.read_dirs(self.package_dir):
if self.TMP_FOLDER_PREFIX in pkg_dir:
continue
manifest = self.load_manifest(pkg_dir)
if not manifest:
continue
assert "name" in manifest
items.append(manifest)
return items
def get_package(self, name, requirements=None, url=None):
pkg_id = int(name[3:]) if name.startswith("id=") else 0
best = None
for manifest in self.get_installed():
if url:
if manifest.get("__src_url") != url:
continue
elif pkg_id and manifest.get("id") != pkg_id:
continue
elif not pkg_id and manifest["name"] != name:
continue
elif not PkgRepoMixin.is_system_compatible(manifest.get("system")):
continue
# strict version or VCS HASH
if requirements and requirements == manifest["version"]:
return manifest
try:
if requirements and not semantic_version.SimpleSpec(requirements).match(
self.parse_semver_version(manifest["version"], raise_exception=True)
):
continue
if not best or (
self.parse_semver_version(manifest["version"], raise_exception=True)
> self.parse_semver_version(best["version"], raise_exception=True)
):
best = manifest
except ValueError:
pass
return best
def get_package_dir(self, name, requirements=None, url=None):
manifest = self.get_package(name, requirements, url)
return (
manifest.get("__pkg_dir")
if manifest and isdir(manifest.get("__pkg_dir"))
else None
)
def get_package_by_dir(self, pkg_dir):
for manifest in self.get_installed():
if manifest["__pkg_dir"] == realpath(pkg_dir):
return manifest
return None
def find_pkg_root(self, src_dir):
if self.manifest_exists(src_dir):
return src_dir
for root, _, _ in os.walk(src_dir):
if self.manifest_exists(root):
return root
raise exception.MissingPackageManifest(", ".join(self.manifest_names))
def _install_from_piorepo(self, name, requirements):
pkg_dir = None
pkgdata = None
versions = None
last_exc = None
for versions in PackageRepoIterator(name, self.repositories):
pkgdata = self.max_satisfying_repo_version(versions, requirements)
if not pkgdata:
continue
try:
pkg_dir = self._install_from_url(
name, pkgdata["url"], requirements, pkgdata.get("sha1")
)
break
except Exception as e: # pylint: disable=broad-except
last_exc = e
click.secho("Warning! Package Mirror: %s" % e, fg="yellow")
click.secho("Looking for another mirror...", fg="yellow")
if versions is None:
util.internet_on(raise_exception=True)
raise exception.UnknownPackage(
name + (". Error -> %s" % last_exc if last_exc else "")
)
if not pkgdata:
raise exception.UndefinedPackageVersion(
requirements or "latest", util.get_systype()
)
return pkg_dir
def _install_from_url(self, name, url, requirements=None, sha1=None, track=False):
tmp_dir = mkdtemp("-package", self.TMP_FOLDER_PREFIX, self.package_dir)
src_manifest_dir = None
src_manifest = {"name": name, "url": url, "requirements": requirements}
try:
if url.startswith("file://"):
_url = url[7:]
if isfile(_url):
self.unpack(_url, tmp_dir)
else:
fs.rmtree(tmp_dir)
shutil.copytree(_url, tmp_dir, symlinks=True)
elif url.startswith(("http://", "https://")):
dlpath = self.download(url, tmp_dir, sha1)
assert isfile(dlpath)
self.unpack(dlpath, tmp_dir)
os.remove(dlpath)
else:
vcs = VCSClientFactory.newClient(tmp_dir, url)
assert vcs.export()
src_manifest_dir = vcs.storage_dir
src_manifest["version"] = vcs.get_current_revision()
_tmp_dir = tmp_dir
if not src_manifest_dir:
_tmp_dir = self.find_pkg_root(tmp_dir)
src_manifest_dir = join(_tmp_dir, ".pio")
# write source data to a special manifest
if track:
self._update_src_manifest(src_manifest, src_manifest_dir)
return self._install_from_tmp_dir(_tmp_dir, requirements)
finally:
if isdir(tmp_dir):
fs.rmtree(tmp_dir)
return None
def _update_src_manifest(self, data, src_dir):
if not isdir(src_dir):
os.makedirs(src_dir)
src_manifest_path = join(src_dir, self.SRC_MANIFEST_NAME)
_data = {}
if isfile(src_manifest_path):
_data = fs.load_json(src_manifest_path)
_data.update(data)
with open(src_manifest_path, "w") as fp:
json.dump(_data, fp)
def _install_from_tmp_dir( # pylint: disable=too-many-branches
self, tmp_dir, requirements=None
):
tmp_manifest = self.load_manifest(tmp_dir)
assert set(["name", "version"]) <= set(tmp_manifest)
pkg_dirname = self.get_install_dirname(tmp_manifest)
pkg_dir = join(self.package_dir, pkg_dirname)
cur_manifest = self.load_manifest(pkg_dir)
tmp_semver = self.parse_semver_version(tmp_manifest["version"])
cur_semver = None
if cur_manifest:
cur_semver = self.parse_semver_version(cur_manifest["version"])
# package should satisfy requirements
if requirements:
mismatch_error = "Package version %s doesn't satisfy requirements %s" % (
tmp_manifest["version"],
requirements,
)
try:
assert tmp_semver and tmp_semver in semantic_version.SimpleSpec(
requirements
), mismatch_error
except (AssertionError, ValueError):
assert tmp_manifest["version"] == requirements, mismatch_error
# check if package already exists
if cur_manifest:
# 0-overwrite, 1-rename, 2-fix to a version
action = 0
if "__src_url" in cur_manifest:
if cur_manifest["__src_url"] != tmp_manifest.get("__src_url"):
action = 1
elif "__src_url" in tmp_manifest:
action = 2
else:
if tmp_semver and (not cur_semver or tmp_semver > cur_semver):
action = 1
elif tmp_semver and cur_semver and tmp_semver != cur_semver:
action = 2
# rename
if action == 1:
target_dirname = "%s@%s" % (pkg_dirname, cur_manifest["version"])
if "__src_url" in cur_manifest:
target_dirname = "%s@src-%s" % (
pkg_dirname,
hashlib.md5(
hashlib_encode_data(cur_manifest["__src_url"])
).hexdigest(),
)
shutil.move(pkg_dir, join(self.package_dir, target_dirname))
# fix to a version
elif action == 2:
target_dirname = "%s@%s" % (pkg_dirname, tmp_manifest["version"])
if "__src_url" in tmp_manifest:
target_dirname = "%s@src-%s" % (
pkg_dirname,
hashlib.md5(
hashlib_encode_data(tmp_manifest["__src_url"])
).hexdigest(),
)
pkg_dir = join(self.package_dir, target_dirname)
# remove previous/not-satisfied package
if isdir(pkg_dir):
fs.rmtree(pkg_dir)
shutil.copytree(tmp_dir, pkg_dir, symlinks=True)
try:
shutil.rmtree(tmp_dir)
except: # pylint: disable=bare-except
pass
assert isdir(pkg_dir)
self.cache_reset()
return pkg_dir
class BasePkgManager(PkgRepoMixin, PkgInstallerMixin):
    # Handle circular dependencies
INSTALL_HISTORY = None
def __init__(self, package_dir, repositories=None):
self.repositories = repositories
self.package_dir = package_dir
if not isdir(self.package_dir):
os.makedirs(self.package_dir)
assert isdir(self.package_dir)
@property
def manifest_names(self):
raise NotImplementedError()
def print_message(self, message, nl=True):
click.echo("%s: %s" % (self.__class__.__name__, message), nl=nl)
def outdated(self, pkg_dir, requirements=None):
"""
Has 3 different results:
`None` - unknown package, VCS is detached to commit
`False` - package is up-to-date
`String` - a found latest version
"""
if not isdir(pkg_dir):
return None
latest = None
manifest = self.load_manifest(pkg_dir)
# skip detached package to a specific version
if "@" in pkg_dir and "__src_url" not in manifest and not requirements:
return None
if "__src_url" in manifest:
try:
vcs = VCSClientFactory.newClient(
pkg_dir, manifest["__src_url"], silent=True
)
except (AttributeError, exception.PlatformioException):
return None
if not vcs.can_be_updated:
return None
latest = vcs.get_latest_revision()
else:
try:
latest = self.get_latest_repo_version(
"id=%d" % manifest["id"] if "id" in manifest else manifest["name"],
requirements,
silent=True,
)
except (exception.PlatformioException, ValueError):
return None
if not latest:
return None
up_to_date = False
try:
assert "__src_url" not in manifest
up_to_date = self.parse_semver_version(
manifest["version"], raise_exception=True
) >= self.parse_semver_version(latest, raise_exception=True)
except (AssertionError, ValueError):
up_to_date = latest == manifest["version"]
return False if up_to_date else latest
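    # Illustrative outcomes (informal sketch): a VCS package detached at a
    # fixed revision yields None; an up-to-date registry package yields False;
    # otherwise the newest version string is returned, e.g. "2.1.0".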
def install(
self, name, requirements=None, silent=False, after_update=False, force=False
): # pylint: disable=unused-argument
pkg_dir = None
# interprocess lock
with LockFile(self.package_dir):
self.cache_reset()
name, requirements, url = self.parse_pkg_uri(name, requirements)
package_dir = self.get_package_dir(name, requirements, url)
            # avoid circular dependencies
if not self.INSTALL_HISTORY:
self.INSTALL_HISTORY = []
history_key = "%s-%s-%s" % (name, requirements or "", url or "")
if history_key in self.INSTALL_HISTORY:
return package_dir
self.INSTALL_HISTORY.append(history_key)
if package_dir and force:
self.uninstall(package_dir)
package_dir = None
if not package_dir or not silent:
msg = "Installing " + click.style(name, fg="cyan")
if requirements:
msg += " @ " + requirements
self.print_message(msg)
if package_dir:
if not silent:
click.secho(
"{name} @ {version} is already installed".format(
**self.load_manifest(package_dir)
),
fg="yellow",
)
return package_dir
if url:
pkg_dir = self._install_from_url(name, url, requirements, track=True)
else:
pkg_dir = self._install_from_piorepo(name, requirements)
if not pkg_dir or not self.manifest_exists(pkg_dir):
raise exception.PackageInstallError(
name, requirements or "*", util.get_systype()
)
manifest = self.load_manifest(pkg_dir)
assert manifest
click.secho(
"{name} @ {version} has been successfully installed!".format(
**manifest
),
fg="green",
)
return pkg_dir
def uninstall(
self, package, requirements=None, after_update=False
): # pylint: disable=unused-argument
# interprocess lock
with LockFile(self.package_dir):
self.cache_reset()
if isdir(package) and self.get_package_by_dir(package):
pkg_dir = package
else:
name, requirements, url = self.parse_pkg_uri(package, requirements)
pkg_dir = self.get_package_dir(name, requirements, url)
if not pkg_dir:
raise exception.UnknownPackage(
"%s @ %s" % (package, requirements or "*")
)
manifest = self.load_manifest(pkg_dir)
click.echo(
"Uninstalling %s @ %s: \t"
% (click.style(manifest["name"], fg="cyan"), manifest["version"]),
nl=False,
)
if islink(pkg_dir):
os.unlink(pkg_dir)
else:
fs.rmtree(pkg_dir)
self.cache_reset()
# unfix package with the same name
pkg_dir = self.get_package_dir(manifest["name"])
if pkg_dir and "@" in pkg_dir:
shutil.move(
pkg_dir, join(self.package_dir, self.get_install_dirname(manifest))
)
self.cache_reset()
click.echo("[%s]" % click.style("OK", fg="green"))
return True
def update(self, package, requirements=None, only_check=False):
self.cache_reset()
if isdir(package) and self.get_package_by_dir(package):
pkg_dir = package
else:
pkg_dir = self.get_package_dir(*self.parse_pkg_uri(package))
if not pkg_dir:
raise exception.UnknownPackage("%s @ %s" % (package, requirements or "*"))
manifest = self.load_manifest(pkg_dir)
name = manifest["name"]
click.echo(
"{} {:<40} @ {:<15}".format(
"Checking" if only_check else "Updating",
click.style(manifest["name"], fg="cyan"),
manifest["version"],
),
nl=False,
)
if not util.internet_on():
click.echo("[%s]" % (click.style("Off-line", fg="yellow")))
return None
latest = self.outdated(pkg_dir, requirements)
if latest:
click.echo("[%s]" % (click.style(latest, fg="red")))
elif latest is False:
click.echo("[%s]" % (click.style("Up-to-date", fg="green")))
else:
click.echo("[%s]" % (click.style("Detached", fg="yellow")))
if only_check or not latest:
return True
if "__src_url" in manifest:
vcs = VCSClientFactory.newClient(pkg_dir, manifest["__src_url"])
assert vcs.update()
self._update_src_manifest(
dict(version=vcs.get_current_revision()), vcs.storage_dir
)
else:
self.uninstall(pkg_dir, after_update=True)
self.install(name, latest, after_update=True)
return True
class PackageManager(BasePkgManager):
@property
def manifest_names(self):
return ["package.json"]
| platformio/platformio | platformio/managers/package.py | Python | apache-2.0 | 29,447 | 0.000815 |
from toee import *
import tpactions
def GetActionName():
return "Divine Armor"
def GetActionDefinitionFlags():
return D20ADF_None
def GetTargetingClassification():
return D20TC_Target0
def GetActionCostType():
return D20ACT_Swift_Action
def AddToSequence(d20action, action_seq, tb_status):
action_seq.add_action(d20action)
	return AEC_OK
| GrognardsFromHell/TemplePlus | tpdatasrc/tpgamefiles/rules/d20_actions/action02600_feat_divine_armor.py | Python | mit | 348 | 0.04023 |
TEXT = "text"
BASH = "bash"
JSON = "json"
DOCKERENV = "dockerenv"
NAME_VALUE_DICT = "nvdict"
DEFAULT = TEXT
CHOICES = (TEXT, BASH, JSON, DOCKERENV, NAME_VALUE_DICT)
def print_dict(dictionary, format_=None):
"""Print a dictionary in a given format. Defaults to text."""
format_ = format_ or DEFAULT
if format_ == TEXT:
for key, value in iter(sorted(dictionary.items())):
print("%s = %s" % (key, value))
elif format_ == DOCKERENV:
for key, value in iter(sorted(dictionary.items())):
print("%s=%s" % (key, value))
elif format_ == BASH:
for key, value in iter(sorted(dictionary.items())):
print("export %s=%s" % (key, value))
elif format_ == JSON:
print(json.dumps(dictionary))
elif format_ == NAME_VALUE_DICT:
print("[")
for key, value in iter(sorted(dictionary.items())):
print('{"name": "%s", "value": "%s"},' % (key, value))
print("]")
def print_list(list_, format_=None):
"""Print a list in a given format. Defaults to text."""
format_ = format_ or DEFAULT
if format_ == TEXT:
for item in list_:
print(item)
elif format_ == JSON:
print(json.dumps(list_))
def print_table(rows, separator=" "):
columns = max(map(len, rows))
widths = [0] * columns
for column in range(columns):
for row in rows:
length = len(row[column])
if length > widths[column]:
widths[column] = length
for row in rows:
        print(separator.join(["%s%s" % (value, " " * (widths[index] - len(str(value))))
                              for index, value in enumerate(row)]))
def print_profile(profile, format_=None):
"""Print profile header."""
format_ = format_ or DEFAULT
if format_ == TEXT:
print("[profile:%s]" % profile)
elif format_ == BASH:
print("# profile: %s" % profile)
| bearops/ebzl | ebzl/lib/format.py | Python | bsd-3-clause | 1,944 | 0.000514 |
# Copyright 2018 New Vector
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from http import HTTPStatus
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.urls import ConsentURIBuilder
from synapse.rest.client import login, room
from synapse.rest.consent import consent_resource
from synapse.server import HomeServer
from synapse.util import Clock
from tests import unittest
from tests.server import FakeSite, make_request
class ConsentResourceTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
login.register_servlets,
]
user_id = True
hijack_auth = False
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
config = self.default_config()
config["form_secret"] = "123abc"
# Make some temporary templates...
temp_consent_path = self.mktemp()
os.mkdir(temp_consent_path)
os.mkdir(os.path.join(temp_consent_path, "en"))
config["user_consent"] = {
"version": "1",
"template_dir": os.path.abspath(temp_consent_path),
}
with open(os.path.join(temp_consent_path, "en/1.html"), "w") as f:
f.write("{{version}},{{has_consented}}")
with open(os.path.join(temp_consent_path, "en/success.html"), "w") as f:
f.write("yay!")
hs = self.setup_test_homeserver(config=config)
return hs
def test_render_public_consent(self) -> None:
"""You can observe the terms form without specifying a user"""
resource = consent_resource.ConsentResource(self.hs)
channel = make_request(
self.reactor,
FakeSite(resource, self.reactor),
"GET",
"/consent?v=1",
shorthand=False,
)
self.assertEqual(channel.code, HTTPStatus.OK)
def test_accept_consent(self) -> None:
"""
A user can use the consent form to accept the terms.
"""
uri_builder = ConsentURIBuilder(self.hs.config)
resource = consent_resource.ConsentResource(self.hs)
# Register a user
user_id = self.register_user("user", "pass")
access_token = self.login("user", "pass")
# Fetch the consent page, to get the consent version
consent_uri = (
uri_builder.build_user_consent_uri(user_id).replace("_matrix/", "")
+ "&u=user"
)
channel = make_request(
self.reactor,
FakeSite(resource, self.reactor),
"GET",
consent_uri,
access_token=access_token,
shorthand=False,
)
self.assertEqual(channel.code, HTTPStatus.OK)
# Get the version from the body, and whether we've consented
version, consented = channel.result["body"].decode("ascii").split(",")
self.assertEqual(consented, "False")
# POST to the consent page, saying we've agreed
channel = make_request(
self.reactor,
FakeSite(resource, self.reactor),
"POST",
consent_uri + "&v=" + version,
access_token=access_token,
shorthand=False,
)
self.assertEqual(channel.code, HTTPStatus.OK)
# Fetch the consent page, to get the consent version -- it should have
# changed
channel = make_request(
self.reactor,
FakeSite(resource, self.reactor),
"GET",
consent_uri,
access_token=access_token,
shorthand=False,
)
self.assertEqual(channel.code, HTTPStatus.OK)
# Get the version from the body, and check that it's the version we
# agreed to, and that we've consented to it.
version, consented = channel.result["body"].decode("ascii").split(",")
self.assertEqual(consented, "True")
self.assertEqual(version, "1")
| matrix-org/synapse | tests/rest/client/test_consent.py | Python | apache-2.0 | 4,541 | 0.00044 |
from OpenGL.GL import *
from OpenGL.GLU import *
import pygame
import os.path
class Material(object):
def __init__(self):
self.name = ""
self.texture_fname = None
self.texture_id = None
class FaceGroup(object):
def __init__(self):
self.tri_indices = []
self.material_name = ""
class Model3D(object):
def __init__(self):
self.vertices = []
self.tex_coords = []
self.normals = []
self.materials = {}
self.face_groups = []
self.display_list_id = None
def __del__(self):
#Called when the model is cleaned up by Python
self.free_resources()
def free_resources(self):
# Delete the display list and textures
if self.display_list_id is not None:
glDeleteLists(self.display_list_id, 1)
self.display_list_id = None
# Delete any textures we used
for material in self.materials.values():
if material.texture_id is not None:
glDeleteTextures(material.texture_id)
# Clear all the materials
self.materials.clear()
# Clear the geometry lists
del self.vertices[:]
del self.tex_coords[:]
del self.normals[:]
del self.face_groups[:]
def read_obj(self, fname):
current_face_group = None
file_in = open(fname)
        for line in file_in:
            # Parse command and data from each line
            words = line.split()
            if not words or words[0].startswith('#'): # skip blank lines and comments
                continue
            command = words[0]
            data = words[1:]
if command == 'mtllib': # Material library
model_path = os.path.split(fname)[0]
mtllib_path = os.path.join( model_path, data[0] )
self.read_mtllib(mtllib_path)
elif command == 'v': # Vertex
x, y, z = data
vertex = (float(x), float(y), float(z))
self.vertices.append(vertex)
elif command == 'vt': # Texture coordinate
s, t = data
tex_coord = (float(s), float(t))
self.tex_coords.append(tex_coord)
elif command == 'vn': # Normal
x, y, z = data
normal = (float(x), float(y), float(z))
self.normals.append(normal)
elif command == 'usemtl' : # Use material
current_face_group = FaceGroup()
current_face_group.material_name = data[0]
self.face_groups.append( current_face_group )
elif command == 'f':
assert len(data) == 3, "Sorry, only triangles are supported"
# Parse indices from triples
for word in data:
vi, ti, ni = word.split('/')
indices = (int(vi) - 1, int(ti) - 1, int(ni) - 1)
current_face_group.tri_indices.append(indices)
        file_in.close()
        for material in self.materials.values():
model_path = os.path.split(fname)[0]
texture_path = os.path.join(model_path, material.texture_fname)
texture_surface = pygame.image.load(texture_path)
texture_data = pygame.image.tostring(texture_surface, 'RGB', True)
material.texture_id = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, material.texture_id)
glTexParameteri( GL_TEXTURE_2D,
GL_TEXTURE_MAG_FILTER,
GL_LINEAR)
glTexParameteri( GL_TEXTURE_2D,
GL_TEXTURE_MIN_FILTER,
GL_LINEAR_MIPMAP_LINEAR)
glPixelStorei(GL_UNPACK_ALIGNMENT,1)
width, height = texture_surface.get_rect().size
gluBuild2DMipmaps( GL_TEXTURE_2D,
3,
width,
height,
GL_RGB,
GL_UNSIGNED_BYTE,
texture_data)
def read_mtllib(self, mtl_fname):
file_mtllib = open(mtl_fname)
        for line in file_mtllib:
            words = line.split()
            if not words or words[0].startswith('#'): # skip blank lines and comments
                continue
            command = words[0]
            data = words[1:]
if command == 'newmtl':
material = Material()
material.name = data[0]
self.materials[data[0]] = material
elif command == 'map_Kd':
                material.texture_fname = data[0]
        file_mtllib.close()
def draw(self):
vertices = self.vertices
tex_coords = self.tex_coords
normals = self.normals
for face_group in self.face_groups:
material = self.materials[face_group.material_name]
glBindTexture(GL_TEXTURE_2D, material.texture_id)
glBegin(GL_TRIANGLES)
for vi, ti, ni in face_group.tri_indices:
glTexCoord2fv( tex_coords[ti] )
glNormal3fv( normals[ni] )
glVertex3fv( vertices[vi] )
glEnd()
def draw_quick(self):
if self.display_list_id is None:
self.display_list_id = glGenLists(1)
glNewList(self.display_list_id, GL_COMPILE)
self.draw()
glEndList()
glCallList(self.display_list_id)
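# Minimal usage sketch (assumes an existing pygame window with an OpenGL
# context, plus a hypothetical 'tank.obj' with its .mtl and texture files):
#
#   model = Model3D()
#   model.read_obj('tank.obj') # also reads materials and uploads textures
#   # ...inside the render loop:
#   model.draw_quick() # compiles a display list on first call, then reuses it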
| MaxWayne/Beginning-Game-Development-with-Python-and-Pygame | Chapter 12/model3d.py | Python | mit | 5,468 | 0.007315 |
# Copyright (c) 2013 The Chromium Embedded Framework Authors.
# Portions copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is (possibly, depending on python version) imported by gyp_cef
# when it creates sub-processes through the multiprocessing library.
# Importing in Python 2.6 (fixed in 2.7) on Windows doesn't search for
# imports that don't end in .py (and aren't directories with an
# __init__.py). This wrapper makes "import gyp_cef" work with those old
# versions and makes it possible to execute gyp_cef.py directly on Windows
# where the extension is useful.
import os
path = os.path.abspath(os.path.split(__file__)[0])
execfile(os.path.join(path, 'gyp_cef'))
| denzp/cef3 | tools/gyp_cef.py | Python | bsd-3-clause | 797 | 0 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.bundle_create import BundleCreate
from pants.util.contextutil import open_zip
from pants_test.backend.jvm.tasks.jvm_binary_task_test_base import JvmBinaryTaskTestBase
class TestBundleCreate(JvmBinaryTaskTestBase):
@classmethod
def task_type(cls):
return BundleCreate
def test_jvm_bundle_products(self):
jar_lib = self.make_target(spec='3rdparty/jvm/org/example:foo',
target_type=JarLibrary,
jars=[JarDependency(org='org.example', name='foo', rev='1.0.0'),
JarDependency(org='org.pantsbuild', name='bar', rev='2.0.0',
ext='zip'),
JarDependency(org='org.apache', name='baz', rev='3.0.0',
classifier='tests'),
JarDependency(org='org.gnu', name='gary', rev='4.0.0',
ext='tar.gz')])
binary_target = self.make_target(spec='//foo:foo-binary',
target_type=JvmBinary,
source='Foo.java',
dependencies=[jar_lib])
app_target = self.make_target(spec='//foo:foo-app',
target_type=JvmApp,
basename='FooApp',
dependencies=[binary_target])
context = self.context(target_roots=[app_target])
jar_artifact = self.create_artifact(org='org.example', name='foo', rev='1.0.0')
zip_artifact = self.create_artifact(org='org.pantsbuild', name='bar', rev='2.0.0', ext='zip')
bundle_artifact = self.create_artifact(org='org.apache', name='baz', rev='3.0.0',
classifier='tests')
tar_gz_artifact = self.create_artifact(org='org.gnu', name='gary', rev='4.0.0', ext='tar.gz')
classpath_products = self.ensure_classpath_products(context)
classpath_products.add_jars_for_targets(targets=[jar_lib],
conf='default',
resolved_jars=[jar_artifact,
zip_artifact,
bundle_artifact,
tar_gz_artifact])
self.add_to_runtime_classpath(context, binary_target, {'Foo.class': '', 'foo.txt': ''})
self.execute(context)
products = context.products.get('jvm_bundles')
self.assertIsNotNone(products)
product_data = products.get(app_target)
dist_root = os.path.join(self.build_root, 'dist')
self.assertEquals({dist_root: ['FooApp-bundle']}, product_data)
bundle_root = os.path.join(dist_root, 'FooApp-bundle')
self.assertEqual(sorted(['foo-binary.jar',
'libs/org.example-foo-1.0.0.jar',
'libs/org.pantsbuild-bar-2.0.0.zip',
'libs/org.apache-baz-3.0.0-tests.jar',
'libs/org.gnu-gary-4.0.0.tar.gz']),
sorted(self.iter_files(bundle_root)))
with open_zip(os.path.join(bundle_root, 'foo-binary.jar')) as jar:
self.assertEqual(sorted(['META-INF/', 'META-INF/MANIFEST.MF', 'Foo.class', 'foo.txt']),
sorted(jar.namelist()))
def test_jvm_bundle_missing_product(self):
binary_target = self.make_target(spec='//foo:foo-binary',
target_type=JvmBinary,
source='Foo.java')
app_target = self.make_target(spec='//foo:foo-app',
target_type=JvmApp,
basename='FooApp',
dependencies=[binary_target])
context = self.context(target_roots=[app_target])
jar_artifact = self.create_artifact(org='org.example', name='foo', rev='1.0.0',
materialize=False)
classpath_products = self.ensure_classpath_products(context)
classpath_products.add_jars_for_targets(targets=[binary_target],
conf='default',
resolved_jars=[jar_artifact])
self.add_to_runtime_classpath(context, binary_target, {'Foo.class': '', 'foo.txt': ''})
with self.assertRaises(BundleCreate.MissingJarError):
self.execute(context)
| slyphon/pants | tests/python/pants_test/backend/jvm/tasks/test_bundle_create.py | Python | apache-2.0 | 5,152 | 0.003882 |
from __future__ import with_statement
import logging
import time
import sys
from concurrence import unittest, Tasklet, Channel, Lock, Semaphore, TaskletPool, DeferredQueue, Deque, TimeoutError, TaskletError, JoinError, Message
class TestTaskletPool(unittest.TestCase):
def testBasic(self):
d = Deque()
def handler(i):
Tasklet.sleep(1.0)
d.append(i)
tp = TaskletPool()
N = 10
for i in range(N):
tp.defer(handler, i)
start = time.time()
xs = []
while True:
xs.append(d.popleft(True, 30))
if len(xs) == N:
break
end = time.time()
#X workers taking 1 second to process N items = Z total proc time
self.assertAlmostEqual(N / TaskletPool.INIT_WORKERS, end - start, places = 1)
self.assertEquals(45, sum(xs))
class TestDeferredQueue(unittest.TestCase):
def testDeferredQueue(self):
d = DeferredQueue()
def f(i):
pass
for i in range(10):
d.defer(f, i)
Tasklet.sleep(1)
for i in range(10):
d.defer(f, i)
Tasklet.sleep(1)
class TestPrimitives(unittest.TestCase):
def testSemaphore(self):
sema = Semaphore(4)
self.assertEquals(True, sema.acquire())
self.assertEquals(3, sema.count)
self.assertEquals(True, sema.acquire())
self.assertEquals(2, sema.count)
self.assertEquals(True, sema.acquire())
self.assertEquals(1, sema.count)
self.assertEquals(True, sema.acquire())
self.assertEquals(0, sema.count)
self.assertEquals(False, sema.acquire(False))
self.assertEquals(0, sema.count)
self.assertEquals(None, sema.release())
self.assertEquals(1, sema.count)
self.assertEquals(None, sema.release())
self.assertEquals(2, sema.count)
self.assertEquals(None, sema.release())
self.assertEquals(3, sema.count)
self.assertEquals(None, sema.release())
self.assertEquals(4, sema.count)
self.assertEquals(None, sema.release())
self.assertEquals(5, sema.count) #possible to go beyond initial count... is this ok?
sema = Semaphore(4)
xs = []
def t(x):
try:
with sema:
Tasklet.sleep(1.0)
xs.append(x)
return x
except TimeoutError:
pass
start = time.time()
for i in range(8):
Tasklet.new(t)(i)
join_result = Tasklet.join_children()
self.assertEquals(8, len(join_result))
self.assertEquals(28, sum(xs))
end = time.time()
self.assertAlmostEqual(2.0, end - start, places = 1)
def testLock(self):
lock = Lock()
self.assertEquals(True, lock.acquire())
self.assertEquals(True, lock.is_locked())
self.assertEquals(None, lock.release())
xs = []
def t(x):
try:
with lock:
Tasklet.sleep(1.0)
xs.append(x)
return x
except TimeoutError:
pass
start = time.time()
for i in range(5):
Tasklet.new(t)(i)
join_result = Tasklet.join_children()
self.assertEquals(5, len(join_result))
self.assertEquals(10, sum(xs))
end = time.time()
self.assertAlmostEqual(5.0, end - start, places = 1)
if __name__ == '__main__':
unittest.main(timeout = 100.0)
| toymachine/concurrence | test/testextra.py | Python | bsd-3-clause | 3,776 | 0.011388 |
#!/usr/bin/env ipython
# -*- coding: utf-8 -*-
from datetime import datetime, time, timedelta
import numpy as np
import console_colors as ccl
from scipy.io.netcdf import netcdf_file
from ShiftTimes import ShiftCorrection, ShiftDts
import os, argparse
import h5py
from h5py import File as h5
from numpy import (
mean, median, nanmean, nanmedian, std, nan,
isnan, min, max, zeros, ones, size, loadtxt
)
from os.path import isfile, isdir
if 'DISPLAY' in os.environ: # to avoid crash when running remotely
from pylab import figure, savefig, close, find, pause
import matplotlib.patches as patches
import matplotlib.transforms as transforms
#from read_NewTable import tshck, tini_icme, tend_icme, tini_mc, tend_mc, n_icmes, MCsig
#from z_expansion_gulisano import z as z_exp
_ERROR_ = ccl.Rn+' ### ERROR ###: '+ccl.W
def flags2nan(VAR, FLAG):
cond = VAR < FLAG
VAR = np.array(VAR)
VAR[~cond] = np.nan
return VAR
def date_to_utc(fecha):
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
sec_utc = (fecha - utc).total_seconds()
return sec_utc
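# e.g. date_to_utc(datetime(1970, 1, 2)) == 86400.0 (one day after the epoch)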
def selecc_data(data, tshk):
time = data[0] #[s] utc sec
rate = data[1]
    day = 86400. # [sec]
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
tshk_utc = (tshk - utc).total_seconds()
    ti = tshk_utc - 10.*day # [sec] utc
tf = tshk_utc + 30.*day
cond = (time > ti) & (time < tf)
time = (time[cond] - tshk_utc) / day # [days] since shock
rate = rate[cond]
return (time, rate)
def selecc_window(data, tini, tend):
time = data[0] #[s] utc sec
y = data[1]
    day = 86400. # [sec]
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
tini_utc = (tini - utc).total_seconds() # [s] utc sec
tend_utc = (tend - utc).total_seconds() # [s] utc sec
    ti = tini_utc # [sec] utc
tf = tend_utc
cond = (time > ti) & (time < tf)
time = (time[cond] - tini_utc) / day # [days] since 'ti'
y = y[cond]
return (time, y)
def enoughdata(var, fgap):
n = len(var)
ngood = len(find(~isnan(var)))
    fdata = 1.*ngood/n # fraction of non-gap data
if fdata>=(1.-fgap):
return True
else:
return False
def averages_and_std(n_icmes, t_shck, ti_icme, dTday, nbin, t_utc, VAR, fgap):
day = 86400.
nok=0; nbad=0
adap = []
for i in range(n_icmes):
dT = (ti_icme[i] - t_shck[i]).total_seconds()/day # [day]
if dT>dTday:
dt = dT/nbin
t, var = selecc_window(
[t_utc, VAR],
t_shck[i], ti_icme[i]
)
            if enoughdata(var, fgap): # require that no more than 100*fgap% of the window is gaps
adap += [adaptar(nbin, dt, t, var)]
nok +=1
else:
continue
else:
print " i:%d ---> Este evento es muy chico!, dT/day:%g" % (i, dT)
nbad +=1
VAR_adap = zeros(nbin*nok).reshape(nok, nbin)
for i in range(nok):
VAR_adap[i,:] = adap[i][1]
VAR_avrg = zeros(nbin)
VAR_std = zeros(nbin)
ndata = zeros(nbin)
for i in range(nbin):
cond = ~isnan(VAR_adap.T[i,:])
        ndata[i] = len(find(cond)) # number of data points != flag
        VAR_avrg[i] = mean(VAR_adap.T[i,cond]) # average over the non-flagged values
        VAR_std[i] = std(VAR_adap.T[i,cond]) # std of the same data set
tnorm = adap[0][0]
return [nok, nbad, tnorm, VAR_avrg, VAR_std, ndata]
def adaptar(n, dt, t, r):
    #n = int(5./dt) # number of points in the whole plotting interval
tt = zeros(n)
rr = zeros(n)
for i in range(n):
tmin = i*dt
tmax = (i+1.)*dt
cond = (t>tmin) & (t<tmax)
tt[i] = mean(t[cond])
rr[i] = mean(r[cond])
return [tt/(n*dt), rr]
def adaptar(nwndw, dT, n, dt, t, r): # NOTE: this definition shadows the one above
    #n = int(5./dt) # number of points in the whole plotting interval
tt = zeros(n)
rr = zeros(n)
    _nbin_ = n/(1+nwndw[0]+nwndw[1]) # number of bins in the sheath
for i in range(n):
tmin = (i-nwndw[0]*_nbin_)*dt
tmax = tmin + dt
cond = (t>tmin) & (t<tmax)
tt[i] = mean(t[cond])#; print "tt:", t[i]; pause(1)
rr[i] = mean(r[cond])
    return [tt/dT, rr] # time normalized by the sheath duration
#@profile
def adaptar_ii(nwndw, dT, n, dt, t, r, fgap):
    tt = zeros(n)
    rr = zeros(n)
    _nbin_ = n/(1+nwndw[0]+nwndw[1]) # number of bins in the sheath/MC
    cc = (t>0.) & (t<dT) # the sheath/MC interval
    #print " r[cc]: ", r[cc]
    if len(r[cc])==0: # no data in this window
        rr = nan*ones(n)
        enough = False
    else:
        enough = enoughdata(r[cc], fgap) # [bool] True if more than 100*(1-fgap)% of the data is good
    if not(enough):
        rr = nan*ones(n) # without enough data, this event contributes nothing
    for i in range(n):
        tmin = (i-nwndw[0]*_nbin_)*dt
        tmax = tmin + dt
        cond = (t>=tmin) & (t<=tmax)
        #tt[i] = mean(t[cond])#; print "tt:", t[i]; pause(1) # bug
        tt[i] = tmin + .5*dt # bug fixed
        if enough:
            #cc = ~isnan(r[cond]) # don't forget to filter out the gaps
            #rr[i] = mean(r[cond][cc])
            rr[i] = nanmean(r[cond])
    return enough, [tt/dT, rr] # time normalized by the sheath/MC/etc duration
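# Illustrative call (hypothetical numbers): with nwndw=[1,2] and n=200 bins,
# _nbin_=50 bins cover the structure itself, the first 50 bins cover one
# structure-width before it, and the last 100 bins two widths after; the
# returned time axis then runs from about -1.0 to +3.0 in units of dT.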
#@profile
def selecc_window_ii(nwndw, data, tini, tend):
time = data[0] #[s] utc sec
y = data[1]
    day = 86400. # [sec]
utc = datetime(1970, 1, 1, 0, 0, 0, 0)
tini_utc = (tini - utc).total_seconds() # [s] utc sec
tend_utc = (tend - utc).total_seconds() # [s] utc sec
dt = tend_utc - tini_utc
    ti = tini_utc - nwndw[0]*dt # [sec] utc
tf = tend_utc + nwndw[1]*dt
cond = (time > ti) & (time < tf)
time = (time[cond] - tini_utc) / day # [days] since 'ti'
y = y[cond]
return (time, y)
def averages_and_std_ii(nwndw,
SELECC, #MCsig, MCwant,
n_icmes, tini, tend, dTday, nbin, t_utc, VAR):
day = 86400.
nok=0; nbad=0
adap = []
for i in range(n_icmes):
dT = (tend[i] - tini[i]).total_seconds()/day # [day]
if ((dT>dTday) & SELECC[i]):# (MCsig[i]>=MCwant)):
dt = dT*(1+nwndw[0]+nwndw[1])/nbin
t, var = selecc_window_ii(
                nwndw, # number of window-widths backward and forward
[t_utc, VAR],
tini[i], tend[i]
)
            adap += [adaptar(nwndw, dT, nbin, dt, t, var)] # rebin using 'dt' as the new bin width
nok +=1
else:
print " i:%d ---> Filtramos este evento!, dT/day:%g" % (i, dT)
nbad +=1
VAR_adap = zeros(nbin*nok).reshape(nok, nbin)
for i in range(nok):
VAR_adap[i,:] = adap[i][1]
VAR_avrg = zeros(nbin)
VAR_medi = zeros(nbin)
VAR_std = zeros(nbin)
ndata = zeros(nbin)
for i in range(nbin):
cond = ~isnan(VAR_adap.T[i,:])
        ndata[i] = len(find(cond)) # number of data points != flag
        VAR_avrg[i] = mean(VAR_adap.T[i,cond]) # average over the non-flagged values
        VAR_medi[i] = median(VAR_adap.T[i,cond])# median of the non-flagged values
        VAR_std[i] = std(VAR_adap.T[i,cond]) # std of the same data set
tnorm = adap[0][0]
return [nok, nbad, tnorm, VAR_avrg, VAR_medi, VAR_std, ndata]
def mvs_for_each_event(VAR_adap, nbin, nwndw, Enough, verbose=False):
    nok = size(VAR_adap, axis=0)
    mvs = zeros(nok) # mean values for each event
    binsPerTimeUnit = nbin/(1+nwndw[0]+nwndw[1]) # number of bins per unit of time
    start = nwndw[0]*binsPerTimeUnit # the structure (MC or sheath) starts at this bin
    for i in range(nok):
        aux = VAR_adap[i, start:start+binsPerTimeUnit] # (*)
        cc = ~isnan(aux) # pick good-data only
        #if len(find(cc))>1:
        if Enough[i]: # only report the events with *enough data*
            if verbose:
                print ccl.G + "id %d/%d: %r"%(i+1, nok, aux[cc]) + ccl.W
            mvs[i] = mean(aux[cc])
        else:
            mvs[i] = nan
    #(*): this is the time series (of this variable) for event "i"
pause(1)
return mvs
def diff_dates(tend, tini):
n = len(tend)
diffs = np.nan*np.ones(n)
for i in range(n):
        ok = type(tend[i]) == type(tini[i]) == datetime # both must be dates!
if ok:
diffs[i] = (tend[i] - tini[i]).total_seconds()
else:
diffs[i] = np.nan
return diffs #[sec]
def write_variable(fout, varname, dims, var, datatype, comments):
dummy = fout.createVariable(varname, datatype, dims)
dummy[:] = var
dummy.units = comments
def calc_beta(Temp, Pcc, B):
    # We take the OMNI definition, from:
# http://omniweb.gsfc.nasa.gov/ftpbrowser/magnetopause/Reference.html
# http://pamela.roma2.infn.it/index.php
# Beta = [(4.16*10**-5 * Tp) + 5.34] * Np/B**2 (B in nT)
#
beta = ((4.16*10**-5 * Temp) + 5.34) * Pcc/B**2
return beta
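# Worked example (hypothetical values): Tp=1e5 K, Np=5 /cc and B=5 nT give
# beta = ((4.16e-5 * 1e5) + 5.34) * 5 / 5**2 = 9.5 * 5 / 25 = 1.9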
def thetacond(ThetaThres, ThetaSh):
"""
Set a lower threshold for shock orientation, using Wang's
catalog of shocks.
NOTE: Near 180Â means very close to the nose!
"""
if ThetaThres<=0.:
        print ccl.Rn + ' ----> BAD WANG FILTER!!: ThetaThres<=0.'
        print ' ----> Exiting...' + ccl.Rn
raise SystemExit
#return ones(len(ThetaSh), dtype=bool)
else:
return (ThetaSh > ThetaThres)
def wangflag(ThetaThres):
if ThetaThres<0:
return 'NaN'
else:
return str(ThetaThres)
def makefig(medVAR, avrVAR, stdVAR, nVAR, tnorm,
SUBTITLE, YLIMS, YLAB, fname_fig):
fig = figure(1, figsize=(13, 6))
ax = fig.add_subplot(111)
ax.plot(tnorm, avrVAR, 'o-', color='black', markersize=5, label='mean')
ax.plot(tnorm, medVAR, 'o-', color='red', alpha=.5, markersize=5, markeredgecolor='none', label='median')
inf = avrVAR + stdVAR/np.sqrt(nVAR)
sup = avrVAR - stdVAR/np.sqrt(nVAR)
ax.fill_between(tnorm, inf, sup, facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=1.0, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
ax.legend(loc='upper right')
ax.grid()
ax.set_ylim(YLIMS)
TITLE = SUBTITLE
ax.set_title(TITLE)
ax.set_xlabel('time normalized to MC passage time [1]', fontsize=14)
ax.set_ylabel(YLAB, fontsize=20)
savefig(fname_fig, format='png', dpi=180, bbox_inches='tight')
close()
def makefig_ii(mc, sh, YLIMS, YLAB, **kws):
"""
- ftext{bool}:
if False, we put the text in the title. Otherwise, we put
the text inside the figure, using `TEXT_LOC`{dict} as positions
- TEXT_LOC{dict}:
coordinates for the text inside the figure. The `TEXT_LOC['sh']`{2-tuple} are
the positions for the left part, and `TEXT_LOC['mc']`{2-tuple} for the right
part.
"""
#--- kws
ftext = kws.get('ftext', False)
TEXT = kws.get('TEXT', None)
TEXT_LOC = kws.get('TEXT_LOC', None)
fname_fig = kws.get('fname_fig', None)
#-------------------------------------
    fmc,fsh = 3.0, 1.0 # time scalings
#--- if figure is not given, create one
if 'fig' in kws:
fig, ax = kws['fig'], kws['ax']
else:
fig = figure(1, figsize=(13, 6))
ax = fig.add_subplot(111)
# catch the name of the observable
if 'varname' in kws:
varname = kws['varname']
else:
varname = fname_fig[:-4].split('_')[-1]
if(varname == 'Temp'):
mc.med /= 1.0e4; sh.med /= 1.0e4
mc.avr /= 1.0e4; sh.avr /= 1.0e4
mc.std_err /= 1.0e4; sh.std_err /= 1.0e4
YLIMS[0] /= 1.0e4; YLIMS[1] /= 1.0e4
if ftext:
TEXT_LOC['mc'][1] /= 1.0e4
TEXT_LOC['sh'][1] /= 1.0e4
    # MC curves
time = fsh+fmc*mc.tnorm
cc = time>=fsh
ax.plot(time[cc], mc.avr[cc], 'o-', color='black', markersize=5)
ax.plot(time[cc], mc.med[cc], 'o-', color='red', alpha=.8, markersize=5, markeredgecolor='none')
    # MC shaded error band
inf = mc.avr + mc.std_err/np.sqrt(mc.nValues)
sup = mc.avr - mc.std_err/np.sqrt(mc.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((fsh, 0.), width=fmc, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
    # sheath curves
time = fsh*sh.tnorm
cc = time<=fsh
ax.plot(time[cc], sh.avr[cc], 'o-', color='black', markersize=5)
ax.plot(time[cc], sh.med[cc], 'o-', color='red', alpha=.8, markersize=5, markeredgecolor='none')
    # sheath shaded error band
inf = sh.avr + sh.std_err/np.sqrt(sh.nValues)
sup = sh.avr - sh.std_err/np.sqrt(sh.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
#trans = transforms.blended_transform_factory(
# ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=fsh, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
ax.tick_params(labelsize=17)
ax.grid()
ax.set_ylim(YLIMS)
if ftext:
ax.text(TEXT_LOC['mc'][0], TEXT_LOC['mc'][1], TEXT['mc'], fontsize=22)
ax.text(TEXT_LOC['sh'][0], TEXT_LOC['sh'][1], TEXT['sh'], fontsize=22)
else:
if TEXT is not None:
ax.set_title(
'left: '+TEXT['sh']+'\n'
'right: '+TEXT['mc']
)
else:
pass # no text anywhere
ax.set_ylabel(YLAB, fontsize=27)
# if `varname` has any of these strings, plot in log-scale.
if any([(nm in varname) for nm in \
('beta','Temp', 'rmsB', 'rmsBoB', 'ratio')]):
ax.set_yscale('log')
else:
ax.set_yscale('linear')
ax.legend(loc='best', fontsize=20)
if 'fig' not in kws: # if figure not given, save to disk
ax.set_xlim(-2.0, 7.0)
ax.set_xlabel('time normalized to sheath/MC passage [1]', fontsize=25)
savefig(fname_fig, format='png', dpi=100, bbox_inches='tight')
close()
return None
else:
# return changes of passed figure
return fig, ax
#--- check that the first column of the file has no repeated elements
def check_redundancy(fname, name):
f = open(fname, 'r')
dummy = {}
for line in f:
ll = line.split(' ')
varname = ll[0]
dummy[varname] = 0
    f.close()
    dummy_names = dummy.keys()
dummy_set = set(dummy_names)
redundancy = len(dummy_set)<len(dummy_names)
overwriting = name in dummy_set
if redundancy or overwriting:
return True
else:
return False
class general:
def __init__(self):
self.name = 'name'
class dummy1:
def __init__(self,):
pass
class dummy2 (object):
"""
can be used:
>>> dd = dummy2()
>>> dd['name'] = [3,4,5]
>>> dd['name2'].time = [0,1,2,3,4]
"""
def __init__(self):
self.this = {}
def __getitem__(self, idx):
if not idx in self.this.keys():
self.this[idx] = dummy1()
return self.this[idx]
def set(self, name, attname, value):
if not name in self.this.keys():
self.this[name] = dummy1()
setattr(self.this[name], attname, value)
def keys(self,):
return self.this.keys()
class boundaries:
def __init__(self):
print self.__dict__
        #print dict(self) # NOTE: dict(self) raises TypeError; self.__dict__ is printed above
def ff(self):
self.fg = 0.2
def nans(sh):
return np.nan*np.ones(sh)
def grab_time_domain(adap, check=False):
"""
Search for a valid time domain for
this `varname` and return.
If `check`==True, it checks that all time domains
are the same (for all `varname`s) unless a difference
of 10 times the numerical epsilon.
"""
na = len(adap)
# grab all posible time domains
found = False
for i in range(na):
for name in adap[i].keys():
if not(found):
tarr = adap[i][name][0]
if tarr is not None:
found = True
if found:
# we found a valid time domain (`tarr`)
if check:
# assume time array is 'np.float32'
eps32 = np.finfo(np.float32).eps
for i in range(na):
for name in adap[i].keys():
tarr_ = adap[i][name][0]
if tarr_ is not None:
# they differ at most in its
# numerical epsilon
ok = (tarr_-tarr<=eps32)
assert ok.prod(),\
" we have more than 1 valid time domain!!:\n%r\n\n%r"%(
tarr_, tarr)
return tarr
#--- didn't find any valid time domain
    try:
        # hang in debug mode
        import pdb; pdb.set_trace()
    except ImportError:
        # ok, get out!
        raise SystemExit(
            'shut! none are valid time domains:\n %r' % adap
        )
class events_mgr(object):
def __init__(self, gral, FILTER, CUTS, bd, nBin, fgap, tb, z_exp, structure='mc', fparam='mc_V', verbose=True):
"""
structure: can be 'sh.mc', 'sh.i', 'mc', 'i', refering to sheath-of-mc,
sheath-of-icme, mc, and icme, respectively. This is to
use the proper mean values calculated in each structure.
"""
self.fparam = fparam
self.structure = structure
self.data_name = gral.data_name
self.FILTER = FILTER
self.CUTS = CUTS
self.bd = bd
self.nBin = nBin
self.fgap = fgap
self.tb = tb
self.z_exp = z_exp
self.dir_plots = gral.dirs['dir_plots']
self.dir_ascii = gral.dirs['dir_ascii']
self.gral = gral
self._dirs_ = gral.dirs
self.verbose = verbose
#self.f_sc = netcdf_file(gral.fnames[gral.data_name], 'r')
self.f_events = netcdf_file(gral.fnames['table_richardson'], 'r')
print " -------> archivos input leidos!"
#--- put False to all possible data-flags (all CR detector-names
# must be included in 'self.CR_observs')
self.names_ok = ('Auger_BandMuons', 'Auger_BandScals', 'Auger_scals', \
'McMurdo', 'ACE', 'ACE_o7o6', 'ACE1sec')
for name in self.names_ok:
read_flag = 'read_'+name
setattr(self, read_flag, False) # True: if files are already read
#--- names of CR observatories
self.CR_observs = ( #must **include** the 'load_data_..()' methods
'Auger_scals', 'Auger_BandMuons', 'Auger_BandScals',\
'McMurdo')
#--- just a check for load_data_.. methods
for att_name in dir(events_mgr): # iterate on all methods
if att_name.startswith('load_data_'):
att_suffix = att_name.replace('load_data_', '')
assert att_suffix in self.names_ok,\
" [-] ERROR: one of the methods '%s' is NOT taken into account in 'self.CR_observs' (%s) " % (att_name, att_suffix)
        self.data_name_ = str(self.data_name) # name of the initial input data (*1)
self.IDs_locked = False # (*2)
"""
(*1): si despues cambia 'self.data_name', me voy a dar
cuenta en la "linea" FLAG_001.
(*2): lock in lock_IDs().
True: if the id's of the events have been
fixed/locked, so that later analysis is
resctricted only with theses locked id's.
"""
#++++++++++ CORRECTION OF BORDERS ++++++++++
        # IMPORTANT:
        # Only valid for the "63 events" (MCflag='2', and visible from ACE)
        # NOTE: shock jumps come out more pronounced with True.
# TODO: make a copy/deepcopy of `tb` and `bd`, so that we don't
# bother the rest of data_names (i.e. Auger_scals, Auger_BandMuons,
# etc.)
if FILTER['CorrShift']:
ShiftCorrection(ShiftDts, tb.tshck)
ShiftCorrection(ShiftDts, tb.tini_icme)
ShiftCorrection(ShiftDts, tb.tend_icme)
ShiftCorrection(ShiftDts, tb.tini_mc)
ShiftCorrection(ShiftDts, tb.tend_mc)
ShiftCorrection(ShiftDts, bd.tini)
ShiftCorrection(ShiftDts, bd.tend)
def run_all(self, _data_handler):
        #----- event selection
        self.filter_events()
        print "\n ---> event filtering (n:%d): OK\n" % (self.n_SELECC)
        #----- load data and the "omni" shift-times
        self.load_files_and_timeshift_ii(_data_handler)
        #----- rebinning and averages
        self.rebine()
        self.rebine_final()
        #----- make the plots
        self.make_plots()
        #----- "stuff" files
        self.build_params_file()
#@profile
def rebine(self, collect_only=False):
"""
        rebin each event
"""
nvars = self.nvars #len(VARS)
n_icmes = self.tb.n_icmes
bd = self.bd
VARS = self.VARS
nbin = self.nBin['total']
nwndw = [self.nBin['before'], self.nBin['after']]
day = 86400.
        #---- we want a list of the event ids that will be included in each average :-)
IDs = {}
Enough, nEnough = {}, {}
        self.__ADAP__ = ADAP = [] # collection of several 'adap' (one per variable)
for varname in VARS.keys():
IDs[varname] = []
Enough[varname] = []
nEnough[varname] = 0 # counter
        # loop over the events:
        nok, nbad = 0, 0
        nnn = 0 # number of events that pass the a-priori filter
self.out = {}
if collect_only:
self.out['events_data'] = {} # bag to save data from events
ok = np.zeros(n_icmes,dtype=np.bool) # all `False` by default
for i in range(n_icmes):
            try: # not every element of 'tend' is a date (some events have no defined date)
# this 'i'-event must be contained in our data-base
ok[i] = date_to_utc(bd.tini[i]) >= self.t_utc[0] #True
ok[i] &= date_to_utc(bd.tend[i]) <= self.t_utc[-1]
if self.IDs_locked:
ok[i] &= i in self.restricted_IDs
except: # e.g. if `bd.{tini,tend}[i]` is NaN
ok[i] = False
for i in range(n_icmes):
            #np.set_printoptions(4) # number of digits when printing numpy.arrays
if not (ok[i] & self.SELECC[i]): #---FILTRO--- (*1)
print ccl.Rn, " id:%d ---> ok, SELECC: "%i, ok[i], self.SELECC[i], ccl.W
nbad +=1
continue
dT = (bd.tend[i] - bd.tini[i]).total_seconds()/day # [day]
            ADAP += [ {} ] # append a dictionary to the list
nnn += 1
print ccl.Gn + " id:%d ---> dT/day:%g" % (i, dT) + ccl.W
print self.tb.tshck[i]
nok +=1
if collect_only:
# evdata is just a pointer
evdata = self.out['events_data']['id_%03d'%i] = dummy2() #{}
            # loop over the variables:
for varname in VARS.keys():
dt = dT*(1+nwndw[0]+nwndw[1])/nbin
t, var = self.grab_window(
                    nwndw=nwndw, # plotting range
data=[self.t_utc, VARS[varname]['value']],
tini=bd.tini[i],
tend=bd.tend[i],
vname=varname, # for ACE 1sec
)
if collect_only:
evdata.set(varname, 'time', t)
evdata.set(varname, 'data', var)
#--- read average CR rates before shock/disturbance
if self.data_name in self.CR_observs: # is it CR data?
rate_pre = getattr(self, 'rate_pre_'+self.data_name)
var = 100.*(var - rate_pre[i]) / rate_pre[i]
                #--- rebin using 'dt' as the width of the new binning
out = adaptar_ii(
nwndw = nwndw,
dT = dT,
n = nbin,
dt = dt,
t = t,
r = var,
fgap = self.fgap
)
                enough = out[0] # True: data with less than 100*'fgap'% of gaps
Enough[varname] += [ enough ]
                ADAP[nok-1][varname] = out[1] # out[1] = [time, variable]
if enough:
#import pdb; pdb.set_trace()
IDs[varname] += [i]
nEnough[varname] += 1
#NOTE: `ADAP` points to `self.__ADAP__`
print " ----> len.ADAP: %d" % len(ADAP)
self.__nok__ = nok
self.__nbad__ = nbad
self.out['nok'] = nok
self.out['nbad'] = nbad
self.out['IDs'] = IDs
self.out['nEnough'] = nEnough
self.out['Enough'] = Enough
def lock_IDs(self):
"""
This assumes that 'IDs' has only *one* key.
That is, len(IDs)=1 !!
"""
IDs = self.out['IDs']
varname = IDs.keys()[0]
self.restricted_IDs = IDs[varname]
self.IDs_locked = True
def rebine_final(self):
"""
rebineo de c/evento ... PARTE FINAL
"""
nvars = self.nvars #len(VARS)
VARS = self.VARS
nbin = self.nBin['total']
nwndw = [self.nBin['before'], self.nBin['after']]
day = 86400.
        ## outputs of 'self.rebine()'
ADAP = self.__ADAP__
Enough = self.out['Enough']
nEnough = self.out['nEnough']
IDs = self.out['IDs']
nok = self.out['nok']
nbad = self.out['nbad']
stuff = {} #[]
        # Make room for the rebinned data (for possible post-analysis use)
if self.data_name==self.data_name_:
            self.rebined_data = {} # create the dictionary only ONCE
for varname in VARS.keys():
if self.verbose:
print ccl.On + " -------> procesando: %s" % VARS[varname]['label']
print " nEnough/nok/(nok+nbad): %d/%d/%d " % (nEnough[varname], nok, nok+nbad) + ccl.W
            VAR_adap = zeros((nok, nbin)) # rebinned profiles (*)
            # (*): one of these per variable
            # loop over the 'nok' events that passed the filter above:
            for i in range(nok):
                VAR_adap[i,:] = ADAP[i][varname][1] # rebinned values of variable "varname" for event "i"
self.rebined_data[varname] = VAR_adap
            # mean values of this variable for each event
avrVAR_adap = mvs_for_each_event(VAR_adap, nbin, nwndw, Enough[varname], self.verbose)
if self.verbose:
print " ---> (%s) avrVAR_adap[]: \n" % varname, avrVAR_adap
VAR_avrg = zeros(nbin)
VAR_avrgNorm = zeros(nbin)
VAR_medi = zeros(nbin)
VAR_std = zeros(nbin)
ndata = zeros(nbin)
            # loop bin by bin to compute the mean, median, error, etc...
for i in range(nbin):
                cond = ~np.isnan(VAR_adap.T[i,:]) # filter out events contributing no data in this bin
                ndata[i] = len(find(cond)) # number of non-NaN data points
                VAR_avrg[i] = np.mean(VAR_adap.T[i,cond]) # mean over the non-flagged values
                VAR_avrgNorm[i] = np.mean(VAR_adap.T[i,cond]/avrVAR_adap[cond])
                VAR_medi[i] = np.median(VAR_adap.T[i,cond]) # median over the non-flagged values
                VAR_std[i] = np.std(VAR_adap.T[i,cond]) # std of the same data set
stuff[varname] = [VAR_avrg, VAR_medi, VAR_std, ndata, avrVAR_adap]
        # NOTE: check that 'ADAP[j][varname][0]' is the same for ALL
        # events 'j', and for ALL 'varname'.
self.out['dVARS'] = stuff
self.out['tnorm'] = grab_time_domain(ADAP, check=True)
"""def __getattr__(self, attname):
if attname[:10]=='load_data_':
return self.attname"""
def load_files_and_timeshift_ii(self, _data_handler, obs_check=None):
"""
INPUT
-----
* _data_handler:
class that handles the i/o of the database related to 'data_name'.
* obs_check:
if not None, is a list of strings related to the names of
the observables of our interest. The idea is to make
sure that we are asking for variables that are included
in our database `self.VARS`.
"""
read_flag = 'read_'+self.data_name # e.g. self.read_Auger
if not(read_flag in self.__dict__.keys()): # do i know u?
setattr(self, read_flag, False) #True: if files are already read
#--- read data and mark flag as read!
if not( getattr(self, read_flag) ):
attname = 'load_data_'+self.data_name
dh = _data_handler(
input=self.gral.fnames[self.data_name],
)
# point to the method that selects data from
# a given window
self.grab_window = dh.grab_block # {method}
# grab/point-to data from disk
#NOTE: if self.FILTER['CorrShift']==True, then `self.tb` and
# `self.bd` will be shifted!
out = dh.load(data_name=self.data_name, tb=self.tb, bd=self.bd)
# attribute data pointers to `self`
for nm, value in out.iteritems():
# set `t_utc` and `VAR` to `self`
setattr(self,nm,value)
# check that we are grabbing observables of our
# interest
if obs_check is not None:
for nm in obs_check:
nm_ = nm+'.'+self.data_name
assert nm_ in self.VARS.keys(),\
" %s is not database list: %r"%(nm_, self.VARS.keys())
self.nvars = len(self.VARS.keys())
# mark as read
            setattr(self, read_flag, True) # True: the input files have been read
#--- check weird case
assert self.data_name in self.names_ok,\
_ERROR_+" not on my list!: %s" % self.data_name+\
"\n Must be one of these: %r" % [self.names_ok]
def make_plots(self):
"""
#---- generar figuras y asciis de los perfiles promedio/mediana
"""
nBin = self.nBin
fgap = self.fgap
MCwant = self.FILTER['MCwant']
ThetaThres = self.CUTS['ThetaThres']
if self.FILTER['vsw_filter']:
v_lo, v_hi = self.CUTS['v_lo'], self.CUTS['v_hi']
else:
            v_lo, v_hi = 0.0, 0.0 # these values mean no filter is applied
if self.FILTER['z_filter_on']:
z_lo, z_hi = self.CUTS['z_lo'], self.CUTS['z_hi']
else:
z_lo, z_hi = 0.0, 0.0
if self.FILTER['B_filter']:
B_lo, B_hi = self.CUTS['B_lo'], self.CUTS['B_hi']
else:
            B_lo, B_hi = 0.0, 0.0 # these values mean no filter is applied
if self.FILTER['filter_dR.icme']:
dR_lo, dR_hi = self.CUTS['dR_lo'], self.CUTS['dR_hi']
else:
            dR_lo, dR_hi = 0.0, 0.0 # these values mean no filter is applied
        nbin = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime'] # [1] number of bins wanted in the average profile
        #-------------------- prefixes:
        # prefix for the Wang filter:
if self.FILTER['wang']:
WangFlag = str(ThetaThres)
else:
WangFlag = 'NaN'
        # general prefix for the figure names:
if self.FILTER['CorrShift']:
prexShift = 'wShiftCorr'
else:
prexShift = 'woShiftCorr'
#-------------------------------
        # generic names...
DIR_FIGS = '%s/MCflag%s/%s' % (self.dir_plots, MCwant['alias'], prexShift)
DIR_FIGS += '/' + self._dirs_['suffix']
DIR_ASCII = '%s/MCflag%s/%s' % (self.dir_ascii, MCwant['alias'], prexShift)
DIR_ASCII += '/' + self._dirs_['suffix']
        os.system('mkdir -p %s' % DIR_FIGS) # create it if it doesn't exist
        os.system('mkdir -p %s' % DIR_ASCII) # (ditto)
        print ccl.On + " -------> creating: %s" % DIR_FIGS + ccl.W
        print ccl.On + " -------> creating: %s" % DIR_ASCII + ccl.W
FNAMEs = 'MCflag%s_%dbefore.%dafter_fgap%1.1f' % (MCwant['alias'], nBin['before'], nBin['after'], fgap)
FNAMEs += '_Wang%s' % (WangFlag)
if self.FILTER['vsw_filter']: FNAMEs += '_vlo.%03.1f.vhi.%04.1f' % (v_lo, v_hi)
if self.FILTER['z_filter_on']: FNAMEs += '_zlo.%2.2f.zhi.%2.2f' % (z_lo, z_hi)
if self.FILTER['B_filter']: FNAMEs += '_Blo.%2.2f.Bhi.%2.2f' % (B_lo, B_hi)
if self.FILTER['filter_dR.icme']: FNAMEs += '_dRlo.%2.2f.dRhi.%2.2f' % (dR_lo, dR_hi)
if not self.FILTER['vsw_filter']:
FNAMEs += '_' # flag for post-processing, indicating
# there was no splitting
FNAME_ASCII = '%s/%s' % (DIR_ASCII, FNAMEs)
FNAME_FIGS = '%s/%s' % (DIR_FIGS, FNAMEs)
fname_nro = DIR_ASCII+'/'+'n.events_'+FNAMEs+'.txt'
#'w': write mode #'a': append mode
#---FLAG_001
if self.data_name==self.data_name_:
fnro = open(fname_nro, 'w')
else:
            fnro = open(fname_nro, 'a') # if another input dataset is used, keep appending
                                        # the number of events at the end of 'fname_nro'
#-------------------------------------------------------------------
nvars = len(self.VARS)
for varname in self.VARS.keys():
fname_fig = '%s_%s.png' % (FNAME_FIGS, varname) #self.VARS[i][1])
print ccl.Rn+ " ------> %s" % fname_fig
ylims = self.VARS[varname]['lims'] #self.VARS[i][2]
ylabel = self.VARS[varname]['label'] #self.VARS[i][3]
average = self.out['dVARS'][varname][0]
mediana = self.out['dVARS'][varname][1] #self.out['dVARS'][i][4]
std_err = self.out['dVARS'][varname][2]
            nValues = self.out['dVARS'][varname][3] # number of values contributing to each data bin
N_selec = self.out['nok'] #self.out['dVARS'][varname][0]
N_final = self.out['nEnough'][varname] #nEnough[i]
SUBTITLE = '# of selected events: %d \n\
events w/80%% of data: %d \n\
bins per time unit: %d \n\
MCflag: %s \n\
WangFlag: %s' % (N_selec, N_final, nBin['bins_per_utime'], MCwant['alias'], WangFlag)
makefig(mediana, average, std_err, nValues, self.out['tnorm'],
SUBTITLE, ylims, ylabel, fname_fig)
fdataout = '%s_%s.txt' % (FNAME_ASCII, varname) #self.VARS[i][1])
dataout = np.array([self.out['tnorm'] , mediana, average, std_err, nValues])
print " ------> %s\n" % fdataout + ccl.W
np.savetxt(fdataout, dataout.T, fmt='%12.5f')
            #-------- record the number of selected events for this variable
line = '%s %d %d\n' % (varname, N_final, N_selec)
fnro.write(line)
print ccl.Rn + " --> nro de eventos seleccionados: " + fname_nro + ccl.W
fnro.close()
        #--- outputs (besides the .png files)
self.DIR_ASCII = DIR_ASCII
self.FNAMEs = FNAMEs
def build_params_file(self):
"""
Construye archivo q tiene cosas de los eventos seleccionados:
- valores medios de los observables (B, Vsw, Temp, beta, etc)
- los IDs de los eventos
- duracion de los MCs y las sheaths
"""
DIR_ASCII = self.DIR_ASCII
FNAMEs = self.FNAMEs
#-------------------------------------------- begin: NC_FILE
print "\n*********************************** begin: NC_FILE"
        #------- generate a record of the IDs of the
        #        events that entered the averages.
        #        Note: one record per variable.
fname_out = DIR_ASCII+'/'+'_stuff_'+FNAMEs+'.nc' #'./test.nc'
#---FLAG_001
if self.data_name==self.data_name_:
fout = netcdf_file(fname_out, 'w')
print "\n ----> generando: %s\n" % fname_out
else:
            fout = netcdf_file(fname_out, 'a')
            # mode 'a': if another input dataset is used, keep appending
            # the number of events at the end of 'fname_out'
            print "\n ----> appending to: %s\n" % fname_out
IDs = self.out['IDs']
for varname in self.VARS.keys():
print " ----> " + varname
n_events = len(IDs[varname])
dimname = 'nevents_'+varname
fout.createDimension(dimname, n_events)
print " n_events: ", n_events
prom = self.out['dVARS'][varname][4]
cc = np.isnan(prom)
print " nprom (all) : ", prom.size
prom = prom[~cc]
print " nprom (w/o nan): ", prom.size
dims = (dimname,)
write_variable(fout, varname, dims, prom, 'd',
'average_values per event')
            #---------- IDs for this variable
ids = map(int, IDs[varname])
vname = 'IDs_'+varname
write_variable(fout, vname, dims, ids, 'i',
'event IDs that enter in this parameter average')
            #---------- duration of the structure
dtsh = np.zeros(len(ids))
dtmc = np.zeros(len(ids))
for i in range(len(ids)):
id = ids[i]
dtsh[i] = self.dt_sh[id]
dtmc[i] = self.dt_mc[id]
vname = 'dt_sheath_'+varname
write_variable(fout, vname, dims, dtsh, 'd', '[days]')
vname = 'dt_mc_'+varname
write_variable(fout, vname, dims, dtmc, 'd', '[days]')
fout.close()
print "**************************************** end: NC_FILE"
#---------------------------------------------- end: NC_FILE
def filter_events(self):
structure = self.structure
tb = self.tb
FILTER = self.FILTER
dTday = self.CUTS['dTday']
day = 86400.
AU_o_km = 1./(150.0e6)
sec_o_day = 86400.
#------------------------------------ EVENTS's PARAMETERS
        #MCsig = array(f_events.variables['MC_sig'].data)# 2,1,0: MC, rotation, irregular
        #Vnsh = array(f_events.variables['wang_Vsh'].data) # normal velocity of the shock
        ThetaSh = np.array(self.f_events.variables['wang_theta_shock'].data) # orientation of the shock normal
        i_V = self.f_events.variables[structure+'_V'].data.copy() # ICME speed
        i_B = self.f_events.variables[structure+'_B'].data.copy() # ICME magnetic field
        i_dt = self.f_events.variables[structure+'_dt'].data.copy() # ICME duration
i_dR = i_dt*(i_V*AU_o_km*sec_o_day)
# values of the observables to use for filtering
vfparam = get_fparam(self.f_events, self.fparam)
#RatePre_Names = []
        #--- set members of 'self' named 'rate_pre_...'
for vname in self.f_events.variables.keys():
if vname.startswith('rate_pre_'):
#RatePre_Names += [ vname ] # save them to make checks later
var = self.f_events.variables[vname].data.copy()
                setattr(self, vname, var) # assign 'rate_pre_...' to 'self'
"""
self.rate_pre = self.f_events.variables['rate_pre_McMurdo'].data.copy()
self.rate_pre_Auger=self.f_events.variables['rate_pre_Auger'].data.copy()
"""
self.Afd = self.f_events.variables['A_FD'].data.copy()
#------------------------------------
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #++++++++++++++++++ begin: EVENT SELECTION ++++++++++++++++++++++++++
#------- filter dates
BETW1998_2006 = np.ones(tb.n_icmes, dtype=bool)
if FILTER['choose_1998-2006']:
_until_jan98 = range(0, 26) # all events up to Jan/98
_after_dec06 = range(307, tb.n_icmes) # all after Dec/2006
for i in (_until_jan98 + _after_dec06):
BETW1998_2006[i] = False # 'False' to exclude events
        #------- select MCs by catalog label (lepping=2, etc.)
MC_FLAG = np.ones(tb.n_icmes, dtype=bool)
for i in range(tb.n_icmes):
MC_FLAG[i] = tb.MCsig[i] in FILTER['MCwant']['flags']
        #------- exclude events with 2 MCs
EVENTS_with_2MCs= (26, 148, 259, 295)
        MCmultiple = FILTER['Mcmultiple'] #False #True to include multi-MC events
        MCmulti = np.ones(tb.n_icmes, dtype=bool) # False for multi-MC events (YES, written correctly)
        if not FILTER['Mcmultiple']:
            for i in EVENTS_with_2MCs:
                MCmulti[i] &= False
        #------- shock orientation (Wang catalog)
if FILTER['wang']:
ThetaThres = self.CUTS['ThetaThres']
ThetaCond = thetacond(ThetaThres, ThetaSh) # set lower threshold
#------- duration of sheaths
self.dt_mc = diff_dates(tb.tend_mc, tb.tini_mc)/day # [day]
self.dt_sh = diff_dates(tb.tini_mc, tb.tshck)/day # [day]
dt = diff_dates(self.bd.tend, self.bd.tini)/day
DURATION = dt > dTday # sheaths>0
#------- speed of icmes
if FILTER['vsw_filter']:
v_lo = self.CUTS['v_lo']
v_hi = self.CUTS['v_hi']
SpeedCond = (vfparam>=v_lo) & (vfparam<v_hi)
#------- z expansion (a. gulisano)
z_exp = self.z_exp
if FILTER['z_filter_on']:
z_lo = self.CUTS['z_lo']
z_hi = self.CUTS['z_hi']
z_cond = (z_exp>=z_lo) & (z_exp<z_hi)
#------- <B> of icmes
if FILTER['B_filter']:
B_lo = self.CUTS['B_lo']
B_hi = self.CUTS['B_hi']
BfieldCond = (i_B>=B_lo) & (i_B<B_hi)
#------- size of icmes
if FILTER['filter_dR.icme']:
dR_lo = self.CUTS['dR_lo']
dR_hi = self.CUTS['dR_hi']
"""print " ---> i_dR: \n", i_dR
print " ---> i_dt: \n", i_dt
raw_input()"""
dRicmeCond = (i_dR>=dR_lo) & (i_dR<dR_hi)
        #------- total filter
        SELECC = np.ones(tb.n_icmes, dtype=bool)
        SELECC &= BETW1998_2006 # stay within this period of years
        SELECC &= MCmulti # multiple clouds
        SELECC &= MC_FLAG # cloud catalog
        SELECC &= DURATION # we don't want ~1hr sheaths, they only add noise
        if FILTER['wang']: SELECC &= ThetaCond # close to 180 deg means the nose of the shock
if FILTER['vsw_filter']: SELECC &= SpeedCond
if FILTER['z_filter_on']: SELECC &= z_cond
if FILTER['B_filter']: SELECC &= BfieldCond
if FILTER['filter_dR.icme']: SELECC &= dRicmeCond
self.SELECC = SELECC
self.n_SELECC = len(find(SELECC))
#self.aux['SELECC'] = self.SELECC
        #+++++++++++++++++ end: EVENT SELECTION ++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
if self.n_SELECC<=0:
print ccl.Rn + "\n --------> FATAL ERROR!!!: self.n_SELECC=<0"
print " exiting....... \n" + ccl.W
raise SystemExit
def get_fparam(finp, fparam='mc_V'):
"""
you can implement more acceptable fparam values, that can
imply an operation of several keys of the finp.variable.keys()
for instance.
"""
# keys of the usual .nc file
_keys_of_netcdf_file = ['sh.mc_V', 'mc_V', 'sh.mc_B', 'mc_B']
_keys_of_netcdf_file += ['sh.i_V', 'i_V', 'sh.i_B', 'i_B']
# check if it's a valid `fparam` && extract
if fparam in _keys_of_netcdf_file:
values = finp.variables[fparam].data.copy()
else:
raise SystemExit('\n [-] Unrecognized fparam value: '+fparam+'\n')
return values
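# --- Hedged usage sketch (editor's addition): `get_fparam` only needs an
# object exposing netcdf-style `.variables`; the file name below is
# hypothetical and not part of the original code.
#
#   finp = netcdf_file('icme_events.nc', 'r')
#   vsw = get_fparam(finp, fparam='mc_V') # mean solar-wind speed per event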
class RichTable(object):
def __init__(s, fname_rich):
s.fname_rich = fname_rich
s.tshck = []
s.tini_icme, s.tend_icme = [], []
s.tini_mc, s.tend_mc = [], []
s.Qicme = []
s.MCsig = []
s.Dst = []
def read(s):
print "\n ---> reading Richardson's table: %s" % s.fname_rich
frich = open(s.fname_rich, 'r')
print " file read."
ll, n = [], 0
for line in frich:
ll += [line.split(',')]
n +=1
print " lineas leidas: %d" % n
for i in range(1,n):
            #------ shock date
s.tshck += [datetime.strptime(ll[i][1][1:20],"%Y-%m-%d %H:%M:%S")]
            #------ ICME start date
            ss = ll[i][2][1:11].split() # string with the icme-start date
HH = int(ss[1][0:2])
MM = int(ss[1][2:4])
mm = int(ss[0].split('/')[0])
dd = int(ss[0].split('/')[1])
if mm==s.tshck[i-1].month:
yyyy = s.tshck[i-1].year
else:
yyyy = s.tshck[i-1].year + 1
s.tini_icme += [datetime(yyyy, mm, dd, HH, MM)]
            #------ ICME end date
ss = ll[i][3][1:11].split()
HH = int(ss[1][0:2])
MM = int(ss[1][2:4])
mm = int(ss[0].split('/')[0])
dd = int(ss[0].split('/')[1])
            if mm==s.tshck[i-1].month:
                yyyy = s.tshck[i-1].year
            elif s.tshck[i-1].month==12:
                yyyy = s.tshck[i-1].year + 1
            else:
                yyyy = s.tini_icme[i-1].year # same year as the ICME start
s.tend_icme += [datetime(yyyy, mm, dd, HH, MM)]
            #------ MC dates
if ll[i][6]=='':
s.tini_mc += [nan]
s.tend_mc += [nan]
else:
                hrs_ini = int(ll[i][6]) # col6 is the MC start
                dummy = ll[i][7].split('(') # col7 is the MC end
ndummy = len(dummy)
if ndummy==1:
hrs_end = int(ll[i][7])
else:
hrs_end = int(ll[i][7].split('(')[0][1:])
s.tini_mc += [ s.tini_icme[i-1] + timedelta(hours=hrs_ini) ]
s.tend_mc += [ s.tend_icme[i-1] + timedelta(hours=hrs_end) ]
            # quality of the ICME boundaries
s.Qicme += [ ll[i][10] ] # quality of ICME boundaries
            # MC flag
s.MCsig += [ ll[i][15] ]
#if ll[i][15]=='2H':
# MCsig += [ 2 ]
#else:
# MCsig += [ int(ll[i][15]) ] # MC flag
#
s.Dst += [ int(ll[i][16]) ] # Dst
#--------------------------------------
s.MCsig = np.array(s.MCsig)
s.Dst = np.array(s.Dst)
s.n_icmes = len(s.tshck)
#
"""
col0 : id
col1 : disturbance time
col2 : ICME start
col3 : ICME end
col4 : Composition start
col5 : Composition end
col6 : MC start
col7 : MC end
col8 : BDE
col9 : BIF
col10: Quality of ICME boundaries (1=best)
        col11: dV --> 'S' indicates that a shock is included
col12: V_ICME
col13: V_max
col14: B
col15: MC flag --> '0', '1', '2', '2H': irregular, B-rotation, MC, or MC of "Huttunen etal05" respectively.
col16: Dst
col17: V_transit
col18: LASCO_CME --> time of associated event, generally the CME observed by SOHO/LASCO.
                  Sometimes it has 'H' for Halo.
"""
def Add2Date(date, days, hrs=0, BadFlag=np.nan):
"""
Mapping to add `days` and `hrs` to a given
`datetime` object.
NOTE: `days` can be fractional.
"""
if type(date) is not datetime:
return BadFlag
return date + timedelta(days=days, hours=hrs)
def utc2date(t):
date_utc = datetime(1970, 1, 1, 0, 0, 0, 0)
date = date_utc + timedelta(days=(t/86400.))
return date
def date2utc(date):
date_utc = datetime(1970, 1, 1, 0, 0, 0, 0)
utcsec = (date - date_utc).total_seconds() # [utc sec]
return utcsec
def ACEepoch2utc(AceEpoch):
return AceEpoch + 820454400.0
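# --- Hedged usage sketch (editor's addition): round trip between datetime
# objects and the 1970-based UTC seconds used by the helpers above. The
# function name is hypothetical and purely illustrative.
def _example_utc_roundtrip():
    from datetime import datetime
    t0 = datetime(2005, 5, 15, 12, 0)
    tsec = date2utc(t0) # seconds since 1970-01-01
    assert utc2date(tsec) == t0 # the inverse conversion recovers the date
    return tsec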
class arg_to_datetime(argparse.Action):
"""
argparse-action to handle command-line arguments of
the form "dd/mm/yyyy" (string type), and converts
it to datetime object.
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(arg_to_datetime, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
#print '%r %r %r' % (namespace, values, option_string)
dd,mm,yyyy = map(int, values.split('/'))
value = datetime(yyyy,mm,dd)
setattr(namespace, self.dest, value)
class arg_to_utcsec(argparse.Action):
"""
argparse-action to handle command-line arguments of
the form "dd/mm/yyyy" (string type), and converts
it to UTC-seconds.
"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(arg_to_utcsec, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
#print '%r %r %r' % (namespace, values, option_string)
dd,mm,yyyy = map(int, values.split('/'))
value = (datetime(yyyy,mm,dd)-datetime(1970,1,1)).total_seconds()
setattr(namespace, self.dest, value)
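# --- Hedged usage sketch (editor's addition): wiring the custom actions
# above into a parser. The option names '--tini'/'--tend' are hypothetical
# and not part of the original code.
def _example_cli_dates():
    import argparse
    parser = argparse.ArgumentParser(description='date-window demo')
    parser.add_argument('--tini', action=arg_to_datetime) # -> datetime object
    parser.add_argument('--tend', action=arg_to_utcsec) # -> UTC seconds (float)
    opt = parser.parse_args(['--tini', '01/01/2005', '--tend', '31/12/2005'])
    return opt.tini, opt.tend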
class My2DArray(object):
"""
wrapper around numpy array with:
- flexible number of rows
- records the maximum nrow requested
NOTE:
    This was tested for 1D and 2D arrays.
"""
def __init__(self, shape, dtype=np.float32):
self.this = np.empty(shape, dtype=dtype)
setattr(self, '__array__', self.this.__array__)
def resize_rows(self, nx_new=None):
""" Increment TWICE the size of axis=0, **without**
losing data.
"""
sh_new = np.copy(self.this.shape)
nx = self.this.shape[0]
if nx_new is None:
sh_new[0] = 2*sh_new[0]
elif nx_new<=nx:
return 0 # nothing to do
else:
sh_new[0] = nx_new
tmp = self.this.copy()
#print "----> tmp: ", tmp.shape
new = np.zeros(sh_new)
new[:nx] = tmp
self.this = new
"""
for some reason (probably due to numpy
implementation), if we don't do this, the:
>>> print self.__array__()
stucks truncated to the original size that was
set in __init__() time.
So we need to tell numpy our new resized shape!
"""
setattr(self, '__array__', self.this.__array__)
def __get__(self, instance, owner):
return self.this
def __getitem__(self, i):
return self.this[i]
def __setitem__(self, i, value):
"""
We can safely use:
>>> ma[n:n+m,:] = [...]
assuming n+m is greater than our size in axis=0.
"""
stop = i
if type(i)==slice:
stop = i.stop
elif type(i)==tuple:
if type(i[0])==slice:
"""
in case:
ma[n:n+m,:] = ...
"""
stop = i[0].stop
else:
stop = i[0]
#--- if requested row exceeds limits, duplicate
# our size in axis=0
if stop>=self.this.shape[0]:
nx_new = self.this.shape[0]
while nx_new<=stop:
nx_new *= 2
self.resize_rows(nx_new)
self.this[i] = value
#--- register the maximum nrow requested.
# NOTE here we are referring to size, and *not* row-index.
self.max_nrow_used = stop+1 # (row-size, not row-index)
def __getattr__(self, attnm):
return getattr(self.this, attnm)
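# --- Hedged usage sketch (editor's addition): assigning past the current
# end of axis=0 grows the buffer automatically, and `max_nrow_used` tracks
# the effective number of rows written. Function name is hypothetical.
def _example_my2darray():
    import numpy as np
    ma = My2DArray((4, 3), dtype=np.float32)
    ma[0, :] = [1., 2., 3.] # fits inside the initial 4 rows
    ma[9, :] = [7., 8., 9.] # triggers resize_rows() up to 16 rows
    return np.array(ma[:ma.max_nrow_used]) # only the first 10 rows are meaningful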
def ACEepoch2date(ace_epoch):
"""
ace_epoch: seconds since 1/1/96
"""
date = datetime(1996,1,1) + timedelta(seconds=ace_epoch)
return date
def date2ACEepoch(date):
ace_o = datetime(1996,1,1)
return (date - ace_o).total_seconds()
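# --- Hedged usage sketch (editor's addition): the ACE epoch counts seconds
# since 1996-01-01, so chaining the helpers maps it onto a datetime and onto
# the 1970-based UTC seconds used elsewhere in this module.
def _example_ace_epoch():
    from datetime import datetime
    date = datetime(2001, 3, 31, 6, 0)
    ace_sec = date2ACEepoch(date) # seconds since 1996-01-01
    assert ACEepoch2date(ace_sec) == date # round trip back to datetime
    assert ACEepoch2utc(ace_sec) == date2utc(date) # consistent 1970-based epoch
    return ace_sec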
#+++++++++++++++++++++++++++++++++
if __name__=='__main__':
print " ---> this is a library!\n"
#EOF
| jimsrc/seatos | shared_lib/shared_funcs.py | Python | mit | 54,269 | 0.01382 |
import os
from django.utils.translation import gettext_lazy as _
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'kk0ai8i0dm-8^%&0&+e-rsmk8#t&)6r*y!wh=xx7l12+6k5mg4'
DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tests.testapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demoproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'demoproject.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
STATIC_URL = '/static/'
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LANGUAGES = [
('de', _('German')),
('en', _('English')),
('fr', _('French')),
]
| raphaelm/django-i18nfield | tests/settings.py | Python | apache-2.0 | 1,801 | 0 |
from callback_event import *
def getOddNumber(k, getEvenNumber):
    return 1 + getEvenNumber(k)
def main():
    k = 1
    i = getOddNumber(k, double)
    print(i)
    i = getOddNumber(k, quadruple)
    print(i)
    i = getOddNumber(k, lambda x: x*8)
    print(i)
if __name__ == "__main__":
    main()
| AlexYu-beta/CppTemplateProgrammingDemo | Demo1_8/demo1_8.py | Python | gpl-3.0 | 278 | 0.061151 |
"""Common profiles are defined here to be easily used within a project using --profile {name}"""
from typing import Any, Dict
black = {
"multi_line_output": 3,
"include_trailing_comma": True,
"force_grid_wrap": 0,
"use_parentheses": True,
"ensure_newline_before_comments": True,
"line_length": 88,
}
django = {
"combine_as_imports": True,
"include_trailing_comma": True,
"multi_line_output": 5,
"line_length": 79,
}
pycharm = {
"multi_line_output": 3,
"force_grid_wrap": 2,
"lines_after_imports": 2,
}
google = {
"force_single_line": True,
"force_sort_within_sections": True,
"lexicographical": True,
"single_line_exclusions": ("typing",),
"order_by_type": False,
"group_by_package": True,
}
open_stack = {
"force_single_line": True,
"force_sort_within_sections": True,
"lexicographical": True,
}
plone = {
"force_alphabetical_sort": True,
"force_single_line": True,
"lines_after_imports": 2,
"line_length": 200,
}
attrs = {
"atomic": True,
"force_grid_wrap": 0,
"include_trailing_comma": True,
"lines_after_imports": 2,
"lines_between_types": 1,
"multi_line_output": 3,
"use_parentheses": True,
}
hug = {
"multi_line_output": 3,
"include_trailing_comma": True,
"force_grid_wrap": 0,
"use_parentheses": True,
"line_length": 100,
}
profiles: Dict[str, Dict[str, Any]] = {
"black": black,
"django": django,
"pycharm": pycharm,
"google": google,
"open_stack": open_stack,
"plone": plone,
"attrs": attrs,
"hug": hug,
}
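# --- Hedged usage sketch (editor's addition, kept as comments to avoid a
# circular import of the package from within its own module): a profile is
# normally selected by name on the command line or via the Python API.
#
#   $ isort --profile black myproject/
#
#   import isort
#   sorted_src = isort.code("import b\nimport a\n", profile="black")
#   # or, equivalently, unpack the raw settings defined above:
#   sorted_src = isort.code("import b\nimport a\n", **profiles["black"])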
| TeamSPoon/logicmoo_workspace | packs_web/butterfly/lib/python3.7/site-packages/isort/profiles.py | Python | mit | 1,601 | 0.000625 |
# coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for collection domain objects and methods defined on them."""
from core.domain import collection_domain
from core.domain import collection_services
from core.tests import test_utils
import feconf
import utils
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with the YAML generation
# methods tested below.
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = ("""category: A category
language_code: en
nodes:
- acquired_skills:
- Skill0a
- Skill0b
exploration_id: an_exploration_id
prerequisite_skills: []
objective: An objective
schema_version: %d
tags: []
title: A title
""") % (feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
class CollectionDomainUnitTests(test_utils.GenericTestBase):
"""Test the collection domain object."""
COLLECTION_ID = 'collection_id'
EXPLORATION_ID = 'exp_id_0'
def setUp(self):
super(CollectionDomainUnitTests, self).setUp()
self.save_new_valid_collection(
self.COLLECTION_ID, 'user@example.com', title='Title',
category='Category', objective='Objective',
exploration_id=self.EXPLORATION_ID)
self.collection = collection_services.get_collection_by_id(
self.COLLECTION_ID)
def _assert_validation_error(self, expected_error_substring):
"""Checks that the collection passes strict validation."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
self.collection.validate()
def test_initial_validation(self):
"""Test validating a new, valid collection."""
self.collection.validate()
def test_title_validation(self):
self.collection.title = 0
self._assert_validation_error('Expected title to be a string')
def test_category_validation(self):
self.collection.category = 0
self._assert_validation_error('Expected category to be a string')
def test_objective_validation(self):
self.collection.objective = ''
self._assert_validation_error('objective must be specified')
self.collection.objective = 0
self._assert_validation_error('Expected objective to be a string')
def test_language_code_validation(self):
self.collection.language_code = ''
self._assert_validation_error('language must be specified')
self.collection.language_code = 0
self._assert_validation_error('Expected language code to be a string')
self.collection.language_code = 'xz'
self._assert_validation_error('Invalid language code')
def test_tags_validation(self):
self.collection.tags = 'abc'
self._assert_validation_error('Expected tags to be a list')
self.collection.tags = [2, 3]
self._assert_validation_error('Expected each tag to be a string')
self.collection.tags = ['', 'tag']
self._assert_validation_error('Tags should be non-empty')
self.collection.tags = ['234']
self._assert_validation_error(
'Tags should only contain lowercase letters and spaces')
self.collection.tags = [' abc']
self._assert_validation_error(
'Tags should not start or end with whitespace')
self.collection.tags = ['abc def']
self._assert_validation_error(
'Adjacent whitespace in tags should be collapsed')
self.collection.tags = ['abc', 'abc']
self._assert_validation_error(
'Expected tags to be unique, but found duplicates')
def test_schema_version_validation(self):
self.collection.schema_version = 'some_schema_version'
self._assert_validation_error('Expected schema version to be an int')
self.collection.schema_version = 100
self._assert_validation_error(
'Expected schema version to be %s' %
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
def test_nodes_validation(self):
self.collection.nodes = {}
self._assert_validation_error('Expected nodes to be a list')
self.collection.nodes = [
collection_domain.CollectionNode.from_dict({
'exploration_id': '0',
'prerequisite_skills': [],
'acquired_skills': ['skill0a']
}),
collection_domain.CollectionNode.from_dict({
'exploration_id': '0',
'prerequisite_skills': ['skill0a'],
'acquired_skills': ['skill0b']
})
]
self._assert_validation_error(
'There are explorations referenced in the collection more than '
'once.')
def test_initial_explorations_validation(self):
# Having no collection nodes is fine for non-strict validation.
self.collection.nodes = []
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'Expected to have at least 1 exploration in the collection.')
# If the collection has exactly one exploration and that exploration
# has prerequisite skills, then the collection should fail validation.
self.collection.add_node('exp_id_1')
self.save_new_valid_exploration(
'exp_id_1', 'user@example.com', end_state_name='End')
collection_node1 = self.collection.get_node('exp_id_1')
collection_node1.update_prerequisite_skills(['skill1a'])
self._assert_validation_error(
'Expected to have at least 1 exploration with no prerequisite '
'skills.')
def test_metadata_validation(self):
self.collection.title = ''
self.collection.objective = ''
self.collection.category = ''
self.collection.nodes = []
self.collection.add_node('exp_id_1')
# Having no title is fine for non-strict validation.
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'A title must be specified for the collection.')
self.collection.title = 'A title'
# Having no objective is fine for non-strict validation.
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'An objective must be specified for the collection.')
self.collection.objective = 'An objective'
# Having no category is fine for non-strict validation.
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'A category must be specified for the collection.')
self.collection.category = 'A category'
# Now the collection passes both strict and non-strict validation.
self.collection.validate(strict=False)
self.collection.validate(strict=True)
def test_collection_completability_validation(self):
# Add another exploration, but make it impossible to reach exp_id_1.
self.collection.add_node('exp_id_1')
collection_node1 = self.collection.get_node('exp_id_1')
collection_node1.update_prerequisite_skills(['skill0a'])
self._assert_validation_error(
'Some explorations are unreachable from the initial explorations')
# Connecting the two explorations should lead to clean validation.
collection_node0 = self.collection.get_node('exp_id_0')
collection_node0.update_acquired_skills(['skill0a'])
self.collection.validate()
def test_collection_node_exploration_id_validation(self):
# Validate CollectionNode's exploration_id.
collection_node0 = self.collection.get_node('exp_id_0')
collection_node0.exploration_id = 2
self._assert_validation_error('Expected exploration ID to be a string')
def test_collection_node_prerequisite_skills_validation(self):
collection_node0 = self.collection.get_node('exp_id_0')
collection_node0.prerequisite_skills = {}
self._assert_validation_error(
'Expected prerequisite_skills to be a list')
collection_node0.prerequisite_skills = ['skill0a', 'skill0a']
self._assert_validation_error(
'The prerequisite_skills list has duplicate entries')
collection_node0.prerequisite_skills = ['skill0a', 2]
self._assert_validation_error(
'Expected all prerequisite skills to be strings')
def test_collection_node_acquired_skills_validation(self):
collection_node0 = self.collection.get_node('exp_id_0')
collection_node0.acquired_skills = {}
self._assert_validation_error('Expected acquired_skills to be a list')
collection_node0.acquired_skills = ['skill0a', 'skill0a']
self._assert_validation_error(
'The acquired_skills list has duplicate entries')
collection_node0.acquired_skills = ['skill0a', 2]
self._assert_validation_error(
'Expected all acquired skills to be strings')
def test_collection_node_skills_validation(self):
collection_node0 = self.collection.get_node('exp_id_0')
# Ensure prerequisite_skills and acquired_skills do not overlap.
collection_node0.prerequisite_skills = [
'skill0a', 'skill0b', 'skill0c']
collection_node0.acquired_skills = [
'skill0z', 'skill0b', 'skill0c', 'skill0d']
self._assert_validation_error(
'There are some skills which are both required for exploration '
'exp_id_0 and acquired after playing it: [skill0b, skill0c]')
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = collection_domain.Collection.create_default_collection('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = collection_domain.Collection.create_default_collection('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = collection_domain.Collection.create_default_collection(
'abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_collection_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
collection.
"""
self.save_new_valid_exploration(
'0', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.create_default_collection(
'0', title='title', category='category', objective='objective')
collection_dict = collection.to_dict()
collection_from_dict = collection_domain.Collection.from_dict(
collection_dict)
self.assertEqual(collection_from_dict.to_dict(), collection_dict)
def test_add_delete_node(self):
"""Test that add_node and delete_node fail in the correct situations.
"""
collection = collection_domain.Collection.create_default_collection(
'0')
self.assertEqual(len(collection.nodes), 0)
collection.add_node('test_exp')
self.assertEqual(len(collection.nodes), 1)
with self.assertRaisesRegexp(
ValueError,
'Exploration is already part of this collection: test_exp'
):
collection.add_node('test_exp')
collection.add_node('another_exp')
self.assertEqual(len(collection.nodes), 2)
collection.delete_node('another_exp')
self.assertEqual(len(collection.nodes), 1)
with self.assertRaisesRegexp(
ValueError,
'Exploration is not part of this collection: another_exp'
):
collection.delete_node('another_exp')
collection.delete_node('test_exp')
self.assertEqual(len(collection.nodes), 0)
def test_skills_property(self):
collection = collection_domain.Collection.create_default_collection(
'0')
self.assertEqual(collection.skills, [])
collection.add_node('exp_id_0')
collection.add_node('exp_id_1')
collection.get_node('exp_id_0').update_acquired_skills(
['skill0a'])
collection.get_node('exp_id_1').update_prerequisite_skills(
['skill0a'])
collection.get_node('exp_id_1').update_acquired_skills(
['skill1b', 'skill1c'])
self.assertEqual(collection.skills, ['skill0a', 'skill1b', 'skill1c'])
# Skills should be unique, even if they are duplicated across multiple
# acquired and prerequisite skill lists.
collection.add_node('exp_id_2')
collection.get_node('exp_id_2').update_acquired_skills(
['skill0a', 'skill1c'])
self.assertEqual(collection.skills, ['skill0a', 'skill1b', 'skill1c'])
class ExplorationGraphUnitTests(test_utils.GenericTestBase):
"""Test the skill graph structure within a collection."""
def test_initial_explorations(self):
"""Any exploration without prerequisites should be an initial
exploration.
"""
collection = collection_domain.Collection.create_default_collection(
'collection_id')
# If there are no explorations in the collection, there can be no
# initial explorations.
self.assertEqual(collection.nodes, [])
self.assertEqual(collection.init_exploration_ids, [])
# A freshly added exploration will be an initial one.
collection.add_node('exp_id_0')
self.assertEqual(collection.init_exploration_ids, ['exp_id_0'])
# Having prerequisites will make an exploration no longer initial.
collection.add_node('exp_id_1')
self.assertEqual(len(collection.nodes), 2)
collection.get_node('exp_id_1').update_prerequisite_skills(
['skill0a'])
self.assertEqual(collection.init_exploration_ids, ['exp_id_0'])
# There may be multiple initial explorations.
collection.add_node('exp_id_2')
self.assertEqual(
collection.init_exploration_ids, ['exp_id_0', 'exp_id_2'])
def test_next_explorations(self):
"""Explorations should be suggested based on prerequisite and
acquired skills, as well as which explorations have already been played
in the collection.
"""
collection = collection_domain.Collection.create_default_collection(
'collection_id')
# There should be no next explorations for an empty collection.
self.assertEqual(collection.get_next_exploration_ids([]), [])
# If a new exploration is added, the next exploration IDs should be the
# same as the initial explorations.
collection.add_node('exp_id_1')
self.assertEqual(collection.get_next_exploration_ids([]), ['exp_id_1'])
self.assertEqual(
collection.init_exploration_ids,
collection.get_next_exploration_ids([]))
# Completing the only exploration of the collection should lead to no
# available explorations thereafter. This test is done without any
# prerequisite or acquired skill lists.
self.assertEqual(collection.get_next_exploration_ids(['exp_id_1']), [])
# If the only exploration in the collection has a prerequisite skill,
# there are no explorations left to do.
collection_node1 = collection.get_node('exp_id_1')
collection_node1.update_prerequisite_skills(['skill0a'])
self.assertEqual(collection.get_next_exploration_ids([]), [])
# If another exploration has been added with a prerequisite that is the
# same as an acquired skill of another exploration and the exploration
# giving that skill is completed, then the first exploration should be
# the next one to complete.
collection.add_node('exp_id_2')
collection_node2 = collection.get_node('exp_id_2')
collection_node1.update_acquired_skills(['skill1b'])
collection_node2.update_prerequisite_skills(['skill1b'])
self.assertEqual(collection.get_next_exploration_ids([]), [])
self.assertEqual(collection.get_next_exploration_ids(
['exp_id_1']), ['exp_id_2'])
# If another exploration is added that has no prerequisites, the
# learner will be able to get to exp_id_1. exp_id_2 should not be
# suggested to be completed unless exp_id_1 is thereafter completed.
collection.add_node('exp_id_0')
collection_node0 = collection.get_node('exp_id_0')
collection_node0.update_acquired_skills(['skill0a'])
self.assertEqual(
collection.get_next_exploration_ids([]), ['exp_id_0'])
self.assertEqual(
collection.get_next_exploration_ids(['exp_id_0']), ['exp_id_1'])
self.assertEqual(
collection.get_next_exploration_ids(['exp_id_0', 'exp_id_1']),
['exp_id_2'])
# There may be multiple branches of initial suggested explorations.
collection.add_node('exp_id_3')
self.assertEqual(
collection.get_next_exploration_ids([]), ['exp_id_0', 'exp_id_3'])
# There may also be multiple suggested explorations at other points,
# depending on which explorations the learner has completed.
collection_node3 = collection.get_node('exp_id_3')
collection_node3.update_prerequisite_skills(['skill0c'])
collection_node0.update_acquired_skills(['skill0a', 'skill0c'])
self.assertEqual(
collection.get_next_exploration_ids([]), ['exp_id_0'])
self.assertEqual(
collection.get_next_exploration_ids(['exp_id_0']),
['exp_id_1', 'exp_id_3'])
self.assertEqual(
collection.get_next_exploration_ids(['exp_id_0', 'exp_id_3']),
['exp_id_1'])
self.assertEqual(
collection.get_next_exploration_ids(['exp_id_0', 'exp_id_1']),
['exp_id_2', 'exp_id_3'])
self.assertEqual(
collection.get_next_exploration_ids(
['exp_id_0', 'exp_id_1', 'exp_id_2']), ['exp_id_3'])
# If all explorations have been completed, none should be suggested.
self.assertEqual(
collection.get_next_exploration_ids(
['exp_id_0', 'exp_id_1', 'exp_id_2', 'exp_id_3']), [])
def test_next_explorations_in_sequence(self):
collection = collection_domain.Collection.create_default_collection(
'collection_id')
exploration_id = 'exp_id_0'
collection.add_node(exploration_id)
# Completing the only exploration of the collection should lead to no
# available explorations thereafter.
self.assertEqual(
collection.get_next_exploration_ids_in_sequence(exploration_id), [])
# If the current exploration has no acquired skills, a list of all
# explorations with no prerequisite skills should be returned.
collection.add_node('exp_id_1')
collection.add_node('exp_id_2')
self.assertEqual(
collection.get_next_exploration_ids_in_sequence(exploration_id),
['exp_id_1', 'exp_id_2'])
# If only one exploration in the collection has a prerequisite skill
# that is included in the user's learned skills, only that exploration
# should be returned.
collection_node0 = collection.get_node('exp_id_0')
collection_node1 = collection.get_node('exp_id_1')
collection_node0.update_acquired_skills(['skill1a'])
collection_node1.update_prerequisite_skills(['skill1a'])
self.assertEqual(
collection.get_next_exploration_ids_in_sequence(exploration_id),
['exp_id_1'])
# Given a chain of explorations in a collections where each
# exploration's acquired skills are the following exploration's
# prerequisite skills, each exploration should return the following
# exploration as a recommendation. The last exploration should
# return an empty list.
collection.add_node('exp_id_3')
collection_node2 = collection.get_node('exp_id_2')
collection_node3 = collection.get_node('exp_id_3')
collection_node1.update_acquired_skills(['skill2a'])
collection_node2.update_acquired_skills(['skill3a'])
collection_node0.update_prerequisite_skills([])
collection_node2.update_prerequisite_skills(['skill2a'])
collection_node3.update_prerequisite_skills(['skill3a'])
self.assertEqual(
collection.get_next_exploration_ids_in_sequence('exp_id_0'),
['exp_id_1'])
self.assertEqual(
collection.get_next_exploration_ids_in_sequence('exp_id_1'),
['exp_id_2'])
self.assertEqual(
collection.get_next_exploration_ids_in_sequence('exp_id_2'),
['exp_id_3'])
self.assertEqual(
collection.get_next_exploration_ids_in_sequence('exp_id_3'),
[])
def test_next_explorations_with_invalid_exploration_ids(self):
collection = collection_domain.Collection.create_default_collection(
'collection_id')
collection.add_node('exp_id_1')
# There should be one suggested exploration to complete by default.
self.assertEqual(collection.get_next_exploration_ids([]), ['exp_id_1'])
# If an invalid exploration ID is passed to get_next_exploration_ids(),
# it should be ignored. This tests the situation where an exploration
# is deleted from a collection after being completed by a user.
self.assertEqual(
collection.get_next_exploration_ids(['fake_exp_id']), ['exp_id_1'])
class YamlCreationUnitTests(test_utils.GenericTestBase):
"""Test creation of collections from YAML files."""
COLLECTION_ID = 'a_collection_id'
EXPLORATION_ID = 'an_exploration_id'
def test_yaml_import_and_export(self):
"""Test the from_yaml() and to_yaml() methods."""
self.save_new_valid_exploration(
self.EXPLORATION_ID, 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.create_default_collection(
self.COLLECTION_ID, title='A title', category='A category',
objective='An objective')
collection.add_node(self.EXPLORATION_ID)
self.assertEqual(len(collection.nodes), 1)
collection_node = collection.get_node(self.EXPLORATION_ID)
collection_node.update_acquired_skills(['Skill0a', 'Skill0b'])
collection.validate()
yaml_content = collection.to_yaml()
self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)
collection2 = collection_domain.Collection.from_yaml(
'collection2', yaml_content)
self.assertEqual(len(collection2.nodes), 1)
yaml_content_2 = collection2.to_yaml()
self.assertEqual(yaml_content_2, yaml_content)
# Should not be able to create a collection from no YAML content.
with self.assertRaises(Exception):
collection_domain.Collection.from_yaml('collection3', None)
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = ("""category: A category
nodes:
- acquired_skills:
- Skill1
- Skill2
exploration_id: Exp1
prerequisite_skills: []
objective: ''
schema_version: 1
title: A title
""")
YAML_CONTENT_V2 = ("""category: A category
language_code: en
nodes:
- acquired_skills:
- Skill1
- Skill2
exploration_id: Exp1
prerequisite_skills: []
objective: ''
schema_version: 2
tags: []
title: A title
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V1
_LATEST_YAML_CONTENT = YAML_CONTENT_V2
def test_load_from_v1(self):
"""Test direct loading from a v1 yaml file."""
self.save_new_valid_exploration(
'Exp1', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.from_yaml(
'cid', self.YAML_CONTENT_V1)
self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
"""Test direct loading from a v2 yaml file."""
self.save_new_valid_exploration(
'Exp1', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.from_yaml(
'cid', self.YAML_CONTENT_V2)
self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)
| amgowano/oppia | core/domain/collection_domain_test.py | Python | apache-2.0 | 25,252 | 0.000158 |
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
import ezdxf
from ezdxf.entities.dxfgfx import add_entity, replace_entity
from ezdxf.entities import Point
@pytest.fixture(scope="module")
def msp():
return ezdxf.new().modelspace()
@pytest.fixture(scope="module")
def db(msp):
return msp.entitydb
def test_add_entity(msp, db):
point = msp.add_point((0, 0))
new_point = Point.new(dxfattribs={"location": (3, 3)})
add_entity(new_point, msp)
assert point in msp
assert point.dxf.handle in db
assert new_point in msp
assert new_point.dxf.handle in db
assert point.dxf.handle != new_point.dxf.handle
def test_replace_entity(msp, db):
point = msp.add_point((0, 0))
handle = point.dxf.handle
new_point = Point.new(dxfattribs={"location": (3, 3)})
replace_entity(point, new_point, msp)
assert point.is_alive is False
assert new_point in msp
assert new_point.dxf.handle in db
assert new_point.dxf.handle == handle
def test_replace_entity_without_layout(msp, db):
point = Point.new(dxfattribs={"location": (3, 3)})
db.add(point)
handle = point.dxf.handle
assert point not in msp
assert point.dxf.handle in db
new_point = Point.new(dxfattribs={"location": (3, 3)})
replace_entity(point, new_point, msp)
assert point.is_alive is False
assert new_point not in msp
assert new_point.dxf.handle in db
assert new_point.dxf.handle == handle
def test_convert_circle_to_ellipse(msp, db):
circle = msp.add_circle(center=(3, 3), radius=2)
ellipse = circle.to_ellipse(replace=False)
assert circle.dxf.handle in db
assert ellipse.dxftype() == "ELLIPSE"
assert ellipse.dxf.handle in db
assert circle in msp
assert ellipse in msp
def test_replace_circle_by_ellipse(msp, db):
circle = msp.add_circle(center=(3, 3), radius=2)
circle_handle = circle.dxf.handle
ellipse = circle.to_ellipse(replace=True)
assert circle.is_alive is False
assert ellipse.dxftype() == "ELLIPSE"
assert ellipse.dxf.handle in db
assert ellipse.dxf.handle == circle_handle
assert ellipse in msp
def test_convert_circle_to_spline(msp, db):
circle = msp.add_circle(center=(3, 3), radius=2)
spline = circle.to_spline(replace=False)
assert circle.dxf.handle in db
assert spline.dxftype() == "SPLINE"
assert spline.dxf.handle in db
assert circle in msp
assert spline in msp
def test_replace_circle_by_spline(msp, db):
circle = msp.add_circle(center=(3, 3), radius=2)
circle_handle = circle.dxf.handle
spline = circle.to_spline(replace=True)
assert circle.is_alive is False
assert spline.dxftype() == "SPLINE"
assert spline.dxf.handle in db
assert spline.dxf.handle == circle_handle
assert spline in msp
def test_convert_ellipse_to_spline(msp, db):
ellipse = msp.add_ellipse(center=(3, 3), major_axis=(2, 0), ratio=0.5)
spline = ellipse.to_spline(replace=False)
assert ellipse.dxf.handle in db
assert spline.dxftype() == "SPLINE"
assert spline.dxf.handle in db
assert ellipse in msp
assert spline in msp
def test_replace_ellipse_by_spline(msp, db):
ellipse = msp.add_ellipse(center=(3, 3), major_axis=(2, 0), ratio=0.5)
ellipse_handle = ellipse.dxf.handle
spline = ellipse.to_spline(replace=True)
assert ellipse.is_alive is False
assert spline.dxftype() == "SPLINE"
assert spline.dxf.handle in db
assert spline.dxf.handle == ellipse_handle
assert spline in msp
if __name__ == "__main__":
pytest.main([__file__])
| mozman/ezdxf | tests/test_02_dxf_graphics/test_243_replace_entity.py | Python | mit | 3,603 | 0 |
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Profile(models.Model):
"""
Extension for the user class
"""
user = models.OneToOneField(User)
full_name = models.CharField(verbose_name=_("Full name"), max_length=128, null=True)
change_password = models.BooleanField(default=False, help_text=_("User must change password on next login"))
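# --- Hedged usage sketch (editor's addition): attaching a profile to a user
# with standard Django ORM calls; the user names below are hypothetical.
#
#   user = User.objects.create_user('jdoe', 'jdoe@example.com', 's3cret')
#   Profile.objects.create(user=user, full_name='Jane Doe',
#                          change_password=True) # force a reset on next login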
| ewheeler/tracpro | tracpro/profiles/models.py | Python | bsd-3-clause | 509 | 0.003929 |
# $Id$
# installer for pmon
# Copyright 2014 Matthew Wall
from setup import ExtensionInstaller
def loader():
return ProcessMonitorInstaller()
class ProcessMonitorInstaller(ExtensionInstaller):
def __init__(self):
super(ProcessMonitorInstaller, self).__init__(
version="0.2",
name='pmon',
description='Collect and display process memory usage.',
author="Matthew Wall",
author_email="mwall@users.sourceforge.net",
process_services='user.pmon.ProcessMonitor',
config={
'ProcessMonitor': {
'data_binding': 'pmon_binding',
'process': 'weewxd'},
'DataBindings': {
'pmon_binding': {
'database': 'pmon_sqlite',
'table_name': 'archive',
'manager': 'weewx.manager.DaySummaryManager',
'schema': 'user.pmon.schema'}},
'Databases': {
'pmon_sqlite': {
'database_name': 'pmon.sdb',
'driver': 'weedb.sqlite'}},
'StdReport': {
'pmon': {
'skin': 'pmon',
'HTML_ROOT': 'pmon'}}},
files=[('bin/user', ['bin/user/pmon.py']),
('skins/pmon', ['skins/pmon/skin.conf',
'skins/pmon/index.html.tmpl'])]
)
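# --- Hedged usage note (editor's addition): with the stock weewx extension
# tooling, a package built from this tree is typically installed via
# (the archive path is hypothetical):
#   wee_extension --install /path/to/pmon.tar.gz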
| sai9/weewx-gitsvn | extensions/pmon/install.py | Python | gpl-3.0 | 1,514 | 0.001321 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# murano documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
extensions = [
'os_api_ref',
'openstackdocstheme'
]
html_theme = 'openstackdocs'
html_theme_options = {
"sidebar_mode": "toc",
}
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/murano'
openstackdocs_bug_project = 'murano'
openstackdocs_bug_tag = 'api-ref'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2016-present, OpenStack Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'muranodoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Murano.tex', u'OpenStack Application Catalog API Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
| openstack/murano | api-ref/source/conf.py | Python | apache-2.0 | 6,670 | 0 |
"""Tools for working with virtualenv environments"""
import os
import sys
import subprocess
from pip.exceptions import BadCommand
from pip.log import logger
def restart_in_venv(venv, base, site_packages, args):
"""
Restart this script using the interpreter in the given virtual environment
"""
if base and not os.path.isabs(venv) and not venv.startswith('~'):
base = os.path.expanduser(base)
# ensure we have an abs basepath at this point:
# a relative one makes no sense (or does it?)
if os.path.isabs(base):
venv = os.path.join(base, venv)
if venv.startswith('~'):
venv = os.path.expanduser(venv)
if not os.path.exists(venv):
try:
import virtualenv
except ImportError:
print 'The virtual environment does not exist: %s' % venv
print 'and virtualenv is not installed, so a new environment cannot be created'
sys.exit(3)
print 'Creating new virtualenv environment in %s' % venv
virtualenv.logger = logger
logger.indent += 2
virtualenv.create_environment(venv, site_packages=site_packages)
if sys.platform == 'win32':
python = os.path.join(venv, 'Scripts', 'python.exe')
# check for bin directory which is used in buildouts
if not os.path.exists(python):
python = os.path.join(venv, 'bin', 'python.exe')
else:
python = os.path.join(venv, 'bin', 'python')
if not os.path.exists(python):
python = venv
if not os.path.exists(python):
raise BadCommand('Cannot find virtual environment interpreter at %s' % python)
base = os.path.dirname(os.path.dirname(python))
file = os.path.join(os.path.dirname(__file__), 'runner.py')
if file.endswith('.pyc'):
file = file[:-1]
proc = subprocess.Popen(
[python, file] + args + [base, '___VENV_RESTART___'])
proc.wait()
sys.exit(proc.returncode)
| BadDNA/anolis | web/env/lib/python2.6/site-packages/pip-0.7.2-py2.6.egg/pip/venv.py | Python | bsd-3-clause | 1,972 | 0.001521 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
# Configure the baseURL
baseUrl = "https://www.expedia.es"
# Create a WebDriver instance and maximize the window
driver = webdriver.Firefox()
driver.maximize_window()
# Navigate to the URL and set a 10-second implicit wait
driver.get(baseUrl)
driver.implicitly_wait(10)
# Find and click on element "Flights"
# Find departure textbox and type "Barcelona"
# Find departure textbox and type "Madrid"
# Find departure time and type "23/11/2017"
# Close Calendar
# Find the "Find" button and click on
# Quit driver
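# --- A possible solution sketch for the steps above (illustrative only) ---
# The locators below (link text and element ids) are assumptions about
# Expedia's markup at the time of the exercise and will likely need updating
# against the live page.
driver.find_element(By.LINK_TEXT, "Vuelos").click()
origin = driver.find_element(By.ID, "flight-origin")
origin.clear()
origin.send_keys("Barcelona")
destination = driver.find_element(By.ID, "flight-destination")
destination.clear()
destination.send_keys("Madrid")
departure = driver.find_element(By.ID, "flight-departing")
departure.clear()
departure.send_keys("23/11/2017")
# Close the date picker by clicking a neutral part of the page
driver.find_element(By.TAG_NAME, "body").click()
driver.find_element(By.ID, "search-button").click()
driver.quit()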
| twiindan/selenium_lessons | 04_Selenium/exercices/expedia.py | Python | apache-2.0 | 654 | 0.007645 |
# Copyright (c) 2015 Aaron Kehrer
# Licensed under the terms of the MIT License
# (see fiddle/__init__.py for details)
# Import standard library modules
import os
# Import additional modules
import chardet
from PyQt4 import QtCore, QtGui
from fiddle.controllers.Editors import *
from fiddle.config import FILE_TYPES, PLATFORM
# An iterator to update as the user creates new files
new_file_iter = 1
class FiddleTabWidget(QtGui.QTabWidget):
def __init__(self, parent=None):
super(FiddleTabWidget, self).__init__(parent)
self.parent = parent
self.setAcceptDrops(True)
self.setTabsClosable(True)
self.setMovable(True)
self.setElideMode(QtCore.Qt.ElideRight)
self.setMinimumSize(QtCore.QSize(800, 300))
self.setDocumentMode(False)
self.setAutoFillBackground(False)
self.setTabShape(QtGui.QTabWidget.Rounded)
self.setCurrentIndex(-1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(5)
sizePolicy.setVerticalStretch(3)
sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())
self.setSizePolicy(sizePolicy)
def dragEnterEvent(self, e):
"""
For drag-and-drop we need to accept drag enter events
"""
e.accept()
def dragMoveEvent(self, e):
"""
For drag-and-drop we need to accept drag move events
http://qt-project.org/forums/viewthread/3093
"""
e.accept()
def dropEvent(self, e):
"""
Handle the drop
http://qt-project.org/wiki/Drag_and_Drop_of_files
"""
# dropped files are file:// urls
if e.mimeData().hasUrls():
self._insert_list_of_files(e.mimeData().urls())
def _insert_list_of_files(self, file_list):
for filepath in file_list:
if filepath.isLocalFile():
if 'win32' in PLATFORM:
# mimedata path includes a leading slash that confuses copyfile on windows
# http://stackoverflow.com/questions/2144748/is-it-safe-to-use-sys-platform-win32-check-on-64-bit-python
fpath = filepath.path()[1:]
else:
# not windows
fpath = filepath.path()
self.parent.open_filepath(fpath)
class FiddleTabFile(QtGui.QWidget):
editor_changed = QtCore.pyqtSignal()
cursor_changed = QtCore.pyqtSignal(int, int)
find_wrapped = QtCore.pyqtSignal()
def __init__(self, parent=None, filepath=None):
super(FiddleTabFile, self).__init__(parent)
self._filepath = None
self._saved = True
self.basepath = None
self.filename = None
self.extension = None
self.encoding = 'utf-8' # Default to UTF-8 encoding
# Set the layout and insert the editor
self.editor = None
self.setLayout(QtGui.QVBoxLayout())
self.layout().setMargin(0)
self.layout().setSpacing(0)
# Find/Replace
self.find_expr = ''
self.find_forward = False
self.found_first = False
self.first_found = (0, 0) # line, col
self.filepath = filepath
self.watcher = None
@property
def filepath(self):
return self._filepath
@filepath.setter
def filepath(self, path):
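        # Setting a real path loads the file, sniffs its encoding with
        # chardet, and swaps in an editor widget matching the extension;
        # None creates a new, unsaved Python buffer named new_<n>.py.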
global new_file_iter
if path is not None:
self._filepath = path
self.basepath, self.filename = os.path.split(path)
_, ext = os.path.splitext(path)
self.extension = ext.lower()
with open(path, 'rb') as fp:
data = fp.read()
enc = chardet.detect(data)['encoding']
self.encoding = enc if enc is not None else 'utf-8'
if '.htm' in self.extension:
self.insert_editor(HTMLEditor(parent=self))
elif self.extension == '.js':
self.insert_editor(JavascriptEditor(parent=self))
elif self.extension == '.css':
self.insert_editor(CSSEditor(parent=self))
elif self.extension == '.py':
self.insert_editor(PythonEditor(parent=self))
else:
self.insert_editor(BaseEditor(parent=self))
try:
self.editor.setText(data.decode(self.encoding))
except TypeError:
self.editor.setText('')
self._saved = True
else:
self.basepath = None
self.filename = 'new_{}.py'.format(new_file_iter)
self.extension = '.py'
self._filepath = os.path.join(os.path.expanduser('~'), self.filename)
self.insert_editor(PythonEditor(parent=self))
new_file_iter += 1
self._saved = False
@property
def saved(self):
return self._saved
@saved.setter
def saved(self, state):
self._saved = state
self.editor_changed.emit()
def insert_editor(self, editor):
if self.editor is not None and self.layout().indexOf(self.editor) >= 0:
self.layout().removeWidget(self.editor)
self.editor.deleteLater()
self.editor = None
self.editor = editor
self.editor.textChanged.connect(self._set_text_changed)
self.editor.cursorPositionChanged.connect(self._cursor_position_changed)
self.layout().addWidget(self.editor)
def save(self):
if self.basepath is None:
self.save_as()
else:
self._write_file(self.filepath)
self.saved = True
def save_as(self):
path = self.basepath or os.path.join(os.path.expanduser('~'), self.filename)
filepath = QtGui.QFileDialog.getSaveFileName(None, None, path, ';;'.join(FILE_TYPES[1:]))
        if filepath != '':
self._write_file(filepath)
self.filepath = filepath
self.saved = True
def find_text(self, expr, re, cs, wo, wrap,
in_select=False, forward=True, line=-1, index=-1, show=True, posix=False):
"""
Find the string expr and return true if expr was found, otherwise returns false.
If expr is found it becomes the current selection. This is a convenience function around the find features
built in to QsciScintilla.
http://pyqt.sourceforge.net/Docs/QScintilla2/classQsciScintilla.html
        :param expr: string or regular expression to search for
        :param re: if True, treat expr as a regular expression
        :param cs: if True, the search is case sensitive
        :param wo: if True, match whole words only
        :param wrap: if True, the search wraps around the end of the text
        :param in_select: if True, search only within the current selection
        :param forward: if True, search forward; otherwise search backward
        :param line: line number to start from (-1 starts at the cursor)
        :param index: character index to start from (-1 starts at the cursor)
        :param show: if True, ensure the found text is made visible (unfolded)
        :param posix: if True, treat bare ( and ) in regular expressions the
            POSIX way, as tagged sections
        :return: True if expr was found, otherwise False
"""
# Check for new expression
if expr != self.find_expr:
self.find_expr = expr
self.found_first = False
# Check for change in direction
if forward != self.find_forward:
if self.editor.hasSelectedText():
line, idx, _, _ = self.editor.getSelection()
self.editor.setCursorPosition(line, idx)
self.find_forward = forward
self.found_first = False
if self.found_first:
f = self.editor.findNext()
c = self.editor.getCursorPosition()
if c[0] <= self.first_found[0] and forward:
self.find_wrapped.emit()
elif c[0] >= self.first_found[0] and not forward:
self.find_wrapped.emit()
return f
elif in_select:
res = self.editor.findFirstInSelection(expr, re, cs, wo, forward, show, posix)
if res:
self.found_first = True
self.first_found = self.editor.getCursorPosition()
return True
else:
self.found_first = False
return False
else:
res = self.editor.findFirst(expr, re, cs, wo, wrap, forward, line, index, show, posix)
if res:
self.found_first = True
self.first_found = self.editor.getCursorPosition()
return True
else:
self.found_first = False
return False
def replace_text(self, old_expr, new_text, re, cs, wo, wrap,
in_select=False, forward=True, line=-1, index=-1, show=True, posix=False):
if self.found_first:
# Replace the text and move to the next occurrence
self.editor.replace(new_text)
self.editor.findNext()
else:
# Find the first occurrence
self.find_text(old_expr, re, cs, wo, wrap, in_select, forward, line, index, show, posix)
def replace_all_text(self, old_expr, new_text, re, cs, wo, in_select=False):
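        # Replace every occurrence of old_expr (optionally only within the
        # current selection) and return the number of replacements made.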
i = 0
if in_select:
if self.editor.findFirstInSelection(old_expr, re, cs, wo, False):
self.editor.replace(new_text)
i = 1
while self.editor.findNext():
self.editor.replace(new_text)
i += 1
else:
# Start from the beginning of the document and work to the end
if self.editor.findFirst(old_expr, re, cs, wo, False, True, 0, 0):
self.editor.replace(new_text)
i = 1
while self.editor.findNext():
self.editor.replace(new_text)
i += 1
return i
def _write_file(self, filepath):
with open(filepath, 'wb') as fp:
fp.write(bytes(self.editor.text(), self.encoding))
def _set_text_changed(self):
self.editor.autoCompleteFromAll()
self.saved = False
def _cursor_position_changed(self, line, idx):
self.cursor_changed.emit(line, idx)
| akehrer/fiddle | fiddle/controllers/FiddleTabWidget.py | Python | gpl-3.0 | 9,860 | 0.001724 |
# range_ex.py Test of asynchronous mqtt client with clean session False.
# Extended version publishes SSID
# (C) Copyright Peter Hinch 2017-2019.
# Released under the MIT licence.
# Public brokers https://github.com/mqtt/mqtt.github.io/wiki/public_brokers
# This demo is for wireless range tests. If out of range (OOR) the red LED lights.
# In range the blue LED will pulse for each received message.
# Uses clean sessions to avoid backlog when OOR.
# red LED: ON == WiFi fail
# blue LED pulse == message received
# Publishes connection statistics.
from mqtt_as import MQTTClient, config
from config import wifi_led, blue_led
import uasyncio as asyncio
import network
import gc
TOPIC = 'shed' # For demo publication and last will use same topic
outages = 0
rssi = -199 # Effectively zero signal in dB.
async def pulse(): # This demo pulses blue LED each time a subscribed msg arrives.
blue_led(True)
await asyncio.sleep(1)
blue_led(False)
def sub_cb(topic, msg, retained):
print((topic, msg))
asyncio.create_task(pulse())
# The only way to measure RSSI is via scan(). Alas scan() blocks so the code
# causes the obvious uasyncio issues.
async def get_rssi():
global rssi
s = network.WLAN()
ssid = config['ssid'].encode('UTF8')
while True:
try:
rssi = [x[3] for x in s.scan() if x[0] == ssid][0]
except IndexError: # ssid not found.
rssi = -199
await asyncio.sleep(30)
async def wifi_han(state):
global outages
wifi_led(not state) # Light LED when WiFi down
if state:
print('We are connected to broker.')
else:
outages += 1
print('WiFi or broker is down.')
await asyncio.sleep(1)
async def conn_han(client):
await client.subscribe('foo_topic', 1)
async def main(client):
try:
await client.connect()
except OSError:
print('Connection failed.')
return
n = 0
s = '{} repubs: {} outages: {} rssi: {}dB free: {}bytes'
while True:
await asyncio.sleep(5)
gc.collect()
m = gc.mem_free()
print('publish', n)
# If WiFi is down the following will pause for the duration.
await client.publish(TOPIC, s.format(n, client.REPUB_COUNT, outages, rssi, m), qos = 1)
n += 1
# Define configuration
config['subs_cb'] = sub_cb
config['wifi_coro'] = wifi_han
config['will'] = (TOPIC, 'Goodbye cruel world!', False, 0)
config['connect_coro'] = conn_han
config['keepalive'] = 120
# Set up client. Enable optional debug statements.
MQTTClient.DEBUG = True
client = MQTTClient(config)
asyncio.create_task(get_rssi())
try:
asyncio.run(main(client))
finally: # Prevent LmacRxBlk:1 errors.
client.close()
blue_led(True)
asyncio.new_event_loop()
| peterhinch/micropython-mqtt | mqtt_as/range_ex.py | Python | mit | 2,773 | 0.003967 |
# -*- coding: UTF-8 -*-
#--------------------------------------------------------------------------
# Copyright (c) : 2004 - 2007 Softwell sas - Milano
# Written by : Giovanni Porcari, Michele Bertoldi
# Saverio Porcari, Francesco Porcari , Francesco Cavazzana
#--------------------------------------------------------------------------
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
class GnrCustomWebPage(object):
def windowTitle(self):
return 'test remote'
def main(self, root, **kwargs):
bc = root.borderContainer()
top = bc.contentPane(region='top', height='100px')
top.button('Build', fire='build')
top.button('Add element', fire='add')
top.dataController("""var pane = genro.nodeById('remoteContent')
pane._('div',{height:'200px',width:'200px',background:'lightBlue',
border:'1px solid blue','float':'left',
remote:{'method':'test'}});
""", _fired="^add")
center = bc.contentPane(region='center').div(nodeId='remoteContent')
center.div().remote('test', _fired='^build')
def remote_test(self, pane, **kwargs):
print 'pippo'
pane.div('hello', height='40px', width='80px', background='lime') | poppogbr/genropy | packages/showcase/webpages/dev/remote.py | Python | lgpl-2.1 | 2,026 | 0.008391 |
from twisted.trial import unittest
from twisted.internet import defer
from duct.protocol.sflow import protocol
from duct.tests import globs
class Test(unittest.TestCase):
def test_decode(self):
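        # Decode a captured sFlow v5 datagram and verify the version and
        # the number of flow samples it contains.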
proto = protocol.Sflow(globs.SFLOW_PACKET, '172.30.0.5')
        self.assertEqual(proto.version, 5)
        self.assertEqual(len(proto.samples), 5)
| ducted/duct | duct/tests/test_sflow.py | Python | mit | 362 | 0 |