# mock.py
# Test tools for mocking and patching.
# E-mail: fuzzyman AT voidspace DOT org DOT uk
#
# mock 1.0.1
# http://www.voidspace.org.uk/python/mock/
#
# Copyright (c) 2007-2013, Michael Foord & the mock team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
__all__ = (
'__version__',
'version_info',
'Mock',
'MagicMock',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
'FILTER_DIR',
'CallableMixin',
'NonCallableMock',
'NonCallableMagicMock',
'mock_open',
'PropertyMock',
)
from functools import partial
import inspect
import pprint
import sys
try:
import builtins
except ImportError:
import __builtin__ as builtins
from types import ModuleType
import six
from six import wraps
# TODO(houglum): Adjust this section if we use a later version of mock.
# Manually specify version so that we don't need to rely on pbr (which works
# best when packages are installed via pip rather than direct download).
# This allows us to include mock in other projects which can be installed
# via methods other than pip (downloading a git repo, tarball, etc.).
__version__ = '2.0.0'
version_info = (2, 0, 0, 'final', 0)
import mock
try:
inspectsignature = inspect.signature
except AttributeError:
import funcsigs
inspectsignature = funcsigs.signature
# TODO: use six.
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
try:
long
except NameError:
# Python 3
long = int
try:
BaseException
except NameError:
# Python 2.4 compatibility
BaseException = Exception
if six.PY2:
# Python 2's next() can't handle a non-iterator with a __next__ method.
_next = next
def next(obj, _next=_next):
if getattr(obj, '__next__', None):
return obj.__next__()
return _next(obj)
del _next
_builtins = set(name for name in dir(builtins) if not name.startswith('_'))
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
# jython
import java
BaseExceptions = (BaseException, java.lang.Throwable)
try:
_isidentifier = str.isidentifier
except AttributeError:
# Python 2.X
import keyword
import re
regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
def _isidentifier(string):
if string in keyword.kwlist:
return False
return regex.match(string)
self = 'im_self'
builtin = '__builtin__'
if six.PY3:
self = '__self__'
builtin = 'builtins'
# NOTE: This FILTER_DIR is not used. The binding in mock.FILTER_DIR is.
FILTER_DIR = True
# Workaround for Python issue #12370
# Without this, the __class__ properties wouldn't be set correctly
_safe_super = super
def _is_instance_mock(obj):
# can't use isinstance on Mock objects because they override __class__
# The base class for all mocks is NonCallableMock
return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
return (
isinstance(obj, BaseExceptions) or
isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
)
class _slotted(object):
__slots__ = ['a']
DescriptorTypes = (
type(_slotted.a),
property,
)
def _get_signature_object(func, as_instance, eat_self):
"""
Given an arbitrary, possibly callable object, try to create a suitable
signature object.
Return a (reduced func, signature) tuple, or None.
"""
if isinstance(func, ClassTypes) and not as_instance:
# If it's a type and should be modelled as a type, use __init__.
try:
func = func.__init__
except AttributeError:
return None
# Skip the `self` argument in __init__
eat_self = True
elif not isinstance(func, FunctionTypes):
# If we really want to model an instance of the passed type,
# __call__ should be looked up, not __init__.
try:
func = func.__call__
except AttributeError:
return None
if eat_self:
sig_func = partial(func, None)
else:
sig_func = func
try:
return func, inspectsignature(sig_func)
except ValueError:
# Certain callable types are not supported by inspect.signature()
return None
def _check_signature(func, mock, skipfirst, instance=False):
sig = _get_signature_object(func, instance, skipfirst)
if sig is None:
return
func, sig = sig
def checksig(_mock_self, *args, **kwargs):
sig.bind(*args, **kwargs)
_copy_func_details(func, checksig)
type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
funcopy.__name__ = func.__name__
funcopy.__doc__ = func.__doc__
try:
funcopy.__text_signature__ = func.__text_signature__
except AttributeError:
pass
# we explicitly don't copy func.__dict__ into this copy as it would
# expose original attributes that should be mocked
try:
funcopy.__module__ = func.__module__
except AttributeError:
pass
try:
funcopy.__defaults__ = func.__defaults__
except AttributeError:
pass
try:
funcopy.__kwdefaults__ = func.__kwdefaults__
except AttributeError:
pass
if six.PY2:
funcopy.func_defaults = func.func_defaults
return
def _callable(obj):
if isinstance(obj, ClassTypes):
return True
if getattr(obj, '__call__', None) is not None:
return True
return False
def _is_list(obj):
    # checks for lists or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, ClassTypes):
# already an instance
return getattr(obj, '__call__', None) is not None
if six.PY3:
# *could* be broken by a class overriding __mro__ or __dict__ via
# a metaclass
for base in (obj,) + obj.__mro__:
if base.__dict__.get('__call__') is not None:
return True
else:
klass = obj
# uses __bases__ instead of __mro__ so that we work with old style classes
if klass.__dict__.get('__call__') is not None:
return True
for base in klass.__bases__:
if _instance_callable(base):
return True
return False
def _set_signature(mock, original, instance=False):
# creates a function with signature (*args, **kwargs) that delegates to a
# mock. It still does signature checking by calling a lambda with the same
# signature as the original.
if not _callable(original):
return
skipfirst = isinstance(original, ClassTypes)
result = _get_signature_object(original, instance, skipfirst)
if result is None:
return
func, sig = result
def checksig(*args, **kwargs):
sig.bind(*args, **kwargs)
_copy_func_details(func, checksig)
name = original.__name__
if not _isidentifier(name):
name = 'funcopy'
context = {'_checksig_': checksig, 'mock': mock}
src = """def %s(*args, **kwargs):
_checksig_(*args, **kwargs)
return mock(*args, **kwargs)""" % name
six.exec_(src, context)
funcopy = context[name]
_setup_func(funcopy, mock)
return funcopy
def _setup_func(funcopy, mock):
funcopy.mock = mock
# can't use isinstance with mocks
if not _is_instance_mock(mock):
return
def assert_called_with(*args, **kwargs):
return mock.assert_called_with(*args, **kwargs)
def assert_called_once_with(*args, **kwargs):
return mock.assert_called_once_with(*args, **kwargs)
def assert_has_calls(*args, **kwargs):
return mock.assert_has_calls(*args, **kwargs)
def assert_any_call(*args, **kwargs):
return mock.assert_any_call(*args, **kwargs)
def reset_mock():
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
mock.reset_mock()
ret = funcopy.return_value
        if _is_instance_mock(ret) and ret is not mock:
ret.reset_mock()
funcopy.called = False
funcopy.call_count = 0
funcopy.call_args = None
funcopy.call_args_list = _CallList()
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
funcopy.return_value = mock.return_value
funcopy.side_effect = mock.side_effect
funcopy._mock_children = mock._mock_children
funcopy.assert_called_with = assert_called_with
funcopy.assert_called_once_with = assert_called_once_with
funcopy.assert_has_calls = assert_has_calls
funcopy.assert_any_call = assert_any_call
funcopy.reset_mock = reset_mock
mock._mock_delegate = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(unittest.mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, _SentinelObject(name))
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
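# A minimal usage sketch (illustrative only): every attribute access on
# `sentinel` returns the same unique named object, handy as an unmistakable
# marker value.
#
#     mark = sentinel.some_object
#     assert sentinel.some_object is mark    # same object on every access
#     repr(mark)                             # 'sentinel.some_object'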
class OldStyleClass:
pass
ClassType = type(OldStyleClass)
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
ClassTypes = (type,)
if six.PY2:
ClassTypes = (type, ClassType)
_allowed_names = set((
'return_value', '_mock_return_value', 'side_effect',
'_mock_side_effect', '_mock_parent', '_mock_new_parent',
'_mock_name', '_mock_new_name'
))
def _delegating_property(name):
_allowed_names.add(name)
_the_name = '_mock_' + name
def _get(self, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
return getattr(self, _the_name)
return getattr(sig, name)
def _set(self, value, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
self.__dict__[_the_name] = value
else:
setattr(sig, name, value)
return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
if not _is_instance_mock(value):
return False
if ((value._mock_name or value._mock_new_name) or
(value._mock_parent is not None) or
(value._mock_new_parent is not None)):
return False
_parent = parent
while _parent is not None:
# setting a mock (value) as a child or return value of itself
# should not modify the mock
if _parent is value:
return False
_parent = _parent._mock_new_parent
if new_name:
value._mock_new_parent = parent
value._mock_new_name = new_name
if name:
value._mock_parent = parent
value._mock_name = name
return True
# Internal class to identify if we wrapped an iterator object or not.
class _MockIter(object):
def __init__(self, obj):
self.obj = iter(obj)
def __iter__(self):
return self
def __next__(self):
return next(self.obj)
class Base(object):
_mock_return_value = DEFAULT
_mock_side_effect = None
def __init__(self, *args, **kwargs):
pass
class NonCallableMock(Base):
"""A non-callable version of `Mock`"""
def __new__(cls, *args, **kw):
# every instance has its own class
# so we can create magic methods on the
# class without stomping on other mocks
new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
instance = object.__new__(new)
return instance
def __init__(
self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
_spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs
):
if _new_parent is None:
_new_parent = parent
__dict__ = self.__dict__
__dict__['_mock_parent'] = parent
__dict__['_mock_name'] = name
__dict__['_mock_new_name'] = _new_name
__dict__['_mock_new_parent'] = _new_parent
if spec_set is not None:
spec = spec_set
spec_set = True
if _eat_self is None:
_eat_self = parent is not None
self._mock_add_spec(spec, spec_set, _spec_as_instance, _eat_self)
__dict__['_mock_children'] = {}
__dict__['_mock_wraps'] = wraps
__dict__['_mock_delegate'] = None
__dict__['_mock_called'] = False
__dict__['_mock_call_args'] = None
__dict__['_mock_call_count'] = 0
__dict__['_mock_call_args_list'] = _CallList()
__dict__['_mock_mock_calls'] = _CallList()
__dict__['method_calls'] = _CallList()
__dict__['_mock_unsafe'] = unsafe
if kwargs:
self.configure_mock(**kwargs)
_safe_super(NonCallableMock, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state
)
def attach_mock(self, mock, attribute):
"""
Attach a mock as an attribute of this one, replacing its name and
parent. Calls to the attached mock will be recorded in the
`method_calls` and `mock_calls` attributes of this one."""
mock._mock_parent = None
mock._mock_new_parent = None
mock._mock_name = ''
mock._mock_new_name = None
setattr(self, attribute, mock)
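    # A minimal sketch of the behaviour described above (names illustrative):
    #
    #     parent, child = Mock(), Mock()
    #     parent.attach_mock(child, 'child')
    #     child(1)
    #     assert parent.mock_calls == [call.child(1)]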
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
def _mock_add_spec(self, spec, spec_set, _spec_as_instance=False,
_eat_self=False):
_spec_class = None
_spec_signature = None
if spec is not None and not _is_list(spec):
if isinstance(spec, ClassTypes):
_spec_class = spec
else:
_spec_class = _get_class(spec)
res = _get_signature_object(spec,
_spec_as_instance, _eat_self)
_spec_signature = res and res[1]
spec = dir(spec)
__dict__ = self.__dict__
__dict__['_spec_class'] = _spec_class
__dict__['_spec_set'] = spec_set
__dict__['_spec_signature'] = _spec_signature
__dict__['_mock_methods'] = spec
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_delegate is not None:
ret = self._mock_delegate.return_value
if ret is DEFAULT:
ret = self._get_child_mock(
_new_parent=self, _new_name='()'
)
self.return_value = ret
return ret
def __set_return_value(self, value):
if self._mock_delegate is not None:
self._mock_delegate.return_value = value
else:
self._mock_return_value = value
_check_and_set_parent(self, value, None, '()')
__return_value_doc = "The value to be returned when the mock is called."
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
@property
def __class__(self):
if self._spec_class is None:
return type(self)
return self._spec_class
called = _delegating_property('called')
call_count = _delegating_property('call_count')
call_args = _delegating_property('call_args')
call_args_list = _delegating_property('call_args_list')
mock_calls = _delegating_property('mock_calls')
def __get_side_effect(self):
delegated = self._mock_delegate
if delegated is None:
return self._mock_side_effect
sf = delegated.side_effect
if (sf is not None and not callable(sf)
and not isinstance(sf, _MockIter) and not _is_exception(sf)):
sf = _MockIter(sf)
delegated.side_effect = sf
return sf
def __set_side_effect(self, value):
value = _try_iter(value)
delegated = self._mock_delegate
if delegated is None:
self._mock_side_effect = value
else:
delegated.side_effect = value
side_effect = property(__get_side_effect, __set_side_effect)
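    # A short sketch of the side_effect contract (values illustrative): a
    # callable is invoked with the call's arguments, an exception is raised,
    # and any other iterable yields one result per call.
    #
    #     m = Mock(side_effect=[1, 2])
    #     m()                        # returns 1
    #     m()                        # returns 2; a third call raises StopIteration
    #     m.side_effect = KeyError('boom')
    #     m()                        # raises KeyError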
def reset_mock(self, visited=None):
"Restore the mock object to its initial state."
if visited is None:
visited = []
if id(self) in visited:
return
visited.append(id(self))
self.called = False
self.call_args = None
self.call_count = 0
self.mock_calls = _CallList()
self.call_args_list = _CallList()
self.method_calls = _CallList()
for child in self._mock_children.values():
if isinstance(child, _SpecState):
continue
child.reset_mock(visited)
ret = self._mock_return_value
if _is_instance_mock(ret) and ret is not self:
ret.reset_mock(visited)
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
Attributes plus return values and side effects can be set on child
mocks using standard dot notation and unpacking a dictionary in the
method call:
>>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
>>> mock.configure_mock(**attrs)"""
for arg, val in sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda entry: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
for entry in args:
obj = getattr(obj, entry)
setattr(obj, final, val)
def __getattr__(self, name):
if name in ('_mock_methods', '_mock_unsafe'):
raise AttributeError(name)
elif self._mock_methods is not None:
if name not in self._mock_methods or name in _all_magics:
raise AttributeError("Mock object has no attribute %r" % name)
elif _is_magic(name):
raise AttributeError(name)
        if not self._mock_unsafe:
            # 'assret' guards against a common misspelling of 'assert'
            if name.startswith(('assert', 'assret')):
raise AttributeError(name)
result = self._mock_children.get(name)
if result is _deleted:
raise AttributeError(name)
elif result is None:
wraps = None
if self._mock_wraps is not None:
# XXXX should we get the attribute without triggering code
# execution?
wraps = getattr(self._mock_wraps, name)
result = self._get_child_mock(
parent=self, name=name, wraps=wraps, _new_name=name,
_new_parent=self
)
self._mock_children[name] = result
elif isinstance(result, _SpecState):
result = create_autospec(
result.spec, result.spec_set, result.instance,
result.parent, result.name
)
self._mock_children[name] = result
return result
def __repr__(self):
_name_list = [self._mock_new_name]
_parent = self._mock_new_parent
last = self
dot = '.'
if _name_list == ['()']:
dot = ''
seen = set()
while _parent is not None:
last = _parent
_name_list.append(_parent._mock_new_name + dot)
dot = '.'
if _parent._mock_new_name == '()':
dot = ''
_parent = _parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
if id(_parent) in seen:
break
seen.add(id(_parent))
_name_list = list(reversed(_name_list))
_first = last._mock_name or 'mock'
if len(_name_list) > 1:
if _name_list[1] not in ('()', '().'):
_first += '.'
_name_list[0] = _first
name = ''.join(_name_list)
name_string = ''
if name not in ('mock', 'mock.'):
name_string = ' name=%r' % name
spec_string = ''
if self._spec_class is not None:
spec_string = ' spec=%r'
if self._spec_set:
spec_string = ' spec_set=%r'
spec_string = spec_string % self._spec_class.__name__
return "<%s%s%s id='%s'>" % (
type(self).__name__,
name_string,
spec_string,
id(self)
)
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members."""
if not mock.FILTER_DIR and getattr(object, '__dir__', None):
# object.__dir__ is not in 2.7
return object.__dir__(self)
extras = self._mock_methods or []
from_type = dir(type(self))
from_dict = list(self.__dict__)
if mock.FILTER_DIR:
from_type = [e for e in from_type if not e.startswith('_')]
from_dict = [e for e in from_dict if not e.startswith('_') or
_is_magic(e)]
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
if name in _allowed_names:
# property setters go through here
return object.__setattr__(self, name, value)
elif (self._spec_set and self._mock_methods is not None and
name not in self._mock_methods and
name not in self.__dict__):
raise AttributeError("Mock object has no attribute '%s'" % name)
elif name in _unsupported_magics:
msg = 'Attempting to set unsupported magic method %r.' % name
raise AttributeError(msg)
elif name in _all_magics:
if self._mock_methods is not None and name not in self._mock_methods:
raise AttributeError("Mock object has no attribute '%s'" % name)
if not _is_instance_mock(value):
setattr(type(self), name, _get_method(name, value))
original = value
value = lambda *args, **kw: original(self, *args, **kw)
else:
# only set _new_name and not name so that mock_calls is tracked
# but not method calls
_check_and_set_parent(self, value, None, name)
setattr(type(self), name, value)
self._mock_children[name] = value
elif name == '__class__':
self._spec_class = value
return
else:
if _check_and_set_parent(self, value, name, name):
self._mock_children[name] = value
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in _all_magics and name in type(self).__dict__:
delattr(type(self), name)
if name not in self.__dict__:
# for magic methods that are still MagicProxy objects and
# not set on the instance itself
return
if name in self.__dict__:
object.__delattr__(self, name)
obj = self._mock_children.get(name, _missing)
if obj is _deleted:
raise AttributeError(name)
if obj is not _missing:
del self._mock_children[name]
self._mock_children[name] = _deleted
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name or 'mock'
return _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
call_args = self.call_args
if len(call_args) == 3:
call_args = call_args[1:]
actual_string = self._format_mock_call_signature(*call_args)
return message % (expected_string, actual_string)
def _call_matcher(self, _call):
"""
Given a call (or simply a (args, kwargs) tuple), return a
comparison key suitable for matching with other calls.
This is a best effort method which relies on the spec's signature,
if available, or falls back on the arguments themselves.
"""
sig = self._spec_signature
if sig is not None:
if len(_call) == 2:
name = ''
args, kwargs = _call
else:
name, args, kwargs = _call
try:
return name, sig.bind(*args, **kwargs)
except TypeError as e:
e.__traceback__ = None
return e
else:
return _call
def assert_not_called(_mock_self):
"""assert that the mock was never called.
"""
self = _mock_self
if self.call_count != 0:
msg = ("Expected '%s' to not have been called. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
def assert_called(_mock_self):
"""assert that the mock was called at least once
"""
self = _mock_self
if self.call_count == 0:
msg = ("Expected '%s' to have been called." %
self._mock_name or 'mock')
raise AssertionError(msg)
def assert_called_once(_mock_self):
"""assert that the mock was called only once.
"""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected '%s' to have been called once. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called with the specified arguments.
Raises an AssertionError if the args and keyword args passed in are
different to the last call to the mock."""
self = _mock_self
if self.call_args is None:
expected = self._format_mock_call_signature(args, kwargs)
raise AssertionError('Expected call: %s\nNot called' % (expected,))
def _error_message(cause):
msg = self._format_mock_failure_message(args, kwargs)
if six.PY2 and cause is not None:
# Tack on some diagnostics for Python without __cause__
msg = '%s\n%s' % (msg, str(cause))
return msg
expected = self._call_matcher((args, kwargs))
actual = self._call_matcher(self.call_args)
if expected != actual:
cause = expected if isinstance(expected, Exception) else None
six.raise_from(AssertionError(_error_message(cause)), cause)
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once and with the specified
arguments."""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected '%s' to be called once. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
If `any_order` is False (the default) then the calls must be
sequential. There can be extra calls before or after the
specified calls.
If `any_order` is True then the calls can be in any order, but
they must all appear in `mock_calls`."""
expected = [self._call_matcher(c) for c in calls]
cause = expected if isinstance(expected, Exception) else None
all_calls = _CallList(self._call_matcher(c) for c in self.mock_calls)
if not any_order:
if expected not in all_calls:
six.raise_from(AssertionError(
'Calls not found.\nExpected: %r\n'
'Actual: %r' % (_CallList(calls), self.mock_calls)
), cause)
return
all_calls = list(all_calls)
not_found = []
for kall in expected:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
six.raise_from(AssertionError(
'%r not all found in call list' % (tuple(not_found),)
), cause)
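    # A minimal sketch of both modes (values illustrative):
    #
    #     m = Mock()
    #     m(1); m(2); m(3)
    #     m.assert_has_calls([call(2), call(3)])                 # contiguous
    #     m.assert_has_calls([call(3), call(1)], any_order=True)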
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
expected = self._call_matcher((args, kwargs))
actual = [self._call_matcher(c) for c in self.call_args_list]
if expected not in actual:
cause = expected if isinstance(expected, Exception) else None
expected_string = self._format_mock_call_signature(args, kwargs)
six.raise_from(AssertionError(
'%s call not found' % expected_string
), cause)
def _get_child_mock(self, **kw):
"""Create the child mocks for attributes and return value.
By default child mocks will be the same type as the parent.
Subclasses of Mock may want to override this to customize the way
child mocks are made.
For non-callable mocks the callable variant will be used (rather than
any custom subclass)."""
_type = type(self)
if not issubclass(_type, CallableMixin):
if issubclass(_type, NonCallableMagicMock):
klass = MagicMock
elif issubclass(_type, NonCallableMock) :
klass = Mock
else:
klass = _type.__mro__[1]
return klass(**kw)
def _try_iter(obj):
if obj is None:
return obj
if _is_exception(obj):
return obj
if _callable(obj):
return obj
try:
return iter(obj)
except TypeError:
# XXXX backwards compatibility
# but this will blow up on first call - so maybe we should fail early?
return obj
class CallableMixin(Base):
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
wraps=None, name=None, spec_set=None, parent=None,
_spec_state=None, _new_name='', _new_parent=None, **kwargs):
self.__dict__['_mock_return_value'] = return_value
_safe_super(CallableMixin, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state, _new_name, _new_parent, **kwargs
)
self.side_effect = side_effect
def _mock_check_sig(self, *args, **kwargs):
# stub method that can be replaced with one with a specific signature
pass
def __call__(_mock_self, *args, **kwargs):
        # can't use self in case a function / method we are mocking uses self
# in the signature
_mock_self._mock_check_sig(*args, **kwargs)
return _mock_self._mock_call(*args, **kwargs)
def _mock_call(_mock_self, *args, **kwargs):
self = _mock_self
self.called = True
self.call_count += 1
_new_name = self._mock_new_name
_new_parent = self._mock_new_parent
_call = _Call((args, kwargs), two=True)
self.call_args = _call
self.call_args_list.append(_call)
self.mock_calls.append(_Call(('', args, kwargs)))
seen = set()
skip_next_dot = _new_name == '()'
do_method_calls = self._mock_parent is not None
name = self._mock_name
while _new_parent is not None:
this_mock_call = _Call((_new_name, args, kwargs))
if _new_parent._mock_new_name:
dot = '.'
if skip_next_dot:
dot = ''
skip_next_dot = False
if _new_parent._mock_new_name == '()':
skip_next_dot = True
_new_name = _new_parent._mock_new_name + dot + _new_name
if do_method_calls:
if _new_name == name:
this_method_call = this_mock_call
else:
this_method_call = _Call((name, args, kwargs))
_new_parent.method_calls.append(this_method_call)
do_method_calls = _new_parent._mock_parent is not None
if do_method_calls:
name = _new_parent._mock_name + '.' + name
_new_parent.mock_calls.append(this_mock_call)
_new_parent = _new_parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
_new_parent_id = id(_new_parent)
if _new_parent_id in seen:
break
seen.add(_new_parent_id)
ret_val = DEFAULT
effect = self.side_effect
if effect is not None:
if _is_exception(effect):
raise effect
if not _callable(effect):
result = next(effect)
if _is_exception(result):
raise result
if result is DEFAULT:
result = self.return_value
return result
ret_val = effect(*args, **kwargs)
if (self._mock_wraps is not None and
self._mock_return_value is DEFAULT):
return self._mock_wraps(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
return ret_val
class Mock(CallableMixin, NonCallableMock):
"""
Create a new `Mock` object. `Mock` takes several optional arguments
that specify the behaviour of the Mock object:
* `spec`: This can be either a list of strings or an existing object (a
class or instance) that acts as the specification for the mock object. If
you pass in an object then a list of strings is formed by calling dir on
the object (excluding unsupported magic attributes and methods). Accessing
any attribute not in this list will raise an `AttributeError`.
If `spec` is an object (rather than a list of strings) then
`mock.__class__` returns the class of the spec object. This allows mocks
to pass `isinstance` tests.
* `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
or get an attribute on the mock that isn't on the object passed as
`spec_set` will raise an `AttributeError`.
* `side_effect`: A function to be called whenever the Mock is called. See
the `side_effect` attribute. Useful for raising exceptions or
dynamically changing return values. The function is called with the same
arguments as the mock, and unless it returns `DEFAULT`, the return
value of this function is used as the return value.
Alternatively `side_effect` can be an exception class or instance. In
this case the exception will be raised when the mock is called.
If `side_effect` is an iterable then each call to the mock will return
the next value from the iterable. If any of the members of the iterable
are exceptions they will be raised instead of returned.
* `return_value`: The value returned when the mock is called. By default
this is a new Mock (created on first access). See the
`return_value` attribute.
* `wraps`: Item for the mock object to wrap. If `wraps` is not None then
calling the Mock will pass the call through to the wrapped object
(returning the real result). Attribute access on the mock will return a
Mock object that wraps the corresponding attribute of the wrapped object
(so attempting to access an attribute that doesn't exist will raise an
`AttributeError`).
If the mock has an explicit `return_value` set then calls are not passed
to the wrapped object and the `return_value` is returned instead.
* `name`: If the mock has a name then it will be used in the repr of the
mock. This can be useful for debugging. The name is propagated to child
mocks.
Mocks can also be called with arbitrary keyword arguments. These will be
used to set attributes on the mock after it is created.
"""
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
def _is_started(patcher):
# XXXX horrible
return hasattr(patcher, 'is_local')
class _patch(object):
attribute_name = None
_active_patches = []
def __init__(
self, getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
):
if new_callable is not None:
if new is not DEFAULT:
raise ValueError(
"Cannot use 'new' and 'new_callable' together"
)
if autospec is not None:
raise ValueError(
"Cannot use 'autospec' and 'new_callable' together"
)
self.getter = getter
self.attribute = attribute
self.new = new
self.new_callable = new_callable
self.spec = spec
self.create = create
self.has_local = False
self.spec_set = spec_set
self.autospec = autospec
self.kwargs = kwargs
self.additional_patchers = []
def copy(self):
patcher = _patch(
self.getter, self.attribute, self.new, self.spec,
self.create, self.spec_set,
self.autospec, self.new_callable, self.kwargs
)
patcher.attribute_name = self.attribute_name
patcher.additional_patchers = [
p.copy() for p in self.additional_patchers
]
return patcher
def __call__(self, func):
if isinstance(func, ClassTypes):
return self.decorate_class(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
for attr in dir(klass):
if not attr.startswith(patch.TEST_PREFIX):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
patcher = self.copy()
setattr(klass, attr, patcher(attr_value))
return klass
def decorate_callable(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
return func
@wraps(func)
def patched(*args, **keywargs):
extra_args = []
entered_patchers = []
exc_info = tuple()
try:
for patching in patched.patchings:
arg = patching.__enter__()
entered_patchers.append(patching)
if patching.attribute_name is not None:
keywargs.update(arg)
elif patching.new is DEFAULT:
extra_args.append(arg)
args += tuple(extra_args)
return func(*args, **keywargs)
except:
if (patching not in entered_patchers and
_is_started(patching)):
# the patcher may have been started, but an exception
# raised whilst entering one of its additional_patchers
entered_patchers.append(patching)
# Pass the exception to __exit__
exc_info = sys.exc_info()
# re-raise the exception
raise
finally:
for patching in reversed(entered_patchers):
patching.__exit__(*exc_info)
patched.patchings = [self]
return patched
def get_original(self):
target = self.getter()
name = self.attribute
original = DEFAULT
local = False
try:
original = target.__dict__[name]
except (AttributeError, KeyError):
original = getattr(target, name, DEFAULT)
else:
local = True
if name in _builtins and isinstance(target, ModuleType):
self.create = True
if not self.create and original is DEFAULT:
raise AttributeError(
"%s does not have the attribute %r" % (target, name)
)
return original, local
def __enter__(self):
"""Perform the patch."""
new, spec, spec_set = self.new, self.spec, self.spec_set
autospec, kwargs = self.autospec, self.kwargs
new_callable = self.new_callable
self.target = self.getter()
# normalise False to None
if spec is False:
spec = None
if spec_set is False:
spec_set = None
if autospec is False:
autospec = None
if spec is not None and autospec is not None:
raise TypeError("Can't specify spec and autospec")
if ((spec is not None or autospec is not None) and
spec_set not in (True, None)):
raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
original, local = self.get_original()
if new is DEFAULT and autospec is None:
inherit = False
if spec is True:
# set spec to the object we are replacing
spec = original
if spec_set is True:
spec_set = original
spec = None
elif spec is not None:
if spec_set is True:
spec_set = spec
spec = None
elif spec_set is True:
spec_set = original
if spec is not None or spec_set is not None:
if original is DEFAULT:
raise TypeError("Can't use 'spec' with create=True")
if isinstance(original, ClassTypes):
# If we're patching out a class and there is a spec
inherit = True
Klass = MagicMock
_kwargs = {}
if new_callable is not None:
Klass = new_callable
elif spec is not None or spec_set is not None:
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if _is_list(this_spec):
not_callable = '__call__' not in this_spec
else:
not_callable = not _callable(this_spec)
if not_callable:
Klass = NonCallableMagicMock
if spec is not None:
_kwargs['spec'] = spec
if spec_set is not None:
_kwargs['spec_set'] = spec_set
# add a name to mocks
if (isinstance(Klass, type) and
issubclass(Klass, NonCallableMock) and self.attribute):
_kwargs['name'] = self.attribute
_kwargs.update(kwargs)
new = Klass(**_kwargs)
if inherit and _is_instance_mock(new):
# we can only tell if the instance should be callable if the
# spec is not a list
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if (not _is_list(this_spec) and not
_instance_callable(this_spec)):
Klass = NonCallableMagicMock
_kwargs.pop('name')
new.return_value = Klass(_new_parent=new, _new_name='()',
**_kwargs)
elif autospec is not None:
# spec is ignored, new *must* be default, spec_set is treated
# as a boolean. Should we check spec is not None and that spec_set
# is a bool?
if new is not DEFAULT:
raise TypeError(
"autospec creates the mock for you. Can't specify "
"autospec and new."
)
if original is DEFAULT:
raise TypeError("Can't use 'autospec' with create=True")
spec_set = bool(spec_set)
if autospec is True:
autospec = original
new = create_autospec(autospec, spec_set=spec_set,
_name=self.attribute, **kwargs)
elif kwargs:
# can't set keyword args when we aren't creating the mock
# XXXX If new is a Mock we could call new.configure_mock(**kwargs)
raise TypeError("Can't pass kwargs to a mock we aren't creating")
new_attr = new
self.temp_original = original
self.is_local = local
setattr(self.target, self.attribute, new_attr)
if self.attribute_name is not None:
extra_args = {}
if self.new is DEFAULT:
extra_args[self.attribute_name] = new
for patching in self.additional_patchers:
arg = patching.__enter__()
if patching.new is DEFAULT:
extra_args.update(arg)
return extra_args
return new
def __exit__(self, *exc_info):
"""Undo the patch."""
if not _is_started(self):
raise RuntimeError('stop called on unstarted patcher')
if self.is_local and self.temp_original is not DEFAULT:
setattr(self.target, self.attribute, self.temp_original)
else:
delattr(self.target, self.attribute)
if not self.create and (not hasattr(self.target, self.attribute) or
self.attribute in ('__doc__', '__module__',
'__defaults__', '__annotations__',
'__kwdefaults__')):
# needed for proxy objects like django settings
setattr(self.target, self.attribute, self.temp_original)
del self.temp_original
del self.is_local
del self.target
for patcher in reversed(self.additional_patchers):
if _is_started(patcher):
patcher.__exit__(*exc_info)
def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.append(self)
return result
def stop(self):
"""Stop an active patch."""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
pass
return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
target, attribute, new=DEFAULT, spec=None,
create=False, spec_set=None, autospec=None,
new_callable=None, **kwargs
):
"""
patch the named member (`attribute`) on an object (`target`) with a mock
object.
`patch.object` can be used as a decorator, class decorator or a context
manager. Arguments `new`, `spec`, `create`, `spec_set`,
`autospec` and `new_callable` have the same meaning as for `patch`. Like
`patch`, `patch.object` takes arbitrary keyword arguments for configuring
the mock object it creates.
When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
getter = lambda: target
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
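# A hedged sketch of patch.object as a context manager; `SomeClass` is a
# hypothetical target:
#
#     with patch.object(SomeClass, 'method', return_value=3):
#         assert SomeClass().method() == 3
#     # the original SomeClass.method is restored here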
def _patch_multiple(target, spec=None, create=False, spec_set=None,
autospec=None, new_callable=None, **kwargs):
"""Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches::
with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
...
Use `DEFAULT` as the value if you want `patch.multiple` to create
mocks for you. In this case the created mocks are passed into a decorated
function by keyword, and a dictionary is returned when `patch.multiple` is
used as a context manager.
`patch.multiple` can be used as a decorator, class decorator or a context
manager. The arguments `spec`, `spec_set`, `create`,
`autospec` and `new_callable` have the same meaning as for `patch`. These
arguments will be applied to *all* patches done by `patch.multiple`.
When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
if type(target) in (unicode, str):
getter = lambda: _importer(target)
else:
getter = lambda: target
if not kwargs:
raise ValueError(
'Must supply at least one keyword argument with patch.multiple'
)
# need to wrap in a list for python 3, where items is a view
items = list(kwargs.items())
attribute, new = items[0]
patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
patcher.attribute_name = attribute
for attribute, new in items[1:]:
this_patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
this_patcher.attribute_name = attribute
patcher.additional_patchers.append(this_patcher)
return patcher
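# A hedged sketch of patch.multiple with DEFAULT; `mymod` is a hypothetical
# module with attributes `first` and `second`:
#
#     @patch.multiple('mymod', first=DEFAULT, second=DEFAULT)
#     def test_both(first, second):
#         ...                     # both arrive as MagicMocks, by keyword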
def patch(
target, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs
):
"""
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
    code when your test methods share a common set of patchings. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
"""
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
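# A hedged sketch of the usage styles described above; 'mymod.fetch' is a
# hypothetical, importable target:
#
#     @patch('mymod.fetch')
#     def test_fetch(mock_fetch):                # decorator: mock passed in
#         mock_fetch.return_value = 'data'
#
#     with patch('mymod.fetch') as mock_fetch:   # context manager
#         mock_fetch.side_effect = IOError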
class _patch_dict(object):
"""
Patch a dictionary, or dictionary like object, and restore the dictionary
to its original state after the test.
`in_dict` can be a dictionary or a mapping like container. If it is a
mapping then it must at least support getting, setting and deleting items
plus iterating over keys.
`in_dict` can also be a string specifying the name of the dictionary, which
will then be fetched by importing it.
`values` can be a dictionary of values to set in the dictionary. `values`
can also be an iterable of `(key, value)` pairs.
If `clear` is True then the dictionary will be cleared before the new
values are set.
`patch.dict` can also be called with arbitrary keyword arguments to set
values in the dictionary::
with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
...
`patch.dict` can be used as a context manager, decorator or class
decorator. When used as a class decorator `patch.dict` honours
`patch.TEST_PREFIX` for choosing which methods to wrap.
"""
def __init__(self, in_dict, values=(), clear=False, **kwargs):
if isinstance(in_dict, basestring):
in_dict = _importer(in_dict)
self.in_dict = in_dict
# support any argument supported by dict(...) constructor
self.values = dict(values)
self.values.update(kwargs)
self.clear = clear
self._original = None
def __call__(self, f):
if isinstance(f, ClassTypes):
return self.decorate_class(f)
@wraps(f)
def _inner(*args, **kw):
self._patch_dict()
try:
return f(*args, **kw)
finally:
self._unpatch_dict()
return _inner
def decorate_class(self, klass):
for attr in dir(klass):
attr_value = getattr(klass, attr)
if (attr.startswith(patch.TEST_PREFIX) and
hasattr(attr_value, "__call__")):
decorator = _patch_dict(self.in_dict, self.values, self.clear)
decorated = decorator(attr_value)
setattr(klass, attr, decorated)
return klass
def __enter__(self):
"""Patch the dict."""
self._patch_dict()
def _patch_dict(self):
values = self.values
in_dict = self.in_dict
clear = self.clear
try:
original = in_dict.copy()
except AttributeError:
# dict like object with no copy method
# must support iteration over keys
original = {}
for key in in_dict:
original[key] = in_dict[key]
self._original = original
if clear:
_clear_dict(in_dict)
try:
in_dict.update(values)
except AttributeError:
# dict like object with no update method
for key in values:
in_dict[key] = values[key]
def _unpatch_dict(self):
in_dict = self.in_dict
original = self._original
_clear_dict(in_dict)
try:
in_dict.update(original)
except AttributeError:
for key in original:
in_dict[key] = original[key]
def __exit__(self, *args):
"""Unpatch the dict."""
self._unpatch_dict()
return False
start = __enter__
stop = __exit__
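# A hedged sketch of patch.dict; os.environ is one real mapping it works on
# (assumes `import os`):
#
#     with patch.dict('os.environ', {'API_KEY': 'dummy'}):
#         assert os.environ['API_KEY'] == 'dummy'
#     # the original mapping contents are restored here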
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
def _patch_stopall():
"""Stop all active patches. LIFO to unroll nested patches."""
for patch in reversed(_patch._active_patches):
patch.stop()
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
patch.TEST_PREFIX = 'test'
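# A hedged sketch of the manual start/stop protocol wired up above
# ('mymod.fetch' is a hypothetical target):
#
#     patcher = patch('mymod.fetch')
#     mocked = patcher.start()        # patch applied, registered for stopall
#     ...
#     patch.stopall()                 # undoes every started patch, LIFO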
magic_methods = (
"lt le gt ge eq ne "
"getitem setitem delitem "
"len contains iter "
"hash str sizeof "
"enter exit "
# we added divmod and rdivmod here instead of numerics
# because there is no idivmod
"divmod rdivmod neg pos abs invert "
"complex int float index "
"trunc floor ceil "
)
numerics = (
"add sub mul matmul div floordiv mod lshift rshift and xor or pow"
)
if six.PY3:
numerics += ' truediv'
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
extra = ''
if six.PY3:
extra = 'bool next '
else:
extra = 'unicode long nonzero oct hex truediv rtruediv '
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = set((
'__cmp__', '__getslice__', '__setslice__', '__coerce__', # <3.x
'__get__', '__set__', '__delete__', '__reversed__', '__missing__',
'__reduce__', '__reduce_ex__', '__getinitargs__', '__getnewargs__',
'__getstate__', '__setstate__', '__getformat__', '__setformat__',
'__repr__', '__dir__', '__subclasses__', '__format__',
))
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
_magics = set(
'__%s__' % method for method in
' '.join([magic_methods, numerics, inplace, right, extra]).split()
)
_all_magics = _magics | _non_defaults
_unsupported_magics = set((
'__getattr__', '__setattr__',
    '__init__', '__new__', '__prepare__',
    '__instancecheck__', '__subclasscheck__',
'__del__'
))
_calculate_return_value = {
'__hash__': lambda self: object.__hash__(self),
'__str__': lambda self: object.__str__(self),
'__sizeof__': lambda self: object.__sizeof__(self),
'__unicode__': lambda self: unicode(object.__str__(self)),
}
_return_values = {
'__lt__': NotImplemented,
'__gt__': NotImplemented,
'__le__': NotImplemented,
'__ge__': NotImplemented,
'__int__': 1,
'__contains__': False,
'__len__': 0,
'__exit__': False,
'__complex__': 1j,
'__float__': 1.0,
'__bool__': True,
'__nonzero__': True,
'__oct__': '1',
'__hex__': '0x1',
'__long__': long(1),
'__index__': 1,
}
def _get_eq(self):
def __eq__(other):
ret_val = self.__eq__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
return self is other
return __eq__
def _get_ne(self):
    def __ne__(other):
        if self.__ne__._mock_return_value is not DEFAULT:
            # returning DEFAULT makes _mock_call fall through to the
            # configured return_value
            return DEFAULT
return self is not other
return __ne__
def _get_iter(self):
def __iter__():
ret_val = self.__iter__._mock_return_value
if ret_val is DEFAULT:
return iter([])
# if ret_val was already an iterator, then calling iter on it should
# return the iterator unchanged
return iter(ret_val)
return __iter__
_side_effect_methods = {
'__eq__': _get_eq,
'__ne__': _get_ne,
'__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
fixed = _return_values.get(name, DEFAULT)
if fixed is not DEFAULT:
method.return_value = fixed
return
    return_calculator = _calculate_return_value.get(name)
    if return_calculator is not None:
        try:
            return_value = return_calculator(mock)
except AttributeError:
# XXXX why do we return AttributeError here?
# set it as a side_effect instead?
return_value = AttributeError(name)
method.return_value = return_value
return
side_effector = _side_effect_methods.get(name)
if side_effector is not None:
method.side_effect = side_effector(mock)
class MagicMixin(object):
def __init__(self, *args, **kw):
self._mock_set_magics() # make magic work for kwargs in init
_safe_super(MagicMixin, self).__init__(*args, **kw)
self._mock_set_magics() # fix magic broken by upper level init
def _mock_set_magics(self):
these_magics = _magics
if getattr(self, "_mock_methods", None) is not None:
these_magics = _magics.intersection(self._mock_methods)
        remove_magics = _magics - these_magics
for entry in remove_magics:
if entry in type(self).__dict__:
# remove unneeded magic methods
delattr(self, entry)
# don't overwrite existing attributes if called a second time
these_magics = these_magics - set(type(self).__dict__)
_type = type(self)
for entry in these_magics:
setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
"""A version of `MagicMock` that isn't callable."""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
"""
MagicMock is a subclass of Mock with default implementations
of most of the magic methods. You can use MagicMock without having to
configure the magic methods yourself.
If you use the `spec` or `spec_set` arguments then *only* magic
methods that exist in the spec will be created.
Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
"""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
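# A short sketch of the preconfigured magic methods; the defaults come from
# the _return_values table defined below:
#
#     m = MagicMock()
#     assert len(m) == 0 and int(m) == 1 and bool(m)
#     m[0]                     # a child mock; the access is recorded in
#                              # m.mock_calls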
class MagicProxy(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __call__(self, *args, **kwargs):
m = self.create_mock()
return m(*args, **kwargs)
def create_mock(self):
entry = self.name
parent = self.parent
m = parent._get_child_mock(name=entry, _new_name=entry,
_new_parent=parent)
setattr(parent, entry, m)
_set_return_value(parent, m, entry)
return m
def __get__(self, obj, _type=None):
return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
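# A minimal sketch: ANY compares equal to anything, so it can stand in for
# arguments you don't care about in call assertions (values illustrative):
#
#     m = Mock()
#     m('payload', retries=3)
#     m.assert_called_with(ANY, retries=3)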
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) for arg in args])
def encode_item(item):
if six.PY2 and isinstance(item, unicode):
return item.encode("utf-8")
else:
return item
kwargs_string = ', '.join([
'%s=%r' % (encode_item(key), value) for key, value in sorted(kwargs.items())
])
if args_string:
formatted_args = args_string
if kwargs_string:
if formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
return message % formatted_args
class _Call(tuple):
"""
A tuple for holding the results of a call to a mock, either in the form
`(args, kwargs)` or `(name, args, kwargs)`.
If args or kwargs are empty then a call tuple will compare equal to
a tuple without those values. This makes comparisons less verbose::
_Call(('name', (), {})) == ('name',)
_Call(('name', (1,), {})) == ('name', (1,))
_Call(((), {'a': 'b'})) == ({'a': 'b'},)
The `_Call` object provides a useful shortcut for comparing with call::
_Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
_Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
If the _Call has no name then it will match any name.
"""
def __new__(cls, value=(), name=None, parent=None, two=False,
from_kall=True):
        # the `name` argument is consumed by __init__; __new__ only builds
        # the tuple value
        name = ''
args = ()
kwargs = {}
_len = len(value)
if _len == 3:
name, args, kwargs = value
elif _len == 2:
first, second = value
if isinstance(first, basestring):
name = first
if isinstance(second, tuple):
args = second
else:
kwargs = second
else:
args, kwargs = first, second
elif _len == 1:
value, = value
if isinstance(value, basestring):
name = value
elif isinstance(value, tuple):
args = value
else:
kwargs = value
if two:
return tuple.__new__(cls, (args, kwargs))
return tuple.__new__(cls, (name, args, kwargs))
def __init__(self, value=(), name=None, parent=None, two=False,
from_kall=True):
self.name = name
self.parent = parent
self.from_kall = from_kall
def __eq__(self, other):
if other is ANY:
return True
try:
len_other = len(other)
except TypeError:
return False
self_name = ''
if len(self) == 2:
self_args, self_kwargs = self
else:
self_name, self_args, self_kwargs = self
other_name = ''
if len_other == 0:
other_args, other_kwargs = (), {}
elif len_other == 3:
other_name, other_args, other_kwargs = other
elif len_other == 1:
value, = other
if isinstance(value, tuple):
other_args = value
other_kwargs = {}
elif isinstance(value, basestring):
other_name = value
other_args, other_kwargs = (), {}
else:
other_args = ()
other_kwargs = value
elif len_other == 2:
# could be (name, args) or (name, kwargs) or (args, kwargs)
first, second = other
if isinstance(first, basestring):
other_name = first
if isinstance(second, tuple):
other_args, other_kwargs = second, {}
else:
other_args, other_kwargs = (), second
else:
other_args, other_kwargs = first, second
else:
return False
if self_name and other_name != self_name:
return False
# this order is important for ANY to work!
return (other_args, other_kwargs) == (self_args, self_kwargs)
def __ne__(self, other):
return not self.__eq__(other)
def __call__(self, *args, **kwargs):
if self.name is None:
return _Call(('', args, kwargs), name='()')
name = self.name + '()'
return _Call((self.name, args, kwargs), name=name, parent=self)
def __getattr__(self, attr):
if self.name is None:
return _Call(name=attr, from_kall=False)
name = '%s.%s' % (self.name, attr)
return _Call(name=name, parent=self, from_kall=False)
def count(self, *args, **kwargs):
return self.__getattr__('count')(*args, **kwargs)
def index(self, *args, **kwargs):
return self.__getattr__('index')(*args, **kwargs)
def __repr__(self):
if not self.from_kall:
name = self.name or 'call'
if name.startswith('()'):
name = 'call%s' % name
return name
if len(self) == 2:
name = 'call'
args, kwargs = self
else:
name, args, kwargs = self
if not name:
name = 'call'
elif not name.startswith('()'):
name = 'call.%s' % name
else:
name = 'call%s' % name
return _format_call_signature(name, args, kwargs)
def call_list(self):
"""For a call object that represents multiple calls, `call_list`
returns a list of all the intermediate calls as well as the
final call."""
vals = []
thing = self
while thing is not None:
if thing.from_kall:
vals.append(thing)
thing = thing.parent
return _CallList(reversed(vals))
call = _Call(from_kall=False)
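# Illustrative usage (an added sketch, not part of the upstream mock source):
# `call` builds objects for comparing with `mock_calls`, and `call_list()`
# expands a chained call into each intermediate call.
def _example_call_usage():
    m = MagicMock()
    m.factory(a=1).build()
    assert m.mock_calls == call.factory(a=1).build().call_list()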
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
_name=None, **kwargs):
"""Create a mock object using another object as a spec. Attributes on the
mock will use the corresponding attribute on the `spec` object as their
spec.
    Functions or methods being mocked will have their arguments checked
    to ensure that they are called with the correct signature.
If `spec_set` is True then attempting to set attributes that don't exist
on the spec object will raise an `AttributeError`.
If a class is used as a spec then the return value of the mock (the
instance of the class) will have the same spec. You can use a class as the
spec for an instance object by passing `instance=True`. The returned mock
will only be callable if instances of the mock are callable.
`create_autospec` also takes arbitrary keyword arguments that are passed to
the constructor of the created mock."""
if _is_list(spec):
# can't pass a list instance to the mock constructor as it will be
# interpreted as a list of strings
spec = type(spec)
is_type = isinstance(spec, ClassTypes)
_kwargs = {'spec': spec}
if spec_set:
_kwargs = {'spec_set': spec}
elif spec is None:
        # if spec is None, mock with a plain mock without a spec
_kwargs = {}
if _kwargs and instance:
_kwargs['_spec_as_instance'] = True
_kwargs.update(kwargs)
Klass = MagicMock
if type(spec) in DescriptorTypes:
# descriptors don't have a spec
# because we don't know what type they return
_kwargs = {}
elif not _callable(spec):
Klass = NonCallableMagicMock
elif is_type and instance and not _instance_callable(spec):
Klass = NonCallableMagicMock
_name = _kwargs.pop('name', _name)
_new_name = _name
if _parent is None:
# for a top level object no _new_name should be set
_new_name = ''
mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
name=_name, **_kwargs)
if isinstance(spec, FunctionTypes):
# should only happen at the top level because we don't
# recurse for functions
mock = _set_signature(mock, spec)
else:
_check_signature(spec, mock, is_type, instance)
if _parent is not None and not instance:
_parent._mock_children[_name] = mock
if is_type and not instance and 'return_value' not in kwargs:
mock.return_value = create_autospec(spec, spec_set, instance=True,
_name='()', _parent=mock)
for entry in dir(spec):
if _is_magic(entry):
# MagicMock already does the useful magic methods for us
continue
# XXXX do we need a better way of getting attributes without
# triggering code execution (?) Probably not - we need the actual
# object to mock it so we would rather trigger a property than mock
# the property descriptor. Likewise we want to mock out dynamically
# provided attributes.
# XXXX what about attributes that raise exceptions other than
# AttributeError on being fetched?
# we could be resilient against it, or catch and propagate the
# exception when the attribute is fetched from the mock
try:
original = getattr(spec, entry)
except AttributeError:
continue
kwargs = {'spec': original}
if spec_set:
kwargs = {'spec_set': original}
if not isinstance(original, FunctionTypes):
new = _SpecState(original, spec_set, mock, entry, instance)
mock._mock_children[entry] = new
else:
parent = mock
if isinstance(spec, FunctionTypes):
parent = mock.mock
skipfirst = _must_skip(spec, entry, is_type)
kwargs['_eat_self'] = skipfirst
new = MagicMock(parent=parent, name=entry, _new_name=entry,
_new_parent=parent,
**kwargs)
mock._mock_children[entry] = new
_check_signature(original, new, skipfirst=skipfirst)
# so functions created with _set_signature become instance attributes,
# *plus* their underlying mock exists in _mock_children of the parent
# mock. Adding to _mock_children may be unnecessary where we are also
# setting as an instance attribute?
if isinstance(new, FunctionTypes):
setattr(mock, entry, new)
return mock
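# Illustrative usage (an added sketch, not part of the upstream mock source):
# an autospecced function enforces the signature of the original.
def _example_create_autospec_usage():
    def add(a, b):
        return a + b
    mock_add = create_autospec(add, return_value=3)
    assert mock_add(1, 2) == 3
    mock_add.assert_called_once_with(1, 2)
    # mock_add(1, 2, 3) would raise a TypeError: too many arguments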
def _must_skip(spec, entry, is_type):
"""
Return whether we should skip the first argument on spec's `entry`
attribute.
"""
if not isinstance(spec, ClassTypes):
if entry in getattr(spec, '__dict__', {}):
# instance attribute - shouldn't skip
return False
spec = spec.__class__
if not hasattr(spec, '__mro__'):
# old style class: can't have descriptors anyway
return is_type
for klass in spec.__mro__:
result = klass.__dict__.get(entry, DEFAULT)
if result is DEFAULT:
continue
if isinstance(result, (staticmethod, classmethod)):
return False
elif isinstance(getattr(result, '__get__', None), MethodWrapperTypes):
# Normal method => skip if looked up on type
# (if looked up on instance, self is already skipped)
return is_type
else:
return False
# shouldn't get here unless function is a dynamically provided attribute
# XXXX untested behaviour
return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# it is possible for objects to have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
FunctionTypes = (
# python function
type(create_autospec),
# instance method
type(ANY.__eq__),
)
MethodWrapperTypes = (
type(ANY.__eq__.__get__),
)
file_spec = None
def _iterate_read_data(read_data):
# Helper for mock_open:
# Retrieve lines from read_data via a generator so that separate calls to
# readline, read, and readlines are properly interleaved
sep = b'\n' if isinstance(read_data, bytes) else '\n'
data_as_list = [l + sep for l in read_data.split(sep)]
if data_as_list[-1] == sep:
# If the last line ended in a newline, the list comprehension will have an
# extra entry that's just a newline. Remove this.
data_as_list = data_as_list[:-1]
else:
        # If there wasn't an extra newline by itself, then the file being
        # emulated doesn't have a newline to end the last line, so remove
        # the newline that our naive line reconstruction above added
data_as_list[-1] = data_as_list[-1][:-1]
for line in data_as_list:
yield line
def mock_open(mock=None, read_data=''):
"""
A helper function to create a mock to replace the use of `open`. It works
for `open` called directly or used as a context manager.
The `mock` argument is the mock object to configure. If `None` (the
default) then a `MagicMock` will be created for you, with the API limited
to methods or attributes available on standard file handles.
    `read_data` is a string for the `read`, `readline`, and `readlines`
    methods of the file handle to return. This is an empty string by default.
"""
def _readlines_side_effect(*args, **kwargs):
if handle.readlines.return_value is not None:
return handle.readlines.return_value
return list(_state[0])
def _read_side_effect(*args, **kwargs):
if handle.read.return_value is not None:
return handle.read.return_value
return type(read_data)().join(_state[0])
def _readline_side_effect():
if handle.readline.return_value is not None:
while True:
yield handle.readline.return_value
for line in _state[0]:
yield line
global file_spec
if file_spec is None:
# set on first use
if six.PY3:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
else:
file_spec = file
if mock is None:
mock = MagicMock(name='open', spec=open)
handle = MagicMock(spec=file_spec)
handle.__enter__.return_value = handle
_state = [_iterate_read_data(read_data), None]
handle.write.return_value = None
handle.read.return_value = None
handle.readline.return_value = None
handle.readlines.return_value = None
handle.read.side_effect = _read_side_effect
_state[1] = _readline_side_effect()
handle.readline.side_effect = _state[1]
handle.readlines.side_effect = _readlines_side_effect
def reset_data(*args, **kwargs):
_state[0] = _iterate_read_data(read_data)
if handle.readline.side_effect == _state[1]:
# Only reset the side effect if the user hasn't overridden it.
_state[1] = _readline_side_effect()
handle.readline.side_effect = _state[1]
return DEFAULT
mock.side_effect = reset_data
mock.return_value = handle
return mock
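# Illustrative usage (an added sketch, not part of the upstream mock source):
# readline() and read() share the same read_data, so they interleave.
def _example_mock_open_usage():
    m = mock_open(read_data='one\ntwo\n')
    handle = m('fake.txt')
    assert handle.readline() == 'one\n'
    assert handle.read() == 'two\n'  # read() returns whatever readline() left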
class PropertyMock(Mock):
"""
A mock intended to be used as a property, or other descriptor, on a class.
`PropertyMock` provides `__get__` and `__set__` methods so you can specify
a return value when it is fetched.
Fetching a `PropertyMock` instance from an object calls the mock, with
no args. Setting it calls the mock with the value being set.
"""
def _get_child_mock(self, **kwargs):
return MagicMock(**kwargs)
def __get__(self, obj, obj_type):
return self()
def __set__(self, obj, val):
self(val)
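# Illustrative usage (an added sketch, not part of the upstream mock source):
# a PropertyMock must live on the class, not the instance.
def _example_property_mock_usage():
    prop = PropertyMock(return_value=42)

    class Example(object):
        value = prop

    obj = Example()
    assert obj.value == 42  # fetching the attribute calls prop()
    obj.value = 7           # setting it calls prop(7)
    prop.assert_called_with(7)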
| catapult-project/catapult | third_party/gsutil/third_party/mock/mock/mock.py | Python | bsd-3-clause | 83,928 | 0.001513 |
import hashlib
from django.conf import settings
from django.core.cache import cache
from django.db.models import Count
from elasticsearch.exceptions import TransportError
from statsd import statsd
from kitsune.products.models import Topic
from kitsune.wiki.models import Document, DocumentMappingType
def topics_for(products, parent=False):
"""Returns a list of topics that apply to passed in products and topics.
:arg products: a list of Product instances
:arg parent: (optional) limit to topics with the given parent
"""
statsd.incr('wiki.facets.topics_for.db')
docs = Document.objects.filter(
locale=settings.WIKI_DEFAULT_LANGUAGE,
is_archived=False,
current_revision__isnull=False,
category__in=settings.IA_DEFAULT_CATEGORIES)
for product in products:
docs = docs.filter(products=product)
for product in products:
qs = Topic.objects.filter(product=product)
qs = (qs.filter(visible=True, document__in=docs)
.annotate(num_docs=Count('document'))
.distinct())
if parent or parent is None:
qs = qs.filter(parent=parent)
return qs
def documents_for(locale, topics=None, products=None):
"""Returns a tuple of lists of articles that apply to topics and products.
The first item in the tuple is the list of articles for the locale
specified. The second item is the list of fallback articles in en-US
that aren't localized to the specified locale. If the specified locale
is en-US, the second item will be None.
:arg locale: the locale
:arg topics: (optional) a list of Topic instances
:arg products: (optional) a list of Product instances
The articles are returned as a list of dicts with the following keys:
id
document_title
url
document_parent_id
"""
documents = _documents_for(locale, topics, products)
# For locales that aren't en-US, get the en-US documents
# to fill in for untranslated articles.
if locale != settings.WIKI_DEFAULT_LANGUAGE:
l10n_document_ids = [d['document_parent_id'] for d in documents if
'document_parent_id' in d]
en_documents = _documents_for(
locale=settings.WIKI_DEFAULT_LANGUAGE,
products=products,
topics=topics)
fallback_documents = [d for d in en_documents if
d['id'] not in l10n_document_ids]
else:
fallback_documents = None
return documents, fallback_documents
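# Illustrative usage sketch (not part of the original module); `topic` and
# `product` are assumed to be Topic/Product instances fetched elsewhere:
#
#     docs, fallbacks = documents_for('de', topics=[topic], products=[product])
#     titles = [d['document_title'] for d in docs]
#     # `fallbacks` holds en-US articles not yet localized to 'de'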
def _documents_for(locale, topics=None, products=None):
"""Returns a list of articles that apply to passed in topics and products.
"""
# First try to get the results from the cache
documents = cache.get(_documents_for_cache_key(
locale, topics, products))
if documents:
statsd.incr('wiki.facets.documents_for.cache')
return documents
try:
# Then try ES
documents = _es_documents_for(locale, topics, products)
cache.add(
_documents_for_cache_key(locale, topics, products),
documents)
statsd.incr('wiki.facets.documents_for.es')
except TransportError:
# Finally, hit the database (through cache machine)
# NOTE: The documents will be the same ones returned by ES
# but they won't be in the correct sort (by votes in the last
# 30 days). It is better to return them in the wrong order
# than not to return them at all.
documents = _db_documents_for(locale, topics, products)
statsd.incr('wiki.facets.documents_for.db')
return documents
def _es_documents_for(locale, topics=None, products=None):
"""ES implementation of documents_for."""
s = (DocumentMappingType.search()
.values_dict('id', 'document_title', 'url', 'document_parent_id',
'document_summary')
.filter(document_locale=locale, document_is_archived=False,
document_category__in=settings.IA_DEFAULT_CATEGORIES))
for topic in topics or []:
s = s.filter(topic=topic.slug)
for product in products or []:
s = s.filter(product=product.slug)
return list(s.order_by('-document_recent_helpful_votes')[:100])
def _db_documents_for(locale, topics=None, products=None):
"""DB implementation of documents_for."""
qs = Document.objects.filter(
locale=locale,
is_archived=False,
current_revision__isnull=False,
category__in=settings.IA_DEFAULT_CATEGORIES)
for topic in topics or []:
qs = qs.filter(topics=topic)
for product in products or []:
qs = qs.filter(products=product)
# Convert the results to a dicts to look like the ES results.
doc_dicts = []
for d in qs.distinct():
doc_dicts.append(dict(
id=d.id,
document_title=d.title,
url=d.get_absolute_url(),
document_parent_id=d.parent_id,
document_summary=d.current_revision.summary))
return doc_dicts
def _documents_for_cache_key(locale, topics, products):
m = hashlib.md5()
key = '{locale}:{topics}:{products}:new'.format(
locale=locale,
topics=','.join(sorted([t.slug for t in topics or []])),
products=','.join(sorted([p.slug for p in products or []])))
m.update(key)
return 'documents_for:%s' % m.hexdigest()
| dbbhattacharya/kitsune | kitsune/wiki/facets.py | Python | bsd-3-clause | 5,435 | 0 |
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
from .menu import ProposalsMenu
class ProposalsApp(CMSApp):
name = _("Proposals app")
urls = ["pyconde.proposals.urls"]
menus = [ProposalsMenu]
apphook_pool.register(ProposalsApp)
| EuroPython/djep | pyconde/proposals/cms_app.py | Python | bsd-3-clause | 327 | 0.003058 |
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2019 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from __future__ import absolute_import
from fife import fifechan
from fife.extensions.pychan.attrs import IntAttr, FloatAttr
from .widget import Widget
class Slider(Widget):
""" A slider widget
Use a callback to read out the slider value every time the marker
is moved.
New Attributes
==============
    - orientation: 1 = horizontal, 0 = vertical
- scale_start: float: default 0.0
- scale_end: float: default 1.0
- step_length: float: default scale_end/10
- marker_length: int: default 10
FIXME:
- update docstrings
"""
HORIZONTAL = fifechan.Slider.Horizontal
VERTICAL = fifechan.Slider.Vertical
ATTRIBUTES = Widget.ATTRIBUTES + [ IntAttr('orientation'),
FloatAttr('scale_start'),
FloatAttr('scale_end'),
FloatAttr('step_length'),
IntAttr('marker_length')
]
DEFAULT_HEXPAND = True
DEFAULT_VEXPAND = False
DEFAULT_SIZE = 10,10
DEFAULT_MIN_SIZE = 10,10
DEFAULT_SCALE_START = 0.0
DEFAULT_SCALE_END = 1.0
DEFAULT_STEP_LENGTH = 0.1
DEFAULT_MARKER_LENGTH = 10
DEFAULT_ORIENTATION = HORIZONTAL
def __init__(self,
parent = None,
name = None,
size = None,
min_size = None,
max_size = None,
fixed_size = None,
margins = None,
padding = None,
helptext = None,
position = None,
style = None,
hexpand = None,
vexpand = None,
font = None,
base_color = None,
background_color = None,
foreground_color = None,
selection_color = None,
border_color = None,
outline_color = None,
border_size = None,
outline_size = None,
position_technique = None,
is_focusable = None,
comment = None,
scale_start = None,
scale_end = None,
step_length = None,
marker_length = None,
orientation = None):
self.real_widget = fifechan.Slider(scale_start or self.DEFAULT_SCALE_START, scale_end or self.DEFAULT_SCALE_END)
self.orientation = self.DEFAULT_ORIENTATION
self.step_length = self.DEFAULT_STEP_LENGTH
self.marker_length = self.DEFAULT_MARKER_LENGTH
super(Slider, self).__init__(parent=parent,
name=name,
size=size,
min_size=min_size,
max_size=max_size,
fixed_size=fixed_size,
margins=margins,
padding=padding,
helptext=helptext,
position=position,
style=style,
hexpand=hexpand,
vexpand=vexpand,
font=font,
base_color=base_color,
background_color=background_color,
foreground_color=foreground_color,
selection_color=selection_color,
border_color=border_color,
outline_color=outline_color,
border_size=border_size,
outline_size=outline_size,
position_technique=position_technique,
is_focusable=is_focusable,
comment=comment)
if orientation is not None: self.orientation = orientation
if scale_start is not None: self.scale_start = scale_start
if scale_end is not None: self.scale_end = scale_end
if step_length is not None: self.step_length = step_length
if marker_length is not None: self.marker_length = marker_length
self.accepts_data = True
self._realSetData = self._setValue
self._realGetData = self._getValue
def clone(self, prefix):
sliderClone = Slider(None,
self._createNameWithPrefix(prefix),
self.size,
self.min_size,
self.max_size,
self.fixed_size,
self.margins,
self.padding,
self.helptext,
self.position,
self.style,
self.hexpand,
self.vexpand,
self.font,
self.base_color,
self.background_color,
self.foreground_color,
self.selection_color,
self.border_color,
self.outline_color,
self.border_size,
self.outline_size,
self.position_technique,
self.is_focusable,
self.comment,
self.scale_start,
self.scale_end,
self.step_length,
self.marker_length,
self.orientation)
return sliderClone
def _setScale(self, start, end):
"""setScale(self, double scaleStart, double scaleEnd)"""
if type(start) != float:
raise RuntimeError("Slider expects float for start scale")
if type(end) != float:
raise RuntimeError("Slider expects float for end scale")
self.real_widget.setScale(start, end)
def _getScaleStart(self):
"""getScaleStart(self) -> double"""
return self.real_widget.getScaleStart()
def _setScaleStart(self, start):
"""setScaleStart(self, double scaleStart)"""
if type(start) != float:
raise RuntimeError("Slider expects float for start scale")
self.real_widget.setScaleStart(start)
scale_start = property(_getScaleStart, _setScaleStart)
def _getScaleEnd(self):
"""getScaleEnd(self) -> double"""
return self.real_widget.getScaleEnd()
def _setScaleEnd(self, end):
"""setScaleEnd(self, double scaleEnd)"""
if type(end) != float:
raise RuntimeError("Slider expects float for end scale")
self.real_widget.setScaleEnd(end)
scale_end = property(_getScaleEnd, _setScaleEnd)
def _getValue(self):
"""getValue(self) -> double"""
return self.real_widget.getValue()
def _setValue(self, value):
"""setValue(self, double value)"""
if type(value) != float:
raise RuntimeError("Slider only accepts float values")
self.real_widget.setValue(value)
value = property(_getValue, _setValue)
def _setMarkerLength(self, length):
"""setMarkerLength(self, int length)"""
if type(length) != int:
raise RuntimeError("Slider only accepts int for Marker length")
self.real_widget.setMarkerLength(length)
def _getMarkerLength(self):
"""getMarkerLength(self) -> int"""
return self.real_widget.getMarkerLength()
marker_length = property(_getMarkerLength, _setMarkerLength)
def _setOrientation(self, orientation):
"""setOrientation(self, Orientation orientation)"""
self.real_widget.setOrientation(orientation)
def _getOrientation(self):
"""getOrientation(self) -> int"""
return self.real_widget.getOrientation()
orientation = property(_getOrientation, _setOrientation)
def _setStepLength(self, length):
"""setStepLength(self, double length)"""
if type(length) != float:
raise RuntimeError("Slider only accepts floats for step length")
self.real_widget.setStepLength(length)
def _getStepLength(self):
"""getStepLength(self) -> double"""
return self.real_widget.getStepLength()
step_length = property(_getStepLength, _setStepLength)
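# Illustrative usage sketch (not part of the original module); assumes a
# pychan GUI is initialized and that the standard pychan `capture` event
# hook is available on widgets:
#
#     slider = Slider(scale_start=0.0, scale_end=100.0, step_length=5.0)
#     def _on_slide():
#         print(slider.value)
#     slider.capture(_on_slide)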
| fifengine/fifengine | engine/python/fife/extensions/pychan/widgets/slider.py | Python | lgpl-2.1 | 7,472 | 0.054738 |
class Scheduler(object):
"""Define a domain."""
def __init__(self, matches, problem):
""".
PARAMETERS TYPE Potential Arguments
-----------------------------------------------
"""
self.matches = matches
schedule = []
self.allSchedules = []
for result in problem.getSolutions():
for k in result.keys():
course = k
local = result[k]
schedule.append((course, local))
self.allSchedules.append(schedule.pop())
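# Illustrative usage sketch (not part of the original module); assumes the
# third-party python-constraint package, whose Problem exposes getSolutions():
#
#     from constraint import Problem
#     problem = Problem()
#     problem.addVariable("math", ["room-a", "room-b"])
#     problem.addVariable("physics", ["room-a", "room-b"])
#     problem.addConstraint(lambda a, b: a != b, ("math", "physics"))
#     print(Scheduler(matches=[], problem=problem).allSchedules)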
| renatorangel/scheduler | src/structures/scheduler.py | Python | apache-2.0 | 566 | 0 |
# Copyright (C) 2013-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class TestResult(object):
"""Base class to record and report test results.
Method record is to record the results of test case, and report
method is to report the recorded results by a given reporter.
"""
def record(self, parameter, result):
raise NotImplementedError("Abstract Method:record.")
def report(self, reporter, name):
"""Report the test results by reporter."""
raise NotImplementedError("Abstract Method:report.")
class SingleStatisticTestResult(TestResult):
"""Test results for the test case with a single statistic."""
def __init__(self):
super (SingleStatisticTestResult, self).__init__ ()
self.results = dict ()
def record(self, parameter, result):
if parameter in self.results:
self.results[parameter].append(result)
else:
self.results[parameter] = [result]
def report(self, reporter, name):
reporter.start()
for key in sorted(self.results.iterkeys()):
reporter.report(name, key, self.results[key])
reporter.end()
class ResultFactory(object):
"""A factory to create an instance of TestResult."""
def create_result(self):
"""Create an instance of TestResult."""
raise NotImplementedError("Abstract Method:create_result.")
class SingleStatisticResultFactory(ResultFactory):
"""A factory to create an instance of SingleStatisticTestResult."""
def create_result(self):
return SingleStatisticTestResult()
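# Minimal usage sketch (illustrative, not part of the original file);
# `my_reporter` is a hypothetical object providing start/report/end methods:
#
#     result = SingleStatisticResultFactory().create_result()
#     result.record("backtrace", 0.42)
#     result.record("backtrace", 0.40)
#     result.report(my_reporter, "perf.backtrace")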
| cupertinomiranda/binutils | gdb/testsuite/gdb.perf/lib/perftest/testresult.py | Python | gpl-2.0 | 2,216 | 0.002708 |
import os
PACKAGE_NAME = 'pyflux'
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(PACKAGE_NAME, parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('arma')
config.add_subpackage('ensembles')
config.add_subpackage('families')
config.add_subpackage('garch')
config.add_subpackage('gas')
config.add_subpackage('gpnarx')
config.add_subpackage('inference')
config.add_subpackage('output')
config.add_subpackage('ssm')
config.add_subpackage('tests')
config.add_subpackage('var')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
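# Typical invocations (illustrative): numpy.distutils supports the standard
# distutils commands, e.g. building extension modules in place or installing:
#
#     python setup.py build_ext --inplace
#     python setup.py install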
| RJT1990/pyflux | pyflux/setup.py | Python | bsd-3-clause | 792 | 0.002525 |
"""Tests covering utilities for integrating with the catalog service."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from collections import defaultdict
from datetime import timedelta
import mock
import six
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils.timezone import now
from opaque_keys.edx.keys import CourseKey
from course_modes.helpers import CourseMode
from course_modes.tests.factories import CourseModeFactory
from entitlements.tests.factories import CourseEntitlementFactory
from openedx.core.constants import COURSE_UNPUBLISHED
from openedx.core.djangoapps.catalog.cache import (
COURSE_PROGRAMS_CACHE_KEY_TPL,
PATHWAY_CACHE_KEY_TPL,
PROGRAM_CACHE_KEY_TPL,
PROGRAMS_BY_TYPE_CACHE_KEY_TPL,
SITE_PATHWAY_IDS_CACHE_KEY_TPL,
SITE_PROGRAM_UUIDS_CACHE_KEY_TPL
)
from openedx.core.djangoapps.catalog.models import CatalogIntegration
from openedx.core.djangoapps.catalog.tests.factories import (
CourseFactory,
CourseRunFactory,
PathwayFactory,
ProgramFactory,
ProgramTypeFactory
)
from openedx.core.djangoapps.catalog.tests.mixins import CatalogIntegrationMixin
from openedx.core.djangoapps.catalog.utils import (
child_programs,
course_run_keys_for_program,
is_course_run_in_program,
get_course_run_details,
get_course_runs,
get_course_runs_for_course,
get_currency_data,
get_localized_price_text,
get_owners_for_course,
get_pathways,
get_program_types,
get_programs,
get_programs_by_type,
get_visible_sessions_for_entitlement,
normalize_program_type,
)
from openedx.core.djangoapps.content.course_overviews.tests.factories import CourseOverviewFactory
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from student.tests.factories import CourseEnrollmentFactory, UserFactory
UTILS_MODULE = 'openedx.core.djangoapps.catalog.utils'
User = get_user_model() # pylint: disable=invalid-name
@skip_unless_lms
@mock.patch(UTILS_MODULE + '.logger.info')
@mock.patch(UTILS_MODULE + '.logger.warning')
class TestGetPrograms(CacheIsolationTestCase):
ENABLED_CACHES = ['default']
def setUp(self):
super(TestGetPrograms, self).setUp()
self.site = SiteFactory()
def test_get_many(self, mock_warning, mock_info):
programs = ProgramFactory.create_batch(3)
# Cache details for 2 of 3 programs.
partial_programs = {
PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program for program in programs[:2]
}
cache.set_many(partial_programs, None)
# When called before UUIDs are cached, the function should return an
# empty list and log a warning.
self.assertEqual(get_programs(site=self.site), [])
mock_warning.assert_called_once_with(
u'Failed to get program UUIDs from the cache for site {}.'.format(self.site.domain)
)
mock_warning.reset_mock()
# Cache UUIDs for all 3 programs.
cache.set(
SITE_PROGRAM_UUIDS_CACHE_KEY_TPL.format(domain=self.site.domain),
[program['uuid'] for program in programs],
None
)
actual_programs = get_programs(site=self.site)
# The 2 cached programs should be returned while info and warning
# messages should be logged for the missing one.
self.assertEqual(
set(program['uuid'] for program in actual_programs),
set(program['uuid'] for program in partial_programs.values())
)
mock_info.assert_called_with('Failed to get details for 1 programs. Retrying.')
mock_warning.assert_called_with(
u'Failed to get details for program {uuid} from the cache.'.format(uuid=programs[2]['uuid'])
)
mock_warning.reset_mock()
# We can't use a set comparison here because these values are dictionaries
# and aren't hashable. We've already verified that all programs came out
# of the cache above, so all we need to do here is verify the accuracy of
# the data itself.
for program in actual_programs:
key = PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid'])
self.assertEqual(program, partial_programs[key])
# Cache details for all 3 programs.
all_programs = {
PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program for program in programs
}
cache.set_many(all_programs, None)
actual_programs = get_programs(site=self.site)
# All 3 programs should be returned.
self.assertEqual(
set(program['uuid'] for program in actual_programs),
set(program['uuid'] for program in all_programs.values())
)
self.assertFalse(mock_warning.called)
for program in actual_programs:
key = PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid'])
self.assertEqual(program, all_programs[key])
@mock.patch(UTILS_MODULE + '.cache')
def test_get_many_with_missing(self, mock_cache, mock_warning, mock_info):
programs = ProgramFactory.create_batch(3)
all_programs = {
PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program for program in programs
}
partial_programs = {
PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program for program in programs[:2]
}
def fake_get_many(keys):
if len(keys) == 1:
return {PROGRAM_CACHE_KEY_TPL.format(uuid=programs[-1]['uuid']): programs[-1]}
else:
return partial_programs
mock_cache.get.return_value = [program['uuid'] for program in programs]
mock_cache.get_many.side_effect = fake_get_many
actual_programs = get_programs(site=self.site)
# All 3 cached programs should be returned. An info message should be
# logged about the one that was initially missing, but the code should
# be able to stitch together all the details.
self.assertEqual(
set(program['uuid'] for program in actual_programs),
set(program['uuid'] for program in all_programs.values())
)
self.assertFalse(mock_warning.called)
mock_info.assert_called_with('Failed to get details for 1 programs. Retrying.')
for program in actual_programs:
key = PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid'])
self.assertEqual(program, all_programs[key])
def test_get_one(self, mock_warning, _mock_info):
expected_program = ProgramFactory()
expected_uuid = expected_program['uuid']
self.assertEqual(get_programs(uuid=expected_uuid), None)
mock_warning.assert_called_once_with(
u'Failed to get details for program {uuid} from the cache.'.format(uuid=expected_uuid)
)
mock_warning.reset_mock()
cache.set(
PROGRAM_CACHE_KEY_TPL.format(uuid=expected_uuid),
expected_program,
None
)
actual_program = get_programs(uuid=expected_uuid)
self.assertEqual(actual_program, expected_program)
self.assertFalse(mock_warning.called)
def test_get_from_course(self, mock_warning, _mock_info):
expected_program = ProgramFactory()
expected_course = expected_program['courses'][0]['course_runs'][0]['key']
self.assertEqual(get_programs(course=expected_course), [])
cache.set(
COURSE_PROGRAMS_CACHE_KEY_TPL.format(course_run_id=expected_course),
[expected_program['uuid']],
None
)
cache.set(
PROGRAM_CACHE_KEY_TPL.format(uuid=expected_program['uuid']),
expected_program,
None
)
actual_program = get_programs(course=expected_course)
self.assertEqual(actual_program, [expected_program])
self.assertFalse(mock_warning.called)
def test_get_via_uuids(self, mock_warning, _mock_info):
first_program = ProgramFactory()
second_program = ProgramFactory()
cache.set(
PROGRAM_CACHE_KEY_TPL.format(uuid=first_program['uuid']),
first_program,
None
)
cache.set(
PROGRAM_CACHE_KEY_TPL.format(uuid=second_program['uuid']),
second_program,
None
)
results = get_programs(uuids=[first_program['uuid'], second_program['uuid']])
assert first_program in results
assert second_program in results
assert not mock_warning.called
@skip_unless_lms
@mock.patch(UTILS_MODULE + '.logger.info')
@mock.patch(UTILS_MODULE + '.logger.warning')
class TestGetPathways(CacheIsolationTestCase):
ENABLED_CACHES = ['default']
def setUp(self):
super(TestGetPathways, self).setUp()
self.site = SiteFactory()
def test_get_many(self, mock_warning, mock_info):
pathways = PathwayFactory.create_batch(3)
# Cache details for 2 of 3 programs.
partial_pathways = {
PATHWAY_CACHE_KEY_TPL.format(id=pathway['id']): pathway for pathway in pathways[:2]
}
cache.set_many(partial_pathways, None)
# When called before pathways are cached, the function should return an
# empty list and log a warning.
self.assertEqual(get_pathways(self.site), [])
mock_warning.assert_called_once_with('Failed to get credit pathway ids from the cache.')
mock_warning.reset_mock()
# Cache all 3 pathways
cache.set(
SITE_PATHWAY_IDS_CACHE_KEY_TPL.format(domain=self.site.domain),
[pathway['id'] for pathway in pathways],
None
)
actual_pathways = get_pathways(self.site)
# The 2 cached pathways should be returned while info and warning
# messages should be logged for the missing one.
self.assertEqual(
set(pathway['id'] for pathway in actual_pathways),
set(pathway['id'] for pathway in partial_pathways.values())
)
mock_info.assert_called_with('Failed to get details for 1 pathways. Retrying.')
mock_warning.assert_called_with(
u'Failed to get details for credit pathway {id} from the cache.'.format(id=pathways[2]['id'])
)
mock_warning.reset_mock()
# We can't use a set comparison here because these values are dictionaries
# and aren't hashable. We've already verified that all pathways came out
# of the cache above, so all we need to do here is verify the accuracy of
# the data itself.
for pathway in actual_pathways:
key = PATHWAY_CACHE_KEY_TPL.format(id=pathway['id'])
self.assertEqual(pathway, partial_pathways[key])
# Cache details for all 3 pathways.
all_pathways = {
PATHWAY_CACHE_KEY_TPL.format(id=pathway['id']): pathway for pathway in pathways
}
cache.set_many(all_pathways, None)
actual_pathways = get_pathways(self.site)
# All 3 pathways should be returned.
self.assertEqual(
set(pathway['id'] for pathway in actual_pathways),
set(pathway['id'] for pathway in all_pathways.values())
)
self.assertFalse(mock_warning.called)
for pathway in actual_pathways:
key = PATHWAY_CACHE_KEY_TPL.format(id=pathway['id'])
self.assertEqual(pathway, all_pathways[key])
@mock.patch(UTILS_MODULE + '.cache')
def test_get_many_with_missing(self, mock_cache, mock_warning, mock_info):
pathways = PathwayFactory.create_batch(3)
all_pathways = {
PATHWAY_CACHE_KEY_TPL.format(id=pathway['id']): pathway for pathway in pathways
}
partial_pathways = {
PATHWAY_CACHE_KEY_TPL.format(id=pathway['id']): pathway for pathway in pathways[:2]
}
def fake_get_many(keys):
if len(keys) == 1:
return {PATHWAY_CACHE_KEY_TPL.format(id=pathways[-1]['id']): pathways[-1]}
else:
return partial_pathways
mock_cache.get.return_value = [pathway['id'] for pathway in pathways]
mock_cache.get_many.side_effect = fake_get_many
actual_pathways = get_pathways(self.site)
# All 3 cached pathways should be returned. An info message should be
# logged about the one that was initially missing, but the code should
# be able to stitch together all the details.
self.assertEqual(
set(pathway['id'] for pathway in actual_pathways),
set(pathway['id'] for pathway in all_pathways.values())
)
self.assertFalse(mock_warning.called)
mock_info.assert_called_with('Failed to get details for 1 pathways. Retrying.')
for pathway in actual_pathways:
key = PATHWAY_CACHE_KEY_TPL.format(id=pathway['id'])
self.assertEqual(pathway, all_pathways[key])
def test_get_one(self, mock_warning, _mock_info):
expected_pathway = PathwayFactory()
expected_id = expected_pathway['id']
self.assertEqual(get_pathways(self.site, pathway_id=expected_id), None)
mock_warning.assert_called_once_with(
u'Failed to get details for credit pathway {id} from the cache.'.format(id=expected_id)
)
mock_warning.reset_mock()
cache.set(
PATHWAY_CACHE_KEY_TPL.format(id=expected_id),
expected_pathway,
None
)
actual_pathway = get_pathways(self.site, pathway_id=expected_id)
self.assertEqual(actual_pathway, expected_pathway)
self.assertFalse(mock_warning.called)
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
class TestGetProgramTypes(CatalogIntegrationMixin, TestCase):
"""Tests covering retrieval of program types from the catalog service."""
@override_settings(COURSE_CATALOG_API_URL='https://api.example.com/v1/')
def test_get_program_types(self, mock_get_edx_api_data):
"""Verify get_program_types returns the expected list of program types."""
program_types = ProgramTypeFactory.create_batch(3)
mock_get_edx_api_data.return_value = program_types
# Catalog integration is disabled.
data = get_program_types()
self.assertEqual(data, [])
catalog_integration = self.create_catalog_integration()
UserFactory(username=catalog_integration.service_username)
data = get_program_types()
self.assertEqual(data, program_types)
program = program_types[0]
data = get_program_types(name=program['name'])
self.assertEqual(data, program)
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
class TestGetCurrency(CatalogIntegrationMixin, TestCase):
"""Tests covering retrieval of currency data from the catalog service."""
@override_settings(COURSE_CATALOG_API_URL='https://api.example.com/v1/')
def test_get_currency_data(self, mock_get_edx_api_data):
"""Verify get_currency_data returns the currency data."""
currency_data = {
"code": "CAD",
"rate": 1.257237,
"symbol": "$"
}
mock_get_edx_api_data.return_value = currency_data
# Catalog integration is disabled.
data = get_currency_data()
self.assertEqual(data, [])
catalog_integration = self.create_catalog_integration()
UserFactory(username=catalog_integration.service_username)
data = get_currency_data()
self.assertEqual(data, currency_data)
@mock.patch(UTILS_MODULE + '.get_currency_data')
class TestGetLocalizedPriceText(TestCase):
"""
Tests covering converting prices to a localized currency
"""
def test_localized_string(self, mock_get_currency_data):
currency_data = {
"BEL": {"rate": 0.835621, "code": "EUR", "symbol": u"\u20ac"},
"GBR": {"rate": 0.737822, "code": "GBP", "symbol": u"\u00a3"},
"CAN": {"rate": 2, "code": "CAD", "symbol": "$"},
}
mock_get_currency_data.return_value = currency_data
request = RequestFactory().get('/dummy-url')
request.session = {
'country_code': 'CA'
}
expected_result = '$20 CAD'
self.assertEqual(get_localized_price_text(10, request), expected_result)
@skip_unless_lms
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
class TestGetCourseRuns(CatalogIntegrationMixin, TestCase):
"""
Tests covering retrieval of course runs from the catalog service.
"""
def setUp(self):
super(TestGetCourseRuns, self).setUp()
self.catalog_integration = self.create_catalog_integration(cache_ttl=1)
self.user = UserFactory(username=self.catalog_integration.service_username)
def assert_contract(self, call_args):
"""
Verify that API data retrieval utility is used correctly.
"""
args, kwargs = call_args
for arg in (self.catalog_integration, 'course_runs'):
self.assertIn(arg, args)
self.assertEqual(kwargs['api']._store['base_url'], self.catalog_integration.get_internal_api_url()) # pylint: disable=protected-access
querystring = {
'page_size': 20,
'exclude_utm': 1,
}
self.assertEqual(kwargs['querystring'], querystring)
return args, kwargs
def test_config_missing(self, mock_get_edx_api_data):
"""
Verify that no errors occur when catalog config is missing.
"""
CatalogIntegration.objects.all().delete()
data = get_course_runs()
self.assertFalse(mock_get_edx_api_data.called)
self.assertEqual(data, [])
@mock.patch(UTILS_MODULE + '.logger.error')
def test_service_user_missing(self, mock_log_error, mock_get_edx_api_data):
"""
Verify that no errors occur when the catalog service user is missing.
"""
catalog_integration = self.create_catalog_integration(service_username='nonexistent-user')
data = get_course_runs()
mock_log_error.any_call(
u'Catalog service user with username [%s] does not exist. Course runs will not be retrieved.',
catalog_integration.service_username,
)
self.assertFalse(mock_get_edx_api_data.called)
self.assertEqual(data, [])
def test_get_course_runs(self, mock_get_edx_api_data):
"""
Test retrieval of course runs.
"""
catalog_course_runs = CourseRunFactory.create_batch(10)
mock_get_edx_api_data.return_value = catalog_course_runs
data = get_course_runs()
self.assertTrue(mock_get_edx_api_data.called)
self.assert_contract(mock_get_edx_api_data.call_args)
self.assertEqual(data, catalog_course_runs)
def test_get_course_runs_by_course(self, mock_get_edx_api_data):
"""
        Test retrieval of course runs for a Course.
"""
catalog_course_runs = CourseRunFactory.create_batch(10)
catalog_course = CourseFactory(course_runs=catalog_course_runs)
mock_get_edx_api_data.return_value = catalog_course
data = get_course_runs_for_course(course_uuid=str(catalog_course['uuid']))
self.assertTrue(mock_get_edx_api_data.called)
self.assertEqual(data, catalog_course_runs)
@skip_unless_lms
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
class TestGetCourseOwners(CatalogIntegrationMixin, TestCase):
"""
Tests covering retrieval of course runs from the catalog service.
"""
def setUp(self):
super(TestGetCourseOwners, self).setUp()
self.catalog_integration = self.create_catalog_integration(cache_ttl=1)
self.user = UserFactory(username=self.catalog_integration.service_username)
def test_get_course_owners_by_course(self, mock_get_edx_api_data):
"""
Test retrieval of course runs.
"""
catalog_course_runs = CourseRunFactory.create_batch(10)
catalog_course = CourseFactory(course_runs=catalog_course_runs)
mock_get_edx_api_data.return_value = catalog_course
data = get_owners_for_course(course_uuid=str(catalog_course['uuid']))
self.assertTrue(mock_get_edx_api_data.called)
self.assertEqual(data, catalog_course['owners'])
@skip_unless_lms
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
class TestSessionEntitlement(CatalogIntegrationMixin, TestCase):
"""
Test Covering data related Entitlements.
"""
def setUp(self):
super(TestSessionEntitlement, self).setUp()
self.catalog_integration = self.create_catalog_integration(cache_ttl=1)
self.user = UserFactory(username=self.catalog_integration.service_username)
self.tomorrow = now() + timedelta(days=1)
def test_get_visible_sessions_for_entitlement(self, mock_get_edx_api_data):
"""
Test retrieval of visible session entitlements.
"""
catalog_course_run = CourseRunFactory.create()
catalog_course = CourseFactory(course_runs=[catalog_course_run])
mock_get_edx_api_data.return_value = catalog_course
course_key = CourseKey.from_string(catalog_course_run.get('key'))
course_overview = CourseOverviewFactory.create(id=course_key, start=self.tomorrow)
CourseModeFactory.create(mode_slug=CourseMode.VERIFIED, min_price=100, course_id=course_overview.id)
course_enrollment = CourseEnrollmentFactory(
user=self.user, course_id=six.text_type(course_overview.id), mode=CourseMode.VERIFIED
)
entitlement = CourseEntitlementFactory(
user=self.user, enrollment_course_run=course_enrollment, mode=CourseMode.VERIFIED
)
session_entitlements = get_visible_sessions_for_entitlement(entitlement)
self.assertEqual(session_entitlements, [catalog_course_run])
def test_get_visible_sessions_for_entitlement_expired_mode(self, mock_get_edx_api_data):
"""
Test retrieval of visible session entitlements.
"""
catalog_course_run = CourseRunFactory.create()
catalog_course = CourseFactory(course_runs=[catalog_course_run])
mock_get_edx_api_data.return_value = catalog_course
course_key = CourseKey.from_string(catalog_course_run.get('key'))
course_overview = CourseOverviewFactory.create(id=course_key, start=self.tomorrow)
CourseModeFactory.create(
mode_slug=CourseMode.VERIFIED,
min_price=100,
course_id=course_overview.id,
expiration_datetime=now() - timedelta(days=1)
)
course_enrollment = CourseEnrollmentFactory(
user=self.user, course_id=six.text_type(course_overview.id), mode=CourseMode.VERIFIED
)
entitlement = CourseEntitlementFactory(
user=self.user, enrollment_course_run=course_enrollment, mode=CourseMode.VERIFIED
)
session_entitlements = get_visible_sessions_for_entitlement(entitlement)
self.assertEqual(session_entitlements, [catalog_course_run])
def test_unpublished_sessions_for_entitlement_when_enrolled(self, mock_get_edx_api_data):
"""
Test unpublished course runs are part of visible session entitlements when the user
is enrolled.
"""
catalog_course_run = CourseRunFactory.create(status=COURSE_UNPUBLISHED)
catalog_course = CourseFactory(course_runs=[catalog_course_run])
mock_get_edx_api_data.return_value = catalog_course
course_key = CourseKey.from_string(catalog_course_run.get('key'))
course_overview = CourseOverviewFactory.create(id=course_key, start=self.tomorrow)
CourseModeFactory.create(
mode_slug=CourseMode.VERIFIED,
min_price=100,
course_id=course_overview.id,
expiration_datetime=now() - timedelta(days=1)
)
course_enrollment = CourseEnrollmentFactory(
user=self.user, course_id=six.text_type(course_overview.id), mode=CourseMode.VERIFIED
)
entitlement = CourseEntitlementFactory(
user=self.user, enrollment_course_run=course_enrollment, mode=CourseMode.VERIFIED
)
session_entitlements = get_visible_sessions_for_entitlement(entitlement)
self.assertEqual(session_entitlements, [catalog_course_run])
def test_unpublished_sessions_for_entitlement(self, mock_get_edx_api_data):
"""
Test unpublished course runs are not part of visible session entitlements when the user
is not enrolled.
"""
catalog_course_run = CourseRunFactory.create(status=COURSE_UNPUBLISHED)
catalog_course = CourseFactory(course_runs=[catalog_course_run])
mock_get_edx_api_data.return_value = catalog_course
course_key = CourseKey.from_string(catalog_course_run.get('key'))
course_overview = CourseOverviewFactory.create(id=course_key, start=self.tomorrow)
CourseModeFactory.create(mode_slug=CourseMode.VERIFIED, min_price=100, course_id=course_overview.id)
entitlement = CourseEntitlementFactory(
user=self.user, mode=CourseMode.VERIFIED
)
session_entitlements = get_visible_sessions_for_entitlement(entitlement)
self.assertEqual(session_entitlements, [])
@skip_unless_lms
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
class TestGetCourseRunDetails(CatalogIntegrationMixin, TestCase):
"""
Tests covering retrieval of information about a specific course run from the catalog service.
"""
def setUp(self):
super(TestGetCourseRunDetails, self).setUp()
self.catalog_integration = self.create_catalog_integration(cache_ttl=1)
self.user = UserFactory(username=self.catalog_integration.service_username)
def test_get_course_run_details(self, mock_get_edx_api_data):
"""
Test retrieval of details about a specific course run
"""
course_run = CourseRunFactory()
course_run_details = {
'content_language': course_run['content_language'],
'weeks_to_complete': course_run['weeks_to_complete'],
'max_effort': course_run['max_effort']
}
mock_get_edx_api_data.return_value = course_run_details
data = get_course_run_details(course_run['key'], ['content_language', 'weeks_to_complete', 'max_effort'])
self.assertTrue(mock_get_edx_api_data.called)
self.assertEqual(data, course_run_details)
class TestProgramCourseRunCrawling(TestCase):
@classmethod
def setUpClass(cls):
super(TestProgramCourseRunCrawling, cls).setUpClass()
cls.grandchild_1 = {
'title': 'grandchild 1',
'curricula': [{'is_active': True, 'courses': [], 'programs': []}],
}
cls.grandchild_2 = {
'title': 'grandchild 2',
'curricula': [
{
'is_active': True,
'courses': [{
'course_runs': [
{'key': 'course-run-4'},
],
}],
'programs': [],
},
],
}
cls.grandchild_3 = {
'title': 'grandchild 3',
'curricula': [{'is_active': False}],
}
cls.child_1 = {
'title': 'child 1',
'curricula': [{'is_active': True, 'courses': [], 'programs': [cls.grandchild_1]}],
}
cls.child_2 = {
'title': 'child 2',
'curricula': [
{
'is_active': True,
'courses': [{
'course_runs': [
{'key': 'course-run-3'},
],
}],
'programs': [cls.grandchild_2, cls.grandchild_3],
},
],
}
cls.complex_program = {
'title': 'complex program',
'curricula': [
{
'is_active': True,
'courses': [{
'course_runs': [
{'key': 'course-run-2'},
],
}],
'programs': [cls.child_1, cls.child_2],
},
],
}
cls.simple_program = {
'title': 'simple program',
'curricula': [
{
'is_active': True,
'courses': [{
'course_runs': [
{'key': 'course-run-1'},
],
}],
'programs': [cls.grandchild_1]
},
],
}
cls.empty_program = {
'title': 'notice that I have a curriculum, but no programs inside it',
'curricula': [
{
'is_active': True,
'courses': [],
'programs': [],
},
],
}
def test_child_programs_no_curriculum(self):
program = {
'title': 'notice that I do not have a curriculum',
}
self.assertEqual([], child_programs(program))
def test_child_programs_no_children(self):
self.assertEqual([], child_programs(self.empty_program))
def test_child_programs_one_child(self):
self.assertEqual([self.grandchild_1], child_programs(self.simple_program))
def test_child_programs_many_children(self):
expected_children = [
self.child_1,
self.grandchild_1,
self.child_2,
self.grandchild_2,
self.grandchild_3,
]
self.assertEqual(expected_children, child_programs(self.complex_program))
def test_course_run_keys_for_program_no_courses(self):
self.assertEqual(set(), course_run_keys_for_program(self.empty_program))
def test_course_run_keys_for_program_one_course(self):
self.assertEqual({'course-run-1'}, course_run_keys_for_program(self.simple_program))
def test_course_run_keys_for_program_many_courses(self):
expected_course_runs = {
'course-run-2',
'course-run-3',
'course-run-4',
}
self.assertEqual(expected_course_runs, course_run_keys_for_program(self.complex_program))
def test_is_course_run_in_program(self):
self.assertTrue(is_course_run_in_program('course-run-4', self.complex_program))
self.assertFalse(is_course_run_in_program('course-run-5', self.complex_program))
self.assertFalse(is_course_run_in_program('course-run-4', self.simple_program))
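# Illustrative sketches (assumptions, not the real implementations in
# openedx.core.djangoapps.catalog.utils), written to be consistent with the
# assertions in TestProgramCourseRunCrawling above.
def _child_programs_sketch(program):
    # Depth-first walk over active curricula, collecting nested programs.
    children = []
    for curriculum in program.get('curricula', []):
        if curriculum.get('is_active'):
            for child in curriculum.get('programs', []):
                children.append(child)
                children.extend(_child_programs_sketch(child))
    return children


def _course_run_keys_for_program_sketch(parent_program):
    # Union of course run keys across the program and all child programs.
    keys = set()
    for program in [parent_program] + _child_programs_sketch(parent_program):
        for curriculum in program.get('curricula', []):
            if curriculum.get('is_active'):
                for course in curriculum.get('courses', []):
                    for course_run in course.get('course_runs', []):
                        keys.add(course_run['key'])
    return keys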
@skip_unless_lms
class TestGetProgramsByType(CacheIsolationTestCase):
""" Test for the ``get_programs_by_type()`` function. """
ENABLED_CACHES = ['default']
@classmethod
def setUpClass(cls):
""" Sets up program data. """
super(TestGetProgramsByType, cls).setUpClass()
cls.site = SiteFactory()
cls.other_site = SiteFactory()
cls.masters_program_1 = ProgramFactory.create(type='Masters')
cls.masters_program_2 = ProgramFactory.create(type='Masters')
cls.masters_program_other_site = ProgramFactory.create(type='Masters')
cls.bachelors_program = ProgramFactory.create(type='Bachelors')
cls.no_type_program = ProgramFactory.create(type=None)
def setUp(self):
""" Loads program data into the cache before each test function. """
super(TestGetProgramsByType, self).setUp()
self.init_cache()
def init_cache(self):
""" This function plays the role of the ``cache_programs`` management command. """
all_programs = [
self.masters_program_1,
self.masters_program_2,
self.bachelors_program,
self.no_type_program,
self.masters_program_other_site
]
cached_programs = {
PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program for program in all_programs
}
cache.set_many(cached_programs, None)
programs_by_type = defaultdict(list)
for program in all_programs:
program_type = normalize_program_type(program.get('type'))
site_id = self.site.id
if program == self.masters_program_other_site:
site_id = self.other_site.id
cache_key = PROGRAMS_BY_TYPE_CACHE_KEY_TPL.format(site_id=site_id, program_type=program_type)
programs_by_type[cache_key].append(program['uuid'])
cache.set_many(programs_by_type, None)
def test_get_masters_programs(self):
expected_programs = [self.masters_program_1, self.masters_program_2]
six.assertCountEqual(self, expected_programs, get_programs_by_type(self.site, 'masters'))
def test_get_bachelors_programs(self):
expected_programs = [self.bachelors_program]
self.assertEqual(expected_programs, get_programs_by_type(self.site, 'bachelors'))
def test_get_no_such_type_programs(self):
expected_programs = []
self.assertEqual(expected_programs, get_programs_by_type(self.site, 'doctorate'))
def test_get_masters_programs_other_site(self):
expected_programs = [self.masters_program_other_site]
self.assertEqual(expected_programs, get_programs_by_type(self.other_site, 'masters'))
def test_get_programs_null_type(self):
expected_programs = [self.no_type_program]
self.assertEqual(expected_programs, get_programs_by_type(self.site, None))
def test_get_programs_false_type(self):
expected_programs = []
self.assertEqual(expected_programs, get_programs_by_type(self.site, False))
def test_normalize_program_type(self):
self.assertEqual('none', normalize_program_type(None))
self.assertEqual('false', normalize_program_type(False))
self.assertEqual('true', normalize_program_type(True))
self.assertEqual('', normalize_program_type(''))
self.assertEqual('masters', normalize_program_type('Masters'))
self.assertEqual('masters', normalize_program_type('masters'))
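# Illustrative sketch (an assumption, not the actual implementation in
# openedx.core.djangoapps.catalog.utils): a normalizer consistent with
# test_normalize_program_type above.
def _normalize_program_type_sketch(program_type):
    # Stringify (None -> 'none', False -> 'false', True -> 'true'), then
    # lowercase so lookups are case-insensitive.
    return six.text_type(program_type).lower()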
| ESOedX/edx-platform | openedx/core/djangoapps/catalog/tests/test_utils.py | Python | agpl-3.0 | 34,566 | 0.00243 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2018-09-15 15:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gestioneide', '0049_auto_20180516_2118'),
]
operations = [
migrations.AddField(
model_name='empresa',
name='cif',
field=models.CharField(default=b'B12345678', max_length=9),
),
migrations.AlterField(
model_name='empresa',
name='razon_social',
field=models.CharField(default=b'ESCUELAS INTERNACIONALES E.I.D.E. S.L.', max_length=255, verbose_name=b'Raz\xc3\xb3n Social'),
),
]
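# Standard Django workflow to apply this migration (shown for illustration):
#
#     python manage.py migrate gestioneide 0050_auto_20180915_1745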
| Etxea/gestioneide | gestioneide/migrations/0050_auto_20180915_1745.py | Python | gpl-3.0 | 721 | 0.001387 |
from fbchat import Client, log
from getpass import getpass
from datetime import datetime
import sys, os, urllib, time, socket, shutil, requests
from glob import glob
from zipfile import ZipFile
socket.setdefaulttimeout(60)
reload(sys)
sys.setdefaultencoding("utf-8")
ending = '</div></div>'
username = str(raw_input("Username: "))
password = getpass()
client = Client(username, password)
zipping = str(raw_input("Want to save your data as a .Zip file y/n?: "))
uid = client.uid
USER = client.fetchUserInfo(client.uid)[client.uid]
self = USER.name
ID = []
NAME = []
docs = ['docx', 'doc', 'pdf', 'pptx', 'txt', 'xlsx']
media = ['mp3', 'mp4', 'aac', 'webm', 'avi', '3gp']
gen = ['jpg', 'png']
def download_file(add, name, log_file=None):
    request = requests.get(add, timeout=60, stream=True)
    # Open the output file and make sure we write in binary mode
    flag = 0
    with open(name, 'wb') as fh:
        # Walk through the request response in chunks of 1024 * 1024 bytes, so 1MiB
        for chunk in request.iter_content(1024 * 1024):
            flag += 1
            if flag > 10:
                # Skip anything over ~10MB; note the URL so the file can be
                # downloaded manually later.
                if log_file is not None:
                    log_file.write("This file is bigger than 10MB so download it if you want-- " + add + '\n\n')
                break
            # Write the chunk to the file
            fh.write(chunk)
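# Illustrative call (not part of the original script); `log` is assumed to
# be an open, writable file object:
#
#     with open('download.log', 'wb') as log:
#         download_file('https://example.com/photo.jpg', 'Data\\photo.jpg', log)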
def make_zip():
file = open('instruction.txt', 'w')
file.write("Use your facebook password to decrypt Fb_Data.zip file")
file.close()
files = glob("Data\\*\\*\\*")
files += glob("Data\\*\\*")
files += glob("Data\\*")
zipfile = ZipFile("Fb_Data.zip", 'w')
for file in files:
if os.path.isfile(file):
zipfile.write(file)
zipfile.close()
shutil.rmtree("Data")
def do_rest(thread):
check = 0
data = str(thread).split(" ")
id = data[len(data)-1].split('(')[1].split(')')[0]
other = data[1]
name = str(data[1])
if len(data) == 4:
other = data[1] + " " + data[2]
name = str(data[1]) + '_' + str(data[2])
if len(data) == 5:
other = data[1] + " " + data[2] + " " + data[3]
name = data[1] + '_' + data[2] + '_' + data[3]
if len(data) == 6:
other = data[1] + " " + data[2] + " " + data[3] + " " + data[4]
name = data[1] + '_' + data[2] + '_' + data[3] + '_' + data[4]
starting = '<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <title>' + other + '- Messages</title> <link rel="stylesheet" href="..\\..\\style.css" type="text/css" /></head><body> <div class="contents"><h1>' + other +'</h1> <div class="thread"> Total number of messages = ' + str(thread.message_count)
Testing = Path_check(other)
folder_name = "Data\\" + other
log_file = folder_name+"\\" + name + ".txt"
filename = folder_name+"\\html\\" + name + ".htm"
file = open(filename, 'wb')
Log_file = open(log_file, 'wb')
file.write(starting)
flag = 1000
num = 0
timestamp = int(19800 + time.time())*1000
if str(data[0]) != '<GROUP':
ID.append(id)
NAME.append(other)
check = 1
    while flag > 999:
messages = client.fetchThreadMessages(thread_id=id, limit=1000, before=timestamp)
timestamp = messages[len(messages)-1].timestamp
for message in messages:
try:
if check == 0:
if message.author not in ID:
USER = client.fetchUserInfo(message.author)[message.author]
other = USER.name
ID.append(message.author)
NAME.append(other)
else:
for i in range(len(ID)):
if message.author == ID[i]:
other = NAME[i]
break
if message.extensible_attachment:
if message.extensible_attachment['story_attachment']['media']:
if message.extensible_attachment['story_attachment']['media']['is_playable']:
add = message.extensible_attachment['story_attachment']['media']['playable_url']
Filename = folder_name + "\\shares\\" + str(message.timestamp) + '.mp4'
if add is not None:
try:
download_file(add, Filename)
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <video width="500" controls> <source src="..\\..\\..\\' + Filename + '" type="video/mp4"></p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <video width="500" controls> <source src="..\\..\\..\\' + Filename + '" type="video/mp4"></p> \n' )
except:
Log_file.write("Getting some error now on url -: " + add + '\n\n')
else:
Log_file.write("Look at this separately--" + str(message.extensible_attachment) + '\n\n')
elif message.attachments:
for attachment in message.attachments:
# For Image
time.sleep(.1)
Filename = attachment['filename']
if Filename.split("-")[0] == 'image':
add = attachment['large_preview']['uri']
name = folder_name +"\\images\\"+ attachment['filename']+'.' +attachment['original_extension']
try:
download_file(add, name)
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ name +'"> <img src="..\\..\\..\\'+ name + '" alt="Folder" width="500" > </a></p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ name +'"> <img src="..\\..\\..\\'+ name + '" alt="Folder" width="500" > </a></p> \n' )
except:
Log_file.write( "Getting some error now on url -: " + add + '\n\n')
elif len(Filename.split(".")) > 1 and Filename.split(".")[len(Filename.split("."))-1] in docs:
add = attachment['url']
test = urllib.urlopen(add)
temp = test.read().split('replace("')[1]
temp = temp.split('");</script>')[0]
temp = temp.replace("\\","")
Temp = Filename
Filename = folder_name + "\\docs\\" + Filename
try:
download_file(temp, Filename)
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ Filename +'">' + Temp + '</a></p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ Filename +'">' + Temp + '</a></p> \n' )
except:
Log_file.write( "Getting some error now on url -: " + temp + '\n\n')
elif len(Filename.split(".")) > 1 and Filename.split(".")[len(Filename.split("."))-1] in media:
try:
add = attachment['playable_url']
Filename = folder_name + "\\media\\" + Filename
download_file(add, Filename)
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <video width="500" controls> <source src="..\\..\\..\\' + Filename + '" type="video/mp4"></p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <video width="500" controls> <source src="..\\..\\..\\' + Filename + '" type="video/mp4"></p> \n' )
except:
Log_file.write( "Getting some error now on url -: " + add + '\n\n')
elif Filename.split("-")[0] == 'gif':
try:
add = attachment['animated_image']['uri']
Filename = folder_name + "\\media\\" + Filename
download_file(add, Filename)
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ name +'"> <img src="..\\..\\..\\'+ name + '" alt="Folder" width="500" > </a></p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ name +'"> <img src="..\\..\\..\\'+ name + '" alt="Folder" width="500" > </a></p> \n' )
except:
Log_file.write( "Getting some error now on url -: " + add + '\n\n')
else:
add = attachment['url']
test = urllib.urlopen(add)
temp = test.read().split('replace("')[1]
temp = temp.split('");</script>')[0]
temp = temp.replace("\\","")
Temp = Filename
Filename = folder_name + "\\Random\\" + Filename
try:
download_file(temp, Filename)
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ Filename +'">' + Temp + '</a></p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ Filename +'">' + Temp + '</a></p> \n' )
except:
Log_file.write( "Getting some error now on url -: " + temp + '\n\n')
elif message.text is not None and message.sticker is None:
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p>' + message.text + ' </p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p>' + message.text + ' </p> \n' )
except:
pass
flag = len(messages)
num += flag
print num, " messages had been downloaded from today till - ",datetime.utcfromtimestamp(float(timestamp)/1000).strftime('%d-%m-%Y')
file.write(ending)
file.close()
def Path_check(name):
path = "Data"
if not os.path.exists(path):
os.mkdir(path)
source = 'Resources\\style.css'
target = 'Data'
try:
shutil.copy(source, target)
except IOError as e:
print("Unable to copy file. %s" % e)
    except:
        # Log_file is not defined in this scope; report unexpected errors on stdout instead
        print("Unexpected error: %s" % str(sys.exc_info()))
name = path + '\\' +name
path = name
if not os.path.exists(path):
os.mkdir(path)
path = name + "\\docs"
if not os.path.exists(path):
os.mkdir(path)
path = name + "\\html"
if not os.path.exists(path):
os.mkdir(path)
path = name + "\\images"
if not os.path.exists(path):
os.mkdir(path)
path = name + "\\media"
if not os.path.exists(path):
os.mkdir(path)
path = name + "\\shares"
if not os.path.exists(path):
os.mkdir(path)
path = name + "\\Random"
if not os.path.exists(path):
os.mkdir(path)
return True
username = str(raw_input("want to download messages from a specific friend type(y/n): "))
if username.lower() == 'y':
names = str(raw_input("Name of that friends separated by a comma like - satyendra pandey, Narendra pandey--: "))
names = names.split(',')
for name in names:
thread = client.searchForThreads(name)[0]
do_rest(thread)
if zipping.lower() == 'y':
make_zip()
else:
num = int(raw_input("Number of friends from top of your chatlist:"))
if num < 20:
threads = client.fetchThreadList(limit = num)
else:
threads = client.fetchThreadList(offset = 17, limit = 3)
num = (num-20)/20
for i in range(num):
offset = 20*(i+1)
threads += client.fetchThreadList(offset = offset, limit= 20)
for thread in threads:
do_rest(thread)
if zipping.lower() == 'y':
make_zip()
|
satendrapandeymp/Facebook_message_download
|
Message_windows.py
|
Python
|
mit
| 13,157 | 0.031618 |
## @file
# Warning information of Eot
#
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
class Warning (Exception):
## The constructor
#
# @param self The object pointer
# @param Str The message to record
# @param File The FDF name
# @param Line The Line number that error occurs
#
def __init__(self, Str, File = None, Line = None):
self.message = Str
self.FileName = File
self.LineNumber = Line
self.ToolName = 'EOT'
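# Illustrative usage (not part of the original file; the FDF name and line
# number below are made up):
#     raise Warning("Unknown GUID in FDF", File="Platform.fdf", Line=12)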
|
bitcrystal/buildtools-BaseTools
|
Source/Python/Eot/ParserWarning.py
|
Python
|
bsd-2-clause
| 1,000 | 0.01 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-12 14:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('type_page', '0004_auto_20170711_1241'),
]
operations = [
migrations.AddField(
model_name='footballtype',
name='comments',
field=models.CharField(max_length=128, null=True),
),
]
|
dumel93/project-
|
type_page/migrations/0005_footballtype_comments.py
|
Python
|
mit
| 471 | 0 |
# -*- coding: utf-8 -
#
# This file is part of offset. See the NOTICE for more information.
from offset import run, maintask
from offset.time import Ticker, SECOND
@maintask
def main():
ticker = Ticker(0.1 * SECOND)
for i in range(3):
print(ticker.c.recv())
ticker.stop()
run()
|
benoitc/offset
|
examples/demo_ticker.py
|
Python
|
mit
| 302 | 0.003311 |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from bartpy.sklearnmodel import SklearnModel
def run(size=100,
alpha=0.95,
beta=2.0,
n_trees=50):
import warnings
warnings.simplefilter("error", UserWarning)
x = np.linspace(0, 5, size)
X = pd.DataFrame(x)
y = np.random.normal(0, 0.1, size=size) + np.sin(x)
model = SklearnModel(
n_samples=100,
n_burn=50,
n_trees=n_trees,
alpha=alpha,
beta=beta,
n_jobs=1,
n_chains=1)
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.33,
random_state=42,
shuffle=True)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
plt.scatter(y_test, y_pred)
plt.show()
rmse = np.sqrt(np.sum(np.square(y_test - y_pred)))
print(rmse)
if __name__ == "__main__":
run(50, 0.95, 2.0)
|
JakeColtman/bartpy
|
examples/score/core/sin.py
|
Python
|
mit
| 1,238 | 0 |
import numpy as np
# ===== Patch normalization by mean intensity ========================
def mean_intensity_norm(patch):
    # Mean intensity over all pixels; subtracting the scalar re-centres the patch at zero.
    mu = np.sum(patch) * 1.0 / (patch.shape[0] * patch.shape[1])
    return patch - mu
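# A minimal usage sketch (not part of the original file): it builds a synthetic
# 8x8 patch and checks that the normalized output has zero mean. The demo array
# and the tolerance are illustrative assumptions.
if __name__ == '__main__':
    demo = np.arange(64, dtype=np.float64).reshape(8, 8)
    normalized = mean_intensity_norm(demo)
    print(abs(normalized.mean()) < 1e-9)  # expected: True (zero-mean output)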
|
tntrung/youCVML
|
misc/patch.py
|
Python
|
gpl-2.0
| 222 | 0.022523 |
#!/usr/bin/env python3
import os
import errno
import requests
url='http://www.jeep.com/hostd/getlocatedealers.json?zipCode=60202&zipDistance=2500'
directory_name=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'data'))
try:
os.makedirs(directory_name)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(directory_name):
pass
else:
raise
file_name=os.path.join(directory_name, 'jeep.json')
response = requests.get(url, stream=True)
with open(file_name, 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
|
simon-wenmouth/dealerships
|
dealerships/manufacturers/jeep/download_data.py
|
Python
|
mit
| 628 | 0.009554 |
import commands
from dataservice.DDM import ddm
#print ddm.DQ2ProductionClient.generateUUID()
#print ddm.DQ2.getFilesFromCatalog('aho.xml')
#print ddm.DQ2ProductionClient.dq2_makeblocks('input.data')
ids=['pandatest.000003.dd.input._00047.junk','09801b0a-9fd0-4237-8caf-a37932c26e39',
'pandatest.000003.dd.input._00050.junk','6dd3d367-4aa3-4e1a-9ac3-9ad14b7311f4',
'pandatest.000003.dd.input._00037.junk','817c2c92-467b-4a1b-9482-f2ec8468cf2e',
'pandatest.000003.dd.input._00021.junk','7720527f-817e-40c7-9e29-ce237f59edfa',
'pandatest.000003.dd.input._00023.junk','5f1f9982-85a3-4d1a-9ee9-f1de22c02544',
'pandatest.000003.dd.input._00042.junk','610cc91a-c731-4bce-ac7a-ff5133e7d18b',
'pandatest.000003.dd.input._00027.junk','bd987478-3c59-4551-b12b-2853bac25613',
'pandatest.000003.dd.input._00032.junk','9d0424f3-7552-4282-92f2-dfe74e9a6c12',
'pandatest.000003.dd.input._00009.junk','dce33d4a-4569-49ee-95c5-b619b161c777',
'pandatest.000003.dd.input._00036.junk','2fc9836b-82d6-41b0-b966-a5c37662172d',
'pandatest.000003.dd.input._00031.junk','65b957e0-5ecc-44bb-a1f9-cccb61ca2d16',
'pandatest.000003.dd.input._00025.junk','be29fe82-17e2-4122-b4c8-f49a0b76c81f',
'pandatest.000003.dd.input._00029.junk','afa4322f-409b-4327-9169-229d8d48ad5a',
'pandatest.000003.dd.input._00013.junk','cf236d3b-45fd-4b58-bdfb-59abc983c886',
'pandatest.000003.dd.input._00020.junk','b02f98da-0138-4b58-89ba-a88f37214a89',
'pandatest.000003.dd.input._00001.junk','12ab5bb9-944e-4e75-bb90-b64c462d4cd8',
'pandatest.000003.dd.input._00001.junk','12ab5bb9-944e-4e75-bb90-b64c462d4cd8',
'pandatest.000003.dd.input._00006.junk','c0a422ad-e9f1-44bb-9539-cfef7e739da2',
'pandatest.000003.dd.input._00034.junk','da670db3-3638-4f06-b650-a9315eb2bd63',
'pandatest.000003.dd.input._00046.junk','2fcef270-2e41-472d-83c0-53749b401b74',
'pandatest.000003.dd.input._00012.junk','5e212fa1-201f-494d-a2b2-420b229b08fc',
'pandatest.000003.dd.input._00044.junk','87c8ebcc-a637-4204-b77b-8219e68b98d7',
'pandatest.000003.dd.input._00030.junk','87ad811f-7d39-43d9-8a13-e117079bb208',
'pandatest.000003.dd.input._00022.junk','6b902506-1ee1-46b1-a105-1521a8c0dbca',
'pandatest.000003.dd.input._00017.junk','2bbed213-943c-41be-b9d7-7d86a309b0b2',
'pandatest.000003.dd.input._00049.junk','8366e269-f9ae-4b9c-bd98-df4027c992c7',
'pandatest.000003.dd.input._00015.junk','f3c5f37c-b4c2-4933-9633-467ba3a7c364',
'pandatest.000003.dd.input._00004.junk','35d66be2-9d21-44a3-96f7-903a7abf4a87',
'pandatest.000003.dd.input._00010.junk','2279ea3e-ebbb-4b19-9a69-9868f0cce694',
'pandatest.000003.dd.input._00040.junk','a847dbbb-4f98-4b5b-b353-e29e3e3b3fd5',
'pandatest.000003.dd.input._00007.junk','abfef002-62ca-4d84-9813-6329764e38bd',
'pandatest.000003.dd.input._00048.junk','52854023-67d8-4a0f-99ac-bb1f0bd1dc98',
'pandatest.000003.dd.input._00016.junk','bddf7441-6ac9-4087-bafe-32e47448cdc1',
'pandatest.000003.dd.input._00041.junk','c76999ba-4cdf-49e9-bfa5-ff3525fbf1ab',
'pandatest.000003.dd.input._00003.junk','4865119e-367f-4dd8-bdff-505bd878dfde',
'pandatest.000003.dd.input._00019.junk','b9fce1fd-8d4c-4fc4-932f-12b13263ca0c',
'pandatest.000003.dd.input._00011.junk','f93a4e08-fd4f-45fc-b324-91ff59555b1c',
'pandatest.000003.dd.input._00018.junk','e4894561-9589-40d8-871b-b57d70564384',
'pandatest.000003.dd.input._00002.junk','58934980-5ab3-4a66-b3da-55f86d4b54bd',
'pandatest.000003.dd.input._00005.junk','5993fe60-bc8c-4fd8-aac1-dfd55700c9c3',
'pandatest.000003.dd.input._00028.junk','6c19e1fc-ee8c-4bae-bd4c-c9e5c73aca27',
'pandatest.000003.dd.input._00033.junk','98f79ba1-1793-4253-aac7-bdf90a51d1ee',
'pandatest.000003.dd.input._00039.junk','33660dd5-7cef-422a-a7fc-6c24cb10deb1',
'pandatest.000003.dd.input._00014.junk','5c0e9ed8-05a6-41c4-8c07-39b2be33ebc1',
'pandatest.000003.dd.input._00008.junk','b0c184d1-5f5e-45a6-9cc8-8b0f20a85463',
'pandatest.000003.dd.input._00038.junk','b9171997-4d2b-4075-b154-579ebe9438fa',
'pandatest.000003.dd.input._00026.junk','89e5bdf1-15de-44ae-a388-06c1e7d7e2fc',
'pandatest.000003.dd.input._00024.junk','c77b77a2-e6d1-4360-8751-19d9fb77e1f1',
'pandatest.000003.dd.input._00043.junk','cc6ac2a1-4616-4551-80a7-d96f79252b64',
'pandatest.000003.dd.input._00045.junk','ddbed17a-6d65-4e8d-890a-21e1eaa3e9d6',
'pandatest.000003.dd.input._00035.junk','8ed1875a-eb90-4906-8fc4-0449d300ddfe'
]
for i in range(1):
datasetName='testDQ.%s' % commands.getoutput('/usr/bin/uuidgen')
print datasetName
#['pandatest.000003.dd.input._00004.junk','35d66be2-9d21-44a3-96f7-903a7abf4a87']
#'pandatest.000003.dd.input._00028.junk','6c19e1fc-ee8c-4bae-bd4c-c9e5c73aca27',
# 'pandatest.000003.dd.input._00033.junk','98f79ba1-1793-4253-aac7-bdf90a51d1ee']
print (['registerNewDataset','-c',datasetName]+ids[i*2:i*2+2])
ddm.DQ2.main(['registerNewDataset','-c',datasetName]+ids[i*2:i*2+2])
'''
status,out = ddm.RepositoryClient.main(['queryDatasetByName',datasetName])
exec "vuids = %s" % out.split('\n')[0]
if vuids.has_key(datasetName):
vuid = vuids[datasetName]
print vuid
status,out = ddm.RepositoryClient.main(['resolveVUID',vuid])
status,out = ddm.DQ2.getFilesFromCatalog('baka.xml')
exec "rets = %s" % out.split('\n')[0]
print rets[0]
exec "ids = %s" % out
print ddm.DQ2.main(['addFilesToDataset',datasetName]+ids)
status,out = ddm.DQ2.main(['listFilesInDataset',datasetName])
print out
'''
print (['registerDatasetLocations','-c',datasetName,'http://dms02.usatlas.bnl.gov/sites/bnl/lrc'])
ddm.DQ2.main(['registerDatasetLocations','-c',datasetName,
'http://dms02.usatlas.bnl.gov/sites/bnl/lrc'])
print (['registerDatasetSubscription',datasetName,'http://doe-dhcp241.bu.edu:8000/dq2/'])
ddm.DQ2.main(['registerDatasetSubscription',datasetName,'http://doe-dhcp241.bu.edu:8000/dq2/'])
#print ddm.DQ2.main(['eraseDataset',datasetName])
#print ddm.DQ2.main(['eraseDataset',datasetName])
#print ddm.DQ2ProductionClient.dq2_create_dataset(datasetName)
#status,out = ddm.DQ2ProductionClient.dq2_assign_destination(datasetName,'BNL_SE')
#print out
#print ddm.DQ2.main(['eraseDataset',datasetName])
#status,out = ddm.DQ2.main(['listFilesInDataset','panda.destDB.11aed982-8079-4db9-964c-37a284b8597a'])
#print out
ddm.DQ2_iter.listFileReplicasBySites('mc11_7TeV.151900.madgraph_SM_SG_SS_direct_1200_600_395.merge.AOD.e1095_a131_s1353_a145_r2993_tid723983_00',
0,['SARA-MATRIX_DATADISK'],
0,300)
|
RRCKI/panda-server
|
pandaserver/test/testDQ.py
|
Python
|
apache-2.0
| 6,730 | 0.021545 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-09 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('find_a_pad_app', '0003_auto_20170709_1432'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='email',
field=models.CharField(blank=True, max_length=100),
),
migrations.AlterField(
model_name='organization',
name='phone_number',
field=models.CharField(blank=True, max_length=12),
),
]
|
findapad/find_a_pad
|
find_a_pad_app/migrations/0004_auto_20170709_1432.py
|
Python
|
mit
| 653 | 0 |
from Foundation import *
from AppKit import *
class FileSettings(NSObject):
fsdefault_py = None
fsdefault_pyw = None
fsdefault_pyc = None
default_py = None
default_pyw = None
default_pyc = None
factorySettings = None
prefskey = None
settings = None
def getFactorySettingsForFileType_(cls, filetype):
if filetype == u'Python Script':
curdefault = cls.fsdefault_py
elif filetype == u'Python GUI Script':
curdefault = cls.fsdefault_pyw
elif filetype == u'Python Bytecode Document':
curdefault = cls.fsdefault_pyc
else:
NSLog(u'Funny File Type: %s\n', filetype)
curdefault = cls.fsdefault_py
filetype = u'Python Script'
if curdefault is None:
curdefault = FileSettings.alloc().initForFSDefaultFileType_(filetype)
return curdefault
getFactorySettingsForFileType_ = classmethod(getFactorySettingsForFileType_)
def getDefaultsForFileType_(cls, filetype):
if filetype == u'Python Script':
curdefault = cls.default_py
elif filetype == u'Python GUI Script':
curdefault = cls.default_pyw
elif filetype == u'Python Bytecode Document':
curdefault = cls.default_pyc
else:
NSLog(u'Funny File Type: %s', filetype)
curdefault = cls.default_py
filetype = u'Python Script'
if curdefault is None:
curdefault = FileSettings.alloc().initForDefaultFileType_(filetype)
return curdefault
getDefaultsForFileType_ = classmethod(getDefaultsForFileType_)
def newSettingsForFileType_(cls, filetype):
return FileSettings.alloc().initForFileType_(filetype)
newSettingsForFileType_ = classmethod(newSettingsForFileType_)
def initWithFileSettings_(self, source):
self = super(FileSettings, self).init()
self.settings = source.fileSettingsAsDict().copy()
self.origsource = None
return self
def initForFileType_(self, filetype):
defaults = FileSettings.getDefaultsForFileType_(filetype)
self = self.initWithFileSettings_(defaults)
self.origsource = defaults
return self
def initForFSDefaultFileType_(self, filetype):
self = super(FileSettings, self).init()
if type(self).factorySettings is None:
bndl = NSBundle.mainBundle()
path = bndl.pathForResource_ofType_(u'factorySettings', u'plist')
type(self).factorySettings = NSDictionary.dictionaryWithContentsOfFile_(path)
if type(self).factorySettings is None:
NSLog(u'Missing %s', path)
return None
dct = type(self).factorySettings.get(filetype)
if dct is None:
NSLog(u'factorySettings.plist misses file type "%s"', filetype)
return None
self.applyValuesFromDict_(dct)
interpreters = dct[u'interpreter_list']
mgr = NSFileManager.defaultManager()
self.settings['interpreter'] = u'no default found'
for filename in interpreters:
filename = filename.nsstring().stringByExpandingTildeInPath()
if mgr.fileExistsAtPath_(filename):
self.settings['interpreter'] = filename
break
self.origsource = None
return self
def applyUserDefaults_(self, filetype):
dct = NSUserDefaults.standardUserDefaults().dictionaryForKey_(filetype)
if dct:
self.applyValuesFromDict_(dct)
def initForDefaultFileType_(self, filetype):
fsdefaults = FileSettings.getFactorySettingsForFileType_(filetype)
self = self.initWithFileSettings_(fsdefaults)
if self is None:
return self
self.settings['interpreter_list'] = fsdefaults.settings['interpreter_list']
self.settings['scriptargs'] = u''
self.applyUserDefaults_(filetype)
self.prefskey = filetype
return self
def reset(self):
if self.origsource:
self.updateFromSource_(self.origsource)
else:
fsdefaults = FileSettings.getFactorySettingsForFileType_(self.prefskey)
self.updateFromSource_(fsdefaults)
def updateFromSource_(self, source):
self.settings.update(source.fileSettingsAsDict())
if self.origsource is None:
NSUserDefaults.standardUserDefaults().setObject_forKey_(self.fileSettingsAsDict(), self.prefskey)
def applyValuesFromDict_(self, dct):
if self.settings is None:
self.settings = {}
self.settings.update(dct)
def commandLineForScript_(self, script):
cur_interp = None
if self.settings['honourhashbang']:
try:
line = file(script, 'rU').next().rstrip()
except:
pass
else:
if line.startswith('#!'):
cur_interp = line[2:]
if cur_interp is None:
cur_interp = self.settings['interpreter']
cmd = []
cmd.append('"'+cur_interp.replace('"', '\\"')+'"')
if self.settings['debug']:
cmd.append('-d')
if self.settings['verbose']:
cmd.append('-v')
if self.settings['inspect']:
cmd.append('-i')
if self.settings['optimize']:
cmd.append('-O')
if self.settings['nosite']:
cmd.append('-S')
if self.settings['tabs']:
cmd.append('-t')
others = self.settings['others']
if others:
cmd.append(others)
cmd.append('"'+script.replace('"', '\\"')+'"')
cmd.append(self.settings['scriptargs'])
if self.settings['with_terminal']:
cmd.append("""&& echo "Exit status: $?" && python -c 'import sys;sys.stdin.readline()' && exit 1""")
else:
cmd.append('&')
return ' '.join(cmd)
def fileSettingsAsDict(self):
return self.settings
|
albertz/music-player
|
mac/pyobjc-framework-Cocoa/Examples/AppKit/PyObjCLauncher/FileSettings.py
|
Python
|
bsd-2-clause
| 6,020 | 0.001495 |
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import operator
import os
import platform
# Note: several globals used below (e.g. test, __is64BitOS__) are injected by
# the surrounding Squish test framework rather than imported here.
# for easier re-usage (because Python hasn't an enum type)
class Targets:
    ALL_TARGETS = map(lambda x: 2 ** x, range(7))
(DESKTOP_474_GCC,
DESKTOP_480_DEFAULT,
SIMULATOR,
EMBEDDED_LINUX,
DESKTOP_521_DEFAULT,
DESKTOP_531_DEFAULT,
DESKTOP_541_GCC) = ALL_TARGETS
@staticmethod
def desktopTargetClasses():
desktopTargets = (sum(Targets.ALL_TARGETS) & ~Targets.SIMULATOR & ~Targets.EMBEDDED_LINUX)
if platform.system() == 'Darwin':
desktopTargets &= ~Targets.DESKTOP_541_GCC
return desktopTargets
@staticmethod
def qt4Classes():
return (Targets.DESKTOP_474_GCC | Targets.DESKTOP_480_DEFAULT
| Targets.SIMULATOR | Targets.EMBEDDED_LINUX)
@staticmethod
def getStringForTarget(target):
if target == Targets.DESKTOP_474_GCC:
return "Desktop 474 GCC"
elif target == Targets.DESKTOP_480_DEFAULT:
if platform.system() in ('Windows', 'Microsoft'):
return "Desktop 480 MSVC2010"
else:
return "Desktop 480 GCC"
elif target == Targets.SIMULATOR:
return "Qt Simulator"
elif target == Targets.EMBEDDED_LINUX:
return "Embedded Linux"
elif target == Targets.DESKTOP_521_DEFAULT:
return "Desktop 521 default"
elif target == Targets.DESKTOP_531_DEFAULT:
return "Desktop 531 default"
elif target == Targets.DESKTOP_541_GCC:
return "Desktop 541 GCC"
else:
return None
@staticmethod
def getTargetsAsStrings(targets):
if not isinstance(targets, (tuple,list)):
test.fatal("Wrong usage... This function handles only tuples or lists.")
return None
result = map(Targets.getStringForTarget, targets)
if None in result:
test.fatal("You've passed at least one unknown target!")
return result
@staticmethod
def intToArray(targets):
return filter(lambda x: x & targets, Targets.ALL_TARGETS)
@staticmethod
def arrayToInt(targetArr):
return reduce(operator.or_, targetArr, 0)
@staticmethod
def getDefaultKit():
return Targets.DESKTOP_521_DEFAULT
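# Illustrative round-trip between the bitmask and list forms (not part of the
# original file): Targets.intToArray(Targets.DESKTOP_474_GCC | Targets.SIMULATOR)
# returns [1, 4], and Targets.arrayToInt([1, 4]) gives back the combined mask 5.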
# this class holds some constants for easier usage inside the Projects view
class ProjectSettings:
BUILD = 1
RUN = 2
# this class defines some constants for the views of the creator's MainWindow
class ViewConstants:
WELCOME, EDIT, DESIGN, DEBUG, PROJECTS, HELP = range(6)
FIRST_AVAILABLE = 0
# always adjust the following to the highest value of the available ViewConstants when adding new
LAST_AVAILABLE = HELP
# this function returns a regex of the tooltip of the FancyTabBar elements
# this is needed because the keyboard shortcut is OS specific
# if the provided argument does not match any of the ViewConstants it returns None
@staticmethod
def getToolTipForViewTab(viewTab):
if viewTab == ViewConstants.WELCOME:
toolTip = ur'Switch to <b>Welcome</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.EDIT:
toolTip = ur'Switch to <b>Edit</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.DESIGN:
toolTip = ur'Switch to <b>Design</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.DEBUG:
toolTip = ur'Switch to <b>Debug</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.PROJECTS:
toolTip = ur'Switch to <b>Projects</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.HELP:
toolTip = ur'Switch to <b>Help</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
else:
return None
return toolTip % (viewTab + 1)
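    # Illustrative call (not part of the original file):
    # getToolTipForViewTab(ViewConstants.EDIT) yields the Edit-mode regex with
    # shortcut digit 2, since tab indices are zero-based but shortcuts count from 1.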
class SubprocessType:
QT_WIDGET=0
QT_QUICK_APPLICATION=1
QT_QUICK_UI=2
USER_DEFINED=3
@staticmethod
def getWindowType(subprocessType, qtQuickVersion="1.1"):
if subprocessType == SubprocessType.QT_WIDGET:
return "QMainWindow"
if subprocessType == SubprocessType.QT_QUICK_APPLICATION:
qqv = "2"
if qtQuickVersion[0] == "1":
qqv = "1"
return "QtQuick%sApplicationViewer" % qqv
if subprocessType == SubprocessType.QT_QUICK_UI:
if qtQuickVersion == "1.1":
return "QDeclarativeViewer"
else:
return "QQuickView"
if subprocessType == SubprocessType.USER_DEFINED:
return "user-defined"
test.fatal("Could not determine the WindowType for SubprocessType %s" % subprocessType)
return None
class QtInformation:
QT_VERSION = 0
QT_BINPATH = 1
QT_LIBPATH = 2
class LibType:
SHARED = 0
STATIC = 1
QT_PLUGIN = 2
@staticmethod
def getStringForLib(libType):
if libType == LibType.SHARED:
return "Shared Library"
if libType == LibType.STATIC:
return "Statically Linked Library"
if libType == LibType.QT_PLUGIN:
return "Qt Plugin"
return None
class Qt5Path:
DOCS = 0
EXAMPLES = 1
@staticmethod
def getPaths(pathSpec):
if pathSpec == Qt5Path.DOCS:
path52 = "/doc"
path53 = "/Docs/Qt-5.3"
path54 = "/Docs/Qt-5.4"
elif pathSpec == Qt5Path.EXAMPLES:
path52 = "/examples"
path53 = "/Examples/Qt-5.3"
path54 = "/Examples/Qt-5.4"
else:
test.fatal("Unknown pathSpec given: %s" % str(pathSpec))
return []
if platform.system() in ('Microsoft', 'Windows'):
return ["C:/Qt/Qt5.2.1/5.2.1/msvc2010" + path52,
"C:/Qt/Qt5.3.1" + path53, "C:/Qt/Qt5.4.1" + path54]
elif platform.system() == 'Linux':
if __is64BitOS__():
return map(os.path.expanduser, ["~/Qt5.2.1/5.2.1/gcc_64" + path52,
"~/Qt5.3.1" + path53, "~/Qt5.4.1" + path54])
return map(os.path.expanduser, ["~/Qt5.2.1/5.2.1/gcc" + path52,
"~/Qt5.3.1" + path53, "~/Qt5.4.1" + path54])
else:
return map(os.path.expanduser, ["~/Qt5.2.1/5.2.1/clang_64" + path52,
"~/Qt5.3.1" + path53])
|
pivonroll/Qt_Creator
|
tests/system/shared/classes.py
|
Python
|
gpl-3.0
| 7,863 | 0.004324 |
from jobber import jobber
from redis import Redis
client = Redis()
progress = jobber.JobProgress(client=client)
progress.run()
|
tomlepaine/jobber
|
jobber/scripts/progress.py
|
Python
|
bsd-3-clause
| 130 | 0 |
#!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from utils import utils, inspector
# http://www.usda.gov/oig/rptsaudits.htm
archive = 1978
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - Some reports have links with a '.PDF' extension, but they can only be
# accessed using a '.pdf' extension. There is a 404 otherwise. The
# `LOWER_PDF_REPORT_IDS` constant contains a list of the report ids that this
# applies to.
# - The link to the congressional testimony statement from 2/26/2003 should
# point to http://www.usda.gov/oig/webdocs/Testimonybudgt-2004.pdf, not
# http://www.usda.gov/oig/webdocs/IGtestimony110302.pdf
SEMIANNUAL_REPORTS_URL = "http://www.usda.gov/oig/rptssarc.htm"
AGENCY_BASE_URL = "http://www.usda.gov/oig/"
TESTIMONIES_URL = "http://www.usda.gov/oig/rptsigtranscripts.htm"
INVESTIGATION_URLS = "http://www.usda.gov/oig/newinv.htm"
OTHER_REPORT_TYPES = {
"investigation": INVESTIGATION_URLS,
"semiannual_report": SEMIANNUAL_REPORTS_URL,
"testimony": TESTIMONIES_URL,
}
AGENCY_URLS = {
"AARC": "rptsauditsaarc.htm",
"AMS": "rptsauditsams.htm",
"APHIS": "rptsauditsaphis.htm",
"ARS": "rptsauditsars.htm",
"CR": "rptsauditscr.htm",
"CCC": "rptsauditsccc.htm",
"CSRE": "rptsauditscsrees.htm",
"FSA": "rptsauditsfsa.htm",
"FNS": "rptsauditsfns.htm",
"FSIS": "rptsauditsfsis.htm",
"FAS": "rptsauditsfas.htm",
"FS": "rptsauditsfs.htm",
"GIPSA": "rptsauditsgipsa.htm",
"NASS": "rptsauditsnass.htm",
"NIFA": "rptsauditsnifa.htm",
"NRCS": "rptsauditsnrcs.htm",
"REE": "rptsauditsree.htm",
"RMA": "rptsauditsrma.htm",
"RBS": "rptsauditsrbs.htm",
"RBEG": "rptsauditsrbeg.htm",
"RD": "rptsauditsrd.htm",
"RHS": "rptsauditsrhs.htm",
"RUS": "rptsauditsrus.htm",
"USDA": "rptsauditsmulti.htm",
}
AGENCY_NAMES = {
"AARC": "Alternative Agricultural Research & Comm. Center",
"AMS": "Agricultural Marketing Service",
"APHIS": "Animal Plant Health Inspection Service",
"ARS": "Agricultural Research Service",
"CR": "Civil Rights",
"CCC": "Commodity Credit Corporation",
"CSRE": "Cooperative State Research, Ed. & Extension Service",
"FSA": "Farm Service Agency",
"FNS": "Food and Nutrition Service",
"FSIS": "Food Safety and Inspection Service",
"FAS": "Foreign Agricultural Service",
"FS": "Forest Service",
"GIPSA": "Grain Inspection, Packers and Stockyards Administration",
"NASS": "National Agricultural Statistics Service",
"NIFA": "National Institute of Food and Agriculture",
"NRCS": "Natural Resources Conservation Service",
"REE": "Research, Education, and Economics",
"RMA": "Risk Management Agency",
"RBS": "Rural Business-Cooperative Service",
"RBEG": "Rural Business Enterprise Grant",
"RD": "Rural Development",
"RHS": "Rural Housing Service",
"RUS": "Rural Utilities Service",
"USDA": "USDA (Multi-Agency)",
}
REPORT_PUBLISHED_MAPPING = {
"TestimonyBlurb2": datetime.datetime(2004, 7, 14),
}
# These reports have links that end with a '.PDF' extension, but they can only
# be accessed using a '.pdf' extension.
LOWER_PDF_REPORT_IDS = [
"sarc1978_2_Part_1",
"sarc1979_2",
"sarc1980_2",
"sarc1981_2",
"sarc1982_2",
"sarc1983_2",
"sarc1984_2",
"sarc1985_2",
"sarc1986_2",
"sarc1987_2",
"sarc1988_2",
"sarc1989_2",
"sarc1990_2",
"sarc1991_2",
"sarc1992_2",
"sarc1993_2",
"sarc1994_2",
"sarc1995_2",
"sarc1996_2",
"sarc1997_2",
]
def run(options):
year_range = inspector.year_range(options, archive)
# Pull the audit reports
all_audit_reports = {}
for agency_slug, agency_path in AGENCY_URLS.items():
agency_url = urljoin(AGENCY_BASE_URL, agency_path)
doc = utils.beautifulsoup_from_url(agency_url)
results = doc.select("ul li")
if not results:
results = [ancestor_tag_by_name(x, 'tr') for x in
doc.select('img[src$="pdf-pic1.gif"]')]
if not results:
raise inspector.NoReportsFoundError("Department of Agriculture (%s)" % agency_slug)
for result in results:
report = report_from(result, agency_url, year_range,
report_type='audit', agency_slug=agency_slug)
if report:
report_id = report["report_id"]
title = report["title"]
key = (report_id, title)
if key in all_audit_reports:
all_audit_reports[key]["agency"] = all_audit_reports[key]["agency"] \
+ ", " + agency_slug.lower()
else:
all_audit_reports[key] = report
for report in all_audit_reports.values():
inspector.save_report(report)
for report_type, url in OTHER_REPORT_TYPES.items():
doc = utils.beautifulsoup_from_url(url)
results = doc.select("ul li")
if not results:
raise inspector.NoReportsFoundError("Department of Agriculture (other reports)")
for result in results:
report = report_from(result, url, year_range, report_type=report_type)
if report:
inspector.save_report(report)
DATE_FORMATS = ['%m/%d/%Y', '%m/%Y']
def report_from(result, page_url, year_range, report_type, agency_slug="agriculture"):
published_on = None
try:
# Try to find the link with text first. Sometimes there are hidden links
# (no text) that we want to ignore.
link = result.find_all("a", text=True)[0]
except IndexError:
# If none of the links have text, try the first one with an image
for temp in result.find_all("a"):
if temp.img:
link = temp
break
# Fallback: pick the first link
else:
link = result.find_all("a")[0]
report_url = urljoin(page_url, link.get('href').strip())
if result.name == 'li':
title = link.text.strip()
elif result.name == 'tr':
# Remove the date and parenthetical metadata from the result, and save
# the date for later. What's left will be the title.
published_on_element = result.strong.extract()
if result.em:
while result.em:
result.em.extract()
title = result.text.strip()
else:
title = result.text
title = title[:title.find('(')].strip()
published_on_text = published_on_element.text.strip().rstrip(":")
for date_format in DATE_FORMATS:
try:
published_on = datetime.datetime.strptime(published_on_text, date_format)
except ValueError:
pass
# Normalize titles
title = title.rstrip(",")
if title.endswith("(PDF)"):
title = title[:-5]
if title.endswith("(PDF), (Report No: 30601-01-HY, Size: 847,872 bytes)"):
title = title[:-52]
title = title.rstrip(" ")
title = title.replace("..", ".")
title = title.replace(" ", " ")
title = title.replace("REcovery", "Recovery")
title = title.replace("Directy ", "Direct ")
if title == title.upper():
title = title.title()
# These entries on the IG page have the wrong URLs associated with them. The
# correct URLs were guessed or retrieved from an earlier version of the page,
# via the Internet Archive Wayback Machine.
if (report_url == "http://www.usda.gov/oig/webdocs/IGtestimony110302.pdf" and
title == "Statement Of Phyllis K. Fong Inspector General: Before "
"The House Appropriations Subcommittee On Agriculture, Rural "
"Development, Food And Drug Administration And Related Agencies"):
report_url = "http://www.usda.gov/oig/webdocs/Testimonybudgt-2004.pdf"
elif (report_url == "http://www.usda.gov/oig/webdocs/Ebt.PDF" and
title == "Statement Of Roger C. Viadero: Before The U.S. House Of "
"Representatives Committee On Agriculture Subcommittee On Department "
"Operations, Oversight, Nutrition, And Forestry on the Urban "
"Resources Partnership Program"):
report_url = "http://www.usda.gov/oig/webdocs/URP-Testimony.PDF"
elif (report_url == "http://www.usda.gov/oig/webdocs/foodaidasst.PDF" and
title == "Testimony Of Roger C. Viadero: Before The United States "
"Senate Committee On Agriculture, Nutrition, And Forestry On The "
"Department's Processing Of Civil Rights Complaints"):
report_url = "http://www.usda.gov/oig/webdocs/IGstestimony.PDF"
elif (report_url == "http://www.usda.gov/oig/webdocs/34601-10-TE.pdf" and
title == "Rural Housing Service Single Family Housing Program - Maine"):
report_url = "http://www.usda.gov/oig/webdocs/04004-05-Hy.pdf"
elif (report_url == "http://www.usda.gov/oig/webdocs/04004-05-Hy.pdf" and
title == "Rural Development\u2019s Processing of Loan Guarantees to "
"Member of the Western Sugar Cooperative"):
report_url = "http://www.usda.gov/oig/webdocs/34601-03-Ch.pdf"
elif (report_url == "http://www.usda.gov/oig/webdocs/60801-%7E1.pdf" and
title == "Evaluation of the Office of Civil Rights\u2019 Efforts to "
"Reduce the Backlog of Program Complaints"):
report_url = "http://www.usda.gov/oig/webdocs/60801-1-HQ.pdf"
elif report_url == "http://www.usda.gwebdocs/34703-0001-31.pdf":
report_url = "http://www.usda.gov/oig/webdocs/34703-0001-31.pdf"
# This report is listed twice on the same page with slightly different titles
if title == "Animal and Plant Health Inspection Service Transition and " \
"Coordination of Border Inspection Activities Between USDA and DHS":
return
report_filename = report_url.split("/")[-1]
report_id = os.path.splitext(report_filename)[0]
# Differentiate between two letters on the same report
if report_url == "http://www.usda.gov/oig/webdocs/34099-12-TE.pdf":
report_id = "34099-12-Te_1"
elif report_url == "http://www.usda.gov/oig/webdocs/34099-12-Te.pdf":
report_id = "34099-12-Te_2"
# Skip duplicate report entry
if (report_url == "http://www.usda.gov/oig/webdocs/02007-0001-31.pdf" and
title == "ARS: U.S. Meat Animal Research Center Review - Interim Report"):
return
if title == "American Recovery and Reinvestment Act - Emergency Watershed " \
"Protection Program Floodplain Easements" and report_id == "10703-1-KC":
return
# These are just summary versions of other reports. Skip for now.
if '508 Compliant Version' in title:
return
if report_id in REPORT_PUBLISHED_MAPPING:
published_on = REPORT_PUBLISHED_MAPPING[report_id]
if not published_on:
try:
# This is for the investigation reports
published_on = datetime.datetime.strptime(result.text.strip(), '%B %Y (PDF)')
title = "Investigation Bulletins {}".format(result.text.strip())
except ValueError:
pass
if not published_on:
published_on_text = result.text.split()[0].strip()
for date_format in DATE_FORMATS:
try:
published_on = datetime.datetime.strptime(published_on_text, date_format)
except ValueError:
pass
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
if report_id in LOWER_PDF_REPORT_IDS:
report_url = ".".join([report_url.rsplit(".", 1)[0], 'pdf'])
report = {
'inspector': 'agriculture',
'inspector_url': 'http://www.usda.gov/oig/',
'agency': agency_slug.lower(),
'agency_name': AGENCY_NAMES.get(agency_slug, 'Department of Agriculture'),
'report_id': report_id,
'url': report_url,
'title': title,
'type': report_type,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
def ancestor_tag_by_name(element, name):
for parent in element.parents:
if parent.name == name:
return parent
return None
utils.run(run) if (__name__ == "__main__") else None
|
divergentdave/inspectors-general
|
inspectors/agriculture.py
|
Python
|
cc0-1.0
| 11,641 | 0.007559 |
import django
from django.utils.six.moves.urllib import parse
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from django.contrib.auth import signals as auth_signals, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm
from django.test import TransactionTestCase
from django.test.utils import override_settings
from djactasauth.backends import \
FilteredModelBackend, ActAsBackend, OnlySuperuserCanActAsBackend
from djactasauth.util import act_as_login_url, get_login_url
from testapp.sixmock import patch, call
django_11_or_later = django.VERSION[:2] >= (1, 11)
def create_user(
username, password='password', is_superuser=False, is_staff=False):
user = User(username=username, is_superuser=is_superuser)
user.set_password(password)
user.save()
return user
def auth_through_backend(backend, **kwargs):
if django_11_or_later:
args = [None] # request
else:
args = []
return backend.authenticate(*args, **kwargs)
class FilteredBackendTestCase(TransactionTestCase):
def test_it_is_a_model_backend(self):
self.assertTrue(
issubclass(FilteredModelBackend, ModelBackend),
FilteredModelBackend.__mro__)
def test_can_declare_filters_which_apply_to_get_user(self):
staff = create_user(
username='staff', is_staff=True, is_superuser=False)
superuser = create_user(
username='superuser', is_staff=True, is_superuser=True)
customer = create_user(
username='customer', is_staff=False, is_superuser=False)
for u in [staff, superuser, customer]:
u.set_password('password')
u.save()
class TestFilteredBackend(FilteredModelBackend):
def __init__(self, filter_kwargs):
self.filter_kwargs = filter_kwargs
def run_scenarios_with(test_method):
self.assertEqual(staff, test_method(staff, dict()))
self.assertEqual(superuser, test_method(superuser, dict()))
self.assertEqual(customer, test_method(customer, dict()))
self.assertEqual(None, test_method(customer, dict(is_staff=True)))
self.assertEqual(
superuser, test_method(superuser, dict(is_superuser=True)))
self.assertEqual(
customer, test_method(
customer, dict(username__startswith='c')))
self.assertEqual(
None, test_method(superuser, dict(username__startswith='c')))
def get_user(user, filter_kwargs):
backend = TestFilteredBackend(filter_kwargs)
return backend.get_user(user.pk)
run_scenarios_with(get_user)
def authenticate(user, filter_kwargs):
backend = TestFilteredBackend(filter_kwargs)
return auth_through_backend(
backend=backend, username=user.username, password='password')
run_scenarios_with(authenticate)
class TestableBackend(object):
def __init__(self):
self.reset()
def authenticate(self, *a, **kw):
if django_11_or_later:
kw.pop('request')
self.calls.append((a, kw))
return self.authenticated_user
def reset(self):
self.calls = []
self.authenticated_user = None
def patched_get_backends(backends):
method_to_patch = \
'_get_backends' if django_11_or_later else 'get_backends'
return patch(
'django.contrib.auth.{}'.format(method_to_patch),
return_value=backends
)
class ActAsBackendAuthenticateTestCase(TransactionTestCase):
def setUp(self):
super(ActAsBackendAuthenticateTestCase, self).setUp()
self.first_test_backend = TestableBackend()
self.second_test_backend = TestableBackend()
self.third_test_backend_not_in_get_backends = TestableBackend()
self.act_as_auth_backend = ActAsBackend()
self.backends = [
self.first_test_backend,
self.act_as_auth_backend,
self.second_test_backend
]
def patched_get_backends(self):
return patched_get_backends(self.backends)
def test_does_not_inherit_from_any_backend(self):
self.assertEqual(
(ActAsBackend, object),
ActAsBackend.__mro__
)
def test_fails_if_multiple_act_as_backends_are_configured(self):
"""
while I can see how one could like to have multiple rules for
when one can becomes another user, I foresee complexity, unexpected
bugs, corner cases, etc. and thus would much rather place the burden
of managing the complexity/interaction between these various rules
on the user of this library - break the rules apart into multiple
methods, and compose them in your own code, so this library can
remain simple
"""
self.backends.append(ActAsBackend())
with self.patched_get_backends():
with self.assertRaises(ValueError):
auth_through_backend(
self.act_as_auth_backend,
username='foo/bar', password='password')
def test_it_tries_all_other_configured_backends(self):
with self.patched_get_backends():
auth_through_backend(
self.act_as_auth_backend,
username='foo/bar', password='password')
self.assertEqual(
[(tuple(), {'password': 'password', 'username': 'foo'})],
self.first_test_backend.calls)
self.assertEqual(
[(tuple(), {'password': 'password', 'username': 'foo'})],
self.second_test_backend.calls)
self.assertEqual([], self.third_test_backend_not_in_get_backends.calls)
def test_first_successful_backend_returned_later_ones_not_called(self):
self.first_test_backend.authenticated_user = User()
with self.patched_get_backends():
auth_through_backend(
self.act_as_auth_backend,
username='foo/bar', password='password')
self.assertEqual(
[(tuple(), {'password': 'password', 'username': 'foo'})],
self.first_test_backend.calls)
self.assertEqual([], self.second_test_backend.calls)
def test_cannot_authenticate_regular_user(self):
with self.patched_get_backends():
self.assertEqual(
None,
auth_through_backend(
self.act_as_auth_backend,
username='foo', password='password'))
self.assertEqual([], self.first_test_backend.calls)
self.assertEqual([], self.second_test_backend.calls)
def test_can_become_another_user_with_own_password(self):
create_user(username='admin', password='admin password')
user = create_user(username='user', password='user password')
self.assertEqual(
None, self.authenticate(
username='admin/user', password='user password'))
self.assertEqual(
user, self.authenticate(
username='admin/user', password='admin password'))
@patch("djactasauth.backends.log")
def test_usernames_with_multiple_sepchars_trigger_log_warning(self,
mock_log):
create_user(username='admin', password='foo')
self.assertEqual(None, self.authenticate(username='admin/user/',
password='foo'))
self.assertEqual(None, self.authenticate(username='admin//user',
password='foo'))
self.assertEqual(None, self.authenticate(username='admin/us/er',
password='foo'))
self.assertEqual(None, self.authenticate(username='/admin/user',
password='foo'))
calls = [call(ActAsBackend.too_many_sepchar_msg) for i in range(4)]
mock_log.warn.assert_has_calls(calls)
def test_cannot_become_nonexistent_user(self):
create_user(username='admin', password='password')
self.assertEqual(
None, self.authenticate(
username='admin/user', password='password'))
def test_authenticate_does_not_fire_login_signal(self):
def should_not_fire_login_signal(user, **kwargs):
self.fail(
'should not have fired login signal but did for %r' % user)
create_user(username='admin', password='admin password')
user = create_user(username='user', password='user password')
auth_signals.user_logged_in.connect(should_not_fire_login_signal)
try:
self.authenticate(username='admin/user', password='admin password')
finally:
auth_signals.user_logged_in.disconnect(
should_not_fire_login_signal)
self.assertEqual(
user, self.authenticate(
username='admin/user', password='admin password'))
def test_only_super_user_can_act_as_model_backend_regression(self):
create_user(
username='admin1', password='admin1 password', is_superuser=True)
create_user(
username='admin2', password='admin2 password', is_superuser=True)
user = create_user(
username='user', password='user password', is_superuser=False)
self.assertEqual(
None, self.authenticate(
username='user/admin1', password='user password',
backend_cls=OnlySuperuserCanActAsBackend))
self.assertEqual(
None, self.authenticate(
username='user/admin2', password='user password',
backend_cls=OnlySuperuserCanActAsBackend))
self.assertEqual(
user, self.authenticate(
backend_cls=OnlySuperuserCanActAsBackend,
username='admin1/user', password='admin1 password'))
self.assertEqual(
user, self.authenticate(
backend_cls=OnlySuperuserCanActAsBackend,
username='admin2/user', password='admin2 password'))
self.assertEqual(
None, self.authenticate(
backend_cls=OnlySuperuserCanActAsBackend,
username='admin1/admin2', password='admin1 password'))
self.assertEqual(
None, self.authenticate(
backend_cls=OnlySuperuserCanActAsBackend,
username='admin2/admin1', password='admin2 password'))
def test_can_customize_can_act_as_policy_by_subclassing(self):
alice = create_user(username='alice', password='alice')
create_user(username='bob', password='bob')
class OnlyShortUserNamesCanActAs(ActAsBackend):
def can_act_as(self, auth_user, user):
return len(auth_user.username) <= 3
self.assertEqual(
None, self.authenticate(
backend_cls=OnlyShortUserNamesCanActAs,
username='alice/bob', password='alice'))
self.assertEqual(
alice, self.authenticate(
backend_cls=OnlyShortUserNamesCanActAs,
username='bob/alice', password='bob'))
def test_when_users_none_doesnt_crash_process(self):
create_user(username='jane', password='doe')
class ShouldNotCallCanActAs(ActAsBackend):
def can_act_as(backend_self, auth_user, user):
self.fail('should not have called')
self.assertEqual(
None, self.authenticate(
backend_cls=ShouldNotCallCanActAs,
username='jane/non-existent-user', password='doe'))
def test_is_act_as_username_method(self):
def assert_classification(username, expected_to_be_act_as_username):
self.assertEqual(
expected_to_be_act_as_username,
ActAsBackend.is_act_as_username(username))
assert_classification(None, False)
assert_classification('', False)
assert_classification('user', False)
assert_classification('user/johndoe', True)
###
def authenticate(self, username, password, backend_cls=None):
if not backend_cls:
class EveryoneCanActAs(ActAsBackend):
def can_act_as(self, auth_user, user):
return True
backend_cls = EveryoneCanActAs
backend = backend_cls()
with patched_get_backends([backend, ModelBackend()]):
return auth_through_backend(
backend, username=username, password=password)
@override_settings(
AUTHENTICATION_BACKENDS=[
'djactasauth.backends.OnlySuperuserCanActAsBackend',
'django.contrib.auth.backends.ModelBackend'])
class EndToEndActAsThroughFormAndView(TransactionTestCase):
def test_login_page_is_set_up_as_expected(self):
self.goto_login_page()
response = self.login_get_response
self.assertEqual(200, response.status_code)
form = response.context['form']
self.assertTrue(
isinstance(form, AuthenticationForm), type(form).__mro__)
def test_successful_act_as_login_fires_signal_with_act_as_user(self):
logged_in_users = []
def handle_user_logged_in(user, **kwargs):
logged_in_users.append(user)
auth_signals.user_logged_in.connect(handle_user_logged_in)
create_user(username='admin', password='admin', is_superuser=True)
user = create_user(
username='user', password='user', is_superuser=False)
try:
self.goto_login_page()
self.submit_login(username='admin/user', password='admin')
self.assertEqual(302, self.login_post_response.status_code)
finally:
auth_signals.user_logged_in.disconnect(handle_user_logged_in)
self.assertEqual([user], logged_in_users)
def test_after_login_correct_user_is_passed_in_the_request_no_act_as(self):
create_user(username='admin', password='admin', is_superuser=True)
self.assert_logged_in_user_on_next_request(
username='admin', password='admin', display_user='admin')
def test_after_login_correct_user_is_passed_in_the_request_act_as(self):
create_user(username='admin', password='admin', is_superuser=True)
create_user(username='user', password='user', is_superuser=False)
self.assert_logged_in_user_on_next_request(
username='admin/user', password='admin', display_user='user')
def test_next_from_GET_is_respected_and_user_is_redirected_there(self):
create_user(username='user', password='user', is_superuser=False)
self.assert_logged_in_user_on_next_request(
username='user', password='user', display_user='user',
**{REDIRECT_FIELD_NAME: '/foo/'})
redir_to = parse.urlparse(self.login_post_response['Location'])
self.assertEqual('/foo/', redir_to.path)
def test_on_post_form_has_access_to_request(self):
self.goto_login_page()
self.submit_login(username='foo', password='bar')
response = self.login_post_response
self.assertEqual(200, response.status_code)
form = response.context['form']
self.assertTrue(hasattr(form, 'request'))
self.assertIsNotNone(form.request)
def test_can_initialize_username_from_querystring(self):
self.goto_login_page(username='foo')
form = self.login_get_response.context['form']
self.assertEqual('foo', form.initial.get('username'))
###
def assert_logged_in_user_on_next_request(
self, username, password, display_user, **query):
self.goto_login_page(**query)
self.submit_login(username=username, password=password, **query)
response_content = self.login_post_response.content.decode('ascii')
self.assertEqual(
302, self.login_post_response.status_code,
(username, password, response_content))
self.get_whoami_page()
self.assertEqual(
display_user, self.whoami_response.content.decode('ascii'))
def goto_login_page(self, **query):
url = get_login_url(**query)
self.login_get_response = self.client.get(url)
self.assertEqual(200, self.login_get_response.status_code)
def submit_login(self, username, password, **query):
url = get_login_url(**query)
self.login_post_response = self.client.post(
url, dict(username=username, password=password))
def get_whoami_page(self):
self.whoami_response = self.client.get('/whoami/')
self.assertEqual(200, self.whoami_response.status_code)
class ActAsUrlGeneratorTestCase(TransactionTestCase):
def test_generates_the_correct_url(self):
self.assertEqual(
'/login/?username=admin%2Fuser',
act_as_login_url(auth='admin', act_as='user'))
self.assertEqual(
'/login/?username=foo%2Fbar',
act_as_login_url(auth='foo', act_as='bar'))
|
PaesslerAG/django-act-as-auth
|
tests/testapp/tests.py
|
Python
|
bsd-3-clause
| 17,168 | 0 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Deprecated and experimental annotations.
Experimental: Signifies that a public API (public class, method or field) is
subject to incompatible changes, or even removal, in a future release. Note that
the presence of this annotation implies nothing about the quality or performance
of the API in question, only the fact that the API or behavior may change in any
way.
Deprecated: Signifies that users are discouraged from using a public API
typically because a better alternative exists, and the current form might be
removed in a future version.
Usage:
For internal use only; no backwards-compatibility guarantees.
Annotations come in two flavors: deprecated and experimental
The 'deprecated' annotation requires a 'since' parameter to specify
what version deprecated it.
Both 'deprecated' and 'experimental' annotations can specify the
current recommended version to use by means of a 'current' parameter.
The following example illustrates how to annotate coexisting versions of the
same function 'multiply'.::
def multiply(arg1, arg2):
print(arg1, '*', arg2, '=', end=' ')
return arg1*arg2
# This annotation marks 'old_multiply' as deprecated since 'v.1' and suggests
# using 'multiply' instead.::
@deprecated(since='v.1', current='multiply')
def old_multiply(arg1, arg2):
result = 0
  for i in range(arg1):
result += arg2
print(arg1, '*', arg2, '(the old way)=', end=' ')
return result
# This annotation marks 'exp_multiply' as experimental and suggests
# using 'multiply' instead.::
@experimental(since='v.1', current='multiply')
def exp_multiply(arg1, arg2):
print(arg1, '*', arg2, '(the experimental way)=', end=' ')
return (arg1*arg2)*(arg1/arg2)*(arg2/arg1)
# If a custom message is needed, on both annotation types the
# arg custom_message can be used.::
@experimental(since='v.1', current='multiply',
              custom_message='Experimental since %since%. '
                             'Please use %current% instead.')
def exp_multiply(arg1, arg2):
print(arg1, '*', arg2, '(the experimental way)=', end=' ')
return (arg1*arg2)*(arg1/arg2)*(arg2/arg1)
# Set a warning filter to control how often warnings are produced.::
warnings.simplefilter("always")
print(multiply(5, 6))
print(old_multiply(5,6))
print(exp_multiply(5,6))
"""
# pytype: skip-file
import warnings
from functools import partial
from functools import wraps
class BeamDeprecationWarning(DeprecationWarning):
"""Beam-specific deprecation warnings."""
# Don't ignore BeamDeprecationWarnings.
warnings.simplefilter('once', BeamDeprecationWarning)
def annotate(label, since, current, extra_message, custom_message=None):
"""Decorates an API with a deprecated or experimental annotation.
Args:
label: the kind of annotation ('deprecated' or 'experimental').
since: the version that causes the annotation.
current: the suggested replacement function.
extra_message: an optional additional message.
    custom_message: if the default message does not suffice, the message
      can be changed using this argument. A string
      with replacement tokens. A replacement token marks
      where the corresponding argument value will be
      placed in the custom message.
The following replacement strings can be used:
%name% -> API.__name__
      %since% -> since (mandatory for the deprecated annotation)
%current% -> current
%extra% -> extra_message
Returns:
The decorator for the API.
"""
def _annotate(fnc):
@wraps(fnc)
def inner(*args, **kwargs):
if label == 'deprecated':
warning_type = BeamDeprecationWarning
else:
warning_type = FutureWarning
if custom_message is None:
message = '%s is %s' % (fnc.__name__, label)
if label == 'deprecated':
message += ' since %s' % since
message += '. Use %s instead.' % current if current else '.'
if extra_message:
message += ' ' + extra_message
else:
if label == 'deprecated' and '%since%' not in custom_message:
        raise TypeError(
            'Replacement token %since% not found in custom message')
emptyArg = lambda x: '' if x is None else x
message = custom_message\
.replace('%name%', fnc.__name__)\
.replace('%since%', emptyArg(since))\
.replace('%current%', emptyArg(current))\
.replace('%extra%', emptyArg(extra_message))
warnings.warn(message, warning_type, stacklevel=2)
return fnc(*args, **kwargs)
return inner
return _annotate
# Use partial application to customize each annotation.
# 'current' will be optional in both deprecated and experimental
# while 'since' will be mandatory for deprecated.
deprecated = partial(
annotate, label='deprecated', current=None, extra_message=None)
experimental = partial(
annotate,
label='experimental',
current=None,
since=None,
extra_message=None)
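# --- Editor's sketch (not part of the original module): the partials in use ---
# 'old_sum' and 'fast_sum' are hypothetical names. With an 'always' warning
# filter, each call emits the message built by annotate().
#
# @deprecated(since='2.0', current='sum')
# def old_sum(xs):
#     return sum(xs)
#
# @experimental(current='sum')
# def fast_sum(xs):
#     return sum(xs)
#
# warnings.simplefilter('always')
# old_sum([1, 2])   # BeamDeprecationWarning: "old_sum is deprecated since 2.0. Use sum instead."
# fast_sum([1, 2])  # FutureWarning: "fast_sum is experimental. Use sum instead."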
|
lukecwik/incubator-beam
|
sdks/python/apache_beam/utils/annotations.py
|
Python
|
apache-2.0
| 5,756 | 0.003648 |
import MySQLdb
class DatabaseHandler:
def __init__(self):
pass
    def is_delete(self, tableName):
        # Reserved tables hold user accounts, tokens and configuration; keep them.
        reservedTableNameList = ["mantis_user_table", "mantis_tokens_table", "mantis_config_table"]
        isDeleteFlag = 1
        for name in reservedTableNameList:
            if cmp(tableName, name) == 0:  # cmp() returns 0 when the names match (Python 2)
                isDeleteFlag = 0
                break
        return isDeleteFlag
def Clean_Database(self, hostUrl, account, password, databaseName):
        print 'Clean database'
db = MySQLdb.connect(host=hostUrl, user=account, passwd=password, db=databaseName)
cursor = db.cursor()
cursor.execute("Show Tables from " + databaseName)
result = cursor.fetchall()
for record in result:
tableName = record[0]
isDelete = self.is_delete(tableName)
if isDelete == 0:
print "Reserve " + tableName
            else:
print "TRUNCATE TABLE `" + tableName + "`"
cursor.execute("TRUNCATE TABLE `" + tableName + "`")
print 'Add admin'
cursor.execute("INSERT INTO `account` VALUES (1, 'admin', 'admin', 'example@ezScrum.tw', '21232f297a57a5a743894a0e4a801fc3', 1, 1379910191599, 1379910191599)")
cursor.execute("INSERT INTO `system` VALUES (1, 1)")
db.commit()
#if __name__ == '__main__':
# databaseHandler = DatabaseHandler()
# databaseHandler.Clean_Database("localhost", "spark", "spark", "robottest")
|
ezScrum/ezScrum
|
robotTesting/keywords/lib/DatabaseHandler.py
|
Python
|
gpl-2.0
| 1,547 | 0.007111 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-27 13:08
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comments', '0003_auto_20170726_1348'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='parent',
field=models.ForeignKey(default=-1, null=True, on_delete=django.db.models.deletion.CASCADE,
to='comments.Comment'),
),
]
|
Bugheist/website
|
comments/migrations/0004_auto_20170727_1308.py
|
Python
|
agpl-3.0
| 559 | 0.001789 |
import flask; from flask import request
import os
import urllib.parse
from voussoirkit import flasktools
from voussoirkit import gentools
from voussoirkit import stringtools
import etiquette
from .. import common
site = common.site
session_manager = common.session_manager
# Individual albums ################################################################################
@site.route('/album/<album_id>')
def get_album_html(album_id):
album = common.P_album(album_id, response_type='html')
response = common.render_template(
request,
'album.html',
album=album,
view=request.args.get('view', 'grid'),
)
return response
@site.route('/album/<album_id>.json')
def get_album_json(album_id):
album = common.P_album(album_id, response_type='json')
album = album.jsonify()
return flasktools.json_response(album)
@site.route('/album/<album_id>.zip')
def get_album_zip(album_id):
album = common.P_album(album_id, response_type='html')
recursive = request.args.get('recursive', True)
recursive = stringtools.truthystring(recursive)
streamed_zip = etiquette.helpers.zip_album(album, recursive=recursive)
if album.title:
download_as = f'album {album.id} - {album.title}.zip'
else:
download_as = f'album {album.id}.zip'
download_as = etiquette.helpers.remove_path_badchars(download_as)
download_as = urllib.parse.quote(download_as)
outgoing_headers = {
'Content-Type': 'application/octet-stream',
'Content-Disposition': f'attachment; filename*=UTF-8\'\'{download_as}',
}
return flask.Response(streamed_zip, headers=outgoing_headers)
@site.route('/album/<album_id>/add_child', methods=['POST'])
@flasktools.required_fields(['child_id'], forbid_whitespace=True)
def post_album_add_child(album_id):
album = common.P_album(album_id, response_type='json')
child_ids = stringtools.comma_space_split(request.form['child_id'])
children = list(common.P_albums(child_ids, response_type='json'))
print(children)
album.add_children(children, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
@site.route('/album/<album_id>/remove_child', methods=['POST'])
@flasktools.required_fields(['child_id'], forbid_whitespace=True)
def post_album_remove_child(album_id):
album = common.P_album(album_id, response_type='json')
child_ids = stringtools.comma_space_split(request.form['child_id'])
children = list(common.P_albums(child_ids, response_type='json'))
album.remove_children(children, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
@site.route('/album/<album_id>/remove_thumbnail_photo', methods=['POST'])
def post_album_remove_thumbnail_photo(album_id):
album = common.P_album(album_id, response_type='json')
album.set_thumbnail_photo(None)
common.P.commit(message='album remove thumbnail photo endpoint')
return flasktools.json_response(album.jsonify())
@site.route('/album/<album_id>/refresh_directories', methods=['POST'])
def post_album_refresh_directories(album_id):
album = common.P_album(album_id, response_type='json')
for directory in album.get_associated_directories():
if not directory.is_dir:
continue
digest = common.P.digest_directory(directory, new_photo_ratelimit=0.1)
gentools.run(digest)
common.P.commit(message='refresh album directories endpoint')
return flasktools.json_response({})
@site.route('/album/<album_id>/set_thumbnail_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_set_thumbnail_photo(album_id):
album = common.P_album(album_id, response_type='json')
photo = common.P_photo(request.form['photo_id'], response_type='json')
album.set_thumbnail_photo(photo)
common.P.commit(message='album set thumbnail photo endpoint')
return flasktools.json_response(album.jsonify())
# Album photo operations ###########################################################################
@site.route('/album/<album_id>/add_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_add_photo(album_id):
'''
Add a photo or photos to this album.
'''
album = common.P_album(album_id, response_type='json')
photo_ids = stringtools.comma_space_split(request.form['photo_id'])
photos = list(common.P_photos(photo_ids, response_type='json'))
album.add_photos(photos, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
@site.route('/album/<album_id>/remove_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_remove_photo(album_id):
'''
Remove a photo or photos from this album.
'''
album = common.P_album(album_id, response_type='json')
photo_ids = stringtools.comma_space_split(request.form['photo_id'])
photos = list(common.P_photos(photo_ids, response_type='json'))
album.remove_photos(photos, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
# Album tag operations #############################################################################
@site.route('/album/<album_id>/add_tag', methods=['POST'])
def post_album_add_tag(album_id):
'''
Apply a tag to every photo in the album.
'''
response = {}
album = common.P_album(album_id, response_type='json')
tag = request.form['tagname'].strip()
try:
tag = common.P_tag(tag, response_type='json')
except etiquette.exceptions.NoSuchTag as exc:
response = exc.jsonify()
return flasktools.json_response(response, status=404)
recursive = request.form.get('recursive', False)
recursive = stringtools.truthystring(recursive)
album.add_tag_to_all(tag, nested_children=recursive, commit=True)
response['action'] = 'add_tag'
response['tagname'] = tag.name
return flasktools.json_response(response)
# Album metadata operations ########################################################################
@site.route('/album/<album_id>/edit', methods=['POST'])
def post_album_edit(album_id):
'''
Edit the title / description.
'''
album = common.P_album(album_id, response_type='json')
title = request.form.get('title', None)
description = request.form.get('description', None)
album.edit(title=title, description=description, commit=True)
response = album.jsonify(minimal=True)
return flasktools.json_response(response)
@site.route('/album/<album_id>/show_in_folder', methods=['POST'])
def post_album_show_in_folder(album_id):
if not request.is_localhost:
flask.abort(403)
album = common.P_album(album_id, response_type='json')
directories = album.get_associated_directories()
if len(directories) != 1:
flask.abort(400)
directory = directories.pop()
if os.name == 'nt':
command = f'start explorer.exe "{directory.absolute_path}"'
os.system(command)
return flasktools.json_response({})
flask.abort(501)
# Album listings ###################################################################################
@site.route('/all_albums.json')
@flasktools.cached_endpoint(max_age=15)
def get_all_album_names():
all_albums = {album.id: album.display_name for album in common.P.get_albums()}
response = {'albums': all_albums}
return flasktools.json_response(response)
def get_albums_core():
albums = list(common.P.get_root_albums())
albums.sort(key=lambda x: x.display_name.lower())
return albums
@site.route('/albums')
def get_albums_html():
albums = get_albums_core()
response = common.render_template(
request,
'album.html',
albums=albums,
view=request.args.get('view', 'grid'),
)
return response
@site.route('/albums.json')
def get_albums_json():
albums = get_albums_core()
albums = [album.jsonify(minimal=True) for album in albums]
return flasktools.json_response(albums)
# Album create and delete ##########################################################################
@site.route('/albums/create_album', methods=['POST'])
def post_albums_create():
title = request.form.get('title', None)
description = request.form.get('description', None)
parent_id = request.form.get('parent_id', None)
if parent_id is not None:
parent = common.P_album(parent_id, response_type='json')
user = session_manager.get(request).user
album = common.P.new_album(title=title, description=description, author=user)
if parent_id is not None:
parent.add_child(album)
common.P.commit('create album endpoint')
response = album.jsonify(minimal=False)
return flasktools.json_response(response)
@site.route('/album/<album_id>/delete', methods=['POST'])
def post_album_delete(album_id):
album = common.P_album(album_id, response_type='json')
album.delete(commit=True)
return flasktools.json_response({})
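# --- Editor's sketch (hypothetical, not part of the original file) ---
# Exercising the endpoints above, assuming 'site' is (or wraps) a Flask app;
# the form fields mirror what the handlers read ('photo_id', 'tagname',
# 'recursive', 'title', 'description').
#
# client = site.test_client()
# client.post('/album/xyz/add_photo', data={'photo_id': 'p1, p2'})
# client.post('/album/xyz/add_tag', data={'tagname': 'vacation', 'recursive': 'yes'})
# client.post('/album/xyz/edit', data={'title': 'New title', 'description': 'Summer'})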
|
voussoir/etiquette
|
frontends/etiquette_flask/backend/endpoints/album_endpoints.py
|
Python
|
bsd-2-clause
| 9,106 | 0.003075 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteProfile
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-talent
# [START jobs_v4beta1_generated_ProfileService_DeleteProfile_sync]
from google.cloud import talent_v4beta1
def sample_delete_profile():
# Create a client
client = talent_v4beta1.ProfileServiceClient()
# Initialize request argument(s)
request = talent_v4beta1.DeleteProfileRequest(
name="name_value",
)
# Make the request
client.delete_profile(request=request)
# [END jobs_v4beta1_generated_ProfileService_DeleteProfile_sync]
|
googleapis/python-talent
|
samples/generated_samples/jobs_v4beta1_generated_profile_service_delete_profile_sync.py
|
Python
|
apache-2.0
| 1,401 | 0.000714 |
# coding=utf-8
"""Test configuration of toolbox."""
import importlib
import os
import pytest
from snntoolbox.bin.utils import update_setup
from snntoolbox.utils.utils import import_configparser
with open(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', 'requirements.txt'))) as f:
requirements = []
for s in f.readlines():
requirements.append(s.rstrip('\n').split('==')[0])
@pytest.mark.parametrize('required_module', requirements)
def test_imports_from_requirements(required_module):
assert importlib.import_module(required_module)
# Todo: Add configuration that is expected to pass.
_in_and_out = [
({}, False),
({'paths': {'path_wd': os.path.dirname(__file__),
'dataset_path': os.path.dirname(__file__),
'filename_ann': '98.96'}}, False)
]
@pytest.mark.parametrize('params, expect_pass', _in_and_out)
def test_updating_settings(params, expect_pass, _path_wd):
configparser = import_configparser()
config = configparser.ConfigParser()
config.read_dict(params)
configpath = os.path.join(str(_path_wd), 'config')
with open(configpath, 'w') as file:
config.write(file)
if expect_pass:
assert update_setup(configpath)
else:
pytest.raises(AssertionError, update_setup, configpath)
|
NeuromorphicProcessorProject/snn_toolbox
|
tests/core/test_config.py
|
Python
|
mit
| 1,363 | 0 |
"""Assertions related to event validation"""
import json
import pprint
import six
def assert_event_matches(expected, actual, tolerate=None):
"""
Compare two event dictionaries.
Fail if any discrepancies exist, and output the list of all discrepancies. The intent is to produce clearer
error messages than "{ some massive dict } != { some other massive dict }", instead enumerating the keys that
differ. Produces period separated "paths" to keys in the output, so "context.foo" refers to the following
structure:
{
'context': {
'foo': 'bar' # this key, value pair
}
}
The other key difference between this comparison and `assertEquals` is that it supports differing levels of
tolerance for discrepancies. We don't want to litter our tests full of exact match tests because then anytime we
add a field to all events, we have to go update every single test that has a hardcoded complete event structure in
it. Instead we support making partial assertions about structure and content of the event. So if I say my expected
event looks like this:
{
'event_type': 'foo.bar',
'event': {
'user_id': 10
}
}
This method will raise an assertion error if the actual event either does not contain the above fields in their
exact locations in the hierarchy, or if it does contain them but has different values for them. Note that it will
*not* necessarily raise an assertion error if the actual event contains other fields that are not listed in the
expected event. For example, the following event would not raise an assertion error:
{
'event_type': 'foo.bar',
'referer': 'http://example.com'
'event': {
'user_id': 10
}
}
Note that the extra "referer" field is not considered an error by default.
The `tolerate` parameter takes a set that allows you to specify varying degrees of tolerance for some common
eventing related issues. See the `EventMatchTolerates` class for more information about the various flags that are
supported here.
Example output if an error is found:
Unexpected differences found in structs:
* <path>: not found in actual
* <path>: <expected_value> != <actual_value> (expected != actual)
Expected:
    { <expected event> }
Actual:
{ <actual event> }
"<path>" is a "." separated string indicating the key that differed. In the examples above "event.user_id" would
refer to the value of the "user_id" field contained within the dictionary referred to by the "event" field in the
root dictionary.
"""
differences = get_event_differences(expected, actual, tolerate=tolerate)
if len(differences) > 0:
debug_info = [
'',
'Expected:',
block_indent(expected),
'Actual:',
block_indent(actual),
'Tolerating:',
block_indent(EventMatchTolerates.default_if_not_defined(tolerate)),
]
differences = ['* ' + d for d in differences]
message_lines = differences + debug_info
raise AssertionError('Unexpected differences found in structs:\n\n' + '\n'.join(message_lines))
class EventMatchTolerates(object):
"""
Represents groups of flags that specify the level of tolerance for deviation between an expected event and an actual
event.
These are common event specific deviations that we don't want to handle with special case logic throughout our
tests.
"""
# Allow the "event" field to be a string, currently this is the case for all browser events.
STRING_PAYLOAD = 'string_payload'
# Allow unexpected fields to exist in the top level event dictionary.
ROOT_EXTRA_FIELDS = 'root_extra_fields'
# Allow unexpected fields to exist in the "context" dictionary. This is where new fields that appear in multiple
# events are most commonly added, so we frequently want to tolerate variation here.
CONTEXT_EXTRA_FIELDS = 'context_extra_fields'
# Allow unexpected fields to exist in the "event" dictionary. Typically in unit tests we don't want to allow this
# type of variance since there are typically only a small number of tests for a particular event type.
PAYLOAD_EXTRA_FIELDS = 'payload_extra_fields'
@classmethod
def default(cls):
"""A reasonable set of tolerated variations."""
# NOTE: "payload_extra_fields" is deliberately excluded from this list since we want to detect erroneously added
# fields in the payload by default.
return {
cls.STRING_PAYLOAD,
cls.ROOT_EXTRA_FIELDS,
cls.CONTEXT_EXTRA_FIELDS,
}
@classmethod
def lenient(cls):
"""Allow all known variations."""
return cls.default() | {
cls.PAYLOAD_EXTRA_FIELDS
}
@classmethod
def strict(cls):
"""Allow no variation at all."""
return frozenset()
@classmethod
def default_if_not_defined(cls, tolerates=None):
"""Use the provided tolerance or provide a default one if None was specified."""
if tolerates is None:
return cls.default()
else:
return tolerates
def assert_events_equal(expected, actual):
"""
Strict comparison of two events.
This asserts that every field in the real event exactly matches the expected event.
"""
assert_event_matches(expected, actual, tolerate=EventMatchTolerates.strict())
def get_event_differences(expected, actual, tolerate=None):
"""Given two events, gather a list of differences between them given some set of tolerated variances."""
tolerate = EventMatchTolerates.default_if_not_defined(tolerate)
# Some events store their payload in a JSON string instead of a dict. Comparing these strings can be problematic
# since the keys may be in different orders, so we parse the string here if we were expecting a dict.
if EventMatchTolerates.STRING_PAYLOAD in tolerate:
expected = parse_event_payload(expected)
actual = parse_event_payload(actual)
def should_strict_compare(path):
"""
We want to be able to vary the degree of strictness we apply depending on the testing context.
Some tests will want to assert that the entire event matches exactly, others will tolerate some variance in the
context or root fields, but not in the payload (for example).
"""
if path == [] and EventMatchTolerates.ROOT_EXTRA_FIELDS in tolerate:
return False
elif path == ['event'] and EventMatchTolerates.PAYLOAD_EXTRA_FIELDS in tolerate:
return False
elif path == ['context'] and EventMatchTolerates.CONTEXT_EXTRA_FIELDS in tolerate:
return False
else:
return True
return compare_structs(expected, actual, should_strict_compare=should_strict_compare)
def block_indent(text, spaces=4):
"""
Given a multi-line string, indent every line of it by the given number of spaces.
If `text` is not a string it is formatted using pprint.pformat.
"""
return '\n'.join([(' ' * spaces) + l for l in pprint.pformat(text).splitlines()])
def parse_event_payload(event):
"""
Given an event, parse the "event" field as a JSON string.
Note that this may simply return the same event unchanged, or return a new copy of the event with the payload
parsed. It will never modify the event in place.
"""
if 'event' in event and isinstance(event['event'], six.string_types):
event = event.copy()
try:
event['event'] = json.loads(event['event'])
except ValueError:
pass
return event
def compare_structs(expected, actual, should_strict_compare=None, path=None):
"""
Traverse two structures to ensure that the `actual` structure contains all of the elements within the `expected`
one.
    Note that this performs a "deep" comparison, descending into dictionaries, lists and other collections to ensure
that the structure matches the expectation.
If a particular value is not recognized, it is simply compared using the "!=" operator.
"""
if path is None:
path = []
differences = []
if isinstance(expected, dict) and isinstance(actual, dict):
expected_keys = frozenset(list(expected.keys()))
actual_keys = frozenset(list(actual.keys()))
for key in expected_keys - actual_keys:
differences.append(u'{0}: not found in actual'.format(_path_to_string(path + [key])))
if should_strict_compare is not None and should_strict_compare(path):
for key in actual_keys - expected_keys:
differences.append(u'{0}: only defined in actual'.format(_path_to_string(path + [key])))
for key in expected_keys & actual_keys:
child_differences = compare_structs(expected[key], actual[key], should_strict_compare, path + [key])
differences.extend(child_differences)
elif expected != actual:
differences.append(u'{path}: {a} != {b} (expected != actual)'.format(
path=_path_to_string(path),
a=repr(expected),
b=repr(actual)
))
return differences
def is_matching_event(expected_event, actual_event, tolerate=None):
"""Return True iff the `actual_event` matches the `expected_event` given the tolerances."""
return len(get_event_differences(expected_event, actual_event, tolerate=tolerate)) == 0
def _path_to_string(path):
"""Convert a list of path elements into a single path string."""
return '.'.join(path)
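# --- Editor's sketch (not part of the original module): typical usage ---
# The expected event pins down only the fields under test; default tolerances
# accept extra root/context fields and a JSON-string payload.
#
# expected = {'event_type': 'foo.bar', 'event': {'user_id': 10}}
# actual = {
#     'event_type': 'foo.bar',
#     'referer': 'http://example.com',
#     'context': {'course_id': 'demo'},
#     'event': '{"user_id": 10}',
# }
# assert_event_matches(expected, actual)   # passes under default tolerances
# assert_events_equal(expected, expected)  # strict: every field must match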
|
edx-solutions/edx-platform
|
openedx/core/lib/tests/assertions/events.py
|
Python
|
agpl-3.0
| 9,836 | 0.004778 |
# Copyright 2015 Peter van Zetten
#
# This file is part of pypkgmirror.
#
# pypkgmirror is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pypkgmirror is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pypkgmirror. If not, see <http://www.gnu.org/licenses/>.
""" Entry point and main function for pypkgmirror. """
import errno
import multiprocessing
import os
import subprocess
from pypkgmirror.util import conf, log
def mkdir(d):
try:
os.makedirs(d)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def main():
"""
Script entry point for pypkgmirror.
Parses the configuration and assembles a collection of subprocess calls,
then invokes them.
"""
from pypkgmirror.agents import DebmirrorAgent, RsyncAgent, AptlyAgent
if 'loglevel' in conf:
log.setLevel(conf['loglevel'])
mirrors = []
aptly_mirrors = [] # aptly shares a database so these should not be parallel
for _ in conf.get('mirrors', {}).get('debmirror', []):
mirrors.append(DebmirrorAgent(_, conf['mirrors']['debmirror'][_]))
for _ in conf.get('mirrors', {}).get('rsync', []):
mirrors.append(RsyncAgent(_, conf['mirrors']['rsync'][_]))
for _ in conf.get('mirrors', {}).get('aptly', []):
aptly_mirrors.append(AptlyAgent(_, conf['mirrors']['aptly'][_]))
pool = multiprocessing.Pool(2)
pool.map(start_sync, mirrors)
pool.close()
pool.join()
pool = multiprocessing.Pool(1)
pool.map(start_sync, aptly_mirrors)
pool.close()
pool.join()
_subprocess_call(['hardlink', '-fpot', conf['basedir']])
def start_sync(agent):
"""
Performs a full mirror update with the given agent. This should typically
download any new or updated packages from a remote repository, and update
any necessary indexes.
"""
log.info("Syncing repository '%s' hosted at %s", agent.name, agent.host)
outfile_path = "%s/%s.out" % (conf['logdir'], agent.name)
errfile_path = "%s/%s.err" % (conf['logdir'], agent.name)
mkdir(os.path.dirname(outfile_path))
mkdir(agent.basedir)
with open(outfile_path, 'w') as outfile, open(errfile_path, 'w') as errfile:
for call in agent.get_calls():
log.debug(' '.join(call))
if conf.get('noop'):
continue
_subprocess_call(call, outfile, errfile)
def _subprocess_call(call, stdout=None, stderr=None):
"""
Trigger a subprocess execution with optional stdout/stderr redirection and
trivial handling of missing executables.
"""
try:
subprocess.call(call, stdout=stdout, stderr=stderr)
except OSError as e:
        if e.errno == errno.ENOENT:
log.error("The required program %s was not found, no packages synced", call[0])
else:
raise
if __name__ == "__main__":
main()
|
Zetten/pypkgmirror
|
pypkgmirror/__init__.py
|
Python
|
gpl-3.0
| 3,339 | 0.000898 |
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk
import os
import shutil
from senf import fsnative
from quodlibet.formats import AudioFileError
from quodlibet import config
from quodlibet.util import connect_obj, is_windows
from quodlibet.formats import AudioFile
from quodlibet.compat import text_type, iteritems, iterkeys, itervalues
from tests import TestCase, get_data_path, mkstemp, mkdtemp, skipIf
from .helper import capture_output, get_temp_copy
from quodlibet.library.libraries import Library, PicklingMixin, SongLibrary, \
FileLibrary, AlbumLibrary, SongFileLibrary, iter_paths
class Fake(int):
def __init__(self, _):
self.key = int(self)
def Frange(*args):
return list(map(Fake, range(*args)))
class FakeSong(Fake):
def list(self, tag):
# Turn tag_values into a less-than query, for testing.
if tag <= self:
return []
else:
return [int(self)]
def rename(self, newname):
self.key = newname
class AlbumSong(AudioFile):
"""A mock AudioFile belong to one of three albums,
based on a single number"""
def __init__(self, num, album=None):
super(AlbumSong, self).__init__()
self["~filename"] = fsnative(u"file_%d.mp3" % (num + 1))
self["title"] = "Song %d" % (num + 1)
self["artist"] = "Fakeman"
if album is None:
self["album"] = "Album %d" % (num % 3 + 1)
else:
self["album"] = album
self["labelid"] = self["album"]
class FakeSongFile(FakeSong):
_valid = True
_exists = True
_mounted = True
@property
def mountpoint(self):
return "/" if self._mounted else "/FAKE"
def valid(self):
return self._valid
def exists(self):
return self._exists
def reload(self):
if self._exists:
self._valid = True
else:
raise IOError("doesn't exist")
def mounted(self):
return self._mounted
# Custom range functions, to generate lists of song-like objects
def FSFrange(*args):
return list(map(FakeSongFile, range(*args)))
def FSrange(*args):
return list(map(FakeSong, range(*args)))
def ASrange(*args):
return list(map(AlbumSong, range(*args)))
class TLibrary(TestCase):
Fake = Fake
Frange = staticmethod(Frange)
Library = Library
def setUp(self):
self.library = self.Library()
self.added = []
self.changed = []
self.removed = []
connect_obj(self.library, 'added', list.extend, self.added)
connect_obj(self.library, 'changed', list.extend, self.changed)
connect_obj(self.library, 'removed', list.extend, self.removed)
def test_add(self):
self.library.add(self.Frange(12))
self.failUnlessEqual(self.added, self.Frange(12))
del(self.added[:])
self.library.add(self.Frange(12, 24))
self.failUnlessEqual(self.added, self.Frange(12, 24))
def test_remove(self):
self.library.add(self.Frange(10))
self.assertTrue(self.library.remove(self.Frange(3, 6)))
self.failUnlessEqual(self.removed, self.Frange(3, 6))
# Neither the objects nor their keys should be present.
self.failIf(self.Fake(3) in self.library)
self.failUnless(self.Fake(6) in self.library)
self.failIf(3 in self.library)
self.failUnless(6 in self.library)
def test_remove_when_not_present(self):
self.assertFalse(self.library.remove([self.Fake(12)]))
def test_changed(self):
self.library.add(self.Frange(10))
self.library.changed(self.Frange(5))
while Gtk.events_pending():
Gtk.main_iteration()
self.failUnlessEqual(self.changed, self.Frange(5))
def test_changed_not_present(self):
self.library.add(self.Frange(10))
self.library.changed(self.Frange(2, 20, 3))
while Gtk.events_pending():
Gtk.main_iteration()
self.failUnlessEqual(set(self.changed), {2, 5, 8})
def test_changed_none_present(self):
self.library.changed(self.Frange(5))
while Gtk.events_pending():
Gtk.main_iteration()
def test___iter__(self):
self.library.add(self.Frange(10))
self.failUnlessEqual(sorted(list(self.library)), self.Frange(10))
def test___iter___empty(self):
self.failIf(list(self.library))
def test___len__(self):
self.failUnlessEqual(len(self.library), 0)
self.library.add(self.Frange(10))
self.failUnlessEqual(len(self.library), 10)
self.library.remove(self.Frange(3))
self.failUnlessEqual(len(self.library), 7)
def test___getitem__(self):
self.library.add(self.Frange(10))
self.failUnlessEqual(self.library[5], 5)
new = self.Fake(12)
new.key = 100
self.library.add([new])
self.failUnlessEqual(self.library[100], 12)
self.failIf(12 in self.library)
def test___getitem___not_present(self):
self.library.add(self.Frange(10))
self.failUnlessRaises(KeyError, self.library.__getitem__, 12)
def test___contains__(self):
self.library.add(self.Frange(10))
new = self.Fake(12)
new.key = 100
self.library.add([new])
for value in [0, 1, 2, 6, 9, 100, new]:
# 0, 1, 2, 6, 9: all added by self.Frange
# 100: key for new
# new: is itself present
self.failUnless(value in self.library, "didn't find %d" % value)
for value in [-1, 10, 12, 101]:
            # -1, 10, 101: boundary values
# 12: equal but non-key-equal to new
self.failIf(value in self.library, "found %d" % value)
def test_get(self):
self.failUnless(self.library.get(12) is None)
self.failUnless(self.library.get(12, "foo") == "foo")
new = self.Fake(12)
new.key = 100
self.library.add([new])
self.failUnless(self.library.get(12) is None)
self.failUnless(self.library.get(100) is new)
def test_keys(self):
items = []
for i in range(20):
items.append(self.Fake(i))
items[-1].key = i + 100
self.library.add(items)
self.failUnlessEqual(
sorted(self.library.keys()), list(range(100, 120)))
self.failUnlessEqual(
sorted(iterkeys(self.library)), list(range(100, 120)))
def test_values(self):
items = []
for i in range(20):
items.append(self.Fake(i))
items[-1].key = i + 100
self.library.add(items)
self.failUnlessEqual(sorted(self.library.values()), list(range(20)))
self.failUnlessEqual(
sorted(itervalues(self.library)), list(range(20)))
def test_items(self):
items = []
for i in range(20):
items.append(self.Fake(i))
items[-1].key = i + 100
self.library.add(items)
expected = list(zip(range(100, 120), range(20)))
self.failUnlessEqual(sorted(self.library.items()), expected)
self.failUnlessEqual(sorted(iteritems(self.library)), expected)
def test_has_key(self):
self.failIf(self.library.has_key(10))
new = self.Fake(10)
new.key = 20
self.library.add([new])
self.failIf(self.library.has_key(10))
self.failUnless(self.library.has_key(20))
def tearDown(self):
self.library.destroy()
class FakeAudioFile(AudioFile):
def __init__(self, key):
self["~filename"] = fsnative(text_type(key))
def FakeAudioFileRange(*args):
return list(map(FakeAudioFile, range(*args)))
class TPicklingMixin(TestCase):
class PicklingMockLibrary(PicklingMixin, Library):
"""A library-like class that implements enough to test PicklingMixin"""
def __init__(self):
PicklingMixin.__init__(self)
self._contents = {}
# set up just enough of the library interface to work
self.values = self._contents.values
self.items = self._contents.items
def add(self, items):
for item in items:
self._contents[item.key] = item
Library = PicklingMockLibrary
Frange = staticmethod(FakeAudioFileRange)
def setUp(self):
self.library = self.Library()
def test_load_noexist(self):
fd, filename = mkstemp()
os.close(fd)
os.unlink(filename)
library = self.Library()
library.load(filename)
assert len(library) == 0
def test_load_invalid(self):
fd, filename = mkstemp()
os.write(fd, b"nope")
os.close(fd)
try:
library = self.Library()
library.load(filename)
assert len(library) == 0
finally:
os.unlink(filename)
def test_save_load(self):
fd, filename = mkstemp()
os.close(fd)
try:
self.library.add(self.Frange(30))
self.library.save(filename)
library = self.Library()
library.load(filename)
for (k, v), (k2, v2) in zip(
sorted(self.library.items()), sorted(library.items())):
assert k == k2
assert v.key == v2.key
finally:
os.unlink(filename)
class TSongLibrary(TLibrary):
Fake = FakeSong
Frange = staticmethod(FSrange)
Library = SongLibrary
def test_rename_dirty(self):
self.library.dirty = False
song = self.Fake(10)
self.library.add([song])
self.failUnless(self.library.dirty)
self.library.dirty = False
self.library.rename(song, 20)
self.failUnless(self.library.dirty)
def test_rename(self):
song = self.Fake(10)
self.library.add([song])
self.library.rename(song, 20)
while Gtk.events_pending():
Gtk.main_iteration()
self.failUnless(song in self.changed)
self.failUnless(song in self.library)
self.failUnless(song.key in self.library)
self.failUnlessEqual(song.key, 20)
def test_rename_changed(self):
song = self.Fake(10)
self.library.add([song])
changed = set()
self.library.rename(song, 20, changed=changed)
self.assertEqual(len(changed), 1)
self.assertTrue(song in changed)
def test_tag_values(self):
self.library.add(self.Frange(30))
del(self.added[:])
self.failUnlessEqual(
sorted(self.library.tag_values(10)), list(range(10)))
self.failUnlessEqual(sorted(self.library.tag_values(0)), [])
self.failIf(self.changed or self.added or self.removed)
class TFileLibrary(TLibrary):
Fake = FakeSongFile
Library = FileLibrary
def test_mask_invalid_mount_point(self):
new = self.Fake(1)
self.library.add([new])
self.failIf(self.library.masked_mount_points)
self.failUnless(len(self.library))
self.library.mask("/adsadsafaf")
self.failIf(self.library.masked_mount_points)
self.library.unmask("/adsadsafaf")
self.failIf(self.library.masked_mount_points)
self.failUnless(len(self.library))
def test_mask_basic(self):
new = self.Fake(1)
self.library.add([new])
self.failIf(self.library.masked_mount_points)
self.library.mask(new.mountpoint)
self.failUnlessEqual(self.library.masked_mount_points,
[new.mountpoint])
self.failIf(len(self.library))
self.failUnlessEqual(self.library.get_masked(new.mountpoint), [new])
self.failUnless(self.library.masked(new))
self.library.unmask(new.mountpoint)
self.failUnless(len(self.library))
self.failUnlessEqual(self.library.get_masked(new.mountpoint), [])
def test_remove_masked(self):
new = self.Fake(1)
self.library.add([new])
self.library.mask(new.mountpoint)
self.failUnless(self.library.masked_mount_points)
self.library.remove_masked(new.mountpoint)
self.failIf(self.library.masked_mount_points)
def test_content_masked(self):
new = self.Fake(100)
new._mounted = False
self.failIf(self.library.get_content())
self.library._load_init([new])
self.failUnless(self.library.masked(new))
self.failUnless(self.library.get_content())
def test_init_masked(self):
new = self.Fake(100)
new._mounted = False
self.library._load_init([new])
self.failIf(self.library.items())
self.failUnless(self.library.masked(new))
def test_load_init_nonmasked(self):
new = self.Fake(200)
new._mounted = True
self.library._load_init([new])
self.failUnlessEqual(list(self.library.values()), [new])
def test_reload(self):
new = self.Fake(200)
self.library.add([new])
changed = set()
removed = set()
self.library.reload(new, changed=changed, removed=removed)
self.assertTrue(new in changed)
self.assertFalse(removed)
class TSongFileLibrary(TSongLibrary):
Fake = FakeSongFile
Frange = staticmethod(FSFrange)
Library = SongFileLibrary
def test__load_exists_invalid(self):
new = self.Fake(100)
new._valid = False
changed, removed = self.library._load_item(new)
self.failIf(removed)
self.failUnless(changed)
self.failUnless(new._valid)
self.failUnless(new in self.library)
def test__load_not_exists(self):
new = self.Fake(100)
new._valid = False
new._exists = False
changed, removed = self.library._load_item(new)
self.failIf(removed)
self.failIf(changed)
self.failIf(new._valid)
self.failIf(new in self.library)
def test__load_error_during_reload(self):
try:
from quodlibet import util
print_exc = util.print_exc
util.print_exc = lambda *args, **kwargs: None
new = self.Fake(100)
def error():
raise AudioFileError
new.reload = error
new._valid = False
changed, removed = self.library._load_item(new)
self.failUnless(removed)
self.failIf(changed)
self.failIf(new._valid)
self.failIf(new in self.library)
finally:
util.print_exc = print_exc
def test__load_not_mounted(self):
new = self.Fake(100)
new._valid = False
new._exists = False
new._mounted = False
changed, removed = self.library._load_item(new)
self.failIf(removed)
self.failIf(changed)
self.failIf(new._valid)
self.failIf(new in self.library)
self.failUnless(self.library.masked(new))
def __get_file(self):
return get_temp_copy(get_data_path('empty.flac'))
def test_add_filename(self):
config.init()
try:
filename = self.__get_file()
ret = self.library.add_filename(filename)
self.failUnless(ret)
self.failUnlessEqual(len(self.library), 1)
self.failUnlessEqual(len(self.added), 1)
ret = self.library.add_filename(filename)
self.failUnless(ret)
self.failUnlessEqual(len(self.added), 1)
os.unlink(filename)
filename = self.__get_file()
ret = self.library.add_filename(filename, add=False)
self.failUnless(ret)
self.failIf(ret in self.library)
self.failUnlessEqual(len(self.added), 1)
self.library.add([ret])
self.failUnless(ret in self.library)
self.failUnlessEqual(len(self.added), 2)
self.failUnlessEqual(2, len(self.library))
os.unlink(filename)
with capture_output():
ret = self.library.add_filename("")
self.failIf(ret)
self.failUnlessEqual(len(self.added), 2)
self.failUnlessEqual(len(self.library), 2)
finally:
config.quit()
def test_contains_filename(self):
filename = self.__get_file()
try:
assert not self.library.contains_filename(filename)
assert self.library.add_filename(filename, add=False)
assert not self.library.contains_filename(filename)
assert self.library.add_filename(filename)
assert self.library.contains_filename(filename)
finally:
os.unlink(filename)
def test_add_filename_normalize_path(self):
if not os.name == "nt":
return
config.init()
filename = self.__get_file()
        # create an equivalent path that differs from the original one
if filename.upper() == filename:
other = filename.lower()
else:
other = filename.upper()
song = self.library.add_filename(filename)
other_song = self.library.add_filename(other)
self.assertTrue(song is other_song)
os.unlink(filename)
config.quit()
class TAlbumLibrary(TestCase):
Fake = FakeSong
Frange = staticmethod(ASrange)
UnderlyingLibrary = Library
def setUp(self):
self.underlying = self.UnderlyingLibrary()
self.added = []
self.changed = []
self.removed = []
self._sigs = [
connect_obj(self.underlying, 'added', list.extend, self.added),
connect_obj(self.underlying,
'changed', list.extend, self.changed),
connect_obj(self.underlying,
'removed', list.extend, self.removed),
]
self.library = AlbumLibrary(self.underlying)
# Populate for every test
self.underlying.add(self.Frange(12))
def tearDown(self):
for s in self._sigs:
self.underlying.disconnect(s)
self.underlying.destroy()
self.library.destroy()
def test_get(self):
key = self.underlying.get("file_1.mp3").album_key
self.failUnlessEqual(self.library.get(key).title, "Album 1")
album = self.library.get(key)
self.failUnlessEqual(album.key, key)
self.failUnlessEqual(len(album.songs), 4)
key = self.underlying.get("file_2.mp3").album_key
self.failUnlessEqual(self.library.get(key).title, "Album 2")
def test_getitem(self):
key = self.underlying.get("file_4.mp3").album_key
self.failUnlessEqual(self.library[key].key, key)
def test_keys(self):
self.failUnless(len(self.library.keys()), 3)
def test_has_key(self):
key = self.underlying.get("file_1.mp3").album_key
self.failUnless(self.library.has_key(key))
def test_misc_collection(self):
self.failUnless(itervalues(self.library))
self.failUnless(iteritems(self.library))
def test_items(self):
self.failUnlessEqual(len(self.library.items()), 3)
def test_items_2(self):
albums = self.library.values()
self.failUnlessEqual(len(albums), 3)
songs = self.underlying._contents.values()
# Make sure "all the songs' albums" == "all the albums", roughly
self.failUnlessEqual({a.key for a in albums},
{s.album_key for s in songs})
def test_remove(self):
key = self.underlying.get("file_1.mp3").album_key
songs = self.underlying._contents
# Remove all songs in Album 1
for i in range(1, 12, 3):
song = songs["file_%d.mp3" % i]
self.underlying.remove([song])
# Album 1 is all gone...
self.failUnlessEqual(self.library.get(key), None)
# ...but Album 2 is fine
key = self.underlying.get("file_2.mp3").album_key
album2 = self.library[key]
self.failUnlessEqual(album2.key, key)
self.failUnlessEqual(len(album2.songs), 4)
def test_misc(self):
# It shouldn't implement FileLibrary etc
self.failIf(getattr(self.library, "filename", None))
class TAlbumLibrarySignals(TestCase):
def setUp(self):
lib = SongLibrary()
received = []
def listen(name, items):
received.append(name)
self._sigs = [
connect_obj(lib, 'added', listen, 'added'),
connect_obj(lib, 'changed', listen, 'changed'),
connect_obj(lib, 'removed', listen, 'removed'),
]
albums = lib.albums
self._asigs = [
connect_obj(albums, 'added', listen, 'a_added'),
connect_obj(albums, 'changed', listen, 'a_changed'),
connect_obj(albums, 'removed', listen, 'a_removed'),
]
self.lib = lib
self.albums = albums
self.received = received
def test_add_one(self):
self.lib.add([AlbumSong(1)])
self.failUnlessEqual(self.received, ["added", "a_added"])
def test_add_two_same(self):
self.lib.add([AlbumSong(1, "a1")])
self.lib.add([AlbumSong(5, "a1")])
self.failUnlessEqual(self.received,
["added", "a_added", "added", "a_changed"])
def test_remove(self):
songs = [AlbumSong(1, "a1"), AlbumSong(2, "a1"), AlbumSong(4, "a2")]
self.lib.add(songs)
self.lib.remove(songs[:2])
self.failUnlessEqual(self.received,
["added", "a_added", "removed", "a_removed"])
def test_change(self):
songs = [AlbumSong(1, "a1"), AlbumSong(2, "a1"), AlbumSong(4, "a2")]
self.lib.add(songs)
self.lib.changed(songs)
self.failUnlessEqual(self.received,
["added", "a_added", "changed", "a_changed"])
def tearDown(self):
for s in self._asigs:
self.albums.disconnect(s)
for s in self._sigs:
self.lib.disconnect(s)
self.lib.destroy()
class Titer_paths(TestCase):
def setUp(self):
# on osx the temp folder returned is a symlink
self.root = os.path.realpath(mkdtemp())
def tearDown(self):
shutil.rmtree(self.root)
def test_empty(self):
assert list(iter_paths(self.root)) == []
def test_one_file(self):
fd, name = mkstemp(dir=self.root)
os.close(fd)
assert list(iter_paths(self.root)) == [name]
def test_one_file_exclude(self):
fd, name = mkstemp(dir=self.root)
os.close(fd)
assert list(iter_paths(self.root, exclude=[self.root])) == []
assert list(iter_paths(self.root,
exclude=[os.path.dirname(self.root)])) == []
assert list(iter_paths(self.root, exclude=[name])) == []
assert list(iter_paths(self.root, exclude=[name + "a"])) == [name]
@skipIf(is_windows(), "no symlink")
def test_with_dir_symlink(self):
child = mkdtemp(dir=self.root)
link = os.path.join(self.root, "foo")
os.symlink(child, link)
fd, name = mkstemp(dir=link)
os.close(fd)
assert name not in list(iter_paths(self.root))
assert list(iter_paths(link)) == list(iter_paths(child))
assert list(iter_paths(link, exclude=[link])) == []
assert list(iter_paths(child, exclude=[child])) == []
assert list(iter_paths(link, exclude=[child])) == []
@skipIf(is_windows(), "no symlink")
def test_with_file(self):
fd, name = mkstemp(dir=self.root)
os.close(fd)
link = os.path.join(self.root, "foo")
os.symlink(name, link)
assert list(iter_paths(self.root)) == [name, name]
assert list(iter_paths(self.root, exclude=[link])) == [name]
assert list(iter_paths(self.root, exclude=[name])) == []
def test_hidden_dir(self):
child = mkdtemp(dir=self.root, prefix=".")
fd, name = mkstemp(dir=child)
os.close(fd)
assert list(iter_paths(child)) == []
assert list(iter_paths(child, skip_hidden=False)) == [name]
assert list(iter_paths(self.root)) == []
assert list(iter_paths(self.root, skip_hidden=False)) == [name]
def test_hidden_file(self):
fd, name = mkstemp(dir=self.root, prefix=".")
os.close(fd)
assert list(iter_paths(self.root)) == []
|
elbeardmorez/quodlibet
|
quodlibet/tests/test_library_libraries.py
|
Python
|
gpl-2.0
| 24,563 | 0.000366 |
# $Id: stp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Spanning Tree Protocol."""
import dpkt
class STP(dpkt.Packet):
__hdr__ = (
('proto_id', 'H', 0),
('v', 'B', 0),
('type', 'B', 0),
('flags', 'B', 0),
('root_id', '8s', ''),
('root_path', 'I', 0),
('bridge_id', '8s', ''),
('port_id', 'H', 0),
('_age', 'H', 0),
('_max_age', 'H', 0),
('_hello', 'H', 0),
('_fd', 'H', 0)
)
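    # Editor's note (added comment): the four timer fields (_age, _max_age,
    # _hello, _fd) are carried on the wire in units of 1/256 s; the properties
    # below expose the whole-seconds part by shifting out the low byte.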
@property
def age(self):
return self._age >> 8
@age.setter
def age(self, age):
self._age = age << 8
@property
def max_age(self):
return self._max_age >> 8
@max_age.setter
def max_age(self, max_age):
self._max_age = max_age << 8
@property
def hello(self):
return self._hello >> 8
@hello.setter
def hello(self, hello):
self._hello = hello << 8
@property
def fd(self):
return self._fd >> 8
@fd.setter
def fd(self, fd):
self._fd = fd << 8
def test_stp():
buf = '\x00\x00\x02\x02\x3e\x80\x00\x08\x00\x27\xad\xa3\x41\x00\x00\x00\x00\x80\x00\x08\x00\x27\xad\xa3\x41\x80\x01\x00\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x02\x00\x14\x00'
stp = STP(buf)
assert stp.proto_id == 0
assert stp.port_id == 0x8001
assert stp.age == 0
assert stp.max_age == 20
assert stp.hello == 2
assert stp.fd == 15
assert str(stp) == buf
stp.fd = 100
assert stp.pack_hdr()[-2:] == '\x64\x00' # 100 << 8
if __name__ == '__main__':
# Runs all the test associated with this class/file
test_stp()
print 'Tests Successful...'
|
lkash/test
|
dpkt/stp.py
|
Python
|
bsd-3-clause
| 1,695 | 0.00059 |
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
|
ofer43211/unisubs
|
apps/comments/__init__.py
|
Python
|
agpl-3.0
| 763 | 0.006553 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
import wsmeext.pecan as wsme_pecan
from solum.api.controllers.v1.datamodel import component
from solum.api.handlers import component_handler
from solum.common import exception
from solum.common import policy
from solum import objects
class ComponentController(rest.RestController):
"""Manages operations on a single component."""
def __init__(self, component_id):
super(ComponentController, self).__init__()
self._id = component_id
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(component.Component)
def get(self):
"""Return this component."""
policy.check('show_component',
pecan.request.security_context)
handler = component_handler.ComponentHandler(
pecan.request.security_context)
host_url = pecan.request.application_url.rstrip('/')
return component.Component.from_db_model(handler.get(self._id),
host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(component.Component, body=component.Component)
def put(self, data):
"""Modify this component."""
policy.check('update_component',
pecan.request.security_context)
handler = component_handler.ComponentHandler(
pecan.request.security_context)
res = handler.update(self._id,
data.as_dict(objects.registry.Component))
host_url = pecan.request.application_url.rstrip('/')
return component.Component.from_db_model(res, host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(None, status_code=204)
def delete(self):
"""Delete this component."""
policy.check('delete_component',
pecan.request.security_context)
handler = component_handler.ComponentHandler(
pecan.request.security_context)
return handler.delete(self._id)
class ComponentsController(rest.RestController):
"""Manages operations on the components collection."""
@pecan.expose()
def _lookup(self, component_id, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
return ComponentController(component_id), remainder
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(component.Component, body=component.Component,
status_code=201)
def post(self, data):
"""Create a new component."""
policy.check('create_component',
pecan.request.security_context)
handler = component_handler.ComponentHandler(
pecan.request.security_context)
host_url = pecan.request.application_url.rstrip('/')
return component.Component.from_db_model(
handler.create(data.as_dict(objects.registry.Component)), host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose([component.Component])
def get_all(self):
"""Return all components, based on the query provided."""
policy.check('get_components',
pecan.request.security_context)
handler = component_handler.ComponentHandler(
pecan.request.security_context)
host_url = pecan.request.application_url.rstrip('/')
return [component.Component.from_db_model(ser, host_url)
for ser in handler.get_all()]
|
stackforge/solum
|
solum/api/controllers/v1/component.py
|
Python
|
apache-2.0
| 4,074 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Auther: Xiaowei Deng
#
# This file is part of Mini Proxy Pool
#
# This program is free software and it is distributed under
# the terms of the MIT license. Please see LICENSE file for details.
PROXY_DB_FILE = "_proxies.db"
VALIDATOR_TIMEOUT = 1 # seconds
VALIDATOR_URL = "http://www.google.ca"
VALIDATOR_THREAD_POOL_SIZE = 20
VALIDATOR_CONNECTIONS_PER_THREAD = 20
INVALID_PROXY_TIMES = 5 # a proxy that fails to connect INVALID_PROXY_TIMES times in a row is considered invalid
INVALID_PROXY_IF_DELETE = True
VALIDATE_THREAD_RUN_PERIOD = 5 * 60 # seconds to wait between validation runs
LOAD_PORXIES_FROM_SOURCES_THREAD_RUN_PERIOD = 30 * 60 # seconds to wait between reloads from the source sites
REST_SRV_IP = "0.0.0.0"
REST_SRV_PORT = 9876
REST_API_PATH_GET_ALL_VALID = "/api/v1/proxies/*"
# Free proxy sites
PROXY_SOURCE_SITES = [
{
'url_base': "https://free-proxy-list.net",
'pattern': "((?:\d{1,3}\.){1,3}\d{1,3})<\/td><td>(\d{1,6})(.{1,200})<td class='hx'>(.{2,3})",
'ip_ind': 0,
'port_ind': 1,
'protocal_ind': 3
},
{
'url_base': 'https://www.us-proxy.org',
        'pattern': r"((?:\d{1,3}\.){1,3}\d{1,3})<\/td><td>(\d{1,6})(.{1,200})<td class='hx'>(.{2,3})",
'ip_ind': 0,
'port_ind': 1,
'protocal_ind': 3 # todo: to specify the protocol: http or https
},
{
'url_base': "http://spys.me/proxy.txt",
        'pattern': r'((?:\d{1,3}\.){1,3}\d{1,3}):(\d{1,6})',
'ip_ind': 0,
'port_ind': 1,
'protocal_ind': None
}
]
PROXY_SOURCE_FILES = [
'custom_proxies_list.txt'
]
DEFAULT_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2693.2 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
    'Accept-Encoding': 'gzip, deflate',
}
|
xwdeng/MiniProxyPool
|
miniproxypool/config.py
|
Python
|
mit
| 2,154 | 0.011142 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import socket
import traceback
from lineup.datastructures import Queue
class Node(object):
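    # Base building block for queue-driven workers; concrete subclasses supply
    # `input`, `output` and `workers` (see Pipeline below) and are driven
    # through start()/feed()/wait_and_get_work().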
def __init__(self, *args, **kw):
self.initialize(*args, **kw)
def initialize(self, *args, **kw):
pass
@property
def id(self):
return '|'.join([self.get_hostname(), str(os.getpid())])
@property
def taxonomy(self):
class_name = self.__class__.__name__
module_name = self.__class__.__module__
return '.'.join([module_name, class_name])
def get_name(self):
return getattr(self, 'name', None) or self.taxonomy
def get_hostname(self):
return socket.gethostname()
def make_worker(self, Worker, index):
return Worker(self, self.input, self.output)
def start(self):
for worker in self.workers:
worker.start()
def feed(self, item):
self.input.put(item)
def enqueue_error(self, source_class, instructions, exception):
print exception, source_class, instructions
def wait_and_get_work(self):
return self.output.get()
@property
def running(self):
return all([w.alive for w in self.workers])
def are_running(self):
if self.running:
return True
self.start()
return self.running
class Pipeline(Node):
    def initialize(self, *args, **kw):
        self.queues = self.get_queues()
        steps = getattr(self, 'steps', None) or []
        self.workers = [self.make_worker(Worker, index) for index, Worker in enumerate(steps)]
@property
def input(self):
return self.queues[0]
@property
def output(self):
return self.queues[-1]
def get_queues(self):
steps = getattr(self, 'steps', None) or []
return [Queue() for _ in steps] + [Queue()]
def make_worker(self, Worker, index):
return Worker(self, self.queues[index], self.queues[index + 1])
|
pombredanne/lineup
|
lineup/framework.py
|
Python
|
mit
| 1,964 | 0.000509 |
# -*- coding: utf-8 -*-
import os
basedir = os.path.abspath(os.path.dirname(__file__))
CSRF_ENABLED = True
SECRET_KEY = 'UsTd+_P&kv#jdQ!3Oc.Kb$yd,ey/B2i-aM8em'
SITE_NAME = 'basilinna'
MYSQL_DB = 'basilinna'
MYSQL_USER = 'root'
MYSQL_PASSWD = 'root'
MYSQL_HOST = 'localhost'
MYSQL_PORT = 3306
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://%s:%s@%s:%s/%s?charset=utf8' % (MYSQL_USER, MYSQL_PASSWD, MYSQL_HOST, MYSQL_PORT, MYSQL_DB)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'migrations')
HOST = 'http://localhost:5000/'
UPLOAD_FOLDER = '.\\app\\static\\uploads'
UPLOAD_AVATAR_FOLDER = '.\\app\\static\\avatars'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
# 16M
MAX_CONTENT_LENGTH = 16 * 1024 * 1024
# pagination
POSTS_PER_PAGE = 5
|
allotory/basilinna
|
config.py
|
Python
|
mit
| 850 | 0.002353 |
from . import audio
import pytest
import re
@pytest.mark.parametrize('input,expected', [
('http://foo.bar https://bar.foo',
('http://foo.bar', 'https://bar.foo', None)),
('http://test.foo',
('http://test.foo', None, None)),
('https://test.foo',
('https://test.foo', None, None)),
('http://foo.foo https://bar.bar http://zonk.zonk',
('http://foo.foo', 'https://bar.bar', 'http://zonk.zonk'))
])
def test_regex(input, expected):
assert re.match(audio.AUDIO, input).groups() == expected
@pytest.mark.parametrize('input,expected', [
('http://foo.foo/foo.mp3',
('<audio controls>'
'<source src="http://foo.foo/foo.mp3" type="audio/mpeg">'
'Your browser does not support the audio element.</audio>')),
('https://foo.foo/foo.ogg http://bar.bar/bar.opus',
('<audio controls>'
'<source src="https://foo.foo/foo.ogg" type="audio/ogg">'
'<source src="http://bar.bar/bar.opus" type="audio/ogg">'
'Your browser does not support the audio element.</audio>')),
('http://1.de/1.wav http://2.de/2.mp4 http://3.de/3.ogg',
('<audio controls>'
'<source src="http://1.de/1.wav" type="audio/wav">'
'<source src="http://2.de/2.mp4" type="audio/mp4">'
'<source src="http://3.de/3.ogg" type="audio/ogg">'
'Your browser does not support the audio element.</audio>'))
])
def test_create_html(input, expected):
assert audio.create_html(input) == expected
|
2015fallhw/cdw11
|
static/plugin/liquid_tags/test_audio.py
|
Python
|
agpl-3.0
| 1,494 | 0 |
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length
from app.models import User
class LoginForm(Form):
openid = StringField('openid', validators=[DataRequired()])
remember_me = BooleanField('remember_me', default=False)
class EditForm(Form):
nickname = StringField('nickname', validators=[DataRequired()])
about_me = TextAreaField('about_me', validators=[Length(min=0, max=140)])
def __init__(self, original_nickname, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.original_nickname = original_nickname
def validate(self):
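        # Run the stock validators first; an unchanged nickname is always
        # accepted, otherwise it must not collide with an existing user.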
if not Form.validate(self):
return False
if self.nickname.data == self.original_nickname:
return True
user = User.query.filter_by(nickname=self.nickname.data).first()
        if user is not None:
self.nickname.errors.append('This nickname is already in use. Please choose another one.')
return False
return True
|
hei-hilman/microblog
|
app/forms.py
|
Python
|
bsd-3-clause
| 1,059 | 0.004721 |
#!/usr/bin/env python
import psutil
def main():
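    # Kill every process whose name or command line mentions one of the
    # online_monitor entry points (start_producer/start_converter/start_online).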
for proc in psutil.process_iter():
if any(name in proc.name() for name in ['start_producer', 'start_converter', 'start_online']) or any(name in ''.join(proc.cmdline()) for name in ['start_producer', 'start_converter', 'start_online']):
proc.kill()
if __name__ == '__main__':
main()
|
SiLab-Bonn/online_monitor
|
online_monitor/stop_online_monitor.py
|
Python
|
mit
| 362 | 0.002762 |
#!/usr/bin/env python
import rospy
import actionlib
from play_motion_msgs.msg import PlayMotionAction, PlayMotionGoal
from sensor_msgs.msg import JointState
if __name__ == "__main__":
rospy.init_node("grasp_demo")
rospy.loginfo("Waiting for play_motion...")
client = actionlib.SimpleActionClient("/play_motion", PlayMotionAction)
client.wait_for_server()
rospy.loginfo("...connected.")
rospy.wait_for_message("/joint_states", JointState)
rospy.sleep(3.0)
rospy.loginfo("Grasping demo...")
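    # Play the canned motions in sequence, waiting for each goal to finish
    # (with a per-motion timeout) before sending the next one.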
goal = PlayMotionGoal()
goal.motion_name = 'home'
goal.skip_planning = True
client.send_goal(goal)
client.wait_for_result(rospy.Duration(15.0))
goal.motion_name = 'look_at_object_demo'
goal.skip_planning = True
client.send_goal(goal)
client.wait_for_result(rospy.Duration(5.0))
goal.motion_name = 'pregrasp_demo'
goal.skip_planning = True
client.send_goal(goal)
client.wait_for_result(rospy.Duration(40.0))
goal.motion_name = 'grasp_demo'
goal.skip_planning = True
client.send_goal(goal)
client.wait_for_result(rospy.Duration(80.0))
goal.motion_name = 'pick_demo'
goal.skip_planning = True
client.send_goal(goal)
client.wait_for_result(rospy.Duration(40.0))
rospy.loginfo("Grasping demo OK.")
|
robosafe/mc-vs-bdi
|
tiago_simulator/scripts/grasp_demo.py
|
Python
|
gpl-3.0
| 1,252 | 0.023962 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from robot.utils import (Sortable, py2to3, secs_to_timestr, timestr_to_secs,
IRONPYTHON, JYTHON, WINDOWS)
from robot.errors import TimeoutError, DataError, FrameworkError
if JYTHON:
from .jython import Timeout
elif IRONPYTHON:
from .ironpython import Timeout
elif WINDOWS:
from .windows import Timeout
else:
from .posix import Timeout
@py2to3
class _Timeout(Sortable):
def __init__(self, timeout=None, message='', variables=None):
self.string = timeout or ''
self.message = message
self.secs = -1
self.starttime = -1
self.error = None
if variables:
self.replace_variables(variables)
@property
def active(self):
return self.starttime > 0
def replace_variables(self, variables):
try:
self.string = variables.replace_string(self.string)
if not self:
return
self.secs = timestr_to_secs(self.string)
self.string = secs_to_timestr(self.secs)
self.message = variables.replace_string(self.message)
except (DataError, ValueError) as err:
self.secs = 0.000001 # to make timeout active
self.error = (u'Setting %s timeout failed: %s'
% (self.type.lower(), err))
def start(self):
if self.secs > 0:
self.starttime = time.time()
def time_left(self):
if not self.active:
return -1
elapsed = time.time() - self.starttime
# Timeout granularity is 1ms. Without rounding some timeout tests fail
# intermittently on Windows, probably due to threading.Event.wait().
return round(self.secs - elapsed, 3)
def timed_out(self):
return self.active and self.time_left() <= 0
def __unicode__(self):
return self.string
@property
def _sort_key(self):
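        # Sort active timeouts first, ordered by the time they have left.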
return (not self.active, self.time_left())
def __nonzero__(self):
return bool(self.string and self.string.upper() != 'NONE')
def run(self, runnable, args=None, kwargs=None):
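        # Run `runnable` under the remaining time budget, enforced by the
        # platform-specific Timeout implementation imported above.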
if self.error:
raise DataError(self.error)
if not self.active:
raise FrameworkError('Timeout is not active')
timeout = self.time_left()
if timeout <= 0:
raise TimeoutError(self.get_message())
executable = lambda: runnable(*(args or ()), **(kwargs or {}))
return Timeout(timeout, self._timeout_error).execute(executable)
def get_message(self):
if not self.active:
return '%s timeout not active.' % self.type
if not self.timed_out():
return '%s timeout %s active. %s seconds left.' \
% (self.type, self.string, self.time_left())
return self._timeout_error
@property
def _timeout_error(self):
if self.message:
return self.message
return '%s timeout %s exceeded.' % (self.type, self.string)
class TestTimeout(_Timeout):
type = 'Test'
_keyword_timeout_occurred = False
def set_keyword_timeout(self, timeout_occurred):
if timeout_occurred:
self._keyword_timeout_occurred = True
def any_timeout_occurred(self):
return self.timed_out() or self._keyword_timeout_occurred
class KeywordTimeout(_Timeout):
type = 'Keyword'
|
moto-timo/robotframework
|
src/robot/running/timeouts/__init__.py
|
Python
|
apache-2.0
| 3,987 | 0.000251 |
import numpy as np
# for the double bounded trafo
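# i2e maps an unbounded internal value onto the open interval (l, u) via a
# sine, and e2i inverts it with arcsin, clamping anything that lands within
# ~eps2 of the boundaries (the MINUIT-style double-bounded parameter transform).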
def i2e(v, u, l):
return l + 0.5 * (u - l) * (np.sin(v) + 1)
def e2i(v, u, l, eps2):
piby2 = 2. * np.arctan(1.)
distnn = 8. * np.sqrt(eps2)
vlimhi = piby2 - distnn
vlimlo = -piby2 + distnn
yy = 2. * (v - l) / (u - l) - 1.
yy2 = yy * yy
if yy2 > (1. - eps2):
if yy < 0.:
print("vlimlo")
return vlimlo
else:
print("vlimhi")
return vlimhi
else:
print("arcsin")
return np.arcsin(yy)
def i2e2i(v_hex, u, l):
eps2 = float.fromhex('0x1p-24')
i = np.longdouble(float.fromhex(v_hex))
e = i2e(i, u, l)
i2 = e2i(e, u, l, eps2)
return i, i2
def print_i2e2i(i, i2):
print(i, i2)
print(float.hex(float(i)), float.hex(float(i2)))
# this is the first number in my test that goes wrong
right1, wrong1 = i2e2i('-0x1.abadef0339ab8p-3', 3, -3)
print_i2e2i(right1, wrong1)
# prints:
# -0.20882784584610703 -0.208827845846
# -0x1.abadef0339ab8p-3 -0x1.abadef0339ab9p-3
# i.e. the last bit is now one higher than before
# let's try another:
print_i2e2i(*i2e2i('-0x1.abadef0339ab9p-3', 3, -3))
# that goes fine...
# another:
print_i2e2i(*i2e2i('-0x1.abadef0339abap-3', 3, -3))
# aha, this also goes wrong, with same result!
print_i2e2i(*i2e2i('-0x1.abadef0339abbp-3', 3, -3))
# also!
print_i2e2i(*i2e2i('-0x1.abadef0339ab7p-3', 3, -3))
# also! still same value!
print_i2e2i(*i2e2i('-0x1.abadef0339ab6p-3', 3, -3))
# still wrong, now different value.
print_i2e2i(*i2e2i('-0x1.abadef0339ab5p-3', 3, -3))
# that is a correct one again.
# So basically in this range ~1/3 of results are wrong...
|
roofit-dev/parallel-roofit-scripts
|
int2ext_precision_solution.py
|
Python
|
apache-2.0
| 1,674 | 0.001792 |
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""N-dimensional Brownian Motion.
Implements the Ito process defined by:
```
dX_i = a_i(t) dt + Sum[S_{ij}(t) dW_{j}, 1 <= j <= n] for each i in {1,..,n}
```
where `dW_{j}, 1 <= j <= n` are n independent 1D Brownian increments. The
coefficient `a_i` is the drift and the matrix `S_{ij}` is the volatility of the
process.
For more details, see Ref [1].
#### References:
  [1]: Bernt Oksendal. Stochastic Differential Equations: An Introduction with
Applications. Springer. 2010.
"""
import tensorflow.compat.v2 as tf
from tf_quant_finance.math.random_ops import multivariate_normal as mvn
from tf_quant_finance.models.legacy import brownian_motion_utils as bmu
from tf_quant_finance.models.legacy import ito_process
class BrownianMotion(ito_process.ItoProcess):
"""The multi dimensional Brownian Motion."""
def __init__(self,
dim=1,
drift=None,
volatility=None,
total_drift_fn=None,
total_covariance_fn=None,
dtype=None,
name=None):
"""Initializes the Brownian motion class.
Represents the Ito process:
```None
dX_i = a_i(t) dt + Sum(S_{ij}(t) dW_j for j in [1 ... n]), 1 <= i <= n
```
`a_i(t)` is the drift rate of this process and the `S_{ij}(t)` is the
volatility matrix. Associated to these parameters are the integrated
drift and covariance functions. These are defined as:
```None
total_drift_{i}(t1, t2) = Integrate(a_{i}(t), t1 <= t <= t2)
total_covariance_{ij}(t1, t2) = Integrate(inst_covariance_{ij}(t),
t1 <= t <= t2)
inst_covariance_{ij}(t) = (S.S^T)_{ij}(t)
```
Sampling from the Brownian motion process with time dependent parameters
can be done efficiently if the total drift and total covariance functions
are supplied. If the parameters are constant, the total parameters can be
    automatically inferred and it is not worth supplying them explicitly.
Currently, it is not possible to infer the total drift and covariance from
the instantaneous values if the latter are functions of time. In this case,
we use a generic sampling method (Euler-Maruyama) which may be
inefficient. It is advisable to supply the total covariance and total drift
in the time dependent case where possible.
#### Example
The following is an example of a 1 dimensional brownian motion using default
arguments of zero drift and unit volatility.
```python
process = bm.BrownianMotion()
times = np.array([0.2, 0.33, 0.7, 0.9, 1.88])
num_samples = 10000
with tf.Session() as sess:
paths = sess.run(process.sample_paths(
times,
num_samples=num_samples,
initial_state=np.array(0.1),
seed=1234))
# Compute the means at the specified times.
means = np.mean(paths, axis=0)
print (means) # Mean values will be near 0.1 for each time
# Compute the covariances at the given times
covars = np.cov(paths.reshape([num_samples, 5]), rowvar=False)
# covars is a 5 x 5 covariance matrix.
# Expected result is that Covar(X(t), X(t')) = min(t, t')
expected = np.minimum(times.reshape([-1, 1]), times.reshape([1, -1]))
print ("Computed Covars: {}, True Covars: {}".format(covars, expected))
```
Args:
dim: Python int greater than or equal to 1. The dimension of the Brownian
motion.
Default value: 1 (i.e. a one dimensional brownian process).
drift: The drift of the process. The type and shape of the value must be
one of the following (in increasing order of generality) (a) A real
scalar `Tensor`. This corresponds to a time and component independent
drift. Every component of the Brownian motion has the same drift rate
equal to this value. (b) A real `Tensor` of shape `[dim]`. This
corresponds to a time independent drift with the `i`th component as the
drift rate of the `i`th component of the Brownian motion. (c) A Python
callable accepting a single positive `Tensor` of general shape (referred
to as `times_shape`) and returning a `Tensor` of shape `times_shape +
[dim]`. The input argument is the times at which the drift needs to be
evaluated. This case corresponds to a general time and direction
dependent drift rate.
Default value: None which maps to zero drift.
volatility: The volatility of the process. The type and shape of the
supplied value must be one of the following (in increasing order of
generality) (a) A positive real scalar `Tensor`. This corresponds to a
time independent, diagonal volatility matrix. The `(i, j)` component of
the full volatility matrix is equal to zero if `i != j` and equal to the
supplied value otherwise. (b) A positive real `Tensor` of shape `[dim]`.
This corresponds to a time independent volatility matrix with zero
correlation. The `(i, j)` component of the full volatility matrix is
        equal to zero if `i != j` and equal to the `i`th component of the supplied
value otherwise. (c) A positive definite real `Tensor` of shape `[dim,
dim]`. The full time independent volatility matrix. (d) A Python
callable accepting a single positive `Tensor` of general shape (referred
to as `times_shape`) and returning a `Tensor` of shape `times_shape +
        [dim, dim]`. The input argument is the times at which the volatility
needs to be evaluated. This case corresponds to a general time and axis
dependent volatility matrix.
Default value: None which maps to a volatility matrix equal to identity.
total_drift_fn: Optional Python callable to compute the integrated drift
rate between two times. The callable should accept two real `Tensor`
arguments. The first argument contains the start times and the second,
the end times of the time intervals for which the total drift is to be
computed. Both the `Tensor` arguments are of the same dtype and shape.
The return value of the callable should be a real `Tensor` of the same
dtype as the input arguments and of shape `times_shape + [dim]` where
`times_shape` is the shape of the times `Tensor`. Note that it is an
error to supply this parameter if the `drift` is not supplied.
Default value: None.
total_covariance_fn: A Python callable returning the integrated covariance
rate between two times. The callable should accept two real `Tensor`
arguments. The first argument is the start times and the second is the
end times of the time intervals for which the total covariance is
needed. Both the `Tensor` arguments are of the same dtype and shape. The
return value of the callable is a real `Tensor` of the same dtype as the
input arguments and of shape `times_shape + [dim, dim]` where
`times_shape` is the shape of the times `Tensor`. Note that it is an
error to supply this argument if the `volatility` is not supplied.
Default value: None.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: None which means that default dtypes inferred by
TensorFlow are used.
name: str. The name scope under which ops created by the methods of this
class are nested.
Default value: None which maps to the default name `brownian_motion`.
Raises:
      ValueError if the dimension is less than 1, if the total drift is
        supplied but the drift is not supplied, or if the total covariance is
        supplied but the volatility is not supplied.
"""
super(BrownianMotion, self).__init__()
if dim < 1:
raise ValueError('Dimension must be 1 or greater.')
if drift is None and total_drift_fn is not None:
raise ValueError('total_drift_fn must not be supplied if drift'
' is not supplied.')
if volatility is None and total_covariance_fn is not None:
      raise ValueError('total_covariance_fn must not be supplied if '
                       'volatility is not supplied.')
self._dim = dim
self._dtype = dtype
self._name = name or 'brownian_motion'
drift_fn, total_drift_fn = bmu.construct_drift_data(drift, total_drift_fn,
dim, dtype)
self._drift_fn = drift_fn
self._total_drift_fn = total_drift_fn
vol_fn, total_covar_fn = bmu.construct_vol_data(volatility,
total_covariance_fn, dim,
dtype)
self._volatility_fn = vol_fn
self._total_covariance_fn = total_covar_fn
# Override
def dim(self):
"""The dimension of the process."""
return self._dim
# Override
def dtype(self):
"""The data type of process realizations."""
return self._dtype
# Override
def name(self):
"""The name to give to the ops created by this class."""
return self._name
# Override
def drift_fn(self):
return lambda t, x: self._drift_fn(t)
# Override
def volatility_fn(self):
return lambda t, x: self._volatility_fn(t)
def total_drift_fn(self):
"""The integrated drift of the process.
Returns:
None or a Python callable. None is returned if the input drift was a
callable and no total drift function was supplied.
The callable returns the integrated drift rate between two times.
It accepts two real `Tensor` arguments. The first argument is the
left end point and the second is the right end point of the time interval
for which the total drift is needed. Both the `Tensor` arguments are of
the same dtype and shape. The return value of the callable is
a real `Tensor` of the same dtype as the input arguments and of shape
`times_shape + [dim]` where `times_shape` is the shape of the times
`Tensor`.
"""
return self._total_drift_fn
def total_covariance_fn(self):
"""The total covariance of the process between two times.
Returns:
A Python callable returning the integrated covariances between two times.
The callable accepts two real `Tensor` arguments. The first argument
is the left end point and the second is the right end point of the time
interval for which the total covariance is needed.
The shape of the two input arguments and their dtypes must match.
The output of the callable is a `Tensor` of shape
`times_shape + [dim, dim]` containing the integrated covariance matrix
between the start times and end times.
"""
return self._total_covariance_fn
# Override
def sample_paths(self,
times,
num_samples=1,
initial_state=None,
random_type=None,
seed=None,
swap_memory=True,
name=None,
**kwargs):
"""Returns a sample of paths from the process.
Generates samples of paths from the process at the specified time points.
Args:
times: Rank 1 `Tensor` of increasing positive real values. The times at
which the path points are to be evaluated.
num_samples: Positive scalar `int`. The number of paths to draw.
initial_state: `Tensor` of shape `[dim]`. The initial state of the
process.
Default value: None which maps to a zero initial state.
random_type: Enum value of `RandomType`. The type of (quasi)-random number
generator to use to generate the paths.
Default value: None which maps to the standard pseudo-random numbers.
seed: Python `int`. The random seed to use. If not supplied, no seed is
set.
swap_memory: Whether GPU-CPU memory swap is enabled for this op. See
equivalent flag in `tf.while_loop` documentation for more details.
Useful when computing a gradient of the op since `tf.while_loop` is used
to propagate stochastic process in time.
name: str. The name to give this op. If not supplied, default name of
`sample_paths` is used.
**kwargs: parameters, specific to Euler schema: `grid_step` is rank 0 real
`Tensor` - maximal distance between points in grid in Euler schema. Note
that Euler sampling is only used if it is not possible to do exact
sampling because total drift or total covariance are unavailable.
Returns:
A real `Tensor` of shape [num_samples, k, n] where `k` is the size of the
`times`, `n` is the dimension of the process.
"""
if self._total_drift_fn is None or self._total_covariance_fn is None:
return super(BrownianMotion, self).sample_paths(
times,
num_samples=num_samples,
initial_state=initial_state,
random_type=random_type,
seed=seed,
name=name,
**kwargs)
default_name = self._name + '_sample_path'
with tf.compat.v1.name_scope(
name, default_name=default_name, values=[times, initial_state]):
end_times = tf.convert_to_tensor(times, dtype=self.dtype())
start_times = tf.concat(
[tf.zeros([1], dtype=end_times.dtype), end_times[:-1]], axis=0)
paths = self._exact_sampling(end_times, start_times, num_samples,
initial_state, random_type, seed)
if initial_state is not None:
return paths + initial_state
return paths
def _exact_sampling(self, end_times, start_times, num_samples, initial_state,
random_type, seed):
"""Returns a sample of paths from the process."""
non_decreasing = tf.debugging.assert_greater_equal(
end_times, start_times, message='Sampling times must be non-decreasing')
starts_non_negative = tf.debugging.assert_greater_equal(
start_times,
tf.zeros_like(start_times),
message='Sampling times must not be < 0.')
with tf.compat.v1.control_dependencies(
[starts_non_negative, non_decreasing]):
drifts = self._total_drift_fn(start_times, end_times)
covars = self._total_covariance_fn(start_times, end_times)
# path_deltas are of shape [num_samples, size(times), dim].
path_deltas = mvn.multivariate_normal((num_samples,),
mean=drifts,
covariance_matrix=covars,
random_type=random_type,
seed=seed)
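      # Cumulative sums of the independent per-interval increments give the
      # path values at the requested sample times.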
paths = tf.cumsum(path_deltas, axis=1)
return paths
# Override
def fd_solver_backward(self,
final_time,
discounting_fn=None,
grid_spec=None,
time_step=None,
time_step_fn=None,
values_batch_size=1,
name=None,
**kwargs):
"""Returns a solver for solving Feynman-Kac PDE associated to the process.
Represents the PDE
```None
V_t + Sum[a_i(t) V_i, 1<=i<=n] +
(1/2) Sum[ D_{ij}(t) V_{ij}, 1 <= i,j <= n] - r(t, x) V = 0
```
In the above, `V_t` is the derivative of `V` with respect to `t`,
`V_i` is the partial derivative with respect to `x_i` and `V_{ij}` the
(mixed) partial derivative with respect to `x_i` and `x_j`. `D_{ij}` are
the components of the diffusion tensor:
```None
D_{ij}(t) = (Sigma . Transpose[Sigma])_{ij}(t)
```
This method provides a finite difference solver to solve the above
differential equation. Whereas the coefficients `mu` and `D` are properties
of the SDE itself, the function `r(t, x)` may be arbitrarily specified
by the user (the parameter `discounting_fn` to this method).
Args:
final_time: Positive scalar real `Tensor`. The time of the final value.
The solver is initialized to this final time.
discounting_fn: Python callable corresponding to the function `r(t, x)` in
the description above. The callable accepts two positional arguments.
The first argument is the time at which the discount rate function is
needed. The second argument contains the values of the state at which
the discount is to be computed.
Default value: None which maps to `r(t, x) = 0`.
grid_spec: An iterable convertible to a tuple containing at least the
attributes named 'grid', 'dim' and 'sizes'. For a full description of
the fields and expected types, see `grids.GridSpec` which provides the
canonical specification of this object.
time_step: A real positive scalar `Tensor` or None. The fixed
discretization parameter along the time dimension. Either this argument
or the `time_step_fn` must be specified. It is an error to specify both.
Default value: None.
time_step_fn: A callable accepting an instance of `grids.GridStepperState`
and returning the size of the next time step as a real scalar tensor.
This argument allows usage of a non-constant time step while stepping
back. If not specified, the `time_step` parameter must be specified. It
is an error to specify both.
Default value: None.
values_batch_size: A positive Python int. The batch size of values to be
propagated simultaneously.
Default value: 1.
name: Python str. The name to give this op.
Default value: None which maps to `fd_solver_backward`.
**kwargs: Any other keyword args needed.
Returns:
An instance of `BackwardGridStepper` configured for solving the
Feynman-Kac PDE associated to this process.
"""
# TODO(b/141669934): Implement the method once the boundary conditions
# specification is complete.
raise NotImplementedError('Finite difference solver not implemented')
def _prefer_static_shape(tensor):
"""Returns the static shape if fully specified else the dynamic shape."""
tensor = tf.convert_to_tensor(tensor)
static_shape = tensor.shape
if static_shape.is_fully_defined():
return static_shape
return tf.shape(tensor)
def _prefer_static_rank(tensor):
"""Returns the static rank if fully specified else the dynamic rank."""
tensor = tf.convert_to_tensor(tensor)
if tensor.shape.rank is None:
return tf.rank(tensor)
return tensor.shape.rank
|
google/tf-quant-finance
|
tf_quant_finance/models/legacy/brownian_motion.py
|
Python
|
apache-2.0
| 19,227 | 0.003173 |
"""Supporting definitions for the Python regression tests."""
if __name__ != 'test.test_support':
raise ImportError, 'test_support must be imported from the test package'
import sys
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class TestSkipped(Error):
"""Test skipped.
    This can be raised to indicate that a test was deliberately
skipped, but not because a feature wasn't available. For
example, if some resource can't be used, such as the network
appears to be unavailable, this should be raised instead of
TestFailed.
"""
class ResourceDenied(TestSkipped):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
    has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
def unlink(filename):
import os
try:
os.unlink(filename)
except OSError:
pass
def forget(modname):
'''"Forget" a module was ever imported by removing it from sys.modules and
deleting any .pyc and .pyo files.'''
unload(modname)
import os
for dirname in sys.path:
unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
# Deleting the .pyo file cannot be within the 'try' for the .pyc since
# the chance exists that there is no .pyc (and thus the 'try' statement
# is exited) but there is a .pyo file.
unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
def is_resource_enabled(resource):
"""Test whether a resource is enabled. Known resources are set by
regrtest.py."""
return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True. The
possibility of False being returned occurs when regrtest.py is executing."""
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
def bind_port(sock, host='', preferred_port=54321):
"""Try to bind the sock to a port. If we are running multiple
    tests and we don't try multiple ports, the test can fail. This
makes the test more robust."""
import socket, errno
# Find some random ports that hopefully no one is listening on.
# Ideally each test would clean up after itself and not continue listening
# on any ports. However, this isn't the case. The last port (0) is
# a stop-gap that asks the O/S to assign a port. Whenever the warning
# message below is printed, the test that is listening on the port should
# be fixed to close the socket at the end of the test.
# Another reason why we can't use a port is another process (possibly
# another instance of the test suite) is using the same port.
for port in [preferred_port, 9907, 10243, 32999, 0]:
try:
sock.bind((host, port))
if port == 0:
port = sock.getsockname()[1]
return port
except socket.error, (err, msg):
if err != errno.EADDRINUSE:
raise
print >>sys.__stderr__, \
' WARNING: failed to listen on port %d, trying another' % port
raise TestFailed, 'unable to find port to listen on'
FUZZ = 1e-6
def fcmp(x, y): # fuzzy comparison function
if type(x) == type(0.0) or type(y) == type(0.0):
try:
x, y = coerce(x, y)
fuzz = (abs(x) + abs(y)) * FUZZ
if abs(x-y) <= fuzz:
return 0
except:
pass
elif type(x) == type(y) and type(x) in (type(()), type([])):
for i in range(min(len(x), len(y))):
outcome = fcmp(x[i], y[i])
if outcome != 0:
return outcome
return cmp(len(x), len(y))
return cmp(x, y)
try:
unicode
have_unicode = 1
except NameError:
have_unicode = 0
is_jython = sys.platform.startswith('java')
import os
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name == 'riscos':
TESTFN = 'testfile'
else:
TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
# Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# TESTFN_UNICODE is a filename that can be encoded using the
# file system encoding, but *not* with the default (ascii) encoding
if isinstance('', unicode):
# python -U
# XXX perhaps unicode() should accept Unicode strings?
TESTFN_UNICODE = "@test-\xe0\xf2"
else:
# 2 latin characters.
TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
# able to be encoded by *either* the default or filesystem encoding.
# This test really only makes sense on Windows NT platforms
# which have special Unicode support in posixmodule.
if (not hasattr(sys, "getwindowsversion") or
sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
TESTFN_UNICODE_UNENCODEABLE = None
else:
# Japanese characters (I think - from bug 846133)
TESTFN_UNICODE_UNENCODEABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
try:
# XXX - Note - should be using TESTFN_ENCODING here - but for
# Windows, "mbcs" currently always operates as if in
# errors=ignore' mode - hence we get '?' characters rather than
# the exception. 'Latin1' operates as we expect - ie, fails.
# See [ 850997 ] mbcs encoding ignores errors
TESTFN_UNICODE_UNENCODEABLE.encode("Latin1")
except UnicodeEncodeError:
pass
else:
print \
'WARNING: The filename %r CAN be encoded by the filesystem. ' \
'Unicode filename tests may not be effective' \
% TESTFN_UNICODE_UNENCODEABLE
# Make sure we can write to TESTFN, try in /tmp if we can't
fp = None
try:
fp = open(TESTFN, 'w+')
except IOError:
TMP_TESTFN = os.path.join('/tmp', TESTFN)
try:
fp = open(TMP_TESTFN, 'w+')
TESTFN = TMP_TESTFN
del TMP_TESTFN
except IOError:
print ('WARNING: tests will fail, unable to write to: %s or %s' %
(TESTFN, TMP_TESTFN))
if fp is not None:
fp.close()
unlink(TESTFN)
del os, fp
def findfile(file, here=__file__):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
import os
if os.path.isabs(file):
return file
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def verify(condition, reason='test failed'):
"""Verify that condition is true. If not, raise TestFailed.
The optional argument reason can be given to provide
a better error text.
"""
if not condition:
raise TestFailed(reason)
def vereq(a, b):
"""Raise TestFailed if a == b is false.
This is better than verify(a == b) because, in case of failure, the
error message incorporates repr(a) and repr(b) so you can see the
inputs.
Note that "not (a == b)" isn't necessarily the same as "a != b"; the
former is tested.
"""
if not (a == b):
raise TestFailed, "%r == %r" % (a, b)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = dict.items()
items.sort()
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def check_syntax(statement):
try:
compile(statement, '<string>', 'exec')
except SyntaxError:
pass
else:
print 'Missing SyntaxError: "%s"' % statement
def open_urlresource(url):
import urllib, urlparse
import os.path
    filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's a URL!
for path in [os.path.curdir, os.path.pardir]:
fn = os.path.join(path, filename)
if os.path.exists(fn):
return open(fn)
requires('urlfetch')
print >> get_original_stdout(), '\tfetching %s ...' % url
fn, _ = urllib.urlretrieve(url, filename)
return open(fn)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.func_name = func.func_name
inner.__doc__ = func.__doc__
return inner
return decorator
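# Hypothetical usage sketch: try the German then the French locale for
# LC_NUMERIC, restoring the original locale afterwards.
#
#   @run_with_locale('LC_NUMERIC', 'de_DE', 'fr_FR')
#   def test_float_formatting(self):
#       ...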
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
# Hack to get at the maximum value an internal index can take.
class _Dummy:
def __getslice__(self, i, j):
return j
MAX_Py_ssize_t = _Dummy()[:]
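# Slicing with an omitted upper bound passes PY_SSIZE_T_MAX to __getslice__,
# so _Dummy()[:] evaluates to the largest index the interpreter supports.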
def set_memlimit(limit):
import re
global max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
def bigmemtest(minsize, memuse, overhead=5*_1M):
"""Decorator for bigmem tests.
'minsize' is the minimum useful size for the test (in arbitrary,
test-interpreted units.) 'memuse' is the number of 'bytes per size' for
the test, or a good estimate of it. 'overhead' specifies fixed overhead,
    independent of the test size, and defaults to 5 MB.
The decorator tries to guess a good value for 'size' and passes it to
the decorated test function. If minsize * memuse is more than the
allowed memory use (as defined by max_memuse), the test is skipped.
Otherwise, minsize is adjusted upward to use up to max_memuse.
"""
def decorator(f):
def wrapper(self):
if not max_memuse:
# If max_memuse is 0 (the default),
# we still want to run the tests with size set to a few kb,
# to make sure they work. We still want to avoid using
# too much memory, though, but we do that noisily.
maxsize = 5147
self.failIf(maxsize * memuse + overhead > 20 * _1M)
else:
maxsize = int((max_memuse - overhead) / memuse)
if maxsize < minsize:
# Really ought to print 'test skipped' or something
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
# Try to keep some breathing room in memory use
maxsize = max(maxsize - 50 * _1M, minsize)
return f(self, maxsize)
wrapper.minsize = minsize
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
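# Hypothetical usage sketch: a test that needs roughly 2 bytes of memory per
# unit of size and at least 2G units to be meaningful; the decorator passes
# the computed size to the test.
#
#   @bigmemtest(minsize=_2G, memuse=2)
#   def test_concat(self, size):
#       ...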
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
else:
return f(self)
return wrapper
#=======================================================================
# Preliminary PyUNIT integration.
import unittest
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def run_suite(suite, testclass=None):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
if testclass is None:
msg = "errors occurred; run in verbose mode for details"
else:
msg = "errors occurred in %s.%s" \
% (testclass.__module__, testclass.__name__)
raise TestFailed(msg)
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, (unittest.TestSuite, unittest.TestCase)):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
if len(classes)==1:
testclass = classes[0]
else:
testclass = None
run_suite(suite, testclass)
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
test_support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
f, t = doctest.testmod(module, verbose=verbosity)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
def threading_setup():
import threading
return len(threading._active), len(threading._limbo)
def threading_cleanup(num_active, num_limbo):
import threading
import time
_MAX_COUNT = 10
count = 0
while len(threading._active) != num_active and count < _MAX_COUNT:
count += 1
time.sleep(0.1)
count = 0
while len(threading._limbo) != num_limbo and count < _MAX_COUNT:
count += 1
time.sleep(0.1)
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
import os
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
|
certik/pyjamas
|
pgen/test_support.py
|
Python
|
apache-2.0
| 18,242 | 0.003728 |
# -*- coding: utf-8 -*-
#
# pygeocodio documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 22 14:09:09 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"pygeocodio"
copyright = u"2014-2019, Ben Lopatin"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.11"
# The full version, including alpha/beta/rc tags.
release = "0.11.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "pygeocodiodoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
("index", "pygeocodio.tex", u"pygeocodio Documentation", u"Ben Lopatin", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "pygeocodio", u"pygeocodio Documentation", [u"Ben Lopatin"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"pygeocodio",
u"pygeocodio Documentation",
u"Ben Lopatin",
"pygeocodio",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
bennylope/pygeocodio
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,264 | 0.000242 |
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to
# make it easier to submit large numbers of jobs on supercomputers. It
# provides a python interface to physical input, such as crystal structures,
# as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs.
# It is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyLaDa. If not, see <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture
@fixture
def shell():
from IPython.core.interactiveshell import InteractiveShell
shell = InteractiveShell.instance()
shell.magic("load_ext pylada")
return shell
def Extract(outdir=None):
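    # Lightweight stand-in extraction object: reports whether OUTCAR exists
    # and, if so, the individual pickled into it by call_functional below.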
from os.path import exists
from os import getcwd
from collections import namedtuple
from pickle import load
from pylada.misc import chdir
    if outdir is None:
outdir = getcwd()
Extract = namedtuple("Extract", ["success", "directory", "indiv", "functional"])
if not exists(outdir):
return Extract(False, outdir, None, functional)
with chdir(outdir):
if not exists("OUTCAR"):
return Extract(False, outdir, None, functional)
with open("OUTCAR", "rb") as file:
indiv, value = load(file)
return Extract(True, outdir, indiv, functional)
def call_functional(indiv, outdir=None, value=False, **kwargs):
from pylada.misc import local_path
from pickle import dump
path = local_path(outdir)
path.ensure(dir=True)
dump((indiv, value), path.join("OUTCAR").open("wb"))
return Extract(outdir)
call_functional.Extract = Extract
@fixture
def functional():
return call_functional
|
pylada/pylada-light
|
tests/ipython/conftest.py
|
Python
|
gpl-3.0
| 2,390 | 0.000837 |
'''
Wireshark Analyzer(s)
@author: Michael Eddington
@version: $Id$
'''
#
# Copyright (c) Michael Eddington
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Michael Eddington (mike@phed.org)
# $Id$
from Peach.analyzer import *
from Peach.Engine.common import *
class WireSharkAnalyzer(Analyzer):
'''
Analyzers produce data and state models. Examples of analyzers would be
the parsing of Peach PIT XML files, tokenizing a string, building a data
model based on XML file, etc.
'''
#: Does analyzer support asCommandLine()
supportCommandLine = True
def asParser(self, uri):
'''
Called when Analyzer is used as default PIT parser.
Should produce a Peach DOM.
'''
raise Exception("asParser not supported")
def asDataElement(self, parent, args, dataBuffer):
'''
Called when Analyzer is used in a data model.
Should return a DataElement such as Block, Number or String.
'''
raise Exception("asDataElement not supported")
def asCommandLine(self, args):
'''
Called when Analyzer is used from command line. Analyzer
should produce Peach PIT XML as output.
'''
inFile = args["in"]
proto = args.get("proto")
outFile = args.get("out")
xml = DoTheShark(inFile, proto)
if outFile is not None:
fd = open(outFile, "wb+")
fd.write(xml)
fd.close()
else:
print xml
def asTopLevel(self, peach, args):
'''
Called when Analyzer is used from top level.
From the top level producing zero or more data models and
state models is possible.
'''
raise Exception("asTopLevel not supported")
import sys, struct, re
from Ft.Xml import Parse
def debug(msg):
sys.stderr.write("debug: %s\n" % msg)
#pdml/packet/proto
# method
# 1. Check for children; if we have them, make a block and recurse
# 2. Look for value show attribute and see if it contains a sub portion of the
# data (treat this differently)
# 3. Look for items labeled "len" or "length" and try to match them up
# 4. Optionally look at RFCs and try to match things up
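# (PDML is the XML packet-dissection format that Wireshark/tshark emits,
# e.g. via "tshark -T pdml"; the node names below follow that schema.)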
class PeachShark:
def __init__(self):
self._currentPos = 0
self._regexIp = re.compile(r"^\d+\.\d+\.\d+\.\d+$")
self._regexFlagBit1 = re.compile(r"^(\.*)(\d+)(\.*)")
self._relations = {}
self._findStack = []
self._templates = []
def inStr(self, text, values):
text = text.lower()
for value in values:
if text.find(value) > -1:
#debug("found str")
return True
#debug("No: %s" % text)
return False
def findSizeRelation(self, sizeNode, node):
# We know two things:
#
# 1. Sizes always come first
# 2. It will be the size of something :P
#
# Prevent infinite looping
if node in self._findStack:
return None
self._findStack.append(node)
size = self.findSizeGetSize(sizeNode)
# Search from us forward
sibling = sizeNode.nextSibling
while sibling != None:
checkSize = self._getNodeSize(sibling)
if checkSize == size:
return sibling
sibling = sibling.nextSibling
# That didn't work look from parent
for child in node.childNodes:
if child != sizeNode:
checkSize = self._getNodeSize(child)
if checkSize == size:
return child
ret = self.findSizeRelation(sizeNode, child)
if ret != None:
return ret
# Search from parent forward
sibling = node.nextSibling
while sibling != None:
if not sibling.hasAttributeNS(None, 'size'):
sibling = sibling.nextSibling
continue
checkSize = int(sibling.getAttributeNS(None, 'size'))
if checkSize == size:
return sibling
ret = self.findSizeRelation(sizeNode, sibling)
if ret != None:
return ret
sibling = sibling.nextSibling
# !!!TODO!!! Sometimes length can indicate the rest of our siblings
# but they may not be in a block of their own.
# -> Detect
# -> Force into a block
#
#sibling = node.previousSibling
#while sibling != None:
# sizeUptoMe += int(sibling.getAttributeNS(None, 'size'))
# sibling = sibling.previousSibling
#
## This is good, but not what we want!
#if (parentSize - sizeUptoMe) == size:
# return True
#else:
# debug("Nope: ParentSize: %d - SizeUptoMe: %d -- Size: %d" % (parentSize, sizeUptoMe, size))
return None
def findSizes(self, nodes):
'''
Find nodes that could be sizes or lengths.
'''
if nodes == None:
return []
findValues = ["length", "size"]
sizeNodes = []
for node in nodes:
if node == None:
continue
name = node.getAttributeNS(None, 'name')
show = node.getAttributeNS(None, 'show')
showName = node.getAttributeNS(None, 'showname')
if self.inStr(show, findValues) or self.inStr(showName, findValues) or self.inStr(name, findValues):
#debug("findSizes(): Found size: %s:%s" % (node.nodeName, name))
sizeNodes.append(node)
for n in self.findSizes(node.childNodes):
sizeNodes.append(n)
return sizeNodes
def findSizeGetSize(self, node):
'''
Take a size/length node and figure out its value.
'''
ret = None
if node.hasAttributeNS(None, 'show') and len(node.getAttributeNS(None, 'show')) > 0:
try:
return int(node.getAttributeNS(None, 'show'))
except:
pass
if node.hasAttributeNS(None, 'value') and len(node.getAttributeNS(None, 'value')) > 0:
try:
return int(node.getAttributeNS(None, 'value'), 16)
except:
pass
try:
return int(re.compile(r"(\d+)").search(node.getAttributeNS(None, 'show')).group(1))
except:
pass
debug("Failed on %s:%s" % (node.getAttributeNS(None, 'name'), node.nodeName))
debug("Show: " + node.getAttributeNS(None, 'show'))
debug("Value: " + node.getAttributeNS(None, 'value'))
raise Exception("Unable to determine the value of a size/length node")
def findSizeRelationCheckSelf(self, node):
'''
Check if parent - me + prior siblings == size
'''
parentSize = self._getNodeSize(node.parentNode)
sizeUptoMe = self._getNodeSize(node)
size = self.findSizeGetSize(node)
#debug("%d:%d" % (parentSize,size))
# If our parent is the size we are indicating
# then return True!
if parentSize == size:
return True
return False
def findSizeRelations(self, nodes):
'''
Find and resolve size relations.
'''
debug("Finding relations: " + nodes[0].nodeName)
if nodes[0].nodeName == 'proto':
parentNode = nodes[0]
else:
parentNode = nodes[0].parentNode
for node in self.findSizes(nodes):
#debug("findSizeRelations()... %s:%s" % (node.nodeName, node.getAttributeNS(None, 'name')))
if self.findSizeRelationCheckSelf(node):
debug("findSizeRelations: Found relation to parent: %s and %s" % (node.getAttributeNS(None, 'name'), node.parentNode.getAttributeNS(None, 'name')))
self._relations[node] = node.parentNode
else:
ret = self.findSizeRelation(node, parentNode)
if ret != None:
debug("findSizeRelations: Found relation: %s and %s" % (node.getAttributeNS(None, 'name'), ret.getAttributeNS(None, 'name')))
self._relations[node] = ret
def removeTextNodes(self, node):
for child in node.childNodes:
if child.nodeName == '#text':
node.removeChild(child)
else:
self.removeTextNodes(child)
def htmlEncode(self, strInput, default=''):
if strInput == None or len(strInput) == 0:
strInput = default
if strInput == None or len(strInput) == 0:
return ''
# Allow: a-z A-Z 0-9 SPACE , .
# Allow (dec): 97-122 65-90 48-57 32 44 46
out = ''
for char in strInput:
c = ord(char)
if ((c >= 97 and c <= 122) or
(c >= 65 and c <= 90 ) or
(c >= 48 and c <= 57 ) or
c == 32 or c == 44 or c == 46):
out += char
else:
out += "&#%d;" % c
return out
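# Example: htmlEncode('a<b') returns 'a&#60;b', since '<' (ord 60) is not in
# the allowed set above.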
def getNodeName(self, node):
'''
Check for name and show attributes. Figure out a possible name
for this node.
'''
if node.hasAttributeNS(None, 'name'):
name = node.getAttributeNS(None, 'name')
if len(name.strip()) < 1:
return None
# Sounds good on paper, but causes problems
#try:
# name = name[name.rindex('.')+1:]
#except:
# pass
return name.replace(' ', '_').replace('.', '_')
return None
def _getNodeSize(self, node):
if not node.hasAttributeNS(None, 'size'):
size = 0
for child in node.childNodes:
if child.hasAttributeNS(None, "size"):
size += int(child.getAttributeNS(None, 'size'))
else:
size = int(node.getAttributeNS(None, 'size'))
return size
def _getNodePosition(self, node):
if not node.hasAttributeNS(None, "pos"):
pos = 0
for child in node.childNodes:
if child.hasAttributeNS(None, "pos"):
pos = int(child.getAttributeNS(None, 'pos'))
break
else:
pos = int(node.getAttributeNS(None, 'pos'))
return pos
def peachNode(self, node, tabCount, size, parent):
if node.nodeName == '#text':
return '', 0, 0
tabs = '\t' * tabCount
name = node.getAttributeNS(None, 'name')
show = node.getAttributeNS(None, 'show')
showName = node.getAttributeNS(None, 'showname')
size = self._getNodeSize(node)
pos = self._getNodePosition(node)
ret = ''
nodeName = self.getNodeName(node)
if nodeName != None:
nodeName = 'name="%s"' % nodeName
else:
nodeName = ''
debug("peachNode: " + name)
# This should be prior sibling, not parent!!
if parent != None:
parentPos = self._getNodePosition(parent)
parentSize = self._getNodeSize(parent)
else:
parentPos = -1
parentSize = -1
self._currentPos = pos
if size == 0:
#print "Size == 0: ", node.getAttributeNS(None, 'size')
return '', 0, 0
if tabCount == 1:
# Do this just once
self.findSizeRelations([node])
if '-' in name:
newName = ''
for n in name.split('-'):
newName += n[:1].upper() + n[1:]
name = newName
self._groupName = name[:1].upper() + name[1:]
self._genName = name[:1].upper() + name[1:]
self._templates.append(self._genName)
name = node.getAttributeNS(None, 'name')
#if len(node.childNodes) > 0 and not (self._getNodeSize(node.childNodes[0]) == size and self._getNodePosition(node.childNodes[0]) == pos):
if len(node.childNodes) > 0:
curPos = pos
sizeOfChildren = 0
if tabCount == 1:
if len(showName) > 1: ret += tabs + '<!-- %s -->\n' % showName
ret += tabs + '<DataModel name="%s">\n' % self._genName
else:
ret += tabs + '<Block %s>\n' % nodeName
for child in node.childNodes:
if not child.hasAttributeNS(None, "value"):
continue
sibling = child.nextSibling
if sibling != None:
siblingSize = self._getNodeSize(sibling)
siblingPos = self._getNodePosition(sibling)
childSize = self._getNodeSize(child)
childPos = self._getNodePosition(child)
if siblingPos == childPos and siblingSize < childSize and sibling.hasAttributeNS(None, "value"):
debug("Skipping " + child.getAttributeNS(None, 'name') + " same as " + sibling.getAttributeNS(None, "name"))
ret += tabs + "\t<!-- Skipping %s, same as following fields -->\n" % child.getAttributeNS(None, 'name')
continue
childShow = child.getAttributeNS(None, 'show')
#print "Child: %s" % childShow
childRet, childSize, childPos = self.peachNode(child, tabCount + 1, size, node)
childPos = int(childPos)
childSize = int(childSize)
#print "Child: %s, %d, %d" % (childShow, childPos, childSize)
if childSize == 0:
if len(childRet) > 0:
ret += childRet
continue
if int(childPos) == pos + int(sizeOfChildren):
ret += childRet
else:
valueHex = node.getAttributeNS(None, 'value')
value = self.hex2bin(valueHex)
# Locate "extra" bits not covered by children and
# add them in. Maybe we should fuzz this too?
if curPos < childPos:
if len(valueHex) >= (childPos-pos)*2:
ret += tabs + "\t<!-- Found some extra bits... -->\n"
ret += tabs + "\t<Blob %s valueType=\"hex\" value=\"%s\" />\n" % (nodeName, valueHex[(curPos-pos)*2:(childPos-pos)*2])
else:
ret += tabs + "\t<!-- Found some extra bits, guessing they are z3r0 -->\n"
ret += tabs + "\t<Blob %s valueType=\"hex\" value=\"%s\" />\n\n" % (nodeName, ('00'*((childPos-pos) - (curPos-pos))))
ret += childRet
sizeOfChildren += childSize
curPos = childPos + childSize
#if sizeOfChildren != size:
# raise Exception("Size not match %d != %d" % (size, sizeOfChildren))
# Dunno if we need this anymore
if tabCount == 1:
name = self._genName[3:]
ret += tabs + '</DataModel>\n'
else:
ret += tabs + '</Block>\n'
else:
type = self.figureType(node)
valueHex = node.getAttributeNS(None, 'value')
show = node.getAttributeNS(None, 'show')
showName = node.getAttributeNS(None, 'showname')
if len(showName) < 1:
showName = show
value = self.hex2bin(valueHex)
if type != 'bit_flag':
if node.previousSibling != None:
previousSiblingPos = self._getNodePosition(node.previousSibling)
previousSiblingSize = self._getNodeSize(node.previousSibling)
if pos == previousSiblingPos and size == previousSiblingSize:
debug("node same position and size of previousSibling")
return tabs + "<!-- *** Skipping %s, same position and size of previousSibling *** -->\n\n" % node.getAttributeNS(None, 'name'), 0, 0
#return '', 0, 0
#ret += " [%s] " % type
if len(showName) > 0:
ret += tabs + '<!-- %s -->\n' % showName
if type.find('str') > -1:
# TODO: We should take into account that this string
# may be fixed in size as well as different lengths.
if len(valueHex) == size*2:
str = 'valueType="hex" value="%s"' % valueHex
else:
str = 'value="%s"' % value
if type == 'str':
# regular string
ret += tabs + '<String %s %s' % (nodeName, str)
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</String>\n'
else:
ret += ' />\n'
elif type == 'p_str':
# Padded string
ret += tabs + '<String %s %s length="%d"' % (nodeName, str, size)
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</String>\n'
else:
ret += ' />\n'
elif type == 'w_str':
# wchar string
ret += tabs + '<String %s type="wchar" %s' % (nodeName, str)
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</String>\n'
else:
ret += ' />\n'
elif type == 'p_w_str':
# padded wchar string
ret += tabs + '<String %s type="wchar" length="%d" %s' % (nodeName, size/2, str)
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</String>\n'
else:
ret += ' />\n'
elif type == 'byte' or type == 'uint8':
ret += tabs + '<Number %s size="8" valueType="hex" value="%s" signed="false"' % (nodeName, valueHex)
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</Number>\n'
else:
ret += ' />\n'
elif type == 'int16':
ret += tabs + ('<Number %s size="16" valueType="hex" value="%s" signed="true"' % (nodeName, valueHex))
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</Number>\n'
else:
ret += ' />\n'
elif type == 'uint16':
ret += tabs + ('<Number %s size="16" valueType="hex" value="%s" signed="false"' % (nodeName, valueHex))
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</Number>\n'
else:
ret += ' />\n'
elif type == 'n_int16':
ret += tabs + ('<Number %s size="16" valueType="hex" value="%s" signed="true" endian="big"' % (nodeName, valueHex))
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</Number>\n'
else:
ret += ' />\n'
elif type == 'n_uint16':
ret += tabs + ('<Number %s size="16" valueType="hex" value="%s" signed="false" endian="big"' % (nodeName, valueHex))
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</Number>\n'
else:
ret += ' />\n'
elif type == 'int32':
ret += tabs + ('<Number %s size="32" valueType="hex" value="%s" signed="true"' % (nodeName, valueHex))
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</Number>\n'
else:
ret += ' />\n'
elif type == 'uint32':
ret += tabs + ('<Number %s size="32" valueType="hex" value="%s" signed="false"' % (nodeName, valueHex))
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</Number>\n'
else:
ret += ' />\n'
elif type == 'n_int32':
ret += tabs + ('<Number %s size="32" valueType="hex" value="%s" signed="true" endian="big"' % (nodeName, valueHex))
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</Number>\n'
else:
ret += ' />\n'
elif type == 'n_uint32':
ret += tabs + ('<Number %s size="32" valueType="hex" value="%s" signed="false" endian="big"' % (nodeName, valueHex))
if self._relations.has_key(node):
of = self._relations[node].getAttributeNS(None, 'name').replace('.', '_')
ret += '>\n' + tabs + '\t<Relation type="size" of="'+of+'" />\n'+tabs+'</Number>\n'
else:
ret += ' />\n'
elif type == 'blob':
ret += tabs + '<Blob %s valueType="hex" value="%s" />\n' % (nodeName, valueHex)
elif type == 'ip':
#ret += tabs + "WithDefault(%s.addNewGroup(), '%s', BadIpAddress()).setTransformer(Ipv4StringToOctet()),\n" % ( self._groupName, show )
ret += tabs + "<!-- TODO: Handle IP Address Better! -->\n"
ret += tabs + '<String %s value="%s">\n' % (nodeName, show)
ret += tabs + '\t<Transformer class="encode.Ipv4StringToOctet" />\n'
ret += tabs + '</String>\n'
#raise Exception("TODO")
elif type == 'n_ip':
#ret += tabs + "WithDefault(%s.addNewGroup(), '%s', BadIpAddress()).setTransformer(Ipv4StringToNetworkOctet()),\n" % ( self._groupName, show )
ret += tabs + "<!-- TODO: Handle IP Address Better! -->\n"
ret += tabs + '<String %s value="%s">\n' % (nodeName, show)
ret += tabs + '\t<Transformer class="encode.Ipv4StringToNetworkOctet" />\n'
ret += tabs + '</String>\n'
#raise Exception("TODO")
elif type == 'bit_flag':
# TODO: Handle flags!
if node.previousSibling == None:
# First flag, lets do it!
nodeNames = []
offsets = []
bits = []
shownames = []
length = 0
offset, bit = self.getFlagBits(node)
length += bit
offsets.append(offset)
bits.append(bit)
shownames.append(showName)
nodeName = self.getNodeName(node)
if nodeName != None:
nodeNames.append('name="%s"' % nodeName)
else:
nodeNames.append('')
sibling = node.nextSibling
while sibling != None:
offset, bit = self.getFlagBits(sibling)
length += bit
offsets.append(offset)
bits.append(bit)
shownames.append(sibling.getAttributeNS(None, 'showname'))
nodeName = self.getNodeName(sibling)
if nodeName != None:
nodeNames.append('name="%s"' % nodeName)
else:
nodeNames.append('')
sibling = sibling.nextSibling
# Now output Flags generator
# make sure length is multiple of 2
while length % 2 != 0:
length += 1
parentName = self.getNodeName(node.parentNode)
if parentName != None:
ret += tabs + '<Flags name="%s" size="%d">\n' % (parentName, length)
else:
ret += tabs + '<Flags size="%d">\n' % length
for i in range(len(offsets)):
ret += tabs + '\t<Flag %s position="%d" size="%d" />\n' % (nodeNames[i], offsets[i], bits[i])
ret += tabs + "</Flags>\n"
else:
raise Exception("Unknown type: %s" % type)
return ret + '\n', size, pos
def hex2bin(self, h):
'''
Convert hex string to binary string
'''
ret = ''
for cnt in range(0, len(h), 2):
ret += chr(int(h[cnt:cnt+2],16))
return ret
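# Example: hex2bin('414243') returns 'ABC'.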
def isWideString(self, str):
'''
Is this a wchar string?
'''
# Wide chars should always have even string
# length
if len(str) < 4 or len(str) % 2 != 0:
return False
for i in range(0, len(str), 2):
c = str[i]
c2 = str[i+1]
# Assume we don't actually have characters that
# require two bytes to display. So second byte
# should always be NULL
if c2 != '\0':
return False
o = ord(c)
if o < 32 or o > 126:
if c == '\n' or c == '\r' or c == '\t':
continue
return False
return True
def isPaddedWideString(self, str):
'''
Is this a wchar string with nulls at the end?
'''
# Wide chars should always have even string
# length
if len(str) < 4 or len(str) % 2 != 0:
return False
if str[-1] != '\0' or str[-2] != '\0':
return False
for i in range(0, len(str), 2):
c = str[i]
c2 = str[i+1]
# Assume we don't actually have characters that
# require two bytes to display. So second byte
# should always be NULL
if c2 != '\0':
return False
o = ord(c)
if o < 32 or o > 126:
if c == '\n' or c == '\r' or c == '\t' or c == '\0':
continue
return False
return True
def isString(self, str):
'''
Is this a char string?
'''
if len(str) < 3:
return False
for c in str:
o = ord(c)
if o < 32 or o > 126:
if c == '\n' or c == '\r' or c == '\t':
continue
return False
#debug("isString('%s'): True" % str)
return True
def isPaddedString(self, str):
'''
Is this a char string with nulls at the end?
'''
if len(str) < 3:
#debug("to small")
return False
if str[-1] != '\0':
#debug("no null term")
return False
for c in str:
o = ord(c)
if o < 32 or o > 126:
if c == '\n' or c == '\r' or c == '\t' or c == '\0':
continue
debug("odd char [%d]" % o)
return False
return True
def getFlagBits(self, node):
'''
Checks the showname field to see if we can determine
the number of bits in this flag and its offset in the packet.
'''
# .... ...1 .... .... = Recursion desired: Do query recursively
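# For the example above, once spaces are removed: offset=7 (the leading
# dots) and bits=1 (the single digit).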
show = node.getAttributeNS(None, 'showname')
#debug("flag str (initial): [%s]" % show)
# remove spaces
show = show.replace(' ', '')
# Get dots and numbers
try:
result = self._regexFlagBit1.match(show)
firstDots = result.group(1)
number = result.group(2)
lastDots = result.group(3)
offset = len(firstDots)
bits = len(number)
#debug("flag str: [%s]" % show)
#debug("offset: %d - bits: %s - remander: %d" % (offset, bits, len(lastDots)))
if (len(firstDots) + len(number) + len(lastDots)) % 2 != 0:
debug("getFlagBits(): Something fishy about this!!! %d" % (len(firstDots) + len(number) + len(lastDots)))
return offset, bits
except:
return -1, 1
def figureType(self, node):
# Try and figure out our type, number, string, etc.
show = node.getAttributeNS(None, 'show')
showName = node.getAttributeNS(None, 'showname')
value = self.hex2bin(node.getAttributeNS(None, 'value'))
valueHex = node.getAttributeNS(None, 'value')
size = self._getNodeSize(node)
pos = self._getNodePosition(node)
parentPos = self._getNodePosition(node.parentNode)
parentSize = self._getNodeSize(node.parentNode)
#print "Show: [%s], valueHex: [%s], size: %d" % (show, valueHex, size)
if showName != None and showName.find("Data:") == 0:
return 'blob'
# If we just compare pos == parentPos we will get the first
# child. Should also check next child and size
if pos == parentPos and parentSize == size:
# A flag will have the same position as its parent;
# the parent will have a size of 1
#print "bit_flag: pos: %d parentPos: %d" % (pos, parentPos)
#debug("show: %s - showName: %s" % (show, showName))
(p,l) = self.getFlagBits(node)
if p > -1:
return 'bit_flag'
if len(value) > 2 and value.isalnum() and not show.isdigit():
return 'str'
elif self._regexIp.match(show) != None and size == 4:
# ip address
ip1, ip2, ip3, ip4 = show.split('.')
#debug("ip: %s - %s - %s - %s" % (show, ip1, valueHex[len(valueHex)-2:], valueHex))
if int(ip1) == int(valueHex[6:], 16) and int(ip2) == int(valueHex[4:6], 16) and int(ip3) == int(valueHex[2:4], 16) and int(ip4) == int(valueHex[:2], 16):
return 'n_ip'
if int(ip1) == int(valueHex[:2], 16):
return 'ip'
elif show[:2] == "0x":
# Figure if we are little or big endian
showHex = show[2:]
if showHex == valueHex or int(showHex, 16) == int(valueHex, 16):
# little
if size == 1:
return 'uint8'
if size == 2:
return 'uint16'
elif size == 4:
return 'uint32'
elif size == 8:
return 'uint64'
#debug("bigBalue: [%s][%s][%s]" % (valueHex, show, repr(value)))
if len(value) == 2:
format = '!H'
elif len(value) == 4:
format = '!I'
else:
debug("There's an issue with bigValue: [%s][%s][%s]" % (valueHex, show, repr(value)))
if len(value) > 4:
value = value[:4]
format = '!I'
else:
value = value.ljust(4)
format = '!I'
bigValue = struct.unpack(format, value)[0]
if int(bigValue) == int(showHex, 16):
if size == 1:
return 'n_uint8'
if size == 2:
return 'n_uint16'
elif size == 4:
return 'n_uint32'
elif size == 8:
return 'n_uint64'
elif not show.isdigit() and self.isWideString(value):
return 'w_str'
elif not show.isdigit() and self.isPaddedWideString(value):
return 'p_w_str'
elif not show.isdigit() and self.isString(value):
return 'str'
elif not show.isdigit() and self.isPaddedString(value):
return 'p_str'
elif show.isdigit() or (len(showName) == 0 and size <= 4):
cnt = len(valueHex)
if size == 1:
# Byte I bet
return 'byte'
elif size == 2:
# Maybe 16 bit int?
try:
show = int(show)
except:
show = 0
try:
val = struct.unpack('H', value)[0]
if int(val) == show:
return 'uint16'
val = struct.unpack('h', value)[0]
if val == show:
return 'int16'
val = struct.unpack('!H', value)[0]
if val == show:
return 'n_uint16'
val = struct.unpack('!h', value)[0]
if val == show:
return 'n_int16'
except struct.error:
pass
return 'n_uint16'
elif size == 4:
# Maybe 32 bit int? Convert show to an int first, as in the 16-bit case.
try:
show = int(show)
except:
show = 0
try:
if struct.unpack('I', value)[0] == show:
return 'uint32'
if struct.unpack('i', value)[0] == show:
return 'int32'
if struct.unpack('!i', value)[0] == show:
return 'n_int32'
except struct.error:
pass
return 'n_uint32'
return 'blob'
def figureOutPublisher(self, doc):
'''
Look for udp or tcp protocol and pull out
address and port.
'''
defaultPublisher = "\t<Publisher class=\"Publisher\" />"
nodes = doc.xpath('descendant::proto[@name="ip"]')
if len(nodes) == 0:
return defaultPublisher
nodeIp = nodes[0]
nodes = doc.xpath('descendant::proto[@name="tcp"]')
if len(nodes) == 0:
nodes = doc.xpath('descendant::proto[@name="udp"]')
if len(nodes) == 0:
return defaultPublisher
nodeProt = nodes[0]
m = re.search(r"Dst: ([^\s(]*)", str(nodeIp.getAttributeNS(None, 'showname')))
ip = m.group(1)
ret = ''
for child in nodeProt.childNodes:
if child.getAttributeNS(None, 'name') == 'udp.dstport':
ret ="""
<Publisher class="udp.Udp">
<Param name="Host" value="%s" />
<Param name="Port" value="%s" />
</Publisher>
""" % (ip, str(child.getAttributeNS(None, 'show')))
if child.getAttributeNS(None, 'name') == 'tcp.dstport':
ret ="""
<Publisher class="tcp.Tcp">
<Param name="Host" value="%s" />
<Param name="Port" value="%s" />
</Publisher>
""" % (ip, str(child.getAttributeNS(None, 'show')))
return ret
# ########################################################################
def DoTheShark(fileName, proto):
if proto == 2:
# print out the protocols
print "Select one of the following protocols:\n"
doc = Parse(fileName)
nodes = doc.xpath('descendant::proto')
for n in nodes:
print "\t", n.getAttributeNS(None, 'name')
raise PeachException("")
name = fileName
doc = Parse(fileName)
node = doc.xpath('descendant::proto[@name="%s"]' % proto)[0]
shark = PeachShark()
shark.removeTextNodes(node.parentNode)
ret = """<?xml version="1.0" encoding="utf-8"?>
<Peach xmlns="http://phed.org/2008/Peach" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://phed.org/2008/Peach /peach/peach.xsd">
<!-- ==// Auto Generated by PeachShark //== -->
<!--
Please do the following before using this fuzzer:
- Take a look through the generated output, see if it makes sense
- Integrate into a Peach Fuzzer
-->
<!-- Import defaults for Peach instance -->
<Include ns="default" src="file:defaults.xml"/>
"""
sibling = node
while sibling != None:
#shark.removeTextNodes(sibling.parentNode)
debug("Handing node: " + sibling.getAttributeNS(None, 'name'))
templateStr, s, p = shark.peachNode(sibling, 1, sibling.getAttributeNS(None, 'size'), None)
ret += templateStr
sibling = sibling.nextSibling
ret += '\t<DataModel name="SharkTemplate">\n'
for t in shark._templates:
ret += '\t\t<Block ref="%s" />\n' % t
ret += """
</DataModel>
<Test name="MyTest">
%s
</Test>
<Run name="DefaultRun">
<Test ref="MyTest" />
</Run>
</Peach>
""" % shark.figureOutPublisher(doc)
return ret
# end
|
thecrackofdawn/Peach2.3
|
Peach/Analyzers/shark.py
|
Python
|
mit
| 32,258 | 0.048112 |
# pdteco.py
# A public-domain Python implementation of the core commands of TECO.
# This code is released to the public domain.
# "Share and enjoy....." ;)
#
# *** To do...... ***
# NOTE - the built-in functions f.tell() and f.seek() should be very
# useful.
# From the Python docs -
# f.tell() returns an integer giving the file object’s current position
# in the file, measured in bytes from the beginning of the file.
# To change the file object’s position, use f.seek(offset, from_what).
# The position is computed from adding offset to a reference point;
# the reference point is selected by the from_what argument.
# A from_what value of 0 measures from the beginning of the file,
# 1 uses the current file position, and 2 uses the end of the file
# as the reference point.
# from_what can be omitted and defaults to 0, using the beginning of
# the file as the reference point.
# NOTE - Most TECO commands follow this general pattern -
# nX string ESC
# We need to implement the following types of commands -
# a) File commands -
# - ERfname$ - open file "fname" for read access
# - EBfname$ - open file for read/write with backup
# - EWfname$ - open file for writing.
# - EX$$ - close output file and exit.
# b) The eight basic Teco functions
# - DOT (current value of POINT)
# - nC - Move POINT n characters forward.
# - nD - Delete n characters.
# - Istring<ESC> - Insert text.
# - nJ - Move POINT to absolute position n.
# - m,nK - Kill a range of characters.
# - Sstring<ESC> - Search for a string.
# - Z - Current buffer size.
# c) Line-oriented commands -
# - nL - Move to beginning of the nth line from POINT.
# - nK - Kill from POINT to beginning of the nth following
# line.
# d) Looping -
# - n< - Begin an n-iteration loop.
# - > - End loop.
# - n; - Exit loop if n >= 0.
# e) Conditionals -
# - n"x - ( To be completed..... )
# f) "Q-registers", to store results.
# g) Conversion functions, from numbers to strings and vice versa.
# Helper functions
# Move n characters left or right from current position
# Use f.seek(n, 1) where 1 denotes "measure from current position"
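# For example, f.seek(-3, 1) moves POINT back three bytes, while f.seek(0, 2)
# jumps POINT to the end of the file (Z, the buffer size, in TECO terms).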
import string, linecache, os, fileinput, curses, curses.ascii, traceback
class editor(object):
def __init__(self):
self.dot = 0
self.buf = []
# The "Q-registers" (variables)
self.qregs = {}
self.fname = None
self.pos = self.line = 0
# Open a file
def open(self, fname):
#self.f = f.readlines()
self.f = open(fname, 'r+')
# Move to a line
def move2line(self, line):
pass
# Move by a given number of bytes from the current position
def moveinline(self, n):
self.f.seek(n, 1)
# Show the current position of the pointer.
def showptr(self):
return self.f.tell()
# Print a given number of bytes
def display(self, n):
self.f.read(n)
# Search for some text
def search(self, str):
pass
# Replace some text
def replace(self, target, repwith):
pass
# Insert some text
def ins_text(self, txt):
pass
# Delete some text
def del_text(self, txt):
pass
# Now the curses side of things.
# A class to handle keystrokes
class keyhandler:
def __init__(self, scr):
self.scr = scr
# Dictionary to store our data in. This uses the line-number as
# the key, and the line text as the data.
self.data = {}
self.stuff = ""
# A variable to save the line-number of text.
self.win_y = self.win_x = 0
# The screen size (number of rows and columns).
(self.max_y, self.max_x) = self.scr.getmaxyx()
# The top and bottom lines. These are defined because they help
# with page-up and page-down.
self.topline = 0
self.bottomline = self.max_y - 1
# Set page size (for page-up and page-down)
self.pagesize = self.max_y-1
curses.noecho()
self.scr.keypad(1)
self.scr.scrollok(1)
self.scr.idlok(1)
self.scr.setscrreg(0, self.max_y-1)
self.scr.refresh()
# Move to the next line (not yet implemented - see the "To do" list above).
def nextline(self):
pass
def action(self):
while (1):
curses.echo()
(y, x) = self.scr.getyx()
c=self.scr.getch() # Get a keystroke
if c in (curses.KEY_ENTER, 10):
#self.nextline()
pass
elif c==curses.KEY_BACKSPACE:
pass
elif c==curses.KEY_DC:
curses.noecho()
#self.removechar()
self.scr.refresh()
elif c==curses.KEY_UP:
curses.noecho()
self.scr.refresh()
# Ctrl-G quits the app
elif c==curses.ascii.BEL:
break
elif 0<c<256:
c=chr(c)
if x < self.max_x-2:
self.stuff += c
else:
self.nextline()
# Main loop
def main(stdscr):
a = keyhandler(stdscr)
a.action()
# Run the code from the command-line
if __name__ == '__main__':
try:
stdscr = curses.initscr()
curses.noecho() ; curses.cbreak()
stdscr.keypad(1)
main(stdscr) # Enter the main loop
# Set everything back to normal
stdscr.keypad(0)
curses.echo() ; curses.nocbreak()
curses.endwin() # Terminate curses
except:
# In the event of an error, restore the terminal
# to a sane state.
stdscr.keypad(0)
curses.echo() ; curses.nocbreak()
curses.endwin()
traceback.print_exc() # Print the exception
|
mooseman/pdteco
|
pdteco.py
|
Python
|
unlicense
| 6,753 | 0.041636 |
# Copyright 2021 The Waymo Open Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from waymo_open_dataset.metrics.ops import py_metrics_ops
from waymo_open_dataset.protos import motion_metrics_pb2
class MotionMetricsOpsTest(tf.test.TestCase):
"""Unit tests for motion metrics."""
def _BuildConfig(self, additional_config_str=''):
"""Builds a metrics config."""
config = motion_metrics_pb2.MotionMetricsConfig()
config_text = """
track_steps_per_second: 10
prediction_steps_per_second: 10
track_history_samples: 0
track_future_samples: 4
step_configurations {
measurement_step: 3
lateral_miss_threshold: 1.0
longitudinal_miss_threshold: 2.0
}
max_predictions: 6
speed_scale_lower: 1.0
speed_scale_upper: 1.0
speed_lower_bound: 1.4
speed_upper_bound: 11.0
""" + additional_config_str
text_format.Parse(config_text, config)
return config
def _CreateTestScenario(self):
gt_scenario_id = ['test']
gt_object_id = [[1, 2]]
gt_object_type = [[1, 1]]
gt_is_valid = np.ones([1, 2, 5], dtype=bool)
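# Each 7-value state below is assumed to be laid out as
# [center_x, center_y, length, width, heading, velocity_x, velocity_y].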
gt_trajectory = np.reshape([[[2, 2, 1, 1, 0.78539816, 20.0, 20.0],
[4, 4, 1, 1, 0.78539816, 20.0, 20.0],
[6, 6, 1, 1, 0.78539816, 20.0, 20.0],
[8, 8, 1, 1, 0.78539816, 20.0, 20.0],
[10, 10, 1, 1, 0.78539816, 20.0, 20.0]],
[[-1, 0, 1, 1, 3.14159, -10.0, 0.0],
[-2, 0, 1, 1, 3.14159, -10.0, 0.0],
[-3, 0, 1, 1, 3.14159, -10.0, 0.0],
[-4, 0, 1, 1, 3.14159, -10.0, 0.0],
[-5, 0, 1, 1, 3.14159, -10.0, 0.0]]],
[1, 2, 5, 7])
pred_gt_indices = np.reshape([0, 1], (1, 1, 2))
pred_gt_indices_mask = np.ones((1, 1, 2)) > 0.0
return {
'scenario_id': gt_scenario_id,
'object_id': gt_object_id,
'object_type': gt_object_type,
'gt_is_valid': gt_is_valid,
'gt_trajectory': gt_trajectory,
'pred_gt_indices': pred_gt_indices,
'pred_gt_indices_mask': pred_gt_indices_mask,
}
def setUp(self):
super(MotionMetricsOpsTest, self).setUp()
self._config = self._BuildConfig()
self._gt = self._CreateTestScenario()
def _RunEval(self, pred_score, pred_trajectory, gt=None, config=None):
if not gt:
gt = self._gt
if not config:
config = self._config
g = tf.Graph()
with g.as_default():
(min_ade, min_fde, miss_rate, overlap_rate,
mean_ap) = py_metrics_ops.motion_metrics(
config=config.SerializeToString(),
prediction_trajectory=pred_trajectory,
prediction_score=pred_score,
ground_truth_trajectory=gt['gt_trajectory'],
ground_truth_is_valid=gt['gt_is_valid'],
prediction_ground_truth_indices=gt['pred_gt_indices'],
prediction_ground_truth_indices_mask=gt['pred_gt_indices_mask'],
object_type=gt['object_type'],
object_id=gt['object_id'],
scenario_id=gt['scenario_id'])
with self.test_session(graph=g) as sess:
return sess.run([min_ade, min_fde, miss_rate, overlap_rate, mean_ap])
def testComputeMissRateNoMisses(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
def testComputeMissRateNoMisses2(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[-2, 0], [-3, 0], [-4, 0], [-5, 0]],
[[4, 4], [6, 6], [8, 8], [10, 10]]],
(1, 1, 1, 2, 4, 2))
gt = copy.deepcopy(self._gt)
gt['pred_gt_indices'] = np.reshape([1, 0], (1, 1, 2))
val = self._RunEval(pred_score, pred_trajectory, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
def testComputeMissRateLateral_2(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape(
[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 1.01], [-3, 1.01], [-4, 1.01], [-5, 1.01]]], (1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeMissRateLateral_1(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [9.292, 10.708]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeMissRateLongitudinal_2(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-7.01, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeMissRateLongitudinal_1(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [11.415, 11.415]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeNoMissLongitudinal_1(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [11.414, 11.414]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
def testComputeVelocityScalingLatitudinal(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0.75]]],
(1, 1, 1, 2, 4, 2))
config = motion_metrics_pb2.MotionMetricsConfig()
config.CopyFrom(self._config)
config.speed_scale_lower = 0.5
config.speed_scale_upper = 1.0
config.speed_lower_bound = 1.0
config.speed_upper_bound = 3.0
val = self._RunEval(pred_score, pred_trajectory, config=config)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
# Decrease the velocity below the speed lower bound.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5:7] = 0.0
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# Set the velocity to just below the speed required for object2 to fit.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5] = 1.999
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# Set the velocity to just above the speed required for object2 to fit.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5] = 2.001
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
def testComputeVelocityScalingLongitudinal(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-6.5, 0]]],
(1, 1, 1, 2, 4, 2))
config = motion_metrics_pb2.MotionMetricsConfig()
config.CopyFrom(self._config)
config.speed_scale_lower = 0.5
config.speed_scale_upper = 1.0
config.speed_lower_bound = 1.0
config.speed_upper_bound = 3.0
val = self._RunEval(pred_score, pred_trajectory, config=config)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
# Decrease the velocity below the speed lower bound.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5:7] = 0.0
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# Set the velocity to just below the speed required for object2 to fit.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5] = 1.999
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# Set the velocity to just above the speed required for object2 to fit.
gt = copy.deepcopy(self._gt)
gt['gt_trajectory'][0, 1, :, 5] = 2.001
val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
def testComputeNoMissLateral_2(self):
pred_score = np.reshape([0.8, 0.5], (1, 1, 2))
pred_trajectory = np.reshape([[[[4, 4], [6, 6], [8, 8], [9.294, 10.706]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]]],
(1, 1, 2, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
def testTwoJointPredictionsNoMiss(self):
pred_score = np.reshape([0.8, 0.5], (1, 1, 2))
pred_trajectory = np.reshape([[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-7.01, 0]]],
[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]]],
(1, 1, 2, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.5)
def testTwoJointPredictionsMiss(self):
pred_score = np.reshape([0.8, 0.5], (1, 1, 2))
pred_trajectory = np.reshape([[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-7.01, 0]]],
[[[4, 4], [6, 6], [8, 8], [14, 14]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]]],
(1, 1, 2, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeMinADE(self):
pred_score = np.reshape([0.5, 0.5], (1, 1, 2))
pred_trajectory = np.reshape(
[[[[4, 0], [6, 0], [8, 0], [10, 0]], [[0, 2], [0, 3], [0, 4], [0, 5]]],
[[[14, 0], [16, 0], [18, 0], [20, 0]],
[[0, 22], [0, 23], [0, 24], [0, 25]]]], (1, 1, 2, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# 5 metrics.
self.assertEqual(len(val), 5)
# 3 steps.
self.assertEqual(len(val[0]), 3)
# ADE of Vehicle.
self.assertAlmostEqual(val[0][0], 5.97487, delta=1e-4)
# FDE of Vehicle.
self.assertAlmostEqual(val[1][0], 8.53553, delta=1e-4)
if __name__ == '__main__':
tf.compat.v1.disable_eager_execution()
tf.test.main()
|
waymo-research/waymo-open-dataset
|
waymo_open_dataset/metrics/ops/motion_metrics_ops_test.py
|
Python
|
apache-2.0
| 13,287 | 0.001806 |
"""
sidewalk.exceptions
This module contains custom exceptions that can be thrown by Sidewalk.
:copyright: (c) 2013 by Blake Rohde.
:license: ISC, see LICENSE for more details.
"""
class SidewalkSettingsFileIOError(Exception):
"""Settings file IOError."""
def __init__(self, filename, permission):
self.filename = filename
self.permission = permission
def __str__(self):
return repr('%s %s' % (
self.filename,
self.permission
))
class SidewalkSectionNotDefined(Exception):
"""The specified settings file does not contain a required section."""
def __init__(self, filename, section):
self.filename = filename
self.section = section
def __str__(self):
return repr('%s %s' % (
self.filename,
self.section
))
class SidewalkKeyDoesNotExist(Exception):
"""Activity processor requested is not defined."""
def __init__(self, key):
self.key = key
def __str__(self):
return repr(self.key)
class SidewalkGroupDoesNotExist(Exception):
"""Activity processor group requested is not defined."""
def __init__(self, group_key):
self.group_key = group_key
def __str__(self):
return repr(self.group_key)
class SidewalkModuleImportError(Exception):
"""Activity processor module could not be imported."""
def __init__(self, module):
self.module = module
def __str__(self):
return repr(self.module)
class SidewalkMethodDoesNotExist(Exception):
"""The Activity processor (method) does exist in the specified module."""
def __init__(self, module, method):
self.module = module
self.method = method
def __str__(self):
return repr('%s %s' % (
self.module,
self.method
))
class SidewalkRogueActivityProcessor(Exception):
"""The Activity processor threw an unhandled exception."""
def __init__(self, activity_processor):
self.activity_processor = activity_processor
def __str__(self):
return repr(self.activity_processor)
|
blakerohde/sidewalk
|
sidewalk/core/exceptions.py
|
Python
|
isc
| 1,924 | 0.04262 |
import gtk
import pygtk
import pexpect
import gio
import commands
import gobject
class RemoteMounter:
def login_remote(self, host, port, username, password):
'''
Mount the remote file system and update the remote file
chooser to the corresponding location.
Any remote filesystem, previously mounted by the application,
is first unmounted.
'''
if self.is_mounted:
self.unmount_remote()
self.mount_remote(host, port, username, password)
remote_uri = 'file:///home/' + self.local_username + '/.gvfs/'
self.remote_file_chooser.set_current_folder_uri(remote_uri)
# gobject.idle_add(self.remote_file_chooser.set_uri, remote_uri)
def unmount_remote(self):
'''
Unmount a previously mounted remote file system.
Also, set the remote file chooser widget insensitive.
'''
if self.is_mounted:
(status, output) = commands.getstatusoutput('gvfs-mount -u sftp://' + self.last_mount)
self.is_mounted = False
gobject.idle_add(self.remote_file_chooser.set_sensitive, False)
def already_mounted(self, host, username):
'''
Return True if the remote filesystem has already been mounted,
else return False.
'''
(status, output) = commands.getstatusoutput('ls /home/' + self.local_username + '/.gvfs/')
if output.find('sftp for ' + username + ' on ' + host) != -1:
return True
return False
def mount_remote(self, host, port, username, password):
'''
Mount the remote filesystem if it is not mounted already.
Also, set the remote file chooser widget sensitive.
'''
if port == '':
port = 22
remote = username + '@' + host + ':' + str(port)
if not self.already_mounted(host, username):
child = pexpect.spawn('gvfs-mount sftp://' + remote)
child.expect(r'Password:\s*')
child.sendline(password)
child.expect(pexpect.EOF)
self.is_mounted = True
self.last_mount = remote
gobject.idle_add(self.remote_file_chooser.set_sensitive, True)
def __init__(self, remote_file_chooser):
'''
Constructor. Assign a data member pointing to the remote
file chooser widget.
Initialize data members related to previous mounts
to their defaults.
Obtain the name of the local user.
'''
self.remote_file_chooser = remote_file_chooser
self.is_mounted = False
self.last_mount = ''
(status, self.local_username) = commands.getstatusoutput('whoami')
|
viswanathgs/EratoSCP
|
src/mountremote.py
|
Python
|
gpl-3.0
| 2,361 | 0.034307 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This python module defines a wrapper for synthesising spectra using TurboSpectrum.
"""
import logging
from numpy import RankWarning
from warnings import simplefilter
from .turbospectrum import TurboSpectrum
from .solar_abundances import solar_abundances
__version__ = "20190301.1"
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) # TODO: Remove this when stable.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)-8s] %(message)s"))
logger.addHandler(handler)
simplefilter("ignore", RankWarning)
simplefilter("ignore", RuntimeWarning)
|
dcf21/4most-4gp
|
src/pythonModules/fourgp_specsynth/fourgp_specsynth/__init__.py
|
Python
|
mit
| 666 | 0.001502 |
# -*- coding: utf-8 -*-
"""py.test fixtures for kuma.wiki.tests."""
import base64
import json
from collections import namedtuple
from datetime import datetime
import pytest
from django.contrib.auth.models import Permission
from waffle.testutils import override_flag
from ..models import Document, DocumentDeletionLog, Revision
BannedUser = namedtuple('BannedUser', 'user ban')
Contributors = namedtuple('Contributors', 'valid banned inactive')
DocWithContributors = namedtuple('DocWithContributors', 'doc contributors')
DocHierarchy = namedtuple('DocHierarchy', 'top middle_top middle_bottom bottom')
KumaScriptToolbox = namedtuple(
'KumaScriptToolbox',
'errors errors_as_headers macros_response'
)
@pytest.fixture
def inactive_wiki_user(db, django_user_model):
"""An inactive test user."""
return django_user_model.objects.create(
is_active=False,
username='wiki_user_slacker',
email='wiki_user_slacker@example.com',
date_joined=datetime(2017, 4, 19, 10, 58))
@pytest.fixture
def banned_wiki_user(db, django_user_model, wiki_user):
"""A banned test user."""
user = django_user_model.objects.create(
username='bad_wiki_user',
email='bad_wiki_user@example.com',
date_joined=datetime(2017, 4, 18, 9, 15)
)
ban = user.bans.create(by=wiki_user, reason='because')
return BannedUser(user=user, ban=ban)
@pytest.fixture
def wiki_moderator(db, django_user_model):
"""A user with moderator permissions."""
moderator = django_user_model.objects.create(
username='moderator',
email='moderator@example.com',
date_joined=datetime(2018, 8, 21, 18, 19))
moderator.user_permissions.add(
Permission.objects.get(codename='purge_document'),
Permission.objects.get(codename='delete_document'),
Permission.objects.get(codename='restore_document')
)
return moderator
@pytest.fixture
def moderator_client(client, wiki_moderator):
"""A test client with wiki_moderator logged in."""
wiki_moderator.set_password('password')
wiki_moderator.save()
client.login(username=wiki_moderator.username, password='password')
with override_flag('kumaediting', True):
yield client
@pytest.fixture
def edit_revision(root_doc, wiki_user):
"""A revision that edits an English document."""
root_doc.current_revision = Revision.objects.create(
document=root_doc,
creator=wiki_user,
content='<p>The root document.</p>',
comment='Done with initial version.',
created=datetime(2017, 4, 14, 12, 30))
root_doc.save()
return root_doc.current_revision
@pytest.fixture
def trans_revision(trans_doc):
return trans_doc.current_revision
@pytest.fixture
def trans_edit_revision(trans_doc, edit_revision, wiki_user):
"""A further edit to the translated document."""
trans_doc.current_revision = Revision.objects.create(
document=trans_doc,
creator=wiki_user,
based_on=edit_revision,
content='<p>Le document racine.</p>',
title='Racine du Document',
created=datetime(2017, 4, 14, 20, 25))
trans_doc.save()
return trans_doc.current_revision
@pytest.fixture
def deleted_doc(wiki_moderator):
"""A recently deleted but unpurged document."""
deleted_doc = Document.objects.create(
locale='en-US', slug='Doomed', title='Doomed Document')
Revision.objects.create(
document=deleted_doc,
creator=wiki_moderator,
content='<p>This document is doomed...</p>',
title='Doomed Document',
created=datetime(2018, 8, 21, 17, 3))
deleted_doc.delete()
DocumentDeletionLog.objects.create(
user=wiki_moderator,
reason="Deleted doomed document",
locale='en-US',
slug='Doomed')
DocumentDeletionLog.objects.filter(user=wiki_moderator).update(
timestamp=datetime(2018, 8, 21, 17, 22))
return deleted_doc
@pytest.fixture
def doc_hierarchy(wiki_user, wiki_user_2, wiki_user_3):
top_doc = Document.objects.create(
locale='en-US',
slug='top',
title='Top Document'
)
Revision.objects.create(
document=top_doc,
creator=wiki_user,
content='<p>Top...</p>',
title='Top Document',
created=datetime(2017, 4, 24, 13, 49)
)
top_de_doc = Document.objects.create(
locale='de',
slug='oben',
title='Oben Dokument',
rendered_html='<p>Oben...</p>',
parent=top_doc
)
Revision.objects.create(
document=top_de_doc,
creator=wiki_user_2,
based_on=top_doc.current_revision,
content='<p>Oben...</p>',
title='Oben Dokument',
created=datetime(2017, 4, 30, 10, 3)
)
top_fr_doc = Document.objects.create(
locale='fr',
slug='haut',
title='Haut Document',
rendered_html='<p>Haut...</p>',
parent=top_doc
)
Revision.objects.create(
document=top_fr_doc,
creator=wiki_user_3,
based_on=top_doc.current_revision,
content='<p>Haut...</p>',
title='Haut Document',
is_approved=True,
created=datetime(2017, 4, 30, 12, 1)
)
top_it_doc = Document.objects.create(
locale='it',
slug='superiore',
title='Superiore Documento',
rendered_html='<p>Superiore...</p>',
parent=top_doc
)
Revision.objects.create(
document=top_it_doc,
creator=wiki_user_2,
based_on=top_doc.current_revision,
content='<p>Superiore...</p>',
title='Superiore Documento',
created=datetime(2017, 4, 30, 11, 17)
)
middle_top_doc = Document.objects.create(
locale='en-US',
slug='top/middle-top',
title='Middle-Top Document',
parent_topic=top_doc
)
Revision.objects.create(
document=middle_top_doc,
creator=wiki_user,
content='<p>Middle-Top...</p>',
title='Middle-Top Document',
created=datetime(2017, 4, 24, 13, 50)
)
middle_bottom_doc = Document.objects.create(
locale='en-US',
slug='top/middle-top/middle-bottom',
title='Middle-Bottom Document',
parent_topic=middle_top_doc
)
Revision.objects.create(
document=middle_bottom_doc,
creator=wiki_user,
content='<p>Middle-Bottom...</p>',
title='Middle-Bottom Document',
created=datetime(2017, 4, 24, 13, 51)
)
bottom_doc = Document.objects.create(
locale='en-US',
slug='top/middle-top/middle-bottom/bottom',
title='Bottom Document',
parent_topic=middle_bottom_doc
)
Revision.objects.create(
document=bottom_doc,
creator=wiki_user,
content='<p>Bottom...</p><div id="Quick_Links"><p>sidebar</p></div>',
title='Bottom Document',
created=datetime(2017, 4, 24, 13, 52)
)
return DocHierarchy(
top=top_doc,
middle_top=middle_top_doc,
middle_bottom=middle_bottom_doc,
bottom=bottom_doc,
)
@pytest.fixture
def root_doc_with_mixed_contributors(root_doc, wiki_user, wiki_user_2,
inactive_wiki_user, banned_wiki_user):
"""
A top-level English document with mixed contributors (some are valid,
some are banned, and some are inactive).
"""
root_doc.current_revision = Revision.objects.create(
document=root_doc,
creator=wiki_user_2,
content='<p>The root document.</p>',
comment='Done with the initial version.',
created=datetime(2017, 4, 17, 12, 35))
root_doc.save()
root_doc.current_revision = Revision.objects.create(
document=root_doc,
creator=inactive_wiki_user,
content='<p>The root document re-envisioned.</p>',
comment='Done with the second revision.',
created=datetime(2017, 4, 18, 10, 15))
root_doc.save()
root_doc.current_revision = Revision.objects.create(
document=root_doc,
creator=banned_wiki_user.user,
content='<p>The root document re-envisioned with malice.</p>',
comment='Nuke the previous revision.',
created=datetime(2017, 4, 19, 10, 15))
root_doc.save()
return DocWithContributors(
doc=root_doc,
contributors=Contributors(
valid=[wiki_user_2, wiki_user],
banned=banned_wiki_user,
inactive=inactive_wiki_user
)
)
@pytest.fixture
def ks_toolbox():
errors = {
"logs": [
{"level": "debug",
"message": "Message #1",
"args": ['TestError', {},
{'name': 'SomeMacro',
'token': {'args': 'arguments here'}}],
"time": "12:32:03 GMT-0400 (EDT)",
"timestamp": "1331829123101000"},
{"level": "warning",
"message": "Error: unable to load: SomeMacro2",
"args": ['TestError', {}, {'name': 'SomeMacro2'}],
"time": "12:33:58 GMT-0400 (EDT)",
"timestamp": "1331829238052000"},
{"level": "error",
"message": "Syntax error at line 88...",
"args": [
'DocumentParsingError',
'Syntax error at line 88...',
{'error': {'line': 88, 'column': 65}}
],
"time": "12:33:59 GMT-0400 (EDT)",
"timestamp": "1331829238053000"},
{"level": "info",
"message": "Message #3",
"args": ['TestError'],
"time": "12:34:22 GMT-0400 (EDT)",
"timestamp": "1331829262403000"},
{"level": "debug",
"message": "Message #4",
"time": "12:32:03 GMT-0400 (EDT)",
"timestamp": "1331829123101000"},
{"level": "warning",
"message": "Message #5",
"time": "12:33:58 GMT-0400 (EDT)",
"timestamp": "1331829238052000"},
{"level": "info",
"message": "Message #6",
"time": "12:34:22 GMT-0400 (EDT)",
"timestamp": "1331829262403000"},
]
}
d_json = json.dumps(errors)
d_b64 = base64.encodestring(d_json.encode('utf-8'))
d_lines = [x for x in d_b64.split("\n") if x]
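# FireLogger transports its JSON payload base64-encoded and split across
# numbered HTTP headers; the loop below rebuilds that header set.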
# Headers are case-insensitive, so let's drive that point home.
p = ['firelogger', 'FIRELOGGER', 'FireLogger']
fl_uid = 8675309
errors_as_headers = {}
for i in range(0, len(d_lines)):
errors_as_headers['%s-%s-%s' % (p[i % len(p)], fl_uid, i)] = d_lines[i]
macros_response = {
'json': {
'loader': 'FileLoader',
'can_list_macros': True,
'macros': [
{
'name': 'SomeMacro',
'filename': 'SomeMacro.ejs'
},
{
'name': 'SomeMacro2',
'filename': 'SomeMacro2.ejs'
}
]
},
'headers': {
'Content-Type': 'application/json; charset=utf-8'
}
}
return KumaScriptToolbox(errors, errors_as_headers, macros_response)
|
SphinxKnight/kuma
|
kuma/wiki/tests/conftest.py
|
Python
|
mpl-2.0
| 11,242 | 0.000089 |
import numpy as np
from bokeh.plotting import figure, show, output_file
from bokeh.models import TapTool
xx, yy = np.meshgrid(range(0,101,4), range(0,101,4))
x = xx.flatten()
y = yy.flatten()
N = len(x)
inds = [str(i) for i in np.arange(N)]
radii = np.random.random(size=N)*0.4 + 1.7
colors = [
"#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
]
TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,tap,save"
p = figure(title="Tappy Scatter", tools=TOOLS)
cr = p.circle(x, y, radius=radii,
fill_color=colors, fill_alpha=0.6, line_color=None)
tr = p.text(x, y, text=inds, alpha=0.5, text_font_size="5pt",
text_baseline="middle", text_align="center")
# in the browser console, you will see messages when circles are clicked
# restrict hit testing to the circle renderer so taps on the text labels
# are ignored
p.select_one(TapTool).renderers = [cr]
output_file("tap.html", title="tap.py example")
show(p) # open a browser
|
percyfal/bokeh
|
examples/plotting/file/tap.py
|
Python
|
bsd-3-clause
| 898 | 0.005568 |
import ssadata
# how many boys names are also girls names?
# implementation details:
# find all names in the boys data set that are
# also keys in the girls data set.
num_shared_names = 0
for name in ssadata.boys:
if name in ssadata.girls:
num_shared_names = num_shared_names + 1
print(str(num_shared_names) + " names out of " + str(len(ssadata.boys)) + " are shared.")
num_shared_names = 0
for name in ssadata.girls:
if name in ssadata.boys:
num_shared_names = num_shared_names + 1
print(str(num_shared_names) + " names out of " + str(len(ssadata.girls)) + " are shared.")
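# A minimal alternative sketch (assuming ssadata.boys and ssadata.girls are
# dict-like, as above): membership-testing one key set against the other is
# equivalent to a set intersection, which expresses the count directly.
num_shared_names_via_sets = len(set(ssadata.boys) & set(ssadata.girls))
print(str(num_shared_names_via_sets) + " names are shared (set intersection).")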
|
guyrt/teaching
|
2017/Com597I/babynames/problem8.py
|
Python
|
mit
| 607 | 0.003295 |
from __future__ import unicode_literals
import httplib
import wac
class BalancedError(Exception):
def __str__(self):
attrs = ', '.join([
'{0}={1}'.format(k, repr(v))
for k, v in self.__dict__.iteritems()
])
return '{0}({1})'.format(self.__class__.__name__, attrs)
class ResourceError(BalancedError):
pass
class NoResultFound(BalancedError):
pass
class MultipleResultsFound(BalancedError):
pass
class FundingSourceNotCreditable(Exception):
pass
def convert_error(ex):
if not hasattr(ex.response, 'data'):
return ex
return HTTPError.from_response(**ex.response.data)(ex)
class HTTPError(BalancedError, wac.Error):
class __metaclass__(type):
def __new__(meta_cls, name, bases, dikt):
cls = type.__new__(meta_cls, name, bases, dikt)
cls.types = [
getattr(cls, k)
for k in dir(cls)
if k.isupper() and isinstance(getattr(cls, k), basestring)
]
cls.type_to_error.update(zip(cls.types, [cls] * len(cls.types)))
return cls
def __init__(self, requests_ex):
super(wac.Error, self).__init__(requests_ex)
self.status_code = requests_ex.response.status_code
data = getattr(requests_ex.response, 'data', {})
for k, v in data.get('errors', [{}])[0].iteritems():
setattr(self, k, v)
@classmethod
def format_message(cls, requests_ex):
data = getattr(requests_ex.response, 'data', {})
status = httplib.responses[requests_ex.response.status_code]
error = data['errors'][0]
status = error.pop('status', status)
status_code = error.pop('status_code',
requests_ex.response.status_code)
desc = error.pop('description', None)
message = ': '.join(str(v) for v in [status, status_code, desc] if v)
return message
@classmethod
def from_response(cls, **data):
try:
err = data['errors'][0]
exc = cls.type_to_error.get(err['category_code'], HTTPError)
        except Exception:
exc = HTTPError
return exc
type_to_error = {}
class FundingInstrumentVerificationFailure(HTTPError):
pass
class BankAccountVerificationFailure(FundingInstrumentVerificationFailure):
AUTH_NOT_PENDING = 'bank-account-authentication-not-pending'
AUTH_FAILED = 'bank-account-authentication-failed'
AUTH_DUPLICATED = 'bank-account-authentication-already-exists'
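# Usage sketch (illustrative only): the __metaclass__ above registers every
# upper-case string attribute as a category code in HTTPError.type_to_error,
# so from_response can resolve an error payload to a concrete class, e.g.:
#   cls = HTTPError.from_response(
#       errors=[{'category_code': BankAccountVerificationFailure.AUTH_FAILED}])
#   assert cls is BankAccountVerificationFailure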
|
balanced/balanced-python
|
balanced/exc.py
|
Python
|
mit
| 2,557 | 0.000391 |
#! /usr/bin/env python
class myclass:
"zx class"
i = 11111
def f(self):
return 'hw'
def pi(self):
print "pi i={0}".format(self.i)
def si(self, v):
self.i = v
def pp(self):
print 'pp',
self.pi()
x = myclass()
x.f()
x.pi()
x.si(9)
x.pi()
x.pp()
print '============================'
class newclass(myclass):
j = 2222
def pj(self):
print "j={0}".format(self.j)
def sj(self, v):
self.j = v
    # overrides the base-class method of the same name; Python does not
    # check argument compatibility, so a mismatched signature only fails
    # at call time
def pi(self):
print 'new pi {}'.format(self.i)
y = newclass()
y.f()
y.pi()
y.pj()
y.sj(9)
y.pj()
y.pp()
myclass.pi(y)
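# note: myclass is an old-style (classic) Python 2 class, so super() is
# unavailable here; calling the base implementation explicitly, as
# myclass.pi(y) does above, is the portable way to reach an overridden method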
print "========================="
z=myclass()
z.pp()
|
jencce/stuff
|
py/class.py
|
Python
|
gpl-2.0
| 693 | 0.049062 |
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import glob
from copy import deepcopy
import warnings
import itertools as itt
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne.datasets import testing
from mne.io.constants import FIFF
from mne.io import Raw, concatenate_raws, read_raw_fif
from mne.io.tests.test_raw import _test_concat
from mne import (concatenate_events, find_events, equalize_channels,
compute_proj_raw, pick_types, pick_channels)
from mne.utils import (_TempDir, requires_pandas, slow_test,
requires_mne, run_subprocess, run_tests_if_main)
from mne.externals.six.moves import zip, cPickle as pickle
from mne.io.proc_history import _get_sss_rank
from mne.io.pick import _picks_by_type
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
bad_file_works = op.join(base_dir, 'test_bads.txt')
bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
@slow_test
def test_concat():
"""Test RawFIF concatenation"""
_test_concat(read_raw_fif, test_fif_fname)
@testing.requires_testing_data
def test_hash_raw():
"""Test hashing raw objects
"""
raw = read_raw_fif(fif_fname)
assert_raises(RuntimeError, raw.__hash__)
raw = Raw(fif_fname).crop(0, 0.5, False)
raw.preload_data()
raw_2 = Raw(fif_fname).crop(0, 0.5, False)
raw_2.preload_data()
assert_equal(hash(raw), hash(raw_2))
    # do NOT use assert_equal here, failing output is terrible
    assert_true(pickle.dumps(raw) == pickle.dumps(raw_2))
raw_2._data[0, 0] -= 1
assert_not_equal(hash(raw), hash(raw_2))
@testing.requires_testing_data
def test_subject_info():
"""Test reading subject information
"""
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 1, False)
assert_true(raw.info['subject_info'] is None)
# fake some subject data
keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
'hand']
vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
subject_info = dict()
for key, val in zip(keys, vals):
subject_info[key] = val
raw.info['subject_info'] = subject_info
out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
raw.save(out_fname, overwrite=True)
raw_read = Raw(out_fname)
for key in keys:
assert_equal(subject_info[key], raw_read.info['subject_info'][key])
raw_read.anonymize()
assert_true(raw_read.info.get('subject_info') is None)
out_fname_anon = op.join(tempdir, 'test_subj_info_anon_raw.fif')
raw_read.save(out_fname_anon, overwrite=True)
raw_read = Raw(out_fname_anon)
assert_true(raw_read.info.get('subject_info') is None)
@testing.requires_testing_data
def test_copy_append():
"""Test raw copying and appending combinations
"""
raw = Raw(fif_fname, preload=True).copy()
raw_full = Raw(fif_fname)
raw_full.append(raw)
data = raw_full[:, :][0]
assert_equal(data.shape[1], 2 * raw._data.shape[1])
@slow_test
@testing.requires_testing_data
def test_rank_estimation():
"""Test raw rank estimation
"""
iter_tests = itt.product(
[fif_fname, hp_fif_fname], # sss
['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
)
for fname, scalings in iter_tests:
raw = Raw(fname)
(_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
meg_combined=True)
n_meg = len(picks_meg)
n_eeg = len(picks_eeg)
raw = Raw(fname, preload=True)
if 'proc_history' not in raw.info:
expected_rank = n_meg + n_eeg
else:
mf = raw.info['proc_history'][0]['max_info']
expected_rank = _get_sss_rank(mf) + n_eeg
assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
assert_array_equal(raw.estimate_rank(picks=picks_eeg,
scalings=scalings),
n_eeg)
raw = Raw(fname, preload=False)
if 'sss' in fname:
tstart, tstop = 0., 30.
raw.add_proj(compute_proj_raw(raw))
raw.apply_proj()
else:
tstart, tstop = 10., 20.
raw.apply_proj()
n_proj = len(raw.info['projs'])
assert_array_equal(raw.estimate_rank(tstart=tstart, tstop=tstop,
scalings=scalings),
expected_rank - (1 if 'sss' in fname else n_proj))
@testing.requires_testing_data
def test_output_formats():
"""Test saving and loading raw data using multiple formats
"""
tempdir = _TempDir()
formats = ['short', 'int', 'single', 'double']
tols = [1e-4, 1e-7, 1e-7, 1e-15]
# let's fake a raw file with different formats
raw = Raw(test_fif_fname).crop(0, 1, copy=False)
temp_file = op.join(tempdir, 'raw.fif')
for ii, (fmt, tol) in enumerate(zip(formats, tols)):
# Let's test the overwriting error throwing while we're at it
if ii > 0:
assert_raises(IOError, raw.save, temp_file, fmt=fmt)
raw.save(temp_file, fmt=fmt, overwrite=True)
raw2 = Raw(temp_file)
raw2_data = raw2[:, :][0]
assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
assert_equal(raw2.orig_format, fmt)
def _compare_combo(raw, new, times, n_times):
for ti in times: # let's do a subset of points for speed
orig = raw[:, ti % n_times][0]
# these are almost_equals because of possible dtype differences
assert_allclose(orig, new[:, ti][0])
@slow_test
@testing.requires_testing_data
def test_multiple_files():
"""Test loading multiple files simultaneously
"""
# split file
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 10, False)
raw.preload_data()
raw.preload_data() # test no operation
split_size = 3. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp)
tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
tmaxs /= sfreq
tmins /= sfreq
assert_equal(raw.n_times, len(raw.times))
    # going in reverse order so the last fname is the first file (we need it
    # later)
raws = [None] * len(tmins)
for ri in range(len(tmins) - 1, -1, -1):
fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
raws[ri] = Raw(fname)
events = [find_events(r, stim_channel='STI 014') for r in raws]
last_samps = [r.last_samp for r in raws]
first_samps = [r.first_samp for r in raws]
# test concatenation of split file
assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
all_raw_1, events1 = concatenate_raws(raws, preload=False,
events_list=events)
assert_equal(raw.first_samp, all_raw_1.first_samp)
assert_equal(raw.last_samp, all_raw_1.last_samp)
assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
raws[0] = Raw(fname)
all_raw_2 = concatenate_raws(raws, preload=True)
assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
# test proper event treatment for split files
events2 = concatenate_events(events, first_samps, last_samps)
events3 = find_events(all_raw_2, stim_channel='STI 014')
assert_array_equal(events1, events2)
assert_array_equal(events1, events3)
# test various methods of combining files
raw = Raw(fif_fname, preload=True)
n_times = raw.n_times
# make sure that all our data match
times = list(range(0, 2 * n_times, 999))
# add potentially problematic points
times.extend([n_times - 1, n_times, 2 * n_times - 1])
raw_combo0 = Raw([fif_fname, fif_fname], preload=True)
_compare_combo(raw, raw_combo0, times, n_times)
raw_combo = Raw([fif_fname, fif_fname], preload=False)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = Raw([fif_fname, fif_fname], preload='memmap8.dat')
_compare_combo(raw, raw_combo, times, n_times)
assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)
# with all data preloaded, result should be preloaded
raw_combo = Raw(fif_fname, preload=True)
raw_combo.append(Raw(fif_fname, preload=True))
assert_true(raw_combo.preload is True)
assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
_compare_combo(raw, raw_combo, times, n_times)
# with any data not preloaded, don't set result as preloaded
raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
Raw(fif_fname, preload=False)])
assert_true(raw_combo.preload is False)
assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
find_events(raw_combo0, stim_channel='STI 014'))
_compare_combo(raw, raw_combo, times, n_times)
# user should be able to force data to be preloaded upon concat
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=True)],
preload=True)
assert_true(raw_combo.preload is True)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=True)],
preload='memmap3.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
Raw(fif_fname, preload=True)],
preload='memmap4.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=False)],
preload='memmap5.dat')
_compare_combo(raw, raw_combo, times, n_times)
# verify that combining raws with different projectors throws an exception
raw.add_proj([], remove_existing=True)
assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
# now test event treatment for concatenated raw files
events = [find_events(raw, stim_channel='STI 014'),
find_events(raw, stim_channel='STI 014')]
last_samps = [raw.last_samp, raw.last_samp]
first_samps = [raw.first_samp, raw.first_samp]
events = concatenate_events(events, first_samps, last_samps)
events2 = find_events(raw_combo0, stim_channel='STI 014')
assert_array_equal(events, events2)
# check out the len method
assert_equal(len(raw), raw.n_times)
assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
@testing.requires_testing_data
def test_split_files():
"""Test writing and reading of split raw files
"""
tempdir = _TempDir()
raw_1 = Raw(fif_fname, preload=True)
split_fname = op.join(tempdir, 'split_raw.fif')
raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
raw_2 = Raw(split_fname)
data_1, times_1 = raw_1[:, :]
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
# test the case where the silly user specifies the split files
fnames = [split_fname]
fnames.extend(sorted(glob.glob(op.join(tempdir, 'split_raw-*.fif'))))
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
raw_2 = Raw(fnames)
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
def test_load_bad_channels():
"""Test reading/writing of bad channels
"""
tempdir = _TempDir()
# Load correctly marked file (manually done in mne_process_raw)
raw_marked = Raw(fif_bad_marked_fname)
correct_bads = raw_marked.info['bads']
raw = Raw(test_fif_fname)
# Make sure it starts clean
assert_array_equal(raw.info['bads'], [])
# Test normal case
raw.load_bad_channels(bad_file_works)
# Write it out, read it in, and check
raw.save(op.join(tempdir, 'foo_raw.fif'))
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal(correct_bads, raw_new.info['bads'])
# Reset it
raw.info['bads'] = []
# Test bad case
assert_raises(ValueError, raw.load_bad_channels, bad_file_wrong)
# Test forcing the bad case
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.load_bad_channels(bad_file_wrong, force=True)
n_found = sum(['1 bad channel' in str(ww.message) for ww in w])
assert_equal(n_found, 1) # there could be other irrelevant errors
# write it out, read it in, and check
raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal(correct_bads, raw_new.info['bads'])
# Check that bad channels are cleared
raw.load_bad_channels(None)
raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal([], raw_new.info['bads'])
@slow_test
@testing.requires_testing_data
def test_io_raw():
"""Test IO for raw data (Neuromag + CTF + gz)
"""
tempdir = _TempDir()
# test unicode io
for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
with Raw(fif_fname) as r:
assert_true('Raw' in repr(r))
desc1 = r.info['description'] = chars.decode('utf-8')
temp_file = op.join(tempdir, 'raw.fif')
r.save(temp_file, overwrite=True)
with Raw(temp_file) as r2:
desc2 = r2.info['description']
assert_equal(desc1, desc2)
# Let's construct a simple test for IO first
raw = Raw(fif_fname).crop(0, 3.5, False)
raw.preload_data()
# put in some data that we know the values of
data = np.random.randn(raw._data.shape[0], raw._data.shape[1])
raw._data[:, :] = data
# save it somewhere
fname = op.join(tempdir, 'test_copy_raw.fif')
raw.save(fname, buffer_size_sec=1.0)
# read it in, make sure the whole thing matches
raw = Raw(fname)
assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
# let's read portions across the 1-sec tag boundary, too
inds = raw.time_as_index([1.75, 2.25])
sl = slice(inds[0], inds[1])
assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
# now let's do some real I/O
fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
for fname_in, fname_out in zip(fnames_in, fnames_out):
fname_out = op.join(tempdir, fname_out)
raw = Raw(fname_in)
nchan = raw.info['nchan']
ch_names = raw.info['ch_names']
meg_channels_idx = [k for k in range(nchan)
if ch_names[k][0] == 'M']
n_channels = 100
meg_channels_idx = meg_channels_idx[:n_channels]
start, stop = raw.time_as_index([0, 5])
data, times = raw[meg_channels_idx, start:(stop + 1)]
meg_ch_names = [ch_names[k] for k in meg_channels_idx]
# Set up pick list: MEG + STI 014 - bad channels
include = ['STI 014']
include += meg_ch_names
picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
misc=True, ref_meg=True, include=include,
exclude='bads')
# Writing with drop_small_buffer True
raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
drop_small_buffer=True, overwrite=True)
raw2 = Raw(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_true(times2.max() <= 3)
# Writing
raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_equal(len(raw.info['dig']), 146)
raw2 = Raw(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
assert_allclose(times, times2)
assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
# check transformations
for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
if raw.info[trans] is None:
assert_true(raw2.info[trans] is None)
else:
assert_array_equal(raw.info[trans]['trans'],
raw2.info[trans]['trans'])
# check transformation 'from' and 'to'
if trans.startswith('dev'):
from_id = FIFF.FIFFV_COORD_DEVICE
else:
from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
if trans[4:8] == 'head':
to_id = FIFF.FIFFV_COORD_HEAD
else:
to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
for raw_ in [raw, raw2]:
assert_equal(raw_.info[trans]['from'], from_id)
assert_equal(raw_.info[trans]['to'], to_id)
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
raw.save(raw_badname)
Raw(raw_badname)
assert_true(len(w) > 0) # len(w) should be 2 but Travis sometimes has more
@testing.requires_testing_data
def test_io_complex():
"""Test IO with complex data types
"""
tempdir = _TempDir()
dtypes = [np.complex64, np.complex128]
raw = Raw(fif_fname, preload=True)
picks = np.arange(5)
start, stop = raw.time_as_index([0, 5])
data_orig, _ = raw[picks, start:stop]
for di, dtype in enumerate(dtypes):
imag_rand = np.array(1j * np.random.randn(data_orig.shape[0],
data_orig.shape[1]), dtype)
raw_cp = raw.copy()
raw_cp._data = np.array(raw_cp._data, dtype)
raw_cp._data[picks, start:stop] += imag_rand
# this should throw an error because it's complex
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5,
overwrite=True)
            # warning gets thrown on every instance b/c simplefilter('always')
assert_equal(len(w), 1)
raw2 = Raw(op.join(tempdir, 'raw.fif'))
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
# with preloading
raw2 = Raw(op.join(tempdir, 'raw.fif'), preload=True)
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
@testing.requires_testing_data
def test_getitem():
"""Test getitem/indexing of Raw
"""
for preload in [False, True, 'memmap.dat']:
raw = Raw(fif_fname, preload=preload)
data, times = raw[0, :]
data1, times1 = raw[0]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data1, times1 = raw[[0, 1]]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
@testing.requires_testing_data
def test_proj():
"""Test SSP proj operations
"""
tempdir = _TempDir()
for proj in [True, False]:
raw = Raw(fif_fname, preload=False, proj=proj)
assert_true(all(p['active'] == proj for p in raw.info['projs']))
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
# test adding / deleting proj
if proj:
assert_raises(ValueError, raw.add_proj, [],
{'remove_existing': True})
assert_raises(ValueError, raw.del_proj, 0)
else:
projs = deepcopy(raw.info['projs'])
n_proj = len(raw.info['projs'])
raw.del_proj(0)
assert_equal(len(raw.info['projs']), n_proj - 1)
raw.add_proj(projs, remove_existing=False)
assert_equal(len(raw.info['projs']), 2 * n_proj - 1)
raw.add_proj(projs, remove_existing=True)
assert_equal(len(raw.info['projs']), n_proj)
# test apply_proj() with and without preload
for preload in [True, False]:
raw = Raw(fif_fname, preload=preload, proj=False)
data, times = raw[:, 0:2]
raw.apply_proj()
data_proj_1 = np.dot(raw._projector, data)
# load the file again without proj
raw = Raw(fif_fname, preload=preload, proj=False)
# write the file with proj. activated, make sure proj has been applied
raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
raw2 = Raw(op.join(tempdir, 'raw.fif'), proj=False)
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_true(all(p['active'] for p in raw2.info['projs']))
# read orig file with proj. active
raw2 = Raw(fif_fname, preload=preload, proj=True)
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_true(all(p['active'] for p in raw2.info['projs']))
# test that apply_proj works
raw.apply_proj()
data_proj_2, _ = raw[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
tempdir = _TempDir()
out_fname = op.join(tempdir, 'test_raw.fif')
raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002, copy=False)
raw.pick_types(meg=False, eeg=True)
raw.info['projs'] = [raw.info['projs'][-1]]
raw._data.fill(0)
raw._data[-1] = 1.
raw.save(out_fname)
raw = read_raw_fif(out_fname, proj=True, preload=False)
assert_allclose(raw[:, :][0][:1], raw[0, :][0])
@testing.requires_testing_data
def test_preload_modify():
"""Test preloading and modifying data
"""
tempdir = _TempDir()
for preload in [False, True, 'memmap.dat']:
raw = Raw(fif_fname, preload=preload)
nsamp = raw.last_samp - raw.first_samp + 1
picks = pick_types(raw.info, meg='grad', exclude='bads')
data = np.random.randn(len(picks), nsamp // 2)
try:
raw[picks, :nsamp // 2] = data
except RuntimeError as err:
if not preload:
continue
else:
raise err
tmp_fname = op.join(tempdir, 'raw.fif')
raw.save(tmp_fname, overwrite=True)
raw_new = Raw(tmp_fname)
        data_new, _ = raw_new[picks, :nsamp // 2]
assert_allclose(data, data_new)
@slow_test
@testing.requires_testing_data
def test_filter():
"""Test filtering (FIR and IIR) and Raw.apply_function interface
"""
raw = Raw(fif_fname).crop(0, 7, False)
raw.preload_data()
sig_dec = 11
sig_dec_notch = 12
sig_dec_notch_fit = 12
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
raw_lp = raw.copy()
raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
raw_hp = raw.copy()
raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
raw_bp = raw.copy()
raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
raw_bs = raw.copy()
raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
data, _ = raw[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
raw_lp_iir = raw.copy()
raw_lp_iir.filter(0., 4.0, picks=picks, n_jobs=2, method='iir')
raw_hp_iir = raw.copy()
raw_hp_iir.filter(8.0, None, picks=picks, n_jobs=2, method='iir')
raw_bp_iir = raw.copy()
raw_bp_iir.filter(4.0, 8.0, picks=picks, method='iir')
lp_data_iir, _ = raw_lp_iir[picks, :]
hp_data_iir, _ = raw_hp_iir[picks, :]
bp_data_iir, _ = raw_bp_iir[picks, :]
summation = lp_data_iir + hp_data_iir + bp_data_iir
assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100],
sig_dec)
# make sure we didn't touch other channels
data, _ = raw[picks_meg[4:], :]
bp_data, _ = raw_bp[picks_meg[4:], :]
assert_array_equal(data, bp_data)
bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
assert_array_equal(data, bp_data_iir)
# do a very simple check on line filtering
raw_bs = raw.copy()
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
raw_bs.filter(60.0 + 0.5, 60.0 - 0.5, picks=picks, n_jobs=2)
data_bs, _ = raw_bs[picks, :]
raw_notch = raw.copy()
raw_notch.notch_filter(60.0, picks=picks, n_jobs=2, method='fft')
data_notch, _ = raw_notch[picks, :]
assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
# now use the sinusoidal fitting
raw_notch = raw.copy()
raw_notch.notch_filter(None, picks=picks, n_jobs=2, method='spectrum_fit')
data_notch, _ = raw_notch[picks, :]
data, _ = raw[picks, :]
assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
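# Note on the identity exercised by test_filter above (an explanatory sketch):
# with matched transition bands, the low-pass (<4 Hz), band-pass (4-8 Hz) and
# high-pass (>8 Hz) outputs partition the spectrum, so lp + bp + hp should
# reconstruct the original data, as should bp + bs (band-stop), up to the
# decimal tolerance sig_dec.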
@testing.requires_testing_data
def test_crop():
"""Test cropping raw files
"""
# split a concatenated file to test a difficult case
raw = Raw([fif_fname, fif_fname], preload=False)
split_size = 10. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp + 1)
# do an annoying case (off-by-one splitting)
tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
tmins = np.sort(tmins)
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.crop(tmin, tmax, True)
all_raw_2 = concatenate_raws(raws, preload=False)
assert_equal(raw.first_samp, all_raw_2.first_samp)
assert_equal(raw.last_samp, all_raw_2.last_samp)
assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
    # going in reverse order so the last fname is the first file (need it later)
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.copy()
raws[ri].crop(tmin, tmax, False)
# test concatenation of split file
all_raw_1 = concatenate_raws(raws, preload=False)
all_raw_2 = raw.crop(0, None, True)
for ar in [all_raw_1, all_raw_2]:
assert_equal(raw.first_samp, ar.first_samp)
assert_equal(raw.last_samp, ar.last_samp)
assert_array_equal(raw[:, :][0], ar[:, :][0])
@testing.requires_testing_data
def test_resample():
"""Test resample (with I/O and multiple files)
"""
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 3, False)
raw.preload_data()
raw_resamp = raw.copy()
sfreq = raw.info['sfreq']
# test parallel on upsample
raw_resamp.resample(sfreq * 2, n_jobs=2)
assert_equal(raw_resamp.n_times, len(raw_resamp.times))
raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
assert_equal(raw.n_times, raw_resamp.n_times / 2)
assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
# test non-parallel on downsample
raw_resamp.resample(sfreq, n_jobs=1)
assert_equal(raw_resamp.info['sfreq'], sfreq)
assert_equal(raw._data.shape, raw_resamp._data.shape)
assert_equal(raw.first_samp, raw_resamp.first_samp)
    assert_equal(raw.last_samp, raw_resamp.last_samp)
# upsampling then downsampling doubles resampling error, but this still
# works (hooray). Note that the stim channels had to be sub-sampled
# without filtering to be accurately preserved
# note we have to treat MEG and EEG+STIM channels differently (tols)
assert_allclose(raw._data[:306, 200:-200],
raw_resamp._data[:306, 200:-200],
rtol=1e-2, atol=1e-12)
assert_allclose(raw._data[306:, 200:-200],
raw_resamp._data[306:, 200:-200],
rtol=1e-2, atol=1e-7)
# now check multiple file support w/resampling, as order of operations
# (concat, resample) should not affect our data
raw1 = raw.copy()
raw2 = raw.copy()
raw3 = raw.copy()
raw4 = raw.copy()
raw1 = concatenate_raws([raw1, raw2])
raw1.resample(10)
raw3.resample(10)
raw4.resample(10)
raw3 = concatenate_raws([raw3, raw4])
assert_array_equal(raw1._data, raw3._data)
assert_array_equal(raw1._first_samps, raw3._first_samps)
assert_array_equal(raw1._last_samps, raw3._last_samps)
assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
assert_equal(raw1.first_samp, raw3.first_samp)
assert_equal(raw1.last_samp, raw3.last_samp)
assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
@testing.requires_testing_data
def test_hilbert():
"""Test computation of analytic signal using hilbert
"""
raw = Raw(fif_fname, preload=True)
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
raw2 = raw.copy()
raw.apply_hilbert(picks)
raw2.apply_hilbert(picks, envelope=True, n_jobs=2)
env = np.abs(raw._data[picks, :])
assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
@testing.requires_testing_data
def test_raw_copy():
"""Test Raw copy
"""
raw = Raw(fif_fname, preload=True)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert_equal(sorted(raw.__dict__.keys()),
sorted(copied.__dict__.keys()))
raw = Raw(fif_fname, preload=False)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert_equal(sorted(raw.__dict__.keys()),
sorted(copied.__dict__.keys()))
@requires_pandas
def test_to_data_frame():
"""Test raw Pandas exporter"""
raw = Raw(test_fif_fname, preload=True)
_, times = raw[0, :10]
df = raw.to_data_frame()
assert_true((df.columns == raw.ch_names).all())
assert_array_equal(np.round(times * 1e3), df.index.values[:10])
df = raw.to_data_frame(index=None)
assert_true('time' in df.index.names)
assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
assert_array_equal(df.values[:, 2], raw._data[2] * 1e15)
@testing.requires_testing_data
def test_raw_index_as_time():
""" Test index as time conversion"""
raw = Raw(fif_fname, preload=True)
t0 = raw.index_as_time([0], True)[0]
t1 = raw.index_as_time([100], False)[0]
t2 = raw.index_as_time([100], True)[0]
assert_equal(t2 - t1, t0)
# ensure we can go back and forth
t3 = raw.index_as_time(raw.time_as_index([0], True), True)
assert_array_almost_equal(t3, [0.0], 2)
t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], True), True)
assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], False), False)
assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
i0 = raw.time_as_index(raw.index_as_time([0], True), True)
assert_equal(i0[0], 0)
i1 = raw.time_as_index(raw.index_as_time([100], True), True)
assert_equal(i1[0], 100)
# Have to add small amount of time because we truncate via int casting
i1 = raw.time_as_index(raw.index_as_time([100.0001], False), False)
assert_equal(i1[0], 100)
@testing.requires_testing_data
def test_raw_time_as_index():
""" Test time as index conversion"""
raw = Raw(fif_fname, preload=True)
first_samp = raw.time_as_index([0], True)[0]
assert_equal(raw.first_samp, -first_samp)
@testing.requires_testing_data
def test_save():
""" Test saving raw"""
tempdir = _TempDir()
raw = Raw(fif_fname, preload=False)
# can't write over file being read
assert_raises(ValueError, raw.save, fif_fname)
raw = Raw(fif_fname, preload=True)
# can't overwrite file without overwrite=True
assert_raises(IOError, raw.save, fif_fname)
# test abspath support
new_fname = op.join(op.abspath(op.curdir), 'break-raw.fif')
raw.save(op.join(tempdir, new_fname), overwrite=True)
new_raw = Raw(op.join(tempdir, new_fname), preload=False)
assert_raises(ValueError, new_raw.save, new_fname)
# make sure we can overwrite the file we loaded when preload=True
new_raw = Raw(op.join(tempdir, new_fname), preload=True)
new_raw.save(op.join(tempdir, new_fname), overwrite=True)
os.remove(new_fname)
@testing.requires_testing_data
def test_with_statement():
""" Test with statement """
for preload in [True, False]:
with Raw(fif_fname, preload=preload) as raw_:
print(raw_)
def test_compensation_raw():
"""Test Raw compensation
"""
tempdir = _TempDir()
raw1 = Raw(ctf_comp_fname, compensation=None)
assert_true(raw1.comp is None)
data1, times1 = raw1[:, :]
raw2 = Raw(ctf_comp_fname, compensation=3)
data2, times2 = raw2[:, :]
assert_true(raw2.comp is None) # unchanged (data come with grade 3)
assert_array_equal(times1, times2)
assert_array_equal(data1, data2)
raw3 = Raw(ctf_comp_fname, compensation=1)
data3, times3 = raw3[:, :]
assert_true(raw3.comp is not None)
assert_array_equal(times1, times3)
# make sure it's different with a different compensation:
assert_true(np.mean(np.abs(data1 - data3)) > 1e-12)
assert_raises(ValueError, Raw, ctf_comp_fname, compensation=33)
# Try IO with compensation
temp_file = op.join(tempdir, 'raw.fif')
raw1.save(temp_file, overwrite=True)
raw4 = Raw(temp_file)
data4, times4 = raw4[:, :]
assert_array_equal(times1, times4)
assert_array_equal(data1, data4)
    # Now save the file that has modified compensation
    # and make sure we get the same data as input, i.e. the
    # compensation is undone
raw3.save(temp_file, overwrite=True)
raw5 = Raw(temp_file)
data5, times5 = raw5[:, :]
assert_array_equal(times1, times5)
assert_allclose(data1, data5, rtol=1e-12, atol=1e-22)
@requires_mne
def test_compensation_raw_mne():
"""Test Raw compensation by comparing with MNE
"""
tempdir = _TempDir()
def compensate_mne(fname, grad):
tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif')
cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
'--grad', str(grad), '--projoff', '--filteroff']
run_subprocess(cmd)
return Raw(tmp_fname, preload=True)
for grad in [0, 2, 3]:
raw_py = Raw(ctf_comp_fname, preload=True, compensation=grad)
raw_c = compensate_mne(ctf_comp_fname, grad)
assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
@testing.requires_testing_data
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw = Raw(fif_fname, preload=True)
drop_ch = raw.ch_names[:3]
ch_names = raw.ch_names[3:]
ch_names_orig = raw.ch_names
dummy = raw.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, raw.ch_names)
assert_equal(len(ch_names_orig), raw._data.shape[0])
raw.drop_channels(drop_ch)
assert_equal(ch_names, raw.ch_names)
assert_equal(len(ch_names), len(raw._cals))
assert_equal(len(ch_names), raw._data.shape[0])
@testing.requires_testing_data
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
# preload is True
raw = Raw(fif_fname, preload=True)
ch_names = raw.ch_names[:3]
ch_names_orig = raw.ch_names
dummy = raw.pick_channels(ch_names, copy=True) # copy is True
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, raw.ch_names)
assert_equal(len(ch_names_orig), raw._data.shape[0])
raw.pick_channels(ch_names, copy=False) # copy is False
assert_equal(ch_names, raw.ch_names)
assert_equal(len(ch_names), len(raw._cals))
assert_equal(len(ch_names), raw._data.shape[0])
raw = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, raw.pick_channels, ch_names)
assert_raises(RuntimeError, raw.drop_channels, ch_names)
@testing.requires_testing_data
def test_equalize_channels():
"""Test equalization of channels
"""
raw1 = Raw(fif_fname, preload=True)
raw2 = raw1.copy()
ch_names = raw1.ch_names[2:]
raw1.drop_channels(raw1.ch_names[:1])
raw2.drop_channels(raw2.ch_names[1:2])
my_comparison = [raw1, raw2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
run_tests_if_main()
|
Odingod/mne-python
|
mne/io/fiff/tests/test_raw.py
|
Python
|
bsd-3-clause
| 38,869 | 0 |
import _surface
import chimera
try:
import chimera.runCommand
except ImportError:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except ImportError:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
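# The repetitive blocks below are machine-generated: each one lazily creates a
# named Marker_Set the first time that name is seen, then places one spherical
# marker from a 3-D position, an RGB color tuple and a radius.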
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((3171.68, 9029.21, -1139.01), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((3173.32, 7641.65, -7.59267), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((3803.4, 8043.31, 1799.07), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2666.72, 9556.45, 456.26), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((2401.45, 10810.3, 1711.38), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((3826.97, 10319.9, 3558.07), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((5267.4, 10161.5, 4525.44), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((4933.12, 10729, 4046.72), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((6683.05, 9391.65, 5497.93), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((8208.82, 10014.2, 5535.15), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((8867.56, 8468.95, 6393.47), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((8619.61, 7593.49, 5646.83), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((9013.94, 6288.27, 4803.55), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((8872.4, 7041.74, 3460.97), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((10190.2, 5903.46, 2028.9), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((9719.44, 3094.41, 731.581), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((8077.39, 2602.05, 1658.32), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((8868.48, 2528.27, 2892.63), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((9165.75, 3921.61, 3841.11), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((10301.5, 4486.01, 4688.54), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((8522.51, 5837.01, 5636.16), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((9493.49, 4172.69, 4953.76), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((9034.89, 3774.53, 5847.71), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((8777.35, 2523.89, 5440.65), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((8573.36, 2540.82, 4012.57), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((8998.9, 1643.92, 2683.61), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((8990.55, 2742.59, 3735.47), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((8309.5, 4779.28, 4133.88), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((8469.04, 4310.3, 5552.9), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((8405.08, 5264.39, 6396.31), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((8116.19, 4748.65, 6198.76), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((8458.16, 6381.13, 5980.73), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((9198.38, 4960.14, 6673.69), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((8369.35, 3853.39, 6088.2), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((8954.58, 3356.37, 5028.9), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((9942.46, 2604.05, 4445.91), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((8717.21, 4559.17, 5253.99), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((9086.66, 2734.02, 4935.49), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((8554.69, 3256.13, 5181.72), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((9247.67, 3166.59, 6786.78), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((8475.56, 4449.18, 7763.97), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((8596.68, 5576.75, 9330.28), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((8343.23, 4423.75, 11822.2), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((7645.23, 6310.08, 11514.1), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((7651.25, 5112.44, 10427.6), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((7252.38, 5829.06, 8673.58), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((5822.24, 4545.28, 9000.62), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((7049.07, 3628.54, 10363), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((5790.73, 4510.11, 9353.05), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((5336.85, 5999.02, 8251.6), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((5443.04, 6367.25, 9673.41), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((5998.57, 6987.2, 8170.17), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((6305.8, 7375.7, 6388.49), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((6187.92, 8875.01, 6747.26), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((5813.94, 8918.86, 7379.07), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((5344.98, 6850.66, 7496.74), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((3547.24, 6649.15, 6261.57), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((1739.99, 4938.35, 5834.09), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((1359.58, 4478.74, 5766.47), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((962.469, 5161.48, 5521.67), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((1623.11, 5203.75, 5999.69), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((908.239, 5438.28, 6510.08), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((2722.36, 5848.41, 6088.2), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((844.664, 6076.2, 6217.11), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((-1152.26, 5619.77, 6199.34), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((-307.851, 5766.97, 7789.24), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((-820.689, 4185.61, 7920.82), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((1117.85, 5548.01, 7239.77), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((-696.022, 5054.33, 6438.05), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((-1176.59, 3758.02, 7301.85), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((185.745, 3054.8, 7200.01), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
batxes/4Cin
|
SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models12702.py
|
Python
|
gpl-3.0
| 17,583 | 0.025081 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SharedPrivateLinkResourcesOperations(object):
"""SharedPrivateLinkResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.search.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
search_service_name, # type: str
shared_private_link_resource_name, # type: str
shared_private_link_resource, # type: "_models.SharedPrivateLinkResource"
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> Optional["_models.SharedPrivateLinkResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SharedPrivateLinkResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_client_request_id = None
if search_management_request_options is not None:
_client_request_id = search_management_request_options.client_request_id
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'sharedPrivateLinkResourceName': self._serialize.url("shared_private_link_resource_name", shared_private_link_resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", _client_request_id, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(shared_private_link_resource, 'SharedPrivateLinkResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SharedPrivateLinkResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
search_service_name, # type: str
shared_private_link_resource_name, # type: str
shared_private_link_resource, # type: "_models.SharedPrivateLinkResource"
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SharedPrivateLinkResource"]
"""Initiates the creation or update of a shared private link resource managed by the search
service in the given resource group.
:param resource_group_name: The name of the resource group within the current subscription. You
can obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Cognitive Search service associated with the
specified resource group.
:type search_service_name: str
:param shared_private_link_resource_name: The name of the shared private link resource managed
by the Azure Cognitive Search service within the specified resource group.
:type shared_private_link_resource_name: str
:param shared_private_link_resource: The definition of the shared private link resource to
create or update.
:type shared_private_link_resource: ~azure.mgmt.search.models.SharedPrivateLinkResource
:param search_management_request_options: Parameter group.
:type search_management_request_options: ~azure.mgmt.search.models.SearchManagementRequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SharedPrivateLinkResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.search.models.SharedPrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedPrivateLinkResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
search_service_name=search_service_name,
shared_private_link_resource_name=shared_private_link_resource_name,
shared_private_link_resource=shared_private_link_resource,
search_management_request_options=search_management_request_options,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SharedPrivateLinkResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'sharedPrivateLinkResourceName': self._serialize.url("shared_private_link_resource_name", shared_private_link_resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}'} # type: ignore
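# A minimal usage sketch (not generated code), assuming a configured
# SearchManagementClient named `client`; all names below are illustrative:
#
#     poller = client.shared_private_link_resources.begin_create_or_update(
#         resource_group_name='my-rg',
#         search_service_name='my-search-service',
#         shared_private_link_resource_name='my-spl',
#         shared_private_link_resource=spl_resource,
#     )
#     resource = poller.result()  # blocks until the LRO completes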
def get(
self,
resource_group_name, # type: str
search_service_name, # type: str
shared_private_link_resource_name, # type: str
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> "_models.SharedPrivateLinkResource"
"""Gets the details of the shared private link resource managed by the search service in the given
resource group.
:param resource_group_name: The name of the resource group within the current subscription. You
can obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Cognitive Search service associated with the
specified resource group.
:type search_service_name: str
:param shared_private_link_resource_name: The name of the shared private link resource managed
by the Azure Cognitive Search service within the specified resource group.
:type shared_private_link_resource_name: str
:param search_management_request_options: Parameter group.
:type search_management_request_options: ~azure.mgmt.search.models.SearchManagementRequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedPrivateLinkResource, or the result of cls(response)
:rtype: ~azure.mgmt.search.models.SharedPrivateLinkResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedPrivateLinkResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_client_request_id = None
if search_management_request_options is not None:
_client_request_id = search_management_request_options.client_request_id
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'sharedPrivateLinkResourceName': self._serialize.url("shared_private_link_resource_name", shared_private_link_resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", _client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedPrivateLinkResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}'} # type: ignore
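# A minimal usage sketch for the synchronous read path, with the same
# illustrative `client` as above:
#
#     resource = client.shared_private_link_resources.get(
#         resource_group_name='my-rg',
#         search_service_name='my-search-service',
#         shared_private_link_resource_name='my-spl',
#     )
#     print(resource.name)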
def _delete_initial(
self,
resource_group_name, # type: str
search_service_name, # type: str
shared_private_link_resource_name, # type: str
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_client_request_id = None
if search_management_request_options is not None:
_client_request_id = search_management_request_options.client_request_id
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'sharedPrivateLinkResourceName': self._serialize.url("shared_private_link_resource_name", shared_private_link_resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", _client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
search_service_name, # type: str
shared_private_link_resource_name, # type: str
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Initiates the deletion of the shared private link resource from the search service.
:param resource_group_name: The name of the resource group within the current subscription. You
can obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Cognitive Search service associated with the
specified resource group.
:type search_service_name: str
:param shared_private_link_resource_name: The name of the shared private link resource managed
by the Azure Cognitive Search service within the specified resource group.
:type shared_private_link_resource_name: str
:param search_management_request_options: Parameter group.
:type search_management_request_options: ~azure.mgmt.search.models.SearchManagementRequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
search_service_name=search_service_name,
shared_private_link_resource_name=shared_private_link_resource_name,
search_management_request_options=search_management_request_options,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'sharedPrivateLinkResourceName': self._serialize.url("shared_private_link_resource_name", shared_private_link_resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}'} # type: ignore
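# A minimal usage sketch for deletion; the poller resolves to None on
# success (illustrative `client` as above):
#
#     poller = client.shared_private_link_resources.begin_delete(
#         resource_group_name='my-rg',
#         search_service_name='my-search-service',
#         shared_private_link_resource_name='my-spl',
#     )
#     poller.wait()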
def list_by_service(
self,
resource_group_name, # type: str
search_service_name, # type: str
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SharedPrivateLinkResourceListResult"]
"""Gets a list of all shared private link resources managed by the given service.
:param resource_group_name: The name of the resource group within the current subscription. You
can obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Cognitive Search service associated with the
specified resource group.
:type search_service_name: str
:param search_management_request_options: Parameter group.
:type search_management_request_options: ~azure.mgmt.search.models.SearchManagementRequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either SharedPrivateLinkResourceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.search.models.SharedPrivateLinkResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedPrivateLinkResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_client_request_id = None
if search_management_request_options is not None:
_client_request_id = search_management_request_options.client_request_id
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", _client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_service.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SharedPrivateLinkResourceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/sharedPrivateLinkResources'} # type: ignore
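# A minimal usage sketch for listing; the returned ItemPaged follows the
# next_link continuation transparently (illustrative `client` as above):
#
#     pages = client.shared_private_link_resources.list_by_service(
#         resource_group_name='my-rg',
#         search_service_name='my-search-service',
#     )
#     for resource in pages:
#         print(resource.name)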
|
Azure/azure-sdk-for-python
|
sdk/search/azure-mgmt-search/azure/mgmt/search/operations/_shared_private_link_resources_operations.py
|
Python
|
mit
| 27,117 | 0.00579 |
#!/usr/bin/env python
import rospy
from art_msgs.msg import ObjInstance, InstancesArray
import time
import sys
import random
from tf import transformations
from math import pi
def a2q(q, arr):
q.x = arr[0]
q.y = arr[1]
q.z = arr[2]
q.w = arr[3]
class FakeDetector:
def __init__(self, obj_id, frame_id, pos, rpy, noise):
self.object_publisher = rospy.Publisher('/art/object_detector/object',
InstancesArray, queue_size=10, latch=True)
self.frame_id = frame_id
self.pos = pos
self.noise = noise
self.obj = ObjInstance()
self.obj.object_id = obj_id
self.obj.object_type = "fake_object_type"
angles = list(rpy)
for idx in range(0, len(angles)):
angles[idx] = angles[idx] / 360.0 * 2 * pi
# TODO apply noise also to orientation
a2q(self.obj.pose.orientation, transformations.quaternion_from_euler(*angles))
self.timer = rospy.Timer(rospy.Duration(0.1), self.timer_callback)
def timer_callback(self, evt):
ia = InstancesArray()
ia.header.stamp = rospy.Time.now()
ia.header.frame_id = self.frame_id
self.obj.pose.position.x = self.pos[0] + random.uniform(-self.noise, self.noise)
self.obj.pose.position.y = self.pos[1] + random.uniform(-self.noise, self.noise)
self.obj.pose.position.z = self.pos[2] + random.uniform(-self.noise, self.noise)
ia.instances = [self.obj]
self.object_publisher.publish(ia)
if __name__ == '__main__':
rospy.init_node('fake_detector', anonymous=True)
try:
pos = (float(sys.argv[3]), float(sys.argv[4]), float(sys.argv[5]))
rpy = (float(sys.argv[6]), float(sys.argv[7]), float(sys.argv[8]))
FakeDetector(sys.argv[1], sys.argv[2], pos, rpy, float(sys.argv[9]))
rospy.spin()
except rospy.ROSInterruptException:
pass
except IndexError:
print("Arguments: obj_id frame_id x y z r p y noise")
|
robofit/artable
|
art_simple_tracker/scripts/fake_detector.py
|
Python
|
lgpl-2.1
| 2,036 | 0.002456 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Testing and testing-related modules.
'''
|
vagonbar/GNUnetwork
|
gwn/utils/testing/__init__.py
|
Python
|
gpl-3.0
| 90 | 0.011111 |
# -*- coding: utf-8 -*-
#
from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import RetrieveAPIView, ListAPIView
from django.shortcuts import get_object_or_404
from django.db.models import Q
from common.utils import get_logger, get_object_or_none
from common.mixins.api import SuggestionMixin
from users.models import User, UserGroup
from users.serializers import UserSerializer, UserGroupSerializer
from users.filters import UserFilter
from perms.models import AssetPermission
from perms.serializers import AssetPermissionSerializer
from perms.filters import AssetPermissionFilter
from orgs.mixins.api import OrgBulkModelViewSet
from orgs.mixins import generics
from assets.api import FilterAssetByNodeMixin
from ..models import Asset, Node, Platform
from .. import serializers
from ..tasks import (
update_assets_hardware_info_manual, test_assets_connectivity_manual,
test_system_users_connectivity_a_asset, push_system_users_a_asset
)
from ..filters import FilterAssetByNodeFilterBackend, LabelFilterBackend, IpInFilterBackend
logger = get_logger(__file__)
__all__ = [
'AssetViewSet', 'AssetPlatformRetrieveApi',
'AssetGatewayListApi', 'AssetPlatformViewSet',
'AssetTaskCreateApi', 'AssetsTaskCreateApi',
'AssetPermUserListApi', 'AssetPermUserPermissionsListApi',
'AssetPermUserGroupListApi', 'AssetPermUserGroupPermissionsListApi',
]
class AssetViewSet(SuggestionMixin, FilterAssetByNodeMixin, OrgBulkModelViewSet):
"""
API endpoint that allows Asset to be viewed or edited.
"""
model = Asset
filterset_fields = {
'hostname': ['exact'],
'ip': ['exact'],
'system_users__id': ['exact'],
'platform__base': ['exact'],
'is_active': ['exact'],
'protocols': ['exact', 'icontains']
}
search_fields = ("hostname", "ip")
ordering_fields = ("hostname", "ip", "port", "cpu_cores")
ordering = ('hostname', )
serializer_classes = {
'default': serializers.AssetSerializer,
'suggestion': serializers.MiniAssetSerializer
}
rbac_perms = {
'match': 'assets.match_asset'
}
extra_filter_backends = [FilterAssetByNodeFilterBackend, LabelFilterBackend, IpInFilterBackend]
def set_assets_node(self, assets):
if not isinstance(assets, list):
assets = [assets]
node_id = self.request.query_params.get('node_id')
if not node_id:
return
node = get_object_or_none(Node, pk=node_id)
if not node:
return
node.assets.add(*assets)
def perform_create(self, serializer):
assets = serializer.save()
self.set_assets_node(assets)
class AssetPlatformRetrieveApi(RetrieveAPIView):
queryset = Platform.objects.all()
serializer_class = serializers.PlatformSerializer
rbac_perms = {
'retrieve': 'assets.view_gateway'
}
def get_object(self):
asset_pk = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_pk)
return asset.platform
class AssetPlatformViewSet(ModelViewSet):
queryset = Platform.objects.all()
serializer_class = serializers.PlatformSerializer
filterset_fields = ['name', 'base']
search_fields = ['name']
def check_object_permissions(self, request, obj):
if request.method.lower() in ['delete', 'put', 'patch'] and obj.internal:
self.permission_denied(
request, message={"detail": "Internal platform"}
)
return super().check_object_permissions(request, obj)
class AssetsTaskMixin:
def perform_assets_task(self, serializer):
data = serializer.validated_data
action = data['action']
assets = data.get('assets', [])
if action == "refresh":
task = update_assets_hardware_info_manual.delay(assets)
else:
# action == 'test':
task = test_assets_connectivity_manual.delay(assets)
return task
def perform_create(self, serializer):
task = self.perform_assets_task(serializer)
self.set_task_to_serializer_data(serializer, task)
def set_task_to_serializer_data(self, serializer, task):
data = getattr(serializer, '_data', {})
data["task"] = task.id
setattr(serializer, '_data', data)
class AssetTaskCreateApi(AssetsTaskMixin, generics.CreateAPIView):
model = Asset
serializer_class = serializers.AssetTaskSerializer
def create(self, request, *args, **kwargs):
pk = self.kwargs.get('pk')
request.data['asset'] = pk
request.data['assets'] = [pk]
return super().create(request, *args, **kwargs)
def check_permissions(self, request):
action = request.data.get('action')
action_perm_require = {
'refresh': 'assets.refresh_assethardwareinfo',
'push_system_user': 'assets.push_assetsystemuser',
'test': 'assets.test_assetconnectivity',
'test_system_user': 'assets.test_assetconnectivity'
}
perm_required = action_perm_require.get(action)
has = self.request.user.has_perm(perm_required)
if not has:
self.permission_denied(request)
def perform_asset_task(self, serializer):
data = serializer.validated_data
action = data['action']
if action not in ['push_system_user', 'test_system_user']:
return
asset = data['asset']
system_users = data.get('system_users')
if not system_users:
system_users = asset.get_all_system_users()
if action == 'push_system_user':
task = push_system_users_a_asset.delay(system_users, asset=asset)
elif action == 'test_system_user':
task = test_system_users_connectivity_a_asset.delay(system_users, asset=asset)
else:
task = None
return task
def perform_create(self, serializer):
task = self.perform_asset_task(serializer)
if not task:
task = self.perform_assets_task(serializer)
self.set_task_to_serializer_data(serializer, task)
class AssetsTaskCreateApi(AssetsTaskMixin, generics.CreateAPIView):
model = Asset
serializer_class = serializers.AssetsTaskSerializer
class AssetGatewayListApi(generics.ListAPIView):
serializer_class = serializers.GatewayWithAuthSerializer
rbac_perms = {
'list': 'assets.view_gateway'
}
def get_queryset(self):
asset_id = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_id)
if not asset.domain:
return []
queryset = asset.domain.gateways.filter(protocol='ssh')
return queryset
class BaseAssetPermUserOrUserGroupListApi(ListAPIView):
def get_object(self):
asset_id = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_id)
return asset
def get_asset_related_perms(self):
asset = self.get_object()
nodes = asset.get_all_nodes(flat=True)
perms = AssetPermission.objects.filter(Q(assets=asset) | Q(nodes__in=nodes))
return perms
class AssetPermUserListApi(BaseAssetPermUserOrUserGroupListApi):
filterset_class = UserFilter
search_fields = ('username', 'email', 'name', 'id', 'source', 'role')
serializer_class = UserSerializer
def get_queryset(self):
perms = self.get_asset_related_perms()
users = User.objects.filter(
Q(assetpermissions__in=perms) | Q(groups__assetpermissions__in=perms)
).distinct()
return users
class AssetPermUserGroupListApi(BaseAssetPermUserOrUserGroupListApi):
serializer_class = UserGroupSerializer
def get_queryset(self):
perms = self.get_asset_related_perms()
user_groups = UserGroup.objects.filter(assetpermissions__in=perms).distinct()
return user_groups
class BaseAssetPermUserOrUserGroupPermissionsListApiMixin(generics.ListAPIView):
model = AssetPermission
serializer_class = AssetPermissionSerializer
filterset_class = AssetPermissionFilter
search_fields = ('name',)
rbac_perms = {
'list': 'perms.view_assetpermission'
}
def get_object(self):
asset_id = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_id)
return asset
def filter_asset_related(self, queryset):
asset = self.get_object()
nodes = asset.get_all_nodes(flat=True)
perms = queryset.filter(Q(assets=asset) | Q(nodes__in=nodes))
return perms
def filter_queryset(self, queryset):
queryset = super().filter_queryset(queryset)
queryset = self.filter_asset_related(queryset)
return queryset
class AssetPermUserPermissionsListApi(BaseAssetPermUserOrUserGroupPermissionsListApiMixin):
def filter_queryset(self, queryset):
queryset = super().filter_queryset(queryset)
queryset = self.filter_user_related(queryset)
queryset = queryset.distinct()
return queryset
def filter_user_related(self, queryset):
user = self.get_perm_user()
user_groups = user.groups.all()
perms = queryset.filter(Q(users=user) | Q(user_groups__in=user_groups))
return perms
def get_perm_user(self):
user_id = self.kwargs.get('perm_user_id')
user = get_object_or_404(User, pk=user_id)
return user
class AssetPermUserGroupPermissionsListApi(BaseAssetPermUserOrUserGroupPermissionsListApiMixin):
def filter_queryset(self, queryset):
queryset = super().filter_queryset(queryset)
queryset = self.filter_user_group_related(queryset)
queryset = queryset.distinct()
return queryset
def filter_user_group_related(self, queryset):
user_group = self.get_perm_user_group()
perms = queryset.filter(user_groups=user_group)
return perms
def get_perm_user_group(self):
user_group_id = self.kwargs.get('perm_user_group_id')
user_group = get_object_or_404(UserGroup, pk=user_group_id)
return user_group
|
jumpserver/jumpserver
|
apps/assets/api/asset.py
|
Python
|
gpl-3.0
| 10,108 | 0.001187 |
from elasticsearch import Elasticsearch
from django.conf import settings
def get_es_client(silent=False):
"""
Returns the elasticsearch client which uses the configuration file
"""
es_client = Elasticsearch([settings.ELASTIC_SEARCH_HOST],
scheme='http',
port=9200,
http_compress=True)
# test if it works
if not silent and not es_client.cat.health(request_timeout=30):
raise ValueError('Credentials do not work for Elastic search')
return es_client
def get_index_config(lang):
"""
Returns the elasticsearch index configuration.
Configures the analysers based on the language passed in.
"""
return {
"settings": {
"index": {
"number_of_shards": 1,
"number_of_replicas": 0
}
},
'mappings': {
'_doc': {
'properties': {
'title': {
'type': 'text',
'analyzer': settings.ELASTIC_SEARCH_ANALYSERS[lang]
},
'content': {
'type': 'text',
'analyzer': settings.ELASTIC_SEARCH_ANALYSERS[lang]
},
'url': {'type': 'text'},
'title_plain': {'type': 'text'},
'content_plain': {'type': 'text'},
'author': {
'type': 'keyword'
},
'source': {
'type': 'keyword'
},
'argument_score': {
'type': 'float'
}
}
}
}
}
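# A minimal sketch of how the two helpers compose, assuming an 'en' entry
# exists in settings.ELASTIC_SEARCH_ANALYSERS; the index name is illustrative:
#
#     es = get_es_client()
#     es.indices.create(index='discourse-en', body=get_index_config('en'))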
|
fako/datascope
|
src/online_discourse/elastic.py
|
Python
|
gpl-3.0
| 1,813 | 0 |
from superset import tables_cache
from flask import request
def view_cache_key(*unused_args, **unused_kwargs):
args_hash = hash(frozenset(request.args.items()))
return 'view/{}/{}'.format(request.path, args_hash)
def memoized_func(timeout=5 * 60, key=view_cache_key):
"""Use this decorator to cache functions that have predefined first arg.
memoized_func uses simple_cache and stores the data in memory.
Key is a callable function that takes function arguments and
returns the caching key.
"""
def wrap(f):
if tables_cache:
def wrapped_f(cls, *args, **kwargs):
cache_key = key(*args, **kwargs)
o = tables_cache.get(cache_key)
if not kwargs.get('force') and o is not None:
return o
o = f(cls, *args, **kwargs)
tables_cache.set(cache_key, o, timeout=timeout)
return o
else:
# noop
def wrapped_f(cls, *args, **kwargs):
return f(cls, *args, **kwargs)
return wrapped_f
return wrap
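# A minimal usage sketch, assuming tables_cache is configured; the wrapped
# function must accept a `force` keyword to bypass the cache, and the names
# below are illustrative:
#
#     class Database(object):
#         @memoized_func(timeout=60, key=lambda *args, **kwargs: 'db:tables')
#         def get_table_names(self, force=False):
#             return expensive_lookup()
#
#     Database().get_table_names(force=True)  # recompute and refresh the cache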
|
asdf2014/superset
|
superset/cache_util.py
|
Python
|
apache-2.0
| 1,105 | 0 |
import os
import shutil
import hashlib
from collections import namedtuple, defaultdict
from calaldees.files.scan import fast_scan
ProcessedFileType = namedtuple('ProcessedFileType', ('source_hash_group', 'dict_key', 'attachment_type', 'ext', 'salt'))
class ProcessedFilesManager(object):
FILE_TYPES = (
ProcessedFileType('media', 'image1', 'image', 'jpg', ''),
ProcessedFileType('media', 'image2', 'image', 'jpg', ''),
ProcessedFileType('media', 'image3', 'image', 'jpg', ''),
ProcessedFileType('media', 'image4', 'image', 'jpg', ''),
ProcessedFileType('media', 'video', 'video', 'mp4', ''),
ProcessedFileType('media', 'preview', 'preview', 'mp4', ''),
ProcessedFileType('data', 'srt', 'srt', 'srt', ''),
ProcessedFileType('data', 'tags', 'tags', 'txt', ''),
)
FILE_TYPE_LOOKUP = {
processed_file_type.attachment_type: processed_file_type
for processed_file_type in FILE_TYPES
}
def __init__(self, path):
self.path = path
def get_processed_files(self, hash_dict):
if not hash_dict:
return {}
return {
file_type.dict_key: ProcessedFile(
self.path,
(hash_dict[file_type.source_hash_group], file_type.dict_key, file_type.salt),
file_type
)
for file_type in self.FILE_TYPES
}
@property
def scan(self):
return fast_scan(self.path)
class ProcessedFile(object):
def __init__(self, path, hashs, processed_file_type):
self.hash = gen_string_hash(hashs)
self.processed_file_type = processed_file_type
self.path = path
@property
def ext(self):
return self.processed_file_type.ext
@property
def attachment_type(self):
return self.processed_file_type.attachment_type
@property
def folder(self):
return self.hash[0]
@property
def relative(self):
return os.path.join(self.folder, '{}.{}'.format(self.hash, self.ext))
@property
def absolute(self):
return os.path.abspath(os.path.join(self.path, self.relative))
def _create_folders_if_needed(self):
os.makedirs(os.path.join(self.path, self.folder), exist_ok=True)
def move(self, source_file):
"""
It is important that 'move' is used rather than opening a stream to the
absolute path directly.
The remote destination could be 'scp' or another remote service.
Always using move allows for this abstraction at a later date.
"""
self._create_folders_if_needed()
shutil.move(source_file, self.absolute)
def copy(self, source_file):
self._create_folders_if_needed()
shutil.copy2(source_file, self.absolute)
@property
def exists(self):
return os.path.exists(self.absolute)
def gen_string_hash(hashs):
if isinstance(hashs, str):
hash_str = hashs
else:
hasher = hashlib.sha256()
hasher.update(''.join(sorted(hashs)).encode('utf-8'))
hash_str = hasher.hexdigest()
return hash_str
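# A minimal end-to-end sketch; the root path and source hashes are
# illustrative. Each attachment is stored at <hash[0]>/<hash>.<ext>
# under the managed root:
#
#     manager = ProcessedFilesManager('/srv/processed')
#     files = manager.get_processed_files({'media': 'abc123', 'data': 'def456'})
#     files['video'].move('/tmp/encoded.mp4')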
|
calaldees/KaraKara
|
processmedia2/processmedia_libs/processed_files_manager.py
|
Python
|
gpl-3.0
| 3,137 | 0.000638 |
import hashlib
import hmac
def hmac_sha256_digest(key, msg):
"""
Return the HMAC-SHA256 message authentication code of the message
'msg' with key 'key'.
"""
return hmac.new(key, msg, hashlib.sha256).digest()
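# A quick sanity-check sketch; both arguments must be bytes and the result
# is always a 32-byte digest:
#
#     digest = hmac_sha256_digest(b'secret-key', b'message')
#     assert len(digest) == 32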
|
masterkorp/obfsproxy
|
obfsproxy/common/hmac_sha256.py
|
Python
|
bsd-3-clause
| 230 | 0.004348 |
import pytest
from thefuck.rules.git_remote_seturl_add import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize('command', [
Command('git remote set-url origin url', "fatal: No such remote")])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command('git remote set-url origin url', ""),
Command('git remote add origin url', ''),
Command('git remote remove origin', ''),
Command('git remote prune origin', ''),
Command('git remote set-branches origin branch', '')])
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('git remote set-url origin git@github.com:nvbn/thefuck.git', ''),
'git remote add origin git@github.com:nvbn/thefuck.git')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
|
nvbn/thefuck
|
tests/rules/test_git_remote_seturl_add.py
|
Python
|
mit
| 920 | 0 |
def extractMyTranslations(item):
"""
Parser for 'My Translations'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractMyTranslations.py
|
Python
|
bsd-3-clause
| 355 | 0.028169 |
"""Dev server used for running a chalice app locally.
This is intended only for local development purposes.
"""
from __future__ import print_function
import re
import threading
import time
import uuid
import base64
import functools
import warnings
from collections import namedtuple
import json
from six.moves.BaseHTTPServer import HTTPServer
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler
from six.moves.socketserver import ThreadingMixIn
from typing import (
List,
Any,
Dict,
Tuple,
Callable,
Optional,
Union,
) # noqa
from chalice.app import Chalice # noqa
from chalice.app import CORSConfig # noqa
from chalice.app import ChaliceAuthorizer # noqa
from chalice.app import CognitoUserPoolAuthorizer # noqa
from chalice.app import RouteEntry # noqa
from chalice.app import Request # noqa
from chalice.app import AuthResponse # noqa
from chalice.app import BuiltinAuthConfig # noqa
from chalice.config import Config # noqa
from chalice.compat import urlparse, parse_qs
MatchResult = namedtuple('MatchResult', ['route', 'captured', 'query_params'])
EventType = Dict[str, Any]
ContextType = Dict[str, Any]
HeaderType = Dict[str, Any]
ResponseType = Dict[str, Any]
HandlerCls = Callable[..., 'ChaliceRequestHandler']
ServerCls = Callable[..., 'HTTPServer']
class Clock(object):
def time(self):
# type: () -> float
return time.time()
def create_local_server(app_obj, config, host, port):
# type: (Chalice, Config, str, int) -> LocalDevServer
app_obj.__class__ = LocalChalice
return LocalDevServer(app_obj, config, host, port)
class LocalARNBuilder(object):
ARN_FORMAT = ('arn:aws:execute-api:{region}:{account_id}'
':{api_id}/{stage}/{method}/{resource_path}')
LOCAL_REGION = 'mars-west-1'
LOCAL_ACCOUNT_ID = '123456789012'
LOCAL_API_ID = 'ymy8tbxw7b'
LOCAL_STAGE = 'api'
def build_arn(self, method, path):
# type: (str, str) -> str
# In API Gateway the method and URI are separated by a / so typically
# the uri portion omits the leading /. In the case where the entire
# url is just '/' API Gateway adds a / to the end so that the arn end
# with a '//'.
if path != '/':
path = path[1:]
return self.ARN_FORMAT.format(
region=self.LOCAL_REGION,
account_id=self.LOCAL_ACCOUNT_ID,
api_id=self.LOCAL_API_ID,
stage=self.LOCAL_STAGE,
method=method,
resource_path=path
)
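# For example, with the fixed local placeholders above, a hypothetical
# call would produce:
#
#     build_arn('GET', '/users/123')
#     # -> 'arn:aws:execute-api:mars-west-1:123456789012'
#     #    ':ymy8tbxw7b/api/GET/users/123'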
class ARNMatcher(object):
def __init__(self, target_arn):
# type: (str) -> None
self._arn = target_arn
def _resource_match(self, resource):
# type: (str) -> bool
# ARN matching supports two special-case characters that are not
# escapable: * represents a glob, which translates to a non-greedy
# match of any number of characters, and ? matches any single
# character. These are easy to translate to a regex using .*? and .
# respectively.
escaped_resource = re.escape(resource)
resource_regex = escaped_resource.replace(r'\?', '.').replace(
r'\*', '.*?')
resource_regex = '^%s$' % resource_regex
return re.match(resource_regex, self._arn) is not None
def does_any_resource_match(self, resources):
# type: (List[str]) -> bool
for resource in resources:
if self._resource_match(resource):
return True
return False
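# A small illustration of the glob semantics, with an assumed target ARN:
#
#     matcher = ARNMatcher('arn:aws:execute-api:mars-west-1:123456789012'
#                          ':ymy8tbxw7b/api/GET/users/123')
#     matcher.does_any_resource_match(
#         ['arn:aws:execute-api:*:*:*/api/GET/*'])  # -> True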
class RouteMatcher(object):
def __init__(self, route_urls):
# type: (List[str]) -> None
# Sorting the route_urls ensures we always check
# the concrete routes for a prefix before the
# variable/capture parts of the route, e.g.
# '/foo/bar' before '/foo/{capture}'
self.route_urls = sorted(route_urls)
def match_route(self, url):
# type: (str) -> MatchResult
"""Match the url against known routes.
This method takes a concrete route "/foo/bar", and
matches it against a set of routes. These routes can
use param substitution corresponding to API gateway patterns.
For example::
match_route('/foo/bar') -> '/foo/{name}'
"""
# Otherwise we need to check for param substitution
parsed_url = urlparse(url)
query_params = parse_qs(parsed_url.query, keep_blank_values=True)
path = parsed_url.path
# API Gateway removes the trailing slash if the route is not the root
# path. We do the same here so our route matching works the same way.
if path != '/' and path.endswith('/'):
path = path[:-1]
parts = path.split('/')
captured = {}
for route_url in self.route_urls:
url_parts = route_url.split('/')
if len(parts) == len(url_parts):
for i, j in zip(parts, url_parts):
if j.startswith('{') and j.endswith('}'):
captured[j[1:-1]] = i
continue
if i != j:
break
else:
return MatchResult(route_url, captured, query_params)
raise ValueError("No matching route found for: %s" % url)
class LambdaEventConverter(object):
"""Convert an HTTP request to an event dict used by lambda."""
LOCAL_SOURCE_IP = '127.0.0.1'
def __init__(self, route_matcher, binary_types=None):
# type: (RouteMatcher, List[str]) -> None
self._route_matcher = route_matcher
if binary_types is None:
binary_types = []
self._binary_types = binary_types
def _is_binary(self, headers):
# type: (Dict[str,Any]) -> bool
return headers.get('content-type', '') in self._binary_types
def create_lambda_event(self, method, path, headers, body=None):
# type: (str, str, Dict[str, str], str) -> EventType
view_route = self._route_matcher.match_route(path)
event = {
'requestContext': {
'httpMethod': method,
'resourcePath': view_route.route,
'identity': {
'sourceIp': self.LOCAL_SOURCE_IP
},
'path': path.split('?')[0],
},
'headers': {k.lower(): v for k, v in headers.items()},
'pathParameters': view_route.captured,
'stageVariables': {},
}
if view_route.query_params:
event['multiValueQueryStringParameters'] = view_route.query_params
else:
# If no query parameters are provided, API gateway maps
# this to None so we're doing this for parity.
event['multiValueQueryStringParameters'] = None
if self._is_binary(headers) and body is not None:
event['body'] = base64.b64encode(body).decode('ascii')
event['isBase64Encoded'] = True
else:
event['body'] = body
return event
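# A sketch of the resulting event shape for an assumed GET request,
# presuming '/foo' is a registered route; only the relevant keys are shown:
#
#     create_lambda_event('GET', '/foo?a=1', {'Host': 'localhost'})
#     # -> {'requestContext': {'httpMethod': 'GET', 'resourcePath': '/foo', ...},
#     #     'headers': {'host': 'localhost'},
#     #     'multiValueQueryStringParameters': {'a': ['1']},
#     #     'body': None, ...}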
class LocalGatewayException(Exception):
CODE = 0
def __init__(self, headers, body=None):
# type: (HeaderType, Optional[bytes]) -> None
self.headers = headers
self.body = body
class InvalidAuthorizerError(LocalGatewayException):
CODE = 500
class ForbiddenError(LocalGatewayException):
CODE = 403
class NotAuthorizedError(LocalGatewayException):
CODE = 401
class LambdaContext(object):
def __init__(self, function_name, memory_size,
max_runtime_ms=3000, time_source=None):
# type: (str, int, int, Optional[Clock]) -> None
if time_source is None:
time_source = Clock()
self._time_source = time_source
self._start_time = self._current_time_millis()
self._max_runtime = max_runtime_ms
# Below are properties that are found on the real LambdaContext passed
# by lambda and their associated documentation.
# Name of the Lambda function that is executing.
self.function_name = function_name
# The Lambda function version that is executing. If an alias is used
# to invoke the function, then function_version will be the version
# the alias points to.
# Chalice local obviously does not support versioning so it will always
# be set to $LATEST.
self.function_version = '$LATEST'
# The ARN used to invoke this function. It can be function ARN or
# alias ARN. An unqualified ARN executes the $LATEST version and
# aliases execute the function version it is pointing to.
self.invoked_function_arn = ''
# Memory limit, in MB, you configured for the Lambda function. You set
# the memory limit at the time you create a Lambda function and you
# can change it later.
self.memory_limit_in_mb = memory_size
# AWS request ID associated with the request. This is the ID returned
# to the client that called the invoke method.
self.aws_request_id = str(uuid.uuid4())
# The name of the CloudWatch log group where you can find logs written
# by your Lambda function.
self.log_group_name = ''
# The name of the CloudWatch log stream where you can find logs
# written by your Lambda function. The log stream may or may not
# change for each invocation of the Lambda function.
#
# The value is null if your Lambda function is unable to create a log
# stream, which can happen if the execution role that grants necessary
# permissions to the Lambda function does not include permissions for
# the CloudWatch Logs actions.
self.log_stream_name = ''
# The last two attributes have the following comment in the
# documentation:
# Information about the client application and device when invoked
# through the AWS Mobile SDK, it can be null.
# Chalice local doesn't need to set these since they are specifically
# for the mobile SDK.
self.identity = None
self.client_context = None
def _current_time_millis(self):
# type: () -> float
return self._time_source.time() * 1000
def get_remaining_time_in_millis(self):
# type: () -> float
runtime = self._current_time_millis() - self._start_time
return self._max_runtime - runtime
LocalAuthPair = Tuple[EventType, LambdaContext]
class LocalGatewayAuthorizer(object):
"""A class for running user defined authorizers in local mode."""
def __init__(self, app_object):
# type: (Chalice) -> None
self._app_object = app_object
self._arn_builder = LocalARNBuilder()
def authorize(self, raw_path, lambda_event, lambda_context):
# type: (str, EventType, LambdaContext) -> LocalAuthPair
method = lambda_event['requestContext']['httpMethod']
route_entry = self._route_for_event(lambda_event)
if not route_entry:
return lambda_event, lambda_context
authorizer = route_entry.authorizer
if not authorizer:
return lambda_event, lambda_context
# If authorizer is Cognito then try to parse the JWT and simulate an
# APIGateway validated request
if isinstance(authorizer, CognitoUserPoolAuthorizer):
if "headers" in lambda_event\
and "authorization" in lambda_event["headers"]:
token = lambda_event["headers"]["authorization"]
claims = self._decode_jwt_payload(token)
try:
cognito_username = claims["cognito:username"]
except KeyError:
# If a key error is raised when trying to get the cognito
# username then it is a machine-to-machine communication.
# This kind of cognito authorization flow is not
# supported in local mode. We can ignore it here to allow
# users to test their code locally with a different cognito
# authorization flow.
warnings.warn(
'%s for machine-to-machine communication is not '
'supported in local mode. All requests made against '
'a route will be authorized to allow local testing.'
% authorizer.__class__.__name__
)
return lambda_event, lambda_context
auth_result = {"context": {"claims": claims},
"principalId": cognito_username}
lambda_event = self._update_lambda_event(lambda_event,
auth_result)
if not isinstance(authorizer, ChaliceAuthorizer):
# Currently the only supported local authorizer is the
# BuiltinAuthConfig type. Anything else we will err on the side of
# allowing local testing by simply admitting the request. Otherwise
# there is no way for users to test their code in local mode.
warnings.warn(
'%s is not supported in local mode. All requests made '
'against a route will be authorized to allow local testing.'
% authorizer.__class__.__name__
)
return lambda_event, lambda_context
arn = self._arn_builder.build_arn(method, raw_path)
auth_event = self._prepare_authorizer_event(arn, lambda_event,
lambda_context)
auth_result = authorizer(auth_event, lambda_context)
if auth_result is None:
raise InvalidAuthorizerError(
{'x-amzn-RequestId': lambda_context.aws_request_id,
'x-amzn-ErrorType': 'AuthorizerConfigurationException'},
b'{"message":null}'
)
authed = self._check_can_invoke_view_function(arn, auth_result)
if authed:
lambda_event = self._update_lambda_event(lambda_event, auth_result)
else:
raise ForbiddenError(
{'x-amzn-RequestId': lambda_context.aws_request_id,
'x-amzn-ErrorType': 'AccessDeniedException'},
(b'{"Message": '
b'"User is not authorized to access this resource"}'))
return lambda_event, lambda_context
def _check_can_invoke_view_function(self, arn, auth_result):
# type: (str, ResponseType) -> bool
policy = auth_result.get('policyDocument', {})
statements = policy.get('Statement', [])
allow_resource_statements = []
for statement in statements:
if statement.get('Effect') == 'Allow' and \
statement.get('Action') == 'execute-api:Invoke':
for resource in statement.get('Resource'):
allow_resource_statements.append(resource)
arn_matcher = ARNMatcher(arn)
return arn_matcher.does_any_resource_match(allow_resource_statements)
def _route_for_event(self, lambda_event):
# type: (EventType) -> Optional[RouteEntry]
# Authorizer had to be made into an Any type since mypy couldn't
# detect that app.ChaliceAuthorizer was callable.
resource_path = lambda_event.get(
'requestContext', {}).get('resourcePath')
http_method = lambda_event['requestContext']['httpMethod']
try:
route_entry = self._app_object.routes[resource_path][http_method]
except KeyError:
# If a key error is raised when trying to get the route entry
# then this route does not support this method. A method error
# will be raised by the chalice handler method. We can ignore it
# here by returning no authorizer to avoid duplicating the logic.
return None
return route_entry
def _update_lambda_event(self, lambda_event, auth_result):
# type: (EventType, ResponseType) -> EventType
auth_context = auth_result['context']
auth_context.update({
'principalId': auth_result['principalId']
})
lambda_event['requestContext']['authorizer'] = auth_context
return lambda_event
def _prepare_authorizer_event(self, arn, lambda_event, lambda_context):
# type: (str, EventType, LambdaContext) -> EventType
"""Translate event for an authorizer input."""
authorizer_event = lambda_event.copy()
authorizer_event['type'] = 'TOKEN'
try:
authorizer_event['authorizationToken'] = authorizer_event.get(
'headers', {})['authorization']
except KeyError:
raise NotAuthorizedError(
{'x-amzn-RequestId': lambda_context.aws_request_id,
'x-amzn-ErrorType': 'UnauthorizedException'},
b'{"message":"Unauthorized"}')
authorizer_event['methodArn'] = arn
return authorizer_event
def _decode_jwt_payload(self, jwt):
# type: (str) -> Dict
payload_segment = jwt.split(".", 2)[1]
payload = base64.urlsafe_b64decode(self._base64_pad(payload_segment))
return json.loads(payload)
def _base64_pad(self, value):
# type: (str) -> str
rem = len(value) % 4
if rem > 0:
value += "=" * (4 - rem)
return value
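# For instance, a 10-character payload segment gains two '=' characters so
# that its length becomes a multiple of four:
#
#     _base64_pad('eyJmb28iOg')  # -> 'eyJmb28iOg=='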
class LocalGateway(object):
"""A class for faking the behavior of API Gateway."""
def __init__(self, app_object, config):
# type: (Chalice, Config) -> None
self._app_object = app_object
self._config = config
self.event_converter = LambdaEventConverter(
RouteMatcher(list(app_object.routes)),
self._app_object.api.binary_types
)
self._authorizer = LocalGatewayAuthorizer(app_object)
def _generate_lambda_context(self):
# type: () -> LambdaContext
if self._config.lambda_timeout is None:
timeout = None
else:
timeout = self._config.lambda_timeout * 1000
return LambdaContext(
function_name=self._config.function_name,
memory_size=self._config.lambda_memory_size,
max_runtime_ms=timeout
)
def _generate_lambda_event(self, method, path, headers, body):
# type: (str, str, HeaderType, Optional[str]) -> EventType
lambda_event = self.event_converter.create_lambda_event(
method=method, path=path, headers=headers,
body=body,
)
return lambda_event
def _has_user_defined_options_method(self, lambda_event):
# type: (EventType) -> bool
route_key = lambda_event['requestContext']['resourcePath']
return 'OPTIONS' in self._app_object.routes[route_key]
def handle_request(self, method, path, headers, body):
# type: (str, str, HeaderType, Optional[str]) -> ResponseType
lambda_context = self._generate_lambda_context()
try:
lambda_event = self._generate_lambda_event(
method, path, headers, body)
except ValueError:
# API Gateway will return a different error on route not found
# depending on whether or not we have an authorization token in our
# request. Since we do not do that check until we actually find
# the authorizer that we will call we do not have that information
# available at this point. Instead we just check to see if that
# header is present and change our response if it is. This will
# need to be refactored later if we decide to more closely mirror
# how API Gateway does their auth and routing.
error_headers = {'x-amzn-RequestId': lambda_context.aws_request_id,
'x-amzn-ErrorType': 'UnauthorizedException'}
auth_header = headers.get('authorization')
if auth_header is None:
auth_header = headers.get('Authorization')
if auth_header is not None:
raise ForbiddenError(
error_headers,
(b'{"message": "Authorization header requires '
b'\'Credential\''
b' parameter. Authorization header requires \'Signature\''
b' parameter. Authorization header requires '
b'\'SignedHeaders\' parameter. Authorization header '
b'requires existence of either a \'X-Amz-Date\' or a'
b' \'Date\' header. Authorization=%s"}'
% auth_header.encode('ascii')))
raise ForbiddenError(
error_headers,
b'{"message": "Missing Authentication Token"}')
# This can either be because the user has provided an OPTIONS method
# *or* this is a preflight request, which chalice automatically
# responds to without invoking a user defined route.
if method == 'OPTIONS' and \
not self._has_user_defined_options_method(lambda_event):
# No options route was defined for this path. API Gateway should
# automatically generate our CORS headers.
options_headers = self._autogen_options_headers(lambda_event)
return {
'statusCode': 200,
'headers': options_headers,
'multiValueHeaders': {},
'body': None
}
# The authorizer call will be a noop if there is no authorizer method
# defined for route. Otherwise it will raise a ForbiddenError
# which will be caught by the handler that called this and a 403 or
# 401 will be sent back over the wire.
lambda_event, lambda_context = self._authorizer.authorize(
path, lambda_event, lambda_context)
response = self._app_object(lambda_event, lambda_context)
response = self._handle_binary(response)
return response
def _autogen_options_headers(self, lambda_event):
# type:(EventType) -> HeaderType
route_key = lambda_event['requestContext']['resourcePath']
route_dict = self._app_object.routes[route_key]
route_methods = list(route_dict.keys())
# Chalice ensures that routes with multiple views have the same
# CORS configuration, so if any view has a CORS Config we can use
# that config since they will all be the same.
cors_config = route_dict[route_methods[0]].cors
cors_headers = cors_config.get_access_control_headers()
# We need to add OPTIONS since it is not a part of the CORSConfig
# object. API Gateway handles this entirely based on the API definition.
# So our local version needs to add this manually to our set of allowed
# methods.
route_methods.append('OPTIONS')
# The Access-Control-Allow-Methods header is not added by the
# CORSConfig object; it is added to the API Gateway route during
# deployment, so we need to add that header manually here.
cors_headers.update({
'Access-Control-Allow-Methods': ','.join(route_methods)
})
return cors_headers
def _handle_binary(self, response):
# type: (Dict[str,Any]) -> Dict[str,Any]
if response.get('isBase64Encoded'):
body = base64.b64decode(response['body'])
response['body'] = body
return response
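# Example (illustrative): a response of
#   {'statusCode': 200, 'isBase64Encoded': True, 'body': 'aGVsbG8='}
# leaves this method with response['body'] == b'hello'.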
class ChaliceRequestHandler(BaseHTTPRequestHandler):
"""A class for mapping raw HTTP events to and from LocalGateway."""
protocol_version = 'HTTP/1.1'
def __init__(self, request, client_address, server, app_object, config):
# type: (bytes, Tuple[str, int], HTTPServer, Chalice, Config) -> None
self.local_gateway = LocalGateway(app_object, config)
BaseHTTPRequestHandler.__init__(
self, request, client_address, server) # type: ignore
def _parse_payload(self):
# type: () -> Tuple[HeaderType, Optional[str]]
body = None
content_length = int(self.headers.get('content-length', '0'))
if content_length > 0:
body = self.rfile.read(content_length)
converted_headers = dict(self.headers)
return converted_headers, body
def _generic_handle(self):
# type: () -> None
headers, body = self._parse_payload()
try:
response = self.local_gateway.handle_request(
method=self.command,
path=self.path,
headers=headers,
body=body
)
status_code = response['statusCode']
headers = response['headers'].copy()
headers.update(response['multiValueHeaders'])
body = response['body']
self._send_http_response(status_code, headers, body)
except LocalGatewayException as e:
self._send_error_response(e)
def _send_error_response(self, error):
# type: (LocalGatewayException) -> None
code = error.CODE
headers = error.headers
body = error.body
self._send_http_response(code, headers, body)
def _send_http_response(self, code, headers, body):
# type: (int, HeaderType, Optional[Union[str,bytes]]) -> None
if body is None:
self._send_http_response_no_body(code, headers)
else:
self._send_http_response_with_body(code, headers, body)
def _send_http_response_with_body(self, code, headers, body):
# type: (int, HeaderType, Union[str,bytes]) -> None
self.send_response(code)
if not isinstance(body, bytes):
body = body.encode('utf-8')
self.send_header('Content-Length', str(len(body)))
content_type = headers.pop(
'Content-Type', 'application/json')
self.send_header('Content-Type', content_type)
self._send_headers(headers)
self.wfile.write(body)
do_GET = do_PUT = do_POST = do_HEAD = do_DELETE = do_PATCH = do_OPTIONS = \
_generic_handle
def _send_http_response_no_body(self, code, headers):
# type: (int, HeaderType) -> None
headers['Content-Length'] = '0'
self.send_response(code)
self._send_headers(headers)
def _send_headers(self, headers):
# type: (HeaderType) -> None
for header_name, header_value in headers.items():
if isinstance(header_value, list):
for value in header_value:
self.send_header(header_name, value)
else:
self.send_header(header_name, header_value)
self.end_headers()
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Threading mixin to better support browsers.
When a browser sends a GET request to Chalice it keeps the connection open
for reuse. In the single-threaded model this causes Chalice local to become
unresponsive to all clients other than that browser socket. Even sending a
header requesting that the client close the connection is not good enough;
the browser will simply open another one and sit on it.
"""
daemon_threads = True
class LocalDevServer(object):
def __init__(self, app_object, config, host, port,
handler_cls=ChaliceRequestHandler,
server_cls=ThreadedHTTPServer):
# type: (Chalice, Config, str, int, HandlerCls, ServerCls) -> None
self.app_object = app_object
self.host = host
self.port = port
self._wrapped_handler = functools.partial(
handler_cls, app_object=app_object, config=config)
self.server = server_cls((host, port), self._wrapped_handler)
def handle_single_request(self):
# type: () -> None
self.server.handle_request()
def serve_forever(self):
# type: () -> None
print("Serving on http://%s:%s" % (self.host, self.port))
self.server.serve_forever()
def shutdown(self):
# type: () -> None
# This must be called from another thread or else it
# will deadlock.
self.server.shutdown()
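# Minimal usage sketch (illustrative; assumes `app` is a Chalice app and
# `config` a chalice Config):
#
#   server = LocalDevServer(app, config, host='127.0.0.1', port=8000)
#   server.serve_forever()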
class HTTPServerThread(threading.Thread):
"""Thread that manages starting/stopping local HTTP server.
This is a small wrapper around a normal threading.Thread except
that it adds shutdown capability of the HTTP server, which is
not part of the normal threading.Thread interface.
"""
def __init__(self, server_factory):
# type: (Callable[[], LocalDevServer]) -> None
threading.Thread.__init__(self)
self._server_factory = server_factory
self._server = None # type: Optional[LocalDevServer]
self.daemon = True
def run(self):
# type: () -> None
self._server = self._server_factory()
self._server.serve_forever()
def shutdown(self):
# type: () -> None
if self._server is not None:
self._server.shutdown()
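# Minimal usage sketch (illustrative): run the dev server in the background
# and stop it from the main thread.
#
#   thread = HTTPServerThread(
#       lambda: LocalDevServer(app, config, 'localhost', 8000))
#   thread.start()
#   ...
#   thread.shutdown()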
class LocalChalice(Chalice):
_THREAD_LOCAL = threading.local()
# This is a known mypy bug where you can't override instance
# variables with properties. So this should be type safe, which
# is why we're adding the type: ignore comments here.
# See: https://github.com/python/mypy/issues/4125
@property # type: ignore
def current_request(self): # type: ignore
# type: () -> Request
return self._THREAD_LOCAL.current_request
@current_request.setter
def current_request(self, value): # type: ignore
# type: (Request) -> None
self._THREAD_LOCAL.current_request = value
|
awslabs/chalice
|
chalice/local.py
|
Python
|
apache-2.0
| 29,826 | 0 |
###########################################
# #
# cp_hand.py #
# author: irza pulungan #
# #
# this py will forward incoming MQTT #
# message to Serial USB port arduino #
# loaded with custom sketch #
# #
###########################################
import serial
import paho.mqtt.client as mqtt
ser = serial.Serial("/dev/ttyACM0",9600)
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("$SYS/#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
if msg.topic == "servo":
print(str(msg.payload))
ser.write(str(msg.payload) + "\n\r")
ser.flushInput()
ser.flushOutput()
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("192.168.0.100", 1883, 60)
client.subscribe("servo",0)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
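# A matching publisher, as a sketch (illustrative; assumes the same broker
# and the payload format expected by the Arduino sketch):
#
#   import paho.mqtt.publish as publish
#   publish.single("servo", "90", hostname="192.168.0.100")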
|
irzaip/chippy
|
cp_hand.py
|
Python
|
lgpl-3.0
| 1,479 | 0.006085 |
#!/usr/bin/python
# Copyright 2013 Gandi SAS
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gandi_iface
version_added: "2.0"
short_description: create, attach, detach or delete Gandi network interfaces
description:
- Manage Gandi network interfaces
options:
state:
description:
- desired state of the resource
required: false
default: "created"
choices: ["created", "deleted"]
aliases: []
datacenter:
description:
- datacenter location for servers
required: true
choices: ["Saint Denis", "Bissen", "Baltimore"]
bandwidth:
description:
- bandwidth of the interface in bits/s (float)
required: false
vlan:
description:
- private vlan name the interface belongs to (str)
required: false
default: null
ip_address:
description:
- CIDR IPv4|IPv6 address of the interface on the vlan (str)
required: false
default: null
ip_version:
description:
- ip version of the interface (str)
required: false
default: null
requirements: [ "libcloud" ]
author: Eric Garrigues <eric@gandi.net>
'''
EXAMPLES = '''
# Basic provisioning example. Create a new iface on vlan mypvlan
# Luxembourg datacenter
- gandi_iface:
vlan: mypvlan
datacenter: "Bissen"
ip_address: 192.168.0.1
ip_version: 4
bandwidth: 50000.0
'''
import sys
USER_AGENT_PRODUCT = "Ansible-gandi"
USER_AGENT_VERSION = "v0.1"
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.gandi import GandiException
_ = Provider.GANDI
except ImportError:
print("failed=True " +
"msg='libcloud with Gandi support required for this module'")
sys.exit(1)
# Load in the libcloud secrets file
try:
import secrets
except ImportError:
secrets = None
ARGS = getattr(secrets, 'GANDI_PARAMS', ())
if not ARGS:
print("failed=True " +
"msg='Missing Gandi connection in libcloud secrets file.'")
sys.exit(1)
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
# XXX : better error management
return error
def _get_by_name(name, entities):
find = [x for x in entities if x.name == name]
return find[0] if find else None
def _get_by_id(id, entities):
find = [x for x in entities if x.id == id]
return find[0] if find else None
def get_datacenter(driver, name):
"""Get datacenter by name
"""
dcs = driver.list_locations()
return _get_by_name(name, dcs)
def get_pvlan(driver, name):
pvlans = driver.ex_list_pvlans()
return _get_by_name(name, pvlans)
def get_iface(driver, id):
ifaces = driver.ex_list_ifaces()
return _get_by_id(id, ifaces)
def get_iface_info(iface):
"""Retrieves interface information from an interface object and returns it
as a dictionary.
"""
return({
'vlan': iface.vlan.name if iface.vlan is not None else None,
'bandwidth': iface.extra.get('bandwidth'),
'datacenter_id': iface.extra.get('datacenter_id')
})
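# Example (illustrative) return value, using a hypothetical datacenter id:
#   {'vlan': 'mypvlan', 'bandwidth': 50000.0, 'datacenter_id': 2}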
def create_iface(module, driver):
"""Creates a new network interface.
module : AnsibleModule object
driver: authenticated libcloud driver on Gandi provider
Returns:
A dictionary with information about the interface that was created.
"""
iface = {}
ip_address = module.params.get('ip_address')
ip_version = module.params.get('ip_version')
pvlan_name = module.params.get('vlan')
bandwidth = module.params.get('bandwidth')
datacenter = module.params.get('datacenter')
changed = False
lc_location = get_datacenter(driver, datacenter)
if not lc_location:
module.fail_json(msg='Invalid datacenter %s' % datacenter,
changed=False)
pvlan = get_pvlan(driver, pvlan_name)
# module.fail_json(msg=pvlan, changed=False)
if not pvlan and not ip_version:
module.fail_json(msg='ip_version is mandatory when not a vlan',
changed=False)
try:
iface = driver.ex_create_iface(location=lc_location,
ip_version=ip_version,
ip_address=ip_address,
vlan=pvlan,
bandwidth=bandwidth)
changed = True
except GandiException as e:
module.fail_json(msg='Unexpected error attempting to create iface: %s' % e)
iface_json_data = get_iface_info(iface)
return (changed, iface_json_data)
def delete_iface(module, driver, iface_id):
"""Delete an interface.
module: Ansible module object
driver: authenticated Gandi connection object
iface_id: int id of the interface
Returns a dictionary of with operation status.
"""
changed = False
pvlan = None
try:
iface = get_iface(driver, iface_id)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if iface:
driver.ex_delete_iface(iface)
changed = True
return (changed, iface_id)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['created', 'deleted'],
default='created'),
datacenter=dict(default='Bissen'),
ip_version=dict(),
ip_address=dict(),
vlan=dict(),
bandwidth=dict(),
iface_id=dict()
)
)
ip_version = module.params.get('ip_version')
ip_address = module.params.get('ip_address')
vlan_name = module.params.get('vlan')
bandwidth = module.params.get('bandwidth')
state = module.params.get('state')
dc = module.params.get('datacenter')
# iface_id is needed for deletion; without it, delete_iface below would
# reference an undefined name.
iface_id = module.params.get('iface_id')
changed = False
try:
gandi = get_driver(Provider.GANDI)(*ARGS)
gandi.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if not dc and state in ['created']:
module.fail_json(msg='Must specify a "datacenter"', changed=False)
json_output = {'datacenter': dc}
if state in ['deleted']:
json_output['state'] = 'deleted'
(changed, iface_id) = delete_iface(module, gandi, iface_id)
json_output['iface_id'] = iface_id
elif state in ['created']:
json_output['state'] = 'created'
(changed, iface_data) = create_iface(module, gandi)
json_output['iface_data'] = iface_data
json_output['changed'] = changed
print(json.dumps(json_output))
sys.exit(0)
from ansible.module_utils.basic import *
main()
|
ericgarrigues/ansible-modules-extras
|
cloud/gandi/gandi_iface.py
|
Python
|
gpl-3.0
| 7,326 | 0.000546 |
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib.rarithmetic import intmask
from hippy import consts
from hippy.error import PHPException
from hippy.builtin import wrap_method, ThisUnwrapper, StringArg
from hippy.builtin_klass import GetterSetterWrapper, k_Exception
from hippy.klass import def_class
from hippy.module.date import timelib
from hippy.objects.instanceobject import W_InstanceObject
class W_DateInterval(W_InstanceObject):
pass
@wrap_method(['interp', ThisUnwrapper(W_DateInterval), StringArg(None)],
name='DateInterval::__construct')
def construct(interp, this, spec):
exc_obj = k_Exception.call_args(
interp, [interp.space.wrap('Unknown or bad format (%s)' % spec)]
)
if not (len(spec) > 1 and spec[0] == 'P'):
raise PHPException(exc_obj)
index = 1
time = False
formats = {'y': 0, 'm': 0, 'd': 0, 'h': 0, 'i': 0, 's': 0}
while index < len(spec):
format = None
times = 0
if spec[index] == 'T':
index += 1
time = True
while spec[index].isdigit():
times = times * 10
times = times + (ord(spec[index]) - ord('0'))
index += 1
if times:
if spec[index] == 'Y':
format = 'y'
elif spec[index] == 'M' and not time:
format = 'm'
elif spec[index] == 'D':
format = 'd'
elif spec[index] == 'W':
format = 'd'
times *= 7
elif spec[index] == 'H':
format = 'h'
elif spec[index] == 'M' and time:
format = 'i'
elif spec[index] == 'S':
format = 's'
if not formats[format]:
formats[format] = times
else:
raise PHPException(exc_obj)
index += 1
this.time_diff = timelib.timelib_rel_time_ctor()
this.time_diff.c_y = rffi.cast(lltype.Signed, formats['y'])
this.time_diff.c_m = rffi.cast(lltype.Signed, formats['m'])
this.time_diff.c_d = rffi.cast(lltype.Signed, formats['d'])
this.time_diff.c_h = rffi.cast(lltype.Signed, formats['h'])
this.time_diff.c_i = rffi.cast(lltype.Signed, formats['i'])
this.time_diff.c_s = rffi.cast(lltype.Signed, formats['s'])
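# Example (illustrative): DateInterval('P1Y2M10DT2H30M') runs the loop above
# with y=1, m=2, d=10, h=2, i=30, s=0; 'P2W' gives d=14 via the W branch.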
@wrap_method(['interp', StringArg(None)],
name='DateInterval::createFromDateString', flags=consts.ACC_STATIC)
def create_from_date_string(interp, string):
spec = "P%sY%sM%sDT%sH%sM%sS" % timelib.str_interval_to_time(string)
return k_DateInterval.call_args(interp, [interp.space.wrap(spec)])
@wrap_method(['interp', ThisUnwrapper(W_DateInterval), StringArg(None)], name='DateInterval::format')
def format(interp, this, format):
y = this.time_diff.c_y
m = this.time_diff.c_m
d = this.time_diff.c_d
h = this.time_diff.c_h
i = this.time_diff.c_i
s = this.time_diff.c_s
index = 0
results = []
while index < len(format):
c = format[index]
if c == '%':
index += 1
next_c = format[index]
if next_c == 'Y':
results.append(timelib.format_to(2, y))
elif next_c == 'y':
results.append("%d" % y)
elif next_c == 'M':
results.append(timelib.format_to(2, m))
elif next_c == 'm':
results.append("%d" % m)
elif next_c == 'D':
results.append(timelib.format_to(2, d))
elif next_c == 'd':
results.append("%d" % d)
elif next_c == 'H':
results.append(timelib.format_to(2, h))
elif next_c == 'h':
results.append("%d" % h)
elif next_c == 'I':
results.append(timelib.format_to(2, i))
elif next_c == 'i':
results.append("%d" % i)
elif next_c == 'S':
results.append(timelib.format_to(2, s))
elif next_c == 's':
results.append("%d" % s)
elif next_c == 'a':
if this.time_diff.c_d != -99999:
results.append("%d" % this.time_diff.c_days)
else:
results.append("(unknown)")
elif next_c == 'r':
results.append("-" if int(this.time_diff.c_invert) else "")
elif next_c == 'R':
results.append("-" if int(this.time_diff.c_invert) else "+")
elif next_c == '%':
results.append('%')
else:
results.append("%%%s" % next_c)
else:
results.append(c)
index += 1
return interp.space.wrap("".join(results))
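# Example (illustrative, assuming timelib.format_to(2, n) zero-pads to two
# digits): an interval of 1 year, 2 months, 3 days, 4h 5m 6s formatted with
# '%Y-%M-%D %H:%I:%S' yields '01-02-03 04:05:06', while '%y' gives '1'.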
def get_y(interp, this):
return interp.space.wrap(this.time_diff.c_y)
def set_y(interp, this, w_newvalue):
this.time_diff.c_y = interp.space.int_w(w_newvalue)
def get_m(interp, this):
return interp.space.wrap(this.time_diff.c_m)
def set_m(interp, this, w_newvalue):
this.time_diff.c_m = interp.space.int_w(w_newvalue)
def get_d(interp, this):
return interp.space.wrap(this.time_diff.c_d)
def set_d(interp, this, w_newvalue):
this.time_diff.c_d = interp.space.int_w(w_newvalue)
def get_h(interp, this):
return interp.space.wrap(this.time_diff.c_h)
def set_h(interp, this, w_newvalue):
this.time_diff.c_h = interp.space.int_w(w_newvalue)
def get_i(interp, this):
return interp.space.wrap(this.time_diff.c_i)
def set_i(interp, this, w_newvalue):
this.time_diff.c_i = interp.space.int_w(w_newvalue)
def get_s(interp, this):
return interp.space.wrap(this.time_diff.c_s)
def set_s(interp, this, w_newvalue):
this.time_diff.c_s = interp.space.int_w(w_newvalue)
def get_invert(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_invert))
def set_invert(interp, this, w_newvalue):
this.time_diff.c_invert = rffi.cast(rffi.INT, interp.space.int_w(w_newvalue))
def get_days(interp, this):
return interp.space.wrap(this.time_diff.c_days or False)
def set_days(interp, this, w_newvalue):
this.time_diff.c_days = interp.space.int_w(w_newvalue)
def get_weekday(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_weekday))
def set_weekday(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_weekday = rffi.cast(rffi.INT, value)
def get_weekday_behavior(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_weekday_behavior))
def set_weekday_behavior(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_weekday_behavior = rffi.cast(rffi.INT, value)
def get_first_last_day_of(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_first_last_day_of))
def set_first_last_day_of(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_first_last_day_of = rffi.cast(rffi.INT, value)
def get_special_type(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_special.c_type))
def set_special_type(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_special.c_type = rffi.cast(rffi.UINT, value)
def get_special_amount(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_special.c_amount))
def set_special_amount(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_special.c_amount = rffi.cast(lltype.Signed, value)
def get_have_weekday_relative(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_have_weekday_relative))
def set_have_weekday_relative(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_have_weekday_relative = rffi.cast(rffi.UINT, value)
def get_have_special_relative(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_have_special_relative))
def set_have_special_relative(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_have_special_relative = rffi.cast(rffi.UINT, value)
k_DateInterval = def_class(
'DateInterval',
[construct,
create_from_date_string,
format],
[GetterSetterWrapper(get_y, set_y,
"y", consts.ACC_PUBLIC),
GetterSetterWrapper(get_m, set_m,
"m", consts.ACC_PUBLIC),
GetterSetterWrapper(get_d, set_d,
"d", consts.ACC_PUBLIC),
GetterSetterWrapper(get_h, set_h,
"h", consts.ACC_PUBLIC),
GetterSetterWrapper(get_i, set_i,
"i", consts.ACC_PUBLIC),
GetterSetterWrapper(get_s, set_s,
"s", consts.ACC_PUBLIC),
GetterSetterWrapper(get_weekday, set_weekday,
"weekday", consts.ACC_PUBLIC),
GetterSetterWrapper(get_weekday_behavior, set_weekday_behavior,
"weekday_behavior", consts.ACC_PUBLIC),
GetterSetterWrapper(get_first_last_day_of, set_first_last_day_of,
"first_last_day_of", consts.ACC_PUBLIC),
GetterSetterWrapper(get_invert, set_invert,
"invert", consts.ACC_PUBLIC),
GetterSetterWrapper(get_days, set_days,
"days", consts.ACC_PUBLIC),
GetterSetterWrapper(get_special_type, set_special_type,
"special_type", consts.ACC_PUBLIC),
GetterSetterWrapper(get_special_amount, set_special_amount,
"special_amount", consts.ACC_PUBLIC),
GetterSetterWrapper(get_have_weekday_relative, set_have_weekday_relative,
"have_weekday_relative", consts.ACC_PUBLIC),
GetterSetterWrapper(get_have_special_relative, set_have_special_relative,
"have_special_relative", consts.ACC_PUBLIC)],
instance_class=W_DateInterval
)
|
ericpp/hippyvm
|
hippy/module/date/dateinterval_klass.py
|
Python
|
mit
| 9,931 | 0.003927 |
# -*- coding: utf-8 -*-
r'''This module contains a base class for modeling computation graphs.'''
import downhill
import gzip
import hashlib
import numpy as np
import pickle
import theano
import time
import warnings
from . import layers
from . import losses
from . import regularizers
from . import trainer
from . import util
class Network(object):
'''The network class encapsulates a network computation graph.
Notes
-----
Computation graphs are organized into :ref:`layers <layers>`. Each layer
receives one or more arrays of input data, transforms them, and generates
one or more arrays of output data.
Outputs in a computation graph are named according to their layer and output
type, so the 'pre' output of a layer named 'hid1' would be named 'hid1:pre'.
The 'out' output is the default output for a layer. By default the last
layer in a network is named 'out'.
The parameters in a network graph are optimized by minimizing a :ref:`loss
function <losses>` with respect to some set of training data. Typically the
value produced by 'out:out' is compared to some target value, creating an
error value of some sort. This error value is then propagated back through
the computation graph to update the parameters in the model.
Parameters
----------
layers : int, tuple, dict, or :class:`Layer <theanets.layers.base.Layer>`
A sequence of values specifying the layer configuration for the network.
For more information, please see :ref:`guide-creating-specifying-layers`.
loss : str or :class:`Loss <theanets.losses.Loss>`
The name of a loss function to optimize when training this network
model.
weighted : bool, optional
If True, optimize this model using a "weighted" loss. Weighted losses
typically require an additional array as input during optimization.
For more information, see :ref:`losses-weighted`. Defaults to False.
rng : int or RandomState, optional
A seed or numpy ``RandomState`` instance for generating randomness in
the model. Defaults to 13.
Attributes
----------
layers : list of :class:`Layer <theanets.layers.base.Layer>`
A list of the layers in this network model.
losses : list of :class:`Loss <theanets.losses.Loss>`
A list of losses to be computed when optimizing this network model.
'''
DEFAULT_OUTPUT_ACTIVATION = 'linear'
'''Default activation for the output layer.'''
INPUT_NDIM = 2
'''Number of dimensions for holding input data arrays.'''
OUTPUT_NDIM = 2
'''Number of dimensions for holding output data arrays.'''
def __init__(self, layers=(), loss='mse', weighted=False, rng=13):
self._graphs = {} # cache of symbolic computation graphs
self._functions = {} # cache of callable feedforward functions
self._rng = rng
# create layers based on specs provided in the constructor.
self.layers = []
for i, layer in enumerate(layers):
first = i == 0
last = i == len(layers) - 1
name = 'in' if first else 'out' if last else 'hid{}'.format(i)
activation = self.DEFAULT_OUTPUT_ACTIVATION if last else 'relu'
self.add_layer(layer=layer, name=name, activation=activation)
# bind layers to this graph after construction. this finalizes layer
# shapes and does other consistency checks based on the entire graph.
[l.bind(self) for l in self.layers]
# create a default loss (usually).
self.losses = []
if loss and self.layers:
self.set_loss(loss,
weighted=weighted,
target=self.OUTPUT_NDIM,
output_name=self.layers[-1].output_name)
def add_layer(self, layer=None, **kwargs):
'''Add a :ref:`layer <layers>` to our network graph.
Parameters
----------
layer : int, tuple, dict, or :class:`Layer <theanets.layers.base.Layer>`
A value specifying the layer to add. For more information, please
see :ref:`guide-creating-specifying-layers`.
'''
# if the given layer is a Layer instance, just add it and move on.
if isinstance(layer, layers.Layer):
self.layers.append(layer)
return
form = kwargs.pop('form', 'ff' if self.layers else 'input').lower()
if isinstance(layer, util.basestring):
if not layers.Layer.is_registered(layer):
raise util.ConfigurationError('unknown layer type: {}'.format(layer))
form = layer
layer = None
# if layer is a tuple/list of integers, assume it's a shape.
if isinstance(layer, (tuple, list)) and all(isinstance(x, int) for x in layer):
kwargs['shape'] = tuple(layer)
layer = None
# if layer is some other tuple/list, assume it's a list of:
# - the name of a layers.Layer class (str)
# - the name of an activation function (str)
# - the number of units in the layer (int)
if isinstance(layer, (tuple, list)):
for el in layer:
if isinstance(el, util.basestring) and layers.Layer.is_registered(el):
form = el
elif isinstance(el, util.basestring):
kwargs['activation'] = el
elif isinstance(el, int):
if 'size' in kwargs:
raise util.ConfigurationError(
'duplicate layer sizes! {}'.format(kwargs))
kwargs['size'] = el
layer = None
# if layer is a dictionary, try to extract a form for the layer, and
# override our default keyword arguments with the rest.
if isinstance(layer, dict):
for key, value in layer.items():
if key == 'form':
form = value.lower()
else:
kwargs[key] = value
layer = None
# if neither shape nor size have been specified yet, check that the
# "layer" param is an int and use it for "size".
if 'shape' not in kwargs and 'size' not in kwargs and isinstance(layer, int):
kwargs['size'] = layer
# if it hasn't been provided in some other way yet, set input
# dimensionality based on the model.
if form == 'input' and 'shape' not in kwargs:
kwargs.setdefault('ndim', self.INPUT_NDIM)
# set some default layer parameters.
if form != 'input':
kwargs.setdefault('inputs', self.layers[-1].output_name)
kwargs.setdefault('rng', self._rng)
if form.lower() == 'tied' and 'partner' not in kwargs:
# we look backward through our list of layers for a partner.
# any "tied" layer that we find increases a counter by one,
# and any "untied" layer decreases the counter by one. our
# partner is the first layer we find with count zero.
#
# this is intended to handle the hopefully common case of a
# (possibly deep) tied-weights autoencoder.
tied = 1
partner = None
for l in self.layers[::-1]:
tied += 1 if isinstance(l, layers.Tied) else -1
if tied == 0:
partner = l.name
break
else:
raise util.ConfigurationError(
'cannot find partner for "{}"'.format(kwargs))
kwargs['partner'] = partner
layer = layers.Layer.build(form, **kwargs)
# check that graph inputs have unique names.
if isinstance(layer, layers.Input):
if any(layer.name == i.name for i in self.inputs):
raise util.ConfigurationError(
'"{}": duplicate input name!'.format(layer.name))
self.layers.append(layer)
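# Equivalent layer specs handled above, as a sketch (illustrative):
#
#   net.add_layer(100)                        # size=100
#   net.add_layer((100, 'relu'))              # size=100, activation='relu'
#   net.add_layer(dict(form='ff', size=100))  # explicit form plus kwargs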
def add_loss(self, loss=None, **kwargs):
'''Add a :ref:`loss function <losses>` to the model.
Parameters
----------
loss : str, dict, or :class:`theanets.losses.Loss`
A loss function to add. If this is a Loss instance, it will be added
immediately. If this is a string, it names a loss function to build
and add. If it is a dictionary, it should contain a ``'form'`` key
whose string value names the loss function to add. Other arguments
will be passed to :func:`theanets.losses.Loss.build`.
'''
if isinstance(loss, losses.Loss):
self.losses.append(loss)
return
form = loss or 'mse'
if 'form' in kwargs:
form = kwargs.pop('form').lower()
kw = dict(target=self.INPUT_NDIM, output_name=self.layers[-1].output_name)
kw.update(kwargs)
if isinstance(loss, dict):
loss = dict(loss)
if 'form' in loss:
form = loss.pop('form').lower()
kw.update(loss)
self.losses.append(losses.Loss.build(form, **kw))
def set_loss(self, *args, **kwargs):
'''Clear the current loss functions from the network and add a new one.
All parameters and keyword arguments are passed to :func:`add_loss`
after clearing the current losses.
'''
self.losses = []
self.add_loss(*args, **kwargs)
def itertrain(self, train, valid=None, algo='rmsprop', subalgo='rmsprop',
save_every=0, save_progress=None, **kwargs):
'''Train our network, one batch at a time.
This method yields a series of ``(train, valid)`` monitor pairs. The
``train`` value is a dictionary mapping names to monitor values
evaluated on the training dataset. The ``valid`` value is also a
dictionary mapping names to values, but these values are evaluated on
the validation dataset.
Because validation might not occur every training iteration, the
validation monitors might be repeated for multiple training iterations.
It is probably most helpful to think of the validation monitors as being
the "most recent" values that have been computed.
After training completes, the network attribute of this class will
contain the trained network parameters.
Parameters
----------
train : :class:`Dataset <downhill.dataset.Dataset>` or list
A dataset to use when training the network. If this is a
``downhill.Dataset`` instance, it will be used directly as the
training dataset. If it is a list of numpy arrays or a list of
callables, it will be converted to a ``downhill.Dataset`` and then
used as the training set.
valid : :class:`Dataset <downhill.dataset.Dataset>` or list, optional
If this is provided, it will be used as a validation dataset. If not
provided, the training set will be used for validation. (This is not
recommended!)
algo : str, optional
An optimization algorithm to use for training our network. If not
provided, :class:`RMSProp <downhill.adaptive.RMSProp>` will be used.
subalgo : str, optional
An optimization algorithm to use for a trainer that requires a
"sub-algorithm," such as an unsupervised pretrainer. Defaults to
:class:`RMSProp <downhill.adaptive.RMSProp>`.
save_every : int or float, optional
If this is nonzero and ``save_progress`` is not None, then the model
being trained will be saved periodically. If this is a float, it is
treated as a number of minutes to wait between savings. If it is an
int, it is treated as the number of training epochs to wait between
savings. Defaults to 0.
save_progress : str or file handle, optional
If this is not None, and ``save_progress`` is nonzero, then save the
model periodically during training. This parameter gives either (a)
the full path of a file to save the model, or (b) a file-like object
where the model should be saved. If it is a string and the given
name contains a "{}" format specifier, it will be filled with the
integer Unix timestamp at the time the model is saved. Defaults to
None, which does not save models.
Yields
------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
'''
if 'rng' not in kwargs:
kwargs['rng'] = self._rng
def create_dataset(data, **kwargs):
name = kwargs.get('name', 'dataset')
s = '{}_batches'.format(name)
return downhill.Dataset(
data,
name=name,
batch_size=kwargs.get('batch_size', 32),
iteration_size=kwargs.get('iteration_size', kwargs.get(s)),
axis=kwargs.get('axis', 0),
rng=kwargs['rng'])
# set up datasets ...
if valid is None:
valid = train
if not isinstance(valid, downhill.Dataset):
valid = create_dataset(valid, name='valid', **kwargs)
if not isinstance(train, downhill.Dataset):
train = create_dataset(train, name='train', **kwargs)
if 'algorithm' in kwargs:
warnings.warn(
'please use the "algo" keyword arg instead of "algorithm"',
DeprecationWarning)
algo = kwargs.pop('algorithm')
if isinstance(algo, (list, tuple)):
algo = algo[0]
# set up trainer ...
if isinstance(algo, util.basestring):
algo = algo.lower()
if algo == 'sample':
algo = trainer.SampleTrainer(self)
elif algo.startswith('layer') or algo.startswith('sup'):
algo = trainer.SupervisedPretrainer(subalgo, self)
elif algo.startswith('pre') or algo.startswith('unsup'):
algo = trainer.UnsupervisedPretrainer(subalgo, self)
else:
algo = trainer.DownhillTrainer(algo, self)
# set up check to save model ...
def needs_saving(elapsed, iteration):
if save_progress is None:
return False
if isinstance(save_every, float):
return elapsed > 60 * save_every
if isinstance(save_every, int):
return iteration % save_every == 0
return False
# train it!
start = time.time()
for i, monitors in enumerate(algo.itertrain(train, valid, **kwargs)):
yield monitors
now = time.time()
if i and needs_saving(now - start, i):
filename_or_handle = save_progress
if isinstance(filename_or_handle, util.basestring):
filename_or_handle = save_progress.format(int(now))
self.save(filename_or_handle)
start = now
def train(self, *args, **kwargs):
'''Train the network until the trainer converges.
All arguments are passed to :func:`itertrain`.
Returns
-------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
'''
monitors = None
for monitors in self.itertrain(*args, **kwargs):
pass
return monitors
def _hash(self, regularizers=()):
'''Construct a string key for representing a computation graph.
This key will be unique for a given (a) network topology, (b) set of
losses, and (c) set of regularizers.
Returns
-------
key : str
A hash representing the computation graph for the current network.
'''
def add(s):
h.update(str(s).encode('utf-8'))
h = hashlib.md5()
for l in self.layers:
add('{}{}{}'.format(l.__class__.__name__, l.name, l.output_shape))
for l in self.losses:
add('{}{}'.format(l.__class__.__name__, l.weight))
for r in regularizers:
add('{}{}{}'.format(r.__class__.__name__, r.weight, r.pattern))
return h.hexdigest()
def build_graph(self, regularizers=()):
'''Connect the layers in this network to form a computation graph.
Parameters
----------
regularizers : list of :class:`theanets.regularizers.Regularizer`
A list of the regularizers to apply while building the computation
graph.
Returns
-------
outputs : list of Theano variables
A list of expressions giving the output of each layer in the graph.
updates : list of update tuples
A list of updates that should be performed by a Theano function that
computes something using this graph.
'''
key = self._hash(regularizers)
if key not in self._graphs:
util.log('building computation graph')
for loss in self.losses:
loss.log()
for reg in regularizers:
reg.log()
outputs = {}
updates = []
for layer in self.layers:
out, upd = layer.connect(outputs)
for reg in regularizers:
reg.modify_graph(out)
outputs.update(out)
updates.extend(upd)
self._graphs[key] = outputs, updates
return self._graphs[key]
@property
def inputs(self):
'''A list of Theano variables for feedforward computations.'''
return [l.input for l in self.layers if isinstance(l, layers.Input)]
@property
def variables(self):
'''A list of Theano variables for loss computations.'''
result = self.inputs
seen = set(i.name for i in result)
for loss in self.losses:
for v in loss.variables:
if v.name not in seen:
result.append(v)
seen.add(v.name)
return result
@property
def params(self):
'''A list of the learnable Theano parameters for this network.'''
return [p for l in self.layers for p in l.params]
def find(self, which, param):
'''Get a parameter from a layer in the network.
Parameters
----------
which : int or str
The layer that owns the parameter to return.
If this is an integer, then 0 refers to the input layer, 1 refers
to the first hidden layer, 2 to the second, and so on.
If this is a string, the layer with the corresponding name, if any,
will be used.
param : int or str
Name of the parameter to retrieve from the specified layer, or its
index in the parameter list of the layer.
Raises
------
KeyError
If there is no such layer, or if there is no such parameter in the
specified layer.
Returns
-------
param : Theano shared variable
A shared parameter variable from the indicated layer.
'''
for i, layer in enumerate(self.layers):
if which == i or which == layer.name:
return layer.find(param)
raise KeyError(which)
def feed_forward(self, x, **kwargs):
'''Compute a forward pass of all layers from the given input.
All keyword arguments are passed directly to :func:`build_graph`.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
Returns
-------
layers : list of ndarray (num-examples, num-units)
The activation values of each layer in the network when given
input `x`. For each of the hidden layers, an array is returned
containing one row per input example; the columns of each array
correspond to units in the respective layer. The "output" of the
network is the last element of this list.
'''
regs = regularizers.from_kwargs(self, **kwargs)
key = self._hash(regs)
if key not in self._functions:
outputs, updates = self.build_graph(regs)
labels, exprs = list(outputs.keys()), list(outputs.values())
util.log('compiling feed_forward function')
self._functions[key] = (labels, theano.function(
self.inputs, exprs, updates=updates))
labels, f = self._functions[key]
return dict(zip(labels, f(x)))
def predict(self, x, **kwargs):
'''Compute a forward pass of the inputs, returning the network output.
All keyword arguments end up being passed to :func:`build_graph`.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
Returns
-------
y : ndarray (num-examples, num-variables)
Returns the values of the network output units when given input `x`.
Rows in this array correspond to examples, and columns to output
variables.
'''
return self.feed_forward(x, **kwargs)[self.layers[-1].output_name]
def score(self, x, y, w=None, **kwargs):
'''Compute R^2 coefficient of determination for a given labeled input.
Parameters
----------
x : ndarray (num-examples, num-inputs)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
y : ndarray (num-examples, num-outputs)
An array containing expected target data for the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
Returns
-------
r2 : float
The R^2 correlation between the prediction of this network and its
target output.
'''
u = y - self.predict(x, **kwargs)
v = y - y.mean()
if w is None:
w = np.ones_like(u)
return 1 - (w * u * u).sum() / (w * v * v).sum()
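# Worked example (illustrative): with y = [[0.], [2.]], predictions
# [[0.], [1.]] and unit weights, u = [[0.], [1.]], v = [[-1.], [1.]],
# so R^2 = 1 - 1/2 = 0.5.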
def __getstate__(self):
return (self.layers, self.losses)
def __setstate__(self, state):
self.layers, self.losses = state
self._graphs = {}
self._functions = {}
def save(self, filename_or_handle):
'''Save the state of this network to a pickle file on disk.
Parameters
----------
filename_or_handle : str or file handle
Save the state of this network to a pickle file. If this parameter
is a string, it names the file where the pickle will be saved. If it
is a file-like object, this object will be used for writing the
pickle. If the filename ends in ".gz" then the output will
automatically be gzipped.
'''
if isinstance(filename_or_handle, util.basestring):
opener = gzip.open if filename_or_handle.lower().endswith('.gz') else open
handle = opener(filename_or_handle, 'wb')
else:
handle = filename_or_handle
pickle.dump(self, handle, -1)
if isinstance(filename_or_handle, util.basestring):
handle.close()
util.log('saved model to {}', filename_or_handle)
@classmethod
def load(cls, filename_or_handle):
'''Load a saved network from disk.
Parameters
----------
filename_or_handle : str or file handle
Load the state of this network from a pickle file. If this parameter
is a string, it names the file where the pickle will be saved. If it
is a file-like object, this object will be used for reading the
pickle. If the filename ends in ".gz" then the output will
automatically be gunzipped.
'''
assert not isinstance(cls, Network), \
'cannot load an instance! say instead: net = Network.load(source)'
if isinstance(filename_or_handle, util.basestring):
opener = gzip.open if filename_or_handle.lower().endswith('.gz') else open
handle = opener(filename_or_handle, 'rb')
else:
handle = filename_or_handle
model = pickle.load(handle)
if isinstance(filename_or_handle, util.basestring):
handle.close()
util.log('loaded model from {}', filename_or_handle)
return model
def loss(self, **kwargs):
'''Return a variable representing the regularized loss for this network.
The regularized loss includes both the :ref:`loss computation <losses>`
for the network as well as any :ref:`regularizers <regularizers>` that
are in place.
Keyword arguments are passed directly to
:func:`theanets.regularizers.from_kwargs`.
Returns
-------
loss : Theano expression
A Theano expression representing the loss of this network.
'''
regs = regularizers.from_kwargs(self, **kwargs)
outputs, _ = self.build_graph(regs)
return sum(l.weight * l(outputs) for l in self.losses) + \
sum(r.weight * r.loss(self.layers, outputs) for r in regs)
def monitors(self, **kwargs):
'''Return expressions that should be computed to monitor training.
Returns
-------
monitors : list of (name, expression) pairs
A list of named monitor expressions to compute for this network.
'''
regs = regularizers.from_kwargs(self, **kwargs)
outputs, _ = self.build_graph(regs)
monitors = [('err', self.losses[0](outputs))]
def matching(pattern):
'''Yield all matching outputs or parameters from the graph.'''
for name, expr in util.outputs_matching(outputs, pattern):
yield name, expr
for name, expr in util.params_matching(self.layers, pattern):
yield name, expr
def parse_levels(levels):
'''Yield named monitor callables.'''
if isinstance(levels, dict):
levels = levels.items()
if isinstance(levels, (int, float)):
levels = [levels]
for level in levels:
if isinstance(level, (tuple, list)):
label, call = level
yield ':{}'.format(label), call
if isinstance(level, (int, float)):
def call(expr):
return (expr < level).mean()
yield '<{}'.format(level), call
inputs = kwargs.get('monitors', {})
if isinstance(inputs, dict):
inputs = inputs.items()
for pattern, levels in inputs:
for name, expr in matching(pattern):
for key, value in parse_levels(levels):
monitors.append(('{}{}'.format(name, key), value(expr)))
return monitors
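# Example (illustrative): monitors={'hid1:out': (0.1, 0.9)} yields monitor
# names 'hid1:out<0.1' and 'hid1:out<0.9', each reporting the fraction of
# matching values below that level.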
def updates(self, **kwargs):
'''Return expressions to run as updates during network training.
Returns
-------
updates : list of (parameter, expression) pairs
A list of named parameter update expressions for this network.
'''
regs = regularizers.from_kwargs(self, **kwargs)
_, updates = self.build_graph(regs)
return updates
|
lmjohns3/theanets
|
theanets/graph.py
|
Python
|
mit
| 28,912 | 0.001349 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import ExportPipelinesOperations, ImportPipelinesOperations, Operations, PipelineRunsOperations, PrivateEndpointConnectionsOperations, RegistriesOperations, ReplicationsOperations, WebhooksOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ContainerRegistryManagementClient:
"""ContainerRegistryManagementClient.
:ivar export_pipelines: ExportPipelinesOperations operations
:vartype export_pipelines:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ExportPipelinesOperations
:ivar registries: RegistriesOperations operations
:vartype registries:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.RegistriesOperations
:ivar import_pipelines: ImportPipelinesOperations operations
:vartype import_pipelines:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ImportPipelinesOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.Operations
:ivar pipeline_runs: PipelineRunsOperations operations
:vartype pipeline_runs:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.PipelineRunsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.PrivateEndpointConnectionsOperations
:ivar replications: ReplicationsOperations operations
:vartype replications:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ReplicationsOperations
:ivar webhooks: WebhooksOperations operations
:vartype webhooks:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.WebhooksOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Microsoft Azure subscription ID.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ContainerRegistryManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.export_pipelines = ExportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.import_pipelines = ImportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.pipeline_runs = PipelineRunsOperations(self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.replications = ReplicationsOperations(self._client, self._config, self._serialize, self._deserialize)
self.webhooks = WebhooksOperations(self._client, self._config, self._serialize, self._deserialize)
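# Construction sketch (illustrative; assumes azure-identity is installed):
#
#   from azure.identity.aio import DefaultAzureCredential
#   client = ContainerRegistryManagementClient(
#       DefaultAzureCredential(), "<subscription-id>")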
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ContainerRegistryManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
Azure/azure-sdk-for-python
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_12_01_preview/aio/_container_registry_management_client.py
|
Python
|
mit
| 6,266 | 0.003671 |
#from apetools.commands.ifconfig import IfconfigCommand
#from apetools.commands.iwconfig import Iwconfig
from apetools.commons.enumerations import OperatingSystem
from apetools.devices.basedevice import BaseDevice
from apetools.commons.errors import ConfigurationError
class IosDevice(BaseDevice):
"""
A class to query iOS devices (pretty much nothing is implemented on the iPad).
* this is mostly a dummy to hold settings
"""
def __init__(self, *args, **kwargs):
"""
:param:
- `connection`: a connection to the device
- `interface`: the name of the test interface (to get the ip address)
"""
super(IosDevice, self).__init__(*args, **kwargs)
return
@property
def address(self):
"""
:return: the address of the device
:raise: ConfigurationError if not set by user
"""
if self._address is None:
raise ConfigurationError("'test_address' must be set in config for IOS devices")
return self._address
@property
def mac_address(self):
"""
Not implemented
:return: the MAC address of the device
"""
self.logger.warning('mac address query not implemented')
return 'NA'
@property
def bssid(self):
"""
Not implemented
"""
self.logger.warning('bssid query not implemented')
return 'NA'
@property
def ssid(self):
"""
Not implemented
"""
self.logger.warning('ssid query not implemented')
return 'NA'
@property
def noise(self):
"""
Not Implemented
"""
self.logger.warning('noise query not implemented')
return 'NA'
@property
def channel(self):
"""
Not implemented
"""
self.logger.warning('channel not implemented')
return "NA"
@property
def rssi(self):
"""
Not implemented
:return: rssi from the wifi_query
"""
self.logger.warning('rssi query not implemented')
return "NA"
@property
def bitrate(self):
"""
Not implemented
:return: NA
"""
self.logger.warning("bitrate query not implemented")
return "NA"
def disable_wifi(self):
"""
Not implemented
"""
self.logger.warning('disable wifi not implemented')
return
def enable_wifi(self):
"""
Not implemented
"""
self.logger.warning('enable wifi not implemented')
return
def log(self, message):
"""
Sends the message to the syslog (Not implemented)
:param:
- `message`: a string to send to the syslog
:postcondition: message sent to the syslog
"""
# This uses the call interface because the connection has its own logger property
self.logger.warning('log not implemented')
return
# end IosDevice
|
rsnakamura/oldape
|
apetools/devices/iosdevice.py
|
Python
|
apache-2.0
| 3,060 | 0.003922 |
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function
import errno
import logging
import os
import pickle
import shutil
import sys
import tempfile
import textwrap
import six
from ipalib.install import certmonger, sysrestore
from ipapython import ipautil
from ipapython.ipautil import (
format_netloc, ipa_generate_password, run, user_input)
from ipapython.admintool import ScriptError
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipalib import api, errors, x509
from ipalib.constants import DOMAIN_LEVEL_0
from ipalib.util import (
validate_domain_name,
no_matching_interface_for_ip_address_warning,
)
import ipaclient.install.ntpconf
from ipaserver.install import (
adtrust, bindinstance, ca, dns, dsinstance,
httpinstance, installutils, kra, krbinstance,
ntpinstance, otpdinstance, custodiainstance, replication, service,
sysupgrade)
from ipaserver.install.installutils import (
IPA_MODULES, BadHostError, get_fqdn, get_server_ip_address,
is_ipa_configured, load_pkcs12, read_password, verify_fqdn,
update_hosts_file)
if six.PY3:
unicode = str
try:
from ipaserver.install import adtrustinstance
_server_trust_ad_installed = True
except ImportError:
_server_trust_ad_installed = False
NoneType = type(None)
logger = logging.getLogger(__name__)
SYSRESTORE_DIR_PATH = paths.SYSRESTORE
def validate_dm_password(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
if any(ord(c) < 0x20 for c in password):
raise ValueError("Password must not contain control characters")
if any(ord(c) >= 0x7F for c in password):
raise ValueError("Password must only contain ASCII characters")
# Disallow characters that pkisilent doesn't process properly:
bad_characters = '\\'
if any(c in bad_characters for c in password):
raise ValueError('Password must not contain these characters: %s' %
', '.join('"%s"' % c for c in bad_characters))
# TODO: Check https://fedorahosted.org/389/ticket/47849
# Actual behavior of setup-ds.pl is that it does not accept white
# space characters in password when called interactively but does when
# provided such password in INF file. But it ignores leading and trailing
# white spaces in INF file.
# Disallow leading/trailing whitespace
if password.strip() != password:
raise ValueError('Password must not start or end with whitespace.')
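# Examples (illustrative):
#   validate_dm_password('Secret123')    # passes
#   validate_dm_password('short7!')      # ValueError: too short
#   validate_dm_password('Secret\\123')  # ValueError: backslash disallowed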
def validate_admin_password(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
if any(ord(c) < 0x20 for c in password):
raise ValueError("Password must not contain control characters")
if any(ord(c) >= 0x7F for c in password):
raise ValueError("Password must only contain ASCII characters")
# Disallow characters that pkisilent doesn't process properly:
bad_characters = '\\'
if any(c in bad_characters for c in password):
raise ValueError('Password must not contain these characters: %s' %
', '.join('"%s"' % c for c in bad_characters))
def read_cache(dm_password):
"""
Returns a dict of cached answers or empty dict if no cache file exists.
"""
if not os.path.isfile(paths.ROOT_IPA_CACHE):
return {}
top_dir = tempfile.mkdtemp("ipa")
fname = "%s/cache" % top_dir
try:
installutils.decrypt_file(paths.ROOT_IPA_CACHE,
fname,
dm_password,
top_dir)
except Exception as e:
shutil.rmtree(top_dir)
raise Exception("Decryption of answer cache in %s failed, please "
"check your password." % paths.ROOT_IPA_CACHE)
try:
with open(fname, 'rb') as f:
try:
optdict = pickle.load(f)
except Exception as e:
raise Exception("Parse error in %s: %s" %
(paths.ROOT_IPA_CACHE, str(e)))
except IOError as e:
raise Exception("Read error in %s: %s" %
(paths.ROOT_IPA_CACHE, str(e)))
finally:
shutil.rmtree(top_dir)
    # These are the only options that may be overridden on the command line,
    # so drop them from the cached answers
try:
del optdict['external_cert_files']
except KeyError:
pass
return optdict
def write_cache(options):
"""
Takes a dict as input and writes a cached file of answers
"""
top_dir = tempfile.mkdtemp("ipa")
fname = "%s/cache" % top_dir
try:
with open(fname, 'wb') as f:
pickle.dump(options, f)
installutils.encrypt_file(fname,
paths.ROOT_IPA_CACHE,
options['dm_password'],
top_dir)
except IOError as e:
raise Exception("Unable to cache command-line options %s" % str(e))
finally:
shutil.rmtree(top_dir)
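# Hedged sketch of the cache round trip implemented by the two helpers above
# (assumes installutils.encrypt_file/decrypt_file are symmetric; 'opts' is
# hypothetical):
#
#   opts = {'dm_password': 'Secret123', 'external_ca': True}
#   write_cache(opts)                   # pickled + encrypted to paths.ROOT_IPA_CACHE
#   restored = read_cache('Secret123')  # decrypts; 'external_cert_files' is dropped
#   assert restored['external_ca'] is True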
def read_host_name(host_default, no_host_dns=False):
print("Enter the fully qualified domain name of the computer")
print("on which you're setting up server software. Using the form")
print("<hostname>.<domainname>")
print("Example: master.example.com.")
print("")
print("")
if host_default == "":
host_default = "master.example.com"
host_name = user_input("Server host name", host_default, allow_empty=False)
print("")
verify_fqdn(host_name, no_host_dns)
return host_name
def read_domain_name(domain_name, unattended):
print("The domain name has been determined based on the host name.")
print("")
if not unattended:
domain_name = str(user_input("Please confirm the domain name",
domain_name))
print("")
return domain_name
def read_realm_name(domain_name, unattended):
print("The kerberos protocol requires a Realm name to be defined.")
print("This is typically the domain name converted to uppercase.")
print("")
if unattended:
return domain_name.upper()
realm_name = str(user_input("Please provide a realm name",
domain_name.upper()))
upper_dom = realm_name.upper()
if upper_dom != realm_name:
print("An upper-case realm name is required.")
if not user_input("Do you want to use " + upper_dom +
" as realm name?", True):
raise ScriptError(
"An upper-case realm name is required. Unable to continue.")
else:
realm_name = upper_dom
print("")
return realm_name
def read_dm_password():
print("Certain directory server operations require an administrative user.")
print("This user is referred to as the Directory Manager and has full "
"access")
print("to the Directory for system management tasks and will be added to "
"the")
print("instance of directory server created for IPA.")
print("The password must be at least 8 characters long.")
print("")
# TODO: provide the option of generating a random password
dm_password = read_password("Directory Manager",
validator=validate_dm_password)
return dm_password
def read_admin_password():
print("The IPA server requires an administrative user, named 'admin'.")
print("This user is a regular system account used for IPA server "
"administration.")
print("")
# TODO: provide the option of generating a random password
admin_password = read_password("IPA admin",
validator=validate_admin_password)
return admin_password
def check_dirsrv(unattended):
(ds_unsecure, ds_secure) = dsinstance.check_ports()
if not ds_unsecure or not ds_secure:
msg = ("IPA requires ports 389 and 636 for the Directory Server.\n"
"These are currently in use:\n")
if not ds_unsecure:
msg += "\t389\n"
if not ds_secure:
msg += "\t636\n"
raise ScriptError(msg)
def common_cleanup(func):
def decorated(installer):
success = False
try:
func(installer)
success = True
except KeyboardInterrupt:
ds = installer._ds
print("\nCleaning up...")
if ds:
print("Removing configuration for %s instance" % ds.serverid)
ds.stop()
if ds.serverid:
try:
dsinstance.remove_ds_instance(ds.serverid)
except ipautil.CalledProcessError:
logger.error("Failed to remove DS instance. You "
"may need to remove instance data "
"manually")
raise ScriptError()
finally:
if not success and installer._installation_cleanup:
                # Do a cautious cleanup, as we don't know what failed or what
                # state the environment is in
try:
installer._fstore.restore_file(paths.HOSTS)
except Exception:
pass
return decorated
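# Hedged usage sketch: common_cleanup wraps installer steps so that a Ctrl-C
# removes any partially created DS instance and any failure restores
# /etc/hosts. 'my_step' is hypothetical; install_check/install below are the
# real users of the decorator.
#
#   @common_cleanup
#   def my_step(installer):
#       ...  # may raise KeyboardInterrupt or fail partway through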
def remove_master_from_managed_topology(api_instance, options):
try:
# we may force the removal
server_del_options = dict(
force=True,
ignore_topology_disconnect=options.ignore_topology_disconnect,
ignore_last_of_role=options.ignore_last_of_role
)
replication.run_server_del_as_cli(
api_instance, api_instance.env.host, **server_del_options)
except errors.ServerRemovalError as e:
raise ScriptError(str(e))
except Exception as e:
# if the master was already deleted we will just get a warning
logger.warning("Failed to delete master: %s", e)
@common_cleanup
def install_check(installer):
options = installer
dirsrv_pkcs12_file = installer._dirsrv_pkcs12_file
http_pkcs12_file = installer._http_pkcs12_file
pkinit_pkcs12_file = installer._pkinit_pkcs12_file
dirsrv_pkcs12_info = installer._dirsrv_pkcs12_info
http_pkcs12_info = installer._http_pkcs12_info
pkinit_pkcs12_info = installer._pkinit_pkcs12_info
external_cert_file = installer._external_cert_file
external_ca_file = installer._external_ca_file
http_ca_cert = installer._ca_cert
tasks.check_ipv6_stack_enabled()
tasks.check_selinux_status()
if options.master_password:
msg = ("WARNING:\noption '-P/--master-password' is deprecated. "
"KDC master password of sufficient strength is autogenerated "
"during IPA server installation and should not be set "
"manually.")
print(textwrap.fill(msg, width=79, replace_whitespace=False))
installer._installation_cleanup = True
print("\nThe log file for this installation can be found in "
"/var/log/ipaserver-install.log")
if (not options.external_ca and not options.external_cert_files and
is_ipa_configured()):
installer._installation_cleanup = False
raise ScriptError(
"IPA server is already configured on this system.\n"
"If you want to reinstall the IPA server, please uninstall "
"it first using 'ipa-server-install --uninstall'.")
client_fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
if client_fstore.has_files():
installer._installation_cleanup = False
raise ScriptError(
"IPA client is already configured on this system.\n"
"Please uninstall it before configuring the IPA server, "
"using 'ipa-client-install --uninstall'")
fstore = sysrestore.FileStore(SYSRESTORE_DIR_PATH)
sstore = sysrestore.StateFile(SYSRESTORE_DIR_PATH)
# This will override any settings passed in on the cmdline
if os.path.isfile(paths.ROOT_IPA_CACHE):
if options.dm_password is not None:
dm_password = options.dm_password
else:
dm_password = read_password("Directory Manager", confirm=False)
if dm_password is None:
raise ScriptError("Directory Manager password required")
try:
cache_vars = read_cache(dm_password)
options.__dict__.update(cache_vars)
if cache_vars.get('external_ca', False):
options.external_ca = False
options.interactive = False
except Exception as e:
raise ScriptError("Cannot process the cache file: %s" % str(e))
# We only set up the CA if the PKCS#12 options are not given.
if options.dirsrv_cert_files:
setup_ca = False
else:
setup_ca = True
options.setup_ca = setup_ca
if not setup_ca and options.ca_subject:
raise ScriptError(
"--ca-subject cannot be used with CA-less installation")
if not setup_ca and options.subject_base:
raise ScriptError(
"--subject-base cannot be used with CA-less installation")
if not setup_ca and options.setup_kra:
raise ScriptError(
"--setup-kra cannot be used with CA-less installation")
print("======================================="
"=======================================")
print("This program will set up the FreeIPA Server.")
print("")
print("This includes:")
if setup_ca:
print(" * Configure a stand-alone CA (dogtag) for certificate "
"management")
if not options.no_ntp:
print(" * Configure the Network Time Daemon (ntpd)")
print(" * Create and configure an instance of Directory Server")
print(" * Create and configure a Kerberos Key Distribution Center (KDC)")
print(" * Configure Apache (httpd)")
if options.setup_kra:
print(" * Configure KRA (dogtag) for secret management")
if options.setup_dns:
print(" * Configure DNS (bind)")
if options.setup_adtrust:
print(" * Configure Samba (smb) and winbind for managing AD trusts")
if not options.no_pkinit:
print(" * Configure the KDC to enable PKINIT")
if options.no_ntp:
print("")
print("Excluded by options:")
print(" * Configure the Network Time Daemon (ntpd)")
if installer.interactive:
print("")
print("To accept the default shown in brackets, press the Enter key.")
print("")
if not options.external_cert_files:
# Make sure the 389-ds ports are available
check_dirsrv(not installer.interactive)
if not options.no_ntp:
try:
ipaclient.install.ntpconf.check_timedate_services()
except ipaclient.install.ntpconf.NTPConflictingService as e:
print(("WARNING: conflicting time&date synchronization service '%s'"
" will be disabled" % e.conflicting_service))
print("in favor of ntpd")
print("")
except ipaclient.install.ntpconf.NTPConfigurationError:
pass
# Check to see if httpd is already configured to listen on 443
if httpinstance.httpd_443_configured():
raise ScriptError("Aborting installation")
if not options.setup_dns and installer.interactive:
if ipautil.user_input("Do you want to configure integrated DNS "
"(BIND)?", False):
options.setup_dns = True
print("")
# check bind packages are installed
if options.setup_dns:
# Don't require an external DNS to say who we are if we are
# setting up a local DNS server.
options.no_host_dns = True
    # Check that the hostname is correctly configured; it must be, since the
    # kldap utilities use the hostname as returned by getaddrinfo to set up
    # some of the standard entries
if options.host_name:
host_default = options.host_name
else:
host_default = get_fqdn()
try:
if not installer.interactive or options.host_name:
verify_fqdn(host_default, options.no_host_dns)
host_name = host_default
else:
host_name = read_host_name(host_default, options.no_host_dns)
except BadHostError as e:
raise ScriptError(e)
host_name = host_name.lower()
logger.debug("will use host_name: %s\n", host_name)
if not options.domain_name:
domain_name = read_domain_name(host_name[host_name.find(".")+1:],
not installer.interactive)
logger.debug("read domain_name: %s\n", domain_name)
try:
validate_domain_name(domain_name)
except ValueError as e:
raise ScriptError("Invalid domain name: %s" % unicode(e))
else:
domain_name = options.domain_name
domain_name = domain_name.lower()
if not options.realm_name:
realm_name = read_realm_name(domain_name, not installer.interactive)
logger.debug("read realm_name: %s\n", realm_name)
try:
validate_domain_name(realm_name, entity="realm")
except ValueError as e:
raise ScriptError("Invalid realm name: {}".format(unicode(e)))
else:
realm_name = options.realm_name.upper()
if not options.subject_base:
options.subject_base = installutils.default_subject_base(realm_name)
if not options.ca_subject:
options.ca_subject = \
installutils.default_ca_subject_dn(options.subject_base)
if options.http_cert_files:
if options.http_pin is None:
options.http_pin = installutils.read_password(
"Enter Apache Server private key unlock",
confirm=False, validate=False, retry=False)
if options.http_pin is None:
raise ScriptError(
"Apache Server private key unlock password required")
http_pkcs12_file, http_pin, http_ca_cert = load_pkcs12(
cert_files=options.http_cert_files,
key_password=options.http_pin,
key_nickname=options.http_cert_name,
ca_cert_files=options.ca_cert_files,
host_name=host_name)
http_pkcs12_info = (http_pkcs12_file.name, http_pin)
if options.dirsrv_cert_files:
if options.dirsrv_pin is None:
options.dirsrv_pin = read_password(
"Enter Directory Server private key unlock",
confirm=False, validate=False, retry=False)
if options.dirsrv_pin is None:
raise ScriptError(
"Directory Server private key unlock password required")
dirsrv_pkcs12_file, dirsrv_pin, dirsrv_ca_cert = load_pkcs12(
cert_files=options.dirsrv_cert_files,
key_password=options.dirsrv_pin,
key_nickname=options.dirsrv_cert_name,
ca_cert_files=options.ca_cert_files,
host_name=host_name)
dirsrv_pkcs12_info = (dirsrv_pkcs12_file.name, dirsrv_pin)
if options.pkinit_cert_files:
if options.pkinit_pin is None:
options.pkinit_pin = read_password(
"Enter Kerberos KDC private key unlock",
confirm=False, validate=False, retry=False)
if options.pkinit_pin is None:
raise ScriptError(
"Kerberos KDC private key unlock password required")
pkinit_pkcs12_file, pkinit_pin, pkinit_ca_cert = load_pkcs12(
cert_files=options.pkinit_cert_files,
key_password=options.pkinit_pin,
key_nickname=options.pkinit_cert_name,
ca_cert_files=options.ca_cert_files,
realm_name=realm_name)
pkinit_pkcs12_info = (pkinit_pkcs12_file.name, pkinit_pin)
if (options.http_cert_files and options.dirsrv_cert_files and
http_ca_cert != dirsrv_ca_cert):
raise ScriptError(
"Apache Server SSL certificate and Directory Server SSL "
"certificate are not signed by the same CA certificate")
if (options.http_cert_files and
options.pkinit_cert_files and
http_ca_cert != pkinit_ca_cert):
raise ScriptError(
"Apache Server SSL certificate and PKINIT KDC "
"certificate are not signed by the same CA certificate")
if not options.dm_password:
dm_password = read_dm_password()
if dm_password is None:
raise ScriptError("Directory Manager password required")
else:
dm_password = options.dm_password
if not options.master_password:
master_password = ipa_generate_password()
else:
master_password = options.master_password
if not options.admin_password:
admin_password = read_admin_password()
if admin_password is None:
raise ScriptError("IPA admin password required")
else:
admin_password = options.admin_password
# Configuration for ipalib, we will bootstrap and finalize later, after
# we are sure we have the configuration file ready.
cfg = dict(
context='installer',
confdir=paths.ETC_IPA,
in_server=True,
# make sure host name specified by user is used instead of default
host=host_name,
)
if setup_ca:
# we have an IPA-integrated CA
cfg['ca_host'] = host_name
# Create the management framework config file and finalize api
target_fname = paths.IPA_DEFAULT_CONF
fd = open(target_fname, "w")
fd.write("[global]\n")
fd.write("host=%s\n" % host_name)
fd.write("basedn=%s\n" % ipautil.realm_to_suffix(realm_name))
fd.write("realm=%s\n" % realm_name)
fd.write("domain=%s\n" % domain_name)
fd.write("xmlrpc_uri=https://%s/ipa/xml\n" % format_netloc(host_name))
fd.write("ldap_uri=ldapi://%%2fvar%%2frun%%2fslapd-%s.socket\n" %
installutils.realm_to_serverid(realm_name))
if setup_ca:
fd.write("enable_ra=True\n")
fd.write("ra_plugin=dogtag\n")
fd.write("dogtag_version=10\n")
else:
fd.write("enable_ra=False\n")
fd.write("ra_plugin=none\n")
fd.write("mode=production\n")
fd.close()
# Must be readable for everyone
os.chmod(target_fname, 0o644)
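    # Illustrative sketch of the resulting default.conf for a hypothetical
    # host master.example.com in realm EXAMPLE.COM with an integrated CA:
    #
    #   [global]
    #   host=master.example.com
    #   basedn=dc=example,dc=com
    #   realm=EXAMPLE.COM
    #   domain=example.com
    #   xmlrpc_uri=https://master.example.com/ipa/xml
    #   ldap_uri=ldapi://%2fvar%2frun%2fslapd-EXAMPLE-COM.socket
    #   enable_ra=True
    #   ra_plugin=dogtag
    #   dogtag_version=10
    #   mode=production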
api.bootstrap(**cfg)
api.finalize()
if setup_ca:
ca.install_check(False, None, options)
if options.setup_kra:
kra.install_check(api, None, options)
if options.setup_dns:
dns.install_check(False, api, False, options, host_name)
ip_addresses = dns.ip_addresses
else:
ip_addresses = get_server_ip_address(host_name,
not installer.interactive, False,
options.ip_addresses)
# check addresses here, dns module is doing own check
no_matching_interface_for_ip_address_warning(ip_addresses)
instance_name = "-".join(realm_name.split("."))
dirsrv = services.knownservices.dirsrv
if (options.external_cert_files
and dirsrv.is_installed(instance_name)
and not dirsrv.is_running(instance_name)):
logger.debug('Starting Directory Server')
services.knownservices.dirsrv.start(instance_name)
if options.setup_adtrust:
adtrust.install_check(False, options, api)
    # The installer needs to update the hosts file when the DNS subsystem is
    # being installed or custom addresses are used
if options.ip_addresses or options.setup_dns:
installer._update_hosts_file = True
print()
print("The IPA Master Server will be configured with:")
print("Hostname: %s" % host_name)
print("IP address(es): %s" % ", ".join(str(ip) for ip in ip_addresses))
print("Domain name: %s" % domain_name)
print("Realm name: %s" % realm_name)
print()
if setup_ca:
ca.print_ca_configuration(options)
print()
if options.setup_dns:
print("BIND DNS server will be configured to serve IPA domain with:")
print("Forwarders: %s" % (
"No forwarders" if not options.forwarders
else ", ".join([str(ip) for ip in options.forwarders])
))
print('Forward policy: %s' % options.forward_policy)
print("Reverse zone(s): %s" % (
"No reverse zone" if options.no_reverse or not dns.reverse_zones
else ", ".join(str(rz) for rz in dns.reverse_zones)
))
print()
if not options.setup_adtrust:
        # If the domain name and realm do not match, the IPA server will not
        # be able to establish trust with Active Directory. Print a big fat
        # warning.
realm_not_matching_domain = (domain_name.upper() != realm_name)
if realm_not_matching_domain:
print("WARNING: Realm name does not match the domain name.\n"
"You will not be able to establish trusts with Active "
"Directory unless\nthe realm name of the IPA server matches "
"its domain name.\n\n")
if installer.interactive and not user_input(
"Continue to configure the system with these values?", False):
raise ScriptError("Installation aborted")
options.realm_name = realm_name
options.domain_name = domain_name
options.dm_password = dm_password
options.master_password = master_password
options.admin_password = admin_password
options._host_name_overridden = bool(options.host_name)
options.host_name = host_name
options.ip_addresses = ip_addresses
installer._fstore = fstore
installer._sstore = sstore
installer._dirsrv_pkcs12_file = dirsrv_pkcs12_file
installer._http_pkcs12_file = http_pkcs12_file
installer._pkinit_pkcs12_file = pkinit_pkcs12_file
installer._dirsrv_pkcs12_info = dirsrv_pkcs12_info
installer._http_pkcs12_info = http_pkcs12_info
installer._pkinit_pkcs12_info = pkinit_pkcs12_info
installer._external_cert_file = external_cert_file
installer._external_ca_file = external_ca_file
installer._ca_cert = http_ca_cert
@common_cleanup
def install(installer):
options = installer
fstore = installer._fstore
sstore = installer._sstore
dirsrv_pkcs12_info = installer._dirsrv_pkcs12_info
http_pkcs12_info = installer._http_pkcs12_info
pkinit_pkcs12_info = installer._pkinit_pkcs12_info
http_ca_cert = installer._ca_cert
realm_name = options.realm_name
domain_name = options.domain_name
dm_password = options.dm_password
master_password = options.master_password
admin_password = options.admin_password
host_name = options.host_name
ip_addresses = options.ip_addresses
setup_ca = options.setup_ca
    # Installation has started. On failure, no IPA sysrestore items are
    # restored, to enable root cause investigation
installer._installation_cleanup = False
if installer.interactive:
print("")
print("The following operations may take some minutes to complete.")
print("Please wait until the prompt is returned.")
print("")
# set hostname (transient and static) if user instructed us to do so
if options._host_name_overridden:
tasks.backup_hostname(fstore, sstore)
tasks.set_hostname(host_name)
if installer._update_hosts_file:
update_hosts_file(ip_addresses, host_name, fstore)
# Create a directory server instance
if not options.external_cert_files:
# Configure ntpd
if not options.no_ntp:
ipaclient.install.ntpconf.force_ntpd(sstore)
ntp = ntpinstance.NTPInstance(fstore)
if not ntp.is_configured():
ntp.create_instance()
if options.dirsrv_cert_files:
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel,
config_ldif=options.dirsrv_config_file)
installer._ds = ds
ds.create_instance(realm_name, host_name, domain_name,
dm_password, dirsrv_pkcs12_info,
idstart=options.idstart, idmax=options.idmax,
subject_base=options.subject_base,
ca_subject=options.ca_subject,
hbac_allow=not options.no_hbac_allow,
setup_pkinit=not options.no_pkinit)
else:
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel,
config_ldif=options.dirsrv_config_file)
installer._ds = ds
ds.create_instance(realm_name, host_name, domain_name,
dm_password,
idstart=options.idstart, idmax=options.idmax,
subject_base=options.subject_base,
ca_subject=options.ca_subject,
hbac_allow=not options.no_hbac_allow,
setup_pkinit=not options.no_pkinit)
ntpinstance.ntp_ldap_enable(host_name, ds.suffix, realm_name)
else:
api.Backend.ldap2.connect()
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel)
installer._ds = ds
ds.init_info(
realm_name, host_name, domain_name, dm_password,
options.subject_base, options.ca_subject, 1101, 1100, None,
setup_pkinit=not options.no_pkinit)
krb = krbinstance.KrbInstance(fstore)
if not options.external_cert_files:
krb.create_instance(realm_name, host_name, domain_name,
dm_password, master_password,
setup_pkinit=not options.no_pkinit,
pkcs12_info=pkinit_pkcs12_info,
subject_base=options.subject_base)
else:
krb.init_info(realm_name, host_name,
setup_pkinit=not options.no_pkinit,
subject_base=options.subject_base)
if setup_ca:
if not options.external_cert_files and options.external_ca:
# stage 1 of external CA installation
options.realm_name = realm_name
options.domain_name = domain_name
options.master_password = master_password
options.dm_password = dm_password
options.admin_password = admin_password
options.host_name = host_name
options.reverse_zones = dns.reverse_zones
cache_vars = {n: options.__dict__[n] for o, n in installer.knobs()
if n in options.__dict__}
write_cache(cache_vars)
ca.install_step_0(False, None, options)
else:
# Put the CA cert where other instances expect it
x509.write_certificate(http_ca_cert, paths.IPA_CA_CRT)
os.chmod(paths.IPA_CA_CRT, 0o444)
if not options.no_pkinit:
x509.write_certificate(http_ca_cert, paths.KDC_CA_BUNDLE_PEM)
else:
with open(paths.KDC_CA_BUNDLE_PEM, 'w'):
pass
os.chmod(paths.KDC_CA_BUNDLE_PEM, 0o444)
x509.write_certificate(http_ca_cert, paths.CA_BUNDLE_PEM)
os.chmod(paths.CA_BUNDLE_PEM, 0o444)
# we now need to enable ssl on the ds
ds.enable_ssl()
if setup_ca:
ca.install_step_1(False, None, options)
otpd = otpdinstance.OtpdInstance()
otpd.create_instance('OTPD', host_name,
ipautil.realm_to_suffix(realm_name))
custodia = custodiainstance.CustodiaInstance(host_name, realm_name)
custodia.create_instance()
# Create a HTTP instance
http = httpinstance.HTTPInstance(fstore)
if options.http_cert_files:
http.create_instance(
realm_name, host_name, domain_name, dm_password,
pkcs12_info=http_pkcs12_info, subject_base=options.subject_base,
auto_redirect=not options.no_ui_redirect,
ca_is_configured=setup_ca)
else:
http.create_instance(
realm_name, host_name, domain_name, dm_password,
subject_base=options.subject_base,
auto_redirect=not options.no_ui_redirect,
ca_is_configured=setup_ca)
tasks.restore_context(paths.CACHE_IPA_SESSIONS)
ca.set_subject_base_in_config(options.subject_base)
# configure PKINIT now that all required services are in place
krb.enable_ssl()
# Apply any LDAP updates. Needs to be done after the configuration file
# is created. DS is restarted in the process.
service.print_msg("Applying LDAP updates")
ds.apply_updates()
# Restart krb after configurations have been changed
service.print_msg("Restarting the KDC")
krb.restart()
if options.setup_kra:
kra.install(api, None, options)
if options.setup_dns:
dns.install(False, False, options)
else:
# Create a BIND instance
bind = bindinstance.BindInstance(fstore)
bind.setup(host_name, ip_addresses, realm_name,
domain_name, (), 'first', (),
zonemgr=options.zonemgr,
no_dnssec_validation=options.no_dnssec_validation)
bind.create_file_with_system_records()
if options.setup_adtrust:
adtrust.install(False, options, fstore, api)
# Set the admin user kerberos password
ds.change_admin_password(admin_password)
# Call client install script
service.print_msg("Configuring client side components")
try:
args = [paths.IPA_CLIENT_INSTALL, "--on-master", "--unattended",
"--domain", domain_name, "--server", host_name,
"--realm", realm_name, "--hostname", host_name]
if options.no_dns_sshfp:
args.append("--no-dns-sshfp")
if options.ssh_trust_dns:
args.append("--ssh-trust-dns")
if options.no_ssh:
args.append("--no-ssh")
if options.no_sshd:
args.append("--no-sshd")
if options.mkhomedir:
args.append("--mkhomedir")
run(args, redirect_output=True)
print()
except Exception:
raise ScriptError("Configuration of client side components failed!")
# Everything installed properly, activate ipa service.
services.knownservices.ipa.enable()
print("======================================="
"=======================================")
print("Setup complete")
print("")
print("Next steps:")
print("\t1. You must make sure these network ports are open:")
print("\t\tTCP Ports:")
print("\t\t * 80, 443: HTTP/HTTPS")
print("\t\t * 389, 636: LDAP/LDAPS")
print("\t\t * 88, 464: kerberos")
if options.setup_dns:
print("\t\t * 53: bind")
print("\t\tUDP Ports:")
print("\t\t * 88, 464: kerberos")
if options.setup_dns:
print("\t\t * 53: bind")
if not options.no_ntp:
print("\t\t * 123: ntp")
print("")
print("\t2. You can now obtain a kerberos ticket using the command: "
"'kinit admin'")
print("\t This ticket will allow you to use the IPA tools (e.g., ipa "
"user-add)")
print("\t and the web user interface.")
if not services.knownservices.ntpd.is_running():
print("\t3. Kerberos requires time synchronization between clients")
print("\t and servers for correct operation. You should consider "
"enabling ntpd.")
print("")
if setup_ca:
print(("Be sure to back up the CA certificates stored in " +
paths.CACERT_P12))
print("These files are required to create replicas. The password for "
"these")
print("files is the Directory Manager password")
if os.path.isfile(paths.ROOT_IPA_CACHE):
os.remove(paths.ROOT_IPA_CACHE)
@common_cleanup
def uninstall_check(installer):
options = installer
tasks.check_selinux_status()
installer._installation_cleanup = False
if not is_ipa_configured():
print("WARNING:\nIPA server is not configured on this system. "
"If you want to install the\nIPA server, please install "
"it using 'ipa-server-install'.")
fstore = sysrestore.FileStore(SYSRESTORE_DIR_PATH)
sstore = sysrestore.StateFile(SYSRESTORE_DIR_PATH)
# Configuration for ipalib, we will bootstrap and finalize later, after
# we are sure we have the configuration file ready.
cfg = dict(
context='installer',
confdir=paths.ETC_IPA,
in_server=True,
)
# We will need at least api.env, finalize api now. This system is
# already installed, so the configuration file is there.
api.bootstrap(**cfg)
api.finalize()
if installer.interactive:
print("\nThis is a NON REVERSIBLE operation and will delete all data "
"and configuration!\nIt is highly recommended to take a backup of "
"existing data and configuration using ipa-backup utility "
"before proceeding.\n")
if not user_input("Are you sure you want to continue with the "
"uninstall procedure?", False):
raise ScriptError("Aborting uninstall operation.")
try:
api.Backend.ldap2.connect(autobind=True)
domain_level = dsinstance.get_domain_level(api)
except Exception:
msg = ("\nWARNING: Failed to connect to Directory Server to find "
"information about replication agreements. Uninstallation "
"will continue despite the possible existing replication "
"agreements.\n\n"
"If this server is the last instance of CA, KRA, or DNSSEC "
"master, uninstallation may result in data loss.\n\n"
)
print(textwrap.fill(msg, width=80, replace_whitespace=False))
if (installer.interactive and not user_input(
"Are you sure you want to continue with the uninstall "
"procedure?", False)):
raise ScriptError("Aborting uninstall operation.")
else:
dns.uninstall_check(options)
if domain_level == DOMAIN_LEVEL_0:
rm = replication.ReplicationManager(
realm=api.env.realm,
hostname=api.env.host,
dirman_passwd=None,
conn=api.Backend.ldap2
)
agreements = rm.find_ipa_replication_agreements()
if agreements:
other_masters = [a.get('cn')[0][4:] for a in agreements]
msg = (
"\nReplication agreements with the following IPA masters "
"found: %s. Removing any replication agreements before "
"uninstalling the server is strongly recommended. You can "
"remove replication agreements by running the following "
"command on any other IPA master:\n" % ", ".join(
other_masters)
)
cmd = "$ ipa-replica-manage del %s\n" % api.env.host
print(textwrap.fill(msg, width=80, replace_whitespace=False))
print(cmd)
if (installer.interactive and
not user_input("Are you sure you want to continue with"
" the uninstall procedure?", False)):
raise ScriptError("Aborting uninstall operation.")
else:
remove_master_from_managed_topology(api, options)
api.Backend.ldap2.disconnect()
installer._fstore = fstore
installer._sstore = sstore
@common_cleanup
def uninstall(installer):
fstore = installer._fstore
sstore = installer._sstore
rv = 0
print("Shutting down all IPA services")
try:
services.knownservices.ipa.stop()
except Exception:
# Fallback to direct ipactl stop only if system command fails
try:
run([paths.IPACTL, "stop"], raiseonerr=False)
except Exception:
pass
ntpinstance.NTPInstance(fstore).uninstall()
kra.uninstall()
ca.uninstall()
dns.uninstall()
httpinstance.HTTPInstance(fstore).uninstall()
krbinstance.KrbInstance(fstore).uninstall()
dsinstance.DsInstance(fstore=fstore).uninstall()
if _server_trust_ad_installed:
adtrustinstance.ADTRUSTInstance(fstore).uninstall()
custodiainstance.CustodiaInstance().uninstall()
otpdinstance.OtpdInstance().uninstall()
tasks.restore_hostname(fstore, sstore)
fstore.restore_all_files()
try:
os.remove(paths.ROOT_IPA_CACHE)
except Exception:
pass
try:
os.remove(paths.ROOT_IPA_CSR)
except Exception:
pass
# ipa-client-install removes /etc/ipa/default.conf
sstore._load()
ipaclient.install.ntpconf.restore_forced_ntpd(sstore)
# Clean up group_exists (unused since IPA 2.2, not being set since 4.1)
sstore.restore_state("install", "group_exists")
services.knownservices.ipa.disable()
# remove upgrade state file
sysupgrade.remove_upgrade_file()
if fstore.has_files():
logger.error('Some files have not been restored, see '
'%s/sysrestore.index', SYSRESTORE_DIR_PATH)
has_state = False
for module in IPA_MODULES: # from installutils
if sstore.has_state(module):
logger.error('Some installation state for %s has not been '
'restored, see %s/sysrestore.state',
module, SYSRESTORE_DIR_PATH)
has_state = True
rv = 1
if has_state:
logger.error('Some installation state has not been restored.\n'
'This may cause re-installation to fail.\n'
'It should be safe to remove %s/sysrestore.state '
'but it may\n'
                     'mean your system hasn\'t been restored to its '
'pre-installation state.', SYSRESTORE_DIR_PATH)
# Note that this name will be wrong after the first uninstall.
dirname = dsinstance.config_dirname(
installutils.realm_to_serverid(api.env.realm))
dirs = [dirname, paths.PKI_TOMCAT_ALIAS_DIR, paths.HTTPD_ALIAS_DIR]
ids = certmonger.check_state(dirs)
if ids:
logger.error('Some certificates may still be tracked by '
'certmonger.\n'
'This will cause re-installation to fail.\n'
'Start the certmonger service and list the '
'certificates being tracked\n'
' # getcert list\n'
'These may be untracked by executing\n'
' # getcert stop-tracking -i <request_id>\n'
'for each id in: %s', ', '.join(ids))
# Remove the cert renewal lock file
try:
os.remove(paths.IPA_RENEWAL_LOCK)
except OSError as e:
if e.errno != errno.ENOENT:
logger.warning("Failed to remove file %s: %s",
paths.IPA_RENEWAL_LOCK, e)
print("Removing IPA client configuration")
try:
result = run([paths.IPA_CLIENT_INSTALL, "--on-master",
"--unattended", "--uninstall"],
raiseonerr=False, redirect_output=True)
if result.returncode not in [0, 2]:
raise RuntimeError("Failed to configure the client")
except Exception:
rv = 1
print("Uninstall of client side components failed!")
sys.exit(rv)
def init(installer):
installer.unattended = not installer.interactive
installer.domainlevel = installer.domain_level
installer._installation_cleanup = True
installer._ds = None
installer._dirsrv_pkcs12_file = None
installer._http_pkcs12_file = None
installer._pkinit_pkcs12_file = None
installer._dirsrv_pkcs12_info = None
installer._http_pkcs12_info = None
installer._pkinit_pkcs12_info = None
installer._external_cert_file = None
installer._external_ca_file = None
installer._ca_cert = None
installer._update_hosts_file = False
|
apophys/freeipa
|
ipaserver/install/server/install.py
|
Python
|
gpl-3.0
| 44,536 | 0.000135 |
# Tai Sakuma <tai.sakuma@gmail.com>
import numbers
##__________________________________________________________________||
def cmp_obj_list_almost_equal(list1, list2, rtol=1e-05, atol=1e-08):
if not len(list1) == len(list2):
return False
for obj1, obj2 in zip(list1, list2):
if not cmp_obj_almost_equal(obj1, obj2, rtol=rtol, atol=atol):
return False
return True
def cmp_obj_almost_equal(obj1, obj2, rtol=1e-05, atol=1e-08):
# use the same formula as in
# https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.isclose.html
attrs1 = list(obj1._attrdict.keys())
attrs2 = list(obj2._attrdict.keys())
if not attrs1 == attrs2:
return False
for attr in attrs2:
v1 = getattr(obj1, attr)
v2 = getattr(obj2, attr)
if v1 == v2:
continue
        if isinstance(v2, numbers.Integral):
            # integral values must match exactly
            return False
        if isinstance(v2, numbers.Real):
            if abs(v1 - v2) > (atol + rtol * abs(v2)):
                return False
        else:
            # unequal non-numeric attributes cannot be almost equal
            return False
return True
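# Worked example of the numpy.isclose-style test above with the default
# tolerances rtol=1e-05 and atol=1e-08 (illustrative only): comparing
# v1=1.0 to v2=1.000001 gives |v1 - v2| = 1e-06, below the threshold
# atol + rtol*|v2| ~= 1.0e-05, so they compare almost equal; comparing
# 1.0 to 1.1 gives |v1 - v2| = 0.1, well above it, so they do not.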
##__________________________________________________________________||
|
TaiSakuma/scribblers
|
tests/compare_obj.py
|
Python
|
bsd-3-clause
| 1,134 | 0.003527 |
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GridFS is a specification for storing large objects in Mongo.
The :mod:`gridfs` package is an implementation of GridFS on top of
:mod:`pymongo`, exposing a file-like interface.
.. mongodoc:: gridfs
"""
from gridfs.errors import (NoFile,
UnsupportedAPI)
from gridfs.grid_file import (GridIn,
GridOut)
from pymongo import (ASCENDING,
DESCENDING)
from pymongo.database import Database
class GridFS(object):
"""An instance of GridFS on top of a single Database.
"""
def __init__(self, database, collection="fs"):
"""Create a new instance of :class:`GridFS`.
Raises :class:`TypeError` if `database` is not an instance of
:class:`~pymongo.database.Database`.
:Parameters:
- `database`: database to use
- `collection` (optional): root collection to use
.. versionadded:: 1.6
The `collection` parameter.
.. mongodoc:: gridfs
"""
if not isinstance(database, Database):
raise TypeError("database must be an instance of Database")
self.__database = database
self.__collection = database[collection]
self.__files = self.__collection.files
self.__chunks = self.__collection.chunks
connection = database.connection
if not hasattr(connection, 'is_primary') or connection.is_primary:
self.__chunks.ensure_index([("files_id", ASCENDING),
("n", ASCENDING)],
unique=True)
def new_file(self, **kwargs):
"""Create a new file in GridFS.
Returns a new :class:`~gridfs.grid_file.GridIn` instance to
which data can be written. Any keyword arguments will be
passed through to :meth:`~gridfs.grid_file.GridIn`.
If the ``"_id"`` of the file is manually specified, it must
not already exist in GridFS. Otherwise
:class:`~gridfs.errors.FileExists` is raised.
:Parameters:
- `**kwargs` (optional): keyword arguments for file creation
.. versionadded:: 1.6
"""
return GridIn(self.__collection, **kwargs)
def put(self, data, **kwargs):
"""Put data in GridFS as a new file.
Equivalent to doing::
try:
f = new_file(**kwargs)
f.write(data)
          finally:
f.close()
`data` can be either an instance of :class:`str` (:class:`bytes`
in python 3) or a file-like object providing a :meth:`read` method.
If an `encoding` keyword argument is passed, `data` can also be a
:class:`unicode` (:class:`str` in python 3) instance, which will
be encoded as `encoding` before being written. Any keyword arguments
will be passed through to the created file - see
:meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the
``"_id"`` of the created file.
If the ``"_id"`` of the file is manually specified, it must
not already exist in GridFS. Otherwise
:class:`~gridfs.errors.FileExists` is raised.
:Parameters:
- `data`: data to be written as a file.
- `**kwargs` (optional): keyword arguments for file creation
.. versionadded:: 1.9
The ability to write :class:`unicode`, if an `encoding` has
been specified as a keyword argument.
.. versionadded:: 1.6
"""
grid_file = GridIn(self.__collection, **kwargs)
# Start a request - necessary if w=0, harmless otherwise
request = self.__collection.database.connection.start_request()
try:
try:
grid_file.write(data)
finally:
grid_file.close()
finally:
# Ensure request is ended even if close() throws error
request.end()
return grid_file._id
def get(self, file_id):
"""Get a file from GridFS by ``"_id"``.
Returns an instance of :class:`~gridfs.grid_file.GridOut`,
which provides a file-like interface for reading.
:Parameters:
- `file_id`: ``"_id"`` of the file to get
.. versionadded:: 1.6
"""
return GridOut(self.__collection, file_id)
def get_version(self, filename=None, version=-1, **kwargs):
"""Get a file from GridFS by ``"filename"`` or metadata fields.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
An index on ``{filename: 1, uploadDate: -1}`` will
automatically be created when this method is called the first
time.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
- `**kwargs` (optional): find files by custom metadata.
.. versionchanged:: 1.11
`filename` defaults to None;
.. versionadded:: 1.11
Accept keyword arguments to find files by custom metadata.
.. versionadded:: 1.9
"""
connection = self.__database.connection
if not hasattr(connection, 'is_primary') or connection.is_primary:
self.__files.ensure_index([("filename", ASCENDING),
("uploadDate", DESCENDING)])
query = kwargs
if filename is not None:
query["filename"] = filename
cursor = self.__files.find(query)
if version < 0:
skip = abs(version) - 1
cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING)
else:
cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING)
try:
grid_file = cursor.next()
return GridOut(self.__collection, file_document=grid_file)
except StopIteration:
raise NoFile("no version %d for filename %r" % (version, filename))
def get_last_version(self, filename=None, **kwargs):
"""Get the most recent version of a file in GridFS by ``"filename"``
or metadata fields.
Equivalent to calling :meth:`get_version` with the default
`version` (``-1``).
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `**kwargs` (optional): find files by custom metadata.
.. versionchanged:: 1.11
`filename` defaults to None;
.. versionadded:: 1.11
Accept keyword arguments to find files by custom metadata. See
:meth:`get_version`.
.. versionadded:: 1.6
"""
return self.get_version(filename=filename, **kwargs)
# TODO add optional safe mode for chunk removal?
def delete(self, file_id):
"""Delete a file from GridFS by ``"_id"``.
Removes all data belonging to the file with ``"_id"``:
`file_id`.
.. warning:: Any processes/threads reading from the file while
this method is executing will likely see an invalid/corrupt
file. Care should be taken to avoid concurrent reads to a file
while it is being deleted.
.. note:: Deletes of non-existent files are considered successful
since the end result is the same: no file with that _id remains.
:Parameters:
- `file_id`: ``"_id"`` of the file to delete
.. versionadded:: 1.6
"""
self.__files.remove({"_id": file_id},
**self.__files._get_wc_override())
self.__chunks.remove({"files_id": file_id})
def list(self):
"""List the names of all files stored in this instance of
:class:`GridFS`.
.. versionchanged:: 1.6
Removed the `collection` argument.
"""
return self.__files.distinct("filename")
def exists(self, document_or_id=None, **kwargs):
"""Check if a file exists in this instance of :class:`GridFS`.
The file to check for can be specified by the value of its
``_id`` key, or by passing in a query document. A query
document can be passed in as dictionary, or by using keyword
arguments. Thus, the following three calls are equivalent:
>>> fs.exists(file_id)
>>> fs.exists({"_id": file_id})
>>> fs.exists(_id=file_id)
As are the following two calls:
>>> fs.exists({"filename": "mike.txt"})
>>> fs.exists(filename="mike.txt")
And the following two:
>>> fs.exists({"foo": {"$gt": 12}})
>>> fs.exists(foo={"$gt": 12})
Returns ``True`` if a matching file exists, ``False``
otherwise. Calls to :meth:`exists` will not automatically
create appropriate indexes; application developers should be
sure to create indexes if needed and as appropriate.
:Parameters:
- `document_or_id` (optional): query document, or _id of the
document to check for
- `**kwargs` (optional): keyword arguments are used as a
query document, if they're present.
.. versionadded:: 1.8
"""
if kwargs:
return self.__files.find_one(kwargs, ["_id"]) is not None
return self.__files.find_one(document_or_id, ["_id"]) is not None
def open(self, *args, **kwargs):
"""No longer supported.
.. versionchanged:: 1.6
The open method is no longer supported.
"""
raise UnsupportedAPI("The open method is no longer supported.")
def remove(self, *args, **kwargs):
"""No longer supported.
.. versionchanged:: 1.6
The remove method is no longer supported.
"""
raise UnsupportedAPI("The remove method is no longer supported. "
"Please use the delete method instead.")
|
edisonlz/fruit
|
web_project/base/site-packages/gridfs/__init__.py
|
Python
|
apache-2.0
| 11,413 | 0 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.template.defaultfilters import floatformat # noqa
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from horizon import views
from horizon import exceptions
from horizon.utils import csvbase
from oslo_log import log as logging
from openstack_dashboard import api
from openstack_dashboard import usage
LOG = logging.getLogger(__name__)
class GlobalUsageCsvRenderer(csvbase.BaseCsvResponse):
columns = [_("Project Name"), _("VCPUs"), _("RAM (MB)"),
_("Disk (GB)"), _("Usage (Hours)")]
def get_row_data(self):
for u in self.context['usage'].usage_list:
yield (u.project_name or u.tenant_id,
u.vcpus,
u.memory_mb,
u.local_gb,
floatformat(u.vcpu_hours, 2))
class GlobalOverview(usage.UsageView):
table_class = usage.GlobalUsageTable
usage_class = usage.GlobalUsage
template_name = 'admin/overview/usage.html'
csv_response_class = GlobalUsageCsvRenderer
def get_context_data(self, **kwargs):
context = super(GlobalOverview, self).get_context_data(**kwargs)
# #context['monitoring'] = getattr(settings, 'EXTERNAL_MONITORING', [])
# LOG.info("usage =================== %s" % context['usage'].__dict__)
LOG.info("aaaaaaaaaaaaaaaaaaaaaaa =================== %s" % context)
return context
# def get_data(self):
# data = super(GlobalOverview, self).get_data()
# # Pre-fill project names
# try:
# projects, has_more = api.keystone.tenant_list(self.request)
# except Exception:
# projects = []
# exceptions.handle(self.request,
# _('Unable to retrieve project list.'))
# for instance in data:
# project = [t for t in projects if t.id == instance.tenant_id]
# # If we could not get the project name, show the tenant_id with
# # a 'Deleted' identifier instead.
# if project:
# instance.project_name = getattr(project[0], "name", None)
# else:
# deleted = _("Deleted")
# instance.project_name = translation.string_concat(
# instance.tenant_id, " (", deleted, ")")
# return data
|
xuweiliang/Codelibrary
|
openstack_dashboard/dashboards/admin/overview/views.py
|
Python
|
apache-2.0
| 3,126 | 0.00032 |
import os
from unittest import TestCase
from cookiejar.client import CookiejarClient
class ClientTests(TestCase):
maxDiff = None
def test_pagination(self):
index = os.path.join((os.path.dirname(os.path.abspath(__file__))), 'index.1.json')
client = CookiejarClient(index=index)
expected = [
{
u'id': 1,
u'name': u'audreyr/pypackage',
u'url': u'https://github.com/audreyr/cookiecutter-pypackage/archive/fe165c5242cc889db0c58476abde905cecf14dfa.zip',
u'version': u'0.0.1',
u'author': u'Audrey Roy',
u'description': u'Cookiecutter template for a Python package.',
u'checksum': "md5$a79cc0ef3897d14eeb3b5be6a37a5ff8",
u'user': u'audreyr',
},
{
u'id': 2,
u'name': u'sloria/flask',
u'url': u'https://github.com/sloria/cookiecutter-flask/archive/97e835461d31c00e9f16ac79ef3af9aeb13ae84a.zip',
u'version': u'0.0.1',
u'author': u'Steven Loria',
u'description': u'A flask template with Twitter Bootstrap 3, starter templates, and basic registration/authentication.',
u'checksum': "md5$72aa94d5768756231c66d8ce03ca51cc",
u'user': u'sloria',
},
{
u'id': 3,
u'name': u'pydanny/django',
u'url': u'https://github.com/pydanny/cookiecutter-django/archive/172036f8f34b82c29bdc0bb3f31f5b703d0ce8f8.zip',
u'version': u'0.0.1',
u'author': u'Daniel Greenfeld',
u'description': u'A cookiecutter template for creating reusable Django projects quickly.',
u'checksum': "md5$874ce3c00faabde6a11fb3c9d3909649",
u'user': u'pydanny',
}
]
results = client.filter()
res = list(results)
self.assertEqual(len(res), len(expected))
self.assertEqual(res, expected)
def test_get(self):
index = os.path.join((os.path.dirname(os.path.abspath(__file__))), 'index.1.json')
client = CookiejarClient(index=index)
expected = {
u'id': 2,
u'name': u'sloria/flask',
u'url': u'https://github.com/sloria/cookiecutter-flask/archive/97e835461d31c00e9f16ac79ef3af9aeb13ae84a.zip',
u'version': u'0.0.1',
u'author': u'Steven Loria',
u'description': u'A flask template with Twitter Bootstrap 3, starter templates, and basic registration/authentication.',
u'checksum': "md5$72aa94d5768756231c66d8ce03ca51cc",
u'user': u'sloria',
}
client.fetch()
result = client.get('sloria/flask')
self.assertEqual(result, expected)
self.assertRaises(RuntimeError, client.get, 'unexisting_tmeplate')
|
fcurella/cookiejar
|
tests/test_client.py
|
Python
|
mit
| 2,942 | 0.004079 |
'''
Copyright (c) 2015, Salesforce.com, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#-- Load libraries
import datetime
import imp
import os, sys
import getpass
import json
import pytz
import logging
import argparse
from uuid import uuid4
from apscheduler.scheduler import Scheduler
from alerts import Alert
from Empire.creds import CredentialManager
#-- Load configuration
#-- This method may change in the future
import config
#-- remember command line settings
import settings
def set_global_config():
configuration = config.Configuration('config.json')
config.providence_configuration = configuration
return configuration
def set_logging_from_configuration(configuration):
#-- Setup Logging
logging.basicConfig(filename=configuration.get(('logging','filename')),
filemode='w',
level=logging.DEBUG if configuration.get(('logging','loglevel')) == 'debug' else logging.INFO,
format=configuration.get(('logging','stringfmt')),
datefmt=configuration.get(('logging','datefmt')))
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter(configuration.get(('logging','formatter')))
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
if __name__ == "__main__":
configuration = set_global_config()
set_logging_from_configuration(configuration)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description='Providence Monitor Framework')
parser.add_argument('--tests','-t', help="run plugin tests", action='store_true')
parser.add_argument('--mode', help="specify production for production mode, or anything otherwise. Database will be reset if not in production, and providence will start from the current commit")
parser.add_argument('--p4change', help="specify the p4 change number to debug. Must not be in production mode")
parser.add_argument('--timestamp', help="timestamp in PST to pull commits from, in the format YYYY-MM-DD HH:MM:SS")
args = parser.parse_args()
settings.init(args.mode, args.p4change)
#-- Basic Credentials setup
credentials_file = configuration.get('credentials_file')
credential_key = os.environ.get('CREDENTIAL_KEY')
if credential_key is None:
credential_key = getpass.getpass('Credential Key:')
credential_manager = CredentialManager(credentials_file, credential_key)
config.credential_manager = credential_manager
from models import Base
from db import engine
#-- reset db if not in production or timestamp specified
if not settings.in_production() or args.timestamp:
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
from repos import repotracker
from plugins import RepoWatcher, Plugins
from plugins.base import PluginTestError
#-- Load plugins
loaded_plugins = Plugins(configuration)
if args.tests:
print "======================= Loading plugins ======================="
plugins = loaded_plugins.enabled_plugins()
print "======================= Running Plugin Tests ======================="
for plugin_area in plugins:
for plugin in plugins[plugin_area]:
print "Running test for ", plugin.__module__
try:
plugin.test()
except PluginTestError, pte:
print "Test Failed for ", plugin.__module__
print pte.message
sys.exit(1)
print "======================= Tests Successful ======================="
sys.exit(0)
def run_watchers(startTime=None):
# run watcher plugins
logger.info("Running watchers")
plugins = loaded_plugins.enabled_plugins()
repositories = loaded_plugins.get_repositories(plugins["repositories"])
watchers = loaded_plugins.get_watchers(plugins["watchers"])
tracker = repotracker.RepoTracker();
for repository_name, repository_data in repositories.items():
repository_watchers_by_path = watchers.get(repository_name)
logger.info("In repo %s", repository_name)
if repository_watchers_by_path is None:
continue
for repository_path, repo_watchers in repository_watchers_by_path.items():
repository_db_identifier = repository_name
if repository_path is not None:
repository_db_identifier += "::" + repository_path
def commit_started_callback(repo_commit):
if repo_watchers:
for repo_watcher in repo_watchers:
plugin = repo_watcher.plugin
if hasattr(plugin, 'commit_started'):
plugin.commit_started(repository_name, repo_commit)
def commit_finished_callback(repo_commit):
if repo_watchers:
for repo_watcher in repo_watchers:
plugin = repo_watcher.plugin
if hasattr(plugin, 'commit_finished'):
plugin.commit_finished(repository_name, repo_commit)
if repo_commit.identifier:
new_identifier = repo_commit.identifier
tracker.update_identifier(repository_db_identifier, new_identifier)
def patch_callback(repo_patch):
if repo_watchers:
for repo_watcher in repo_watchers:
plugin = repo_watcher.plugin
if hasattr(plugin, 'patch'):
plugin.patch(repository_name, repo_patch)
last_run_completed = tracker.last_run_completed(repository_db_identifier)
if repository_data.get("check-every-x-minutes"):
now = datetime.datetime.utcnow()
if last_run_completed:
if (now - last_run_completed) < datetime.timedelta(minutes=repository_data.get("check-every-x-minutes")):
pass;
try:
last_identifier = tracker.last_identifier(repository_db_identifier)
if not last_identifier and startTime:
last_identifier = startTime + " PST"
repository_data["source"].processSinceIdentifier(last_identifier,
commit_started_callback=commit_started_callback,
patch_callback=patch_callback,
commit_finished_callback=commit_finished_callback,
path=repository_path);
tracker.update_last_run_completed(repository_db_identifier, datetime.datetime.utcnow())
except Exception, e:
logger.exception("Repo Error: name=%s error=%s", repository_name, e, exc_info=True)
def run_hourly():
# run hourly plugins
hour = datetime.datetime.now().hour
logger.info("Running hourly")
plugins = loaded_plugins.enabled_plugins()
hourly_plugins = plugins.get("hourly")
for plugin in hourly_plugins:
try:
                plugin.run_hourly(hour)
except Exception, e:
logger.exception("Exception running hourly: %s" % (plugin))
def run_seven_minutes():
# run seven minute plugins
logger.info("Running 7 minutes")
plugins = loaded_plugins.enabled_plugins()
seven_minute_plugins = plugins.get("seven_minutes")
for plugin in seven_minute_plugins:
try:
plugin.run_seven_minutes()
except Exception, e:
logger.exception("Exception running 7 minutes: %s" % (plugin))
if args.timestamp:
run_watchers(args.timestamp)
else:
run_watchers()
# run_seven_minutes()
# run_hourly()
sched = Scheduler(standalone=True)
watcher_interval = "*/" + configuration.get(("cron", "watcher_interval"))
sched.add_cron_job(run_watchers, minute=watcher_interval);
# un-comment the following two lines if you'd like to use seven-minute or hourly plugins
# sched.add_cron_job(run_seven_minutes, minute="*/7");
# sched.add_cron_job(run_hourly, hour="*", minute="5");
try:
sched.start()
except (KeyboardInterrupt, SystemExit):
pass
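# Hedged sketch (illustrative, not shipped with Providence): the minimal
# shape of a repository watcher plugin that the hasattr() checks inside
# run_watchers() will dispatch to. 'ExampleWatcher' is hypothetical.
#
#   class ExampleWatcher(object):
#       def commit_started(self, repository_name, repo_commit):
#           pass  # called once per commit, before its patches are processed
#       def patch(self, repository_name, repo_patch):
#           pass  # called once per file patch within the commit
#       def commit_finished(self, repository_name, repo_commit):
#           pass  # called last; the tracker then records the new identifier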
|
SalesforceEng/Providence
|
providence.py
|
Python
|
bsd-3-clause
| 10,230 | 0.006158 |
#!/usr/bin/python
##--Michael duPont (flyinactor91.com)
##--Display increasing values on the seven-segment display
from Adafruit_7Segment import SevenSegment
import time
segment = SevenSegment(address=0x70)
num = 0
rest = float(raw_input('Step: '))
while True:
segment.setColon((num / 10000)%2)
segment.writeDigit(0 , (num / 1000)%10)
segment.writeDigit(1 , (num / 100)%10)
segment.writeDigit(3 , (num / 10)%10)
segment.writeDigit(4 , num % 10)
num += 1
time.sleep(rest)
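# Worked example of the digit-slicing arithmetic above (Python 2 integer
# division): for num = 1234 the four writes resolve to
#   (1234 / 1000) % 10 = 1, (1234 / 100) % 10 = 2,
#   (1234 / 10) % 10 = 3, and 1234 % 10 = 4,
# so the display reads "1234"; the colon toggles every 10000 counts via
# (num / 10000) % 2.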
|
flyinactor91/Raspi-Hardware
|
7Segment/Counter.py
|
Python
|
mit
| 481 | 0.035343 |
import subprocess
import pandas as pd
import tempfile
import os
__all__ = ['runRscript']
def runRscript(Rcmd, inDf=None, outputFiles=0, removeTempFiles=None):
"""Runs an R cmd with option to provide a DataFrame as input and file
as output.
Params
------
Rcmd : str
String containing the R-script to run.
    inDf : pd.DataFrame or list of pd.DataFrame
        Data to be passed to the R script via a CSV file. Referenced in the
        script as "INPUTDF", or as "INPUTDF0", "INPUTDF1", ... when a list
        is given.
    outputFiles : int
        Number of output CSV files available for writing by the R-script.
        The contents of each file are returned as a pd.DataFrame.
        File names should be referenced as "OUTPUTFN0", "OUTPUTFN1", etc. in the R-script
removeTempFiles : True, False or None
For debugging. If True then the temporary script and data files will
always be removed. If None then they will be removed if there is not an error.
If False they will not be removed.
Returns
-------
stdout : str
Output of the R-script at the terminal (including stderr)
output : pd.DataFrame or list of pd.DataFrames
Optionally, the contents of CSV file(s) written by the R-script as a pd.DataFrame"""
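    # Minimal usage sketch (myDf is a hypothetical DataFrame; see _test_io
    # below for a complete example):
    #   stdout, df = runRscript('write.csv(head(INPUTDF), OUTPUTFN0)',
    #                           inDf=myDf, outputFiles=1)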
"""Write data to a tempfile if required"""
    if inDf is not None:
        if not isinstance(inDf, list):
inputH, inputFn = tempfile.mkstemp(suffix='.csv', prefix='tmp-Rinput-', text=True)
readCmd = 'INPUTDF <- read.csv("%s")\n' % inputFn
Rcmd = readCmd + Rcmd
os.close(inputH)
inDf.to_csv(inputFn)
else:
inputFilenames = []
for i, idf in enumerate(inDf):
inputH, inputFn = tempfile.mkstemp(suffix='.csv', prefix='tmp-Rinput%d-' % i, text=True)
readCmd = 'INPUTDF%d <- read.csv("%s")\n' % (i, inputFn)
Rcmd = readCmd + Rcmd
os.close(inputH)
idf.to_csv(inputFn)
inputFilenames.append(inputFn)
"""Set up an output file if required"""
outFn = []
for outi in range(outputFiles):
outputH, outputFn = tempfile.mkstemp(suffix='.txt', prefix='tmp-Routput-', text=True)
outCmd = 'OUTPUTFN%d <- "%s"\n' % (outi, outputFn)
Rcmd = outCmd + Rcmd
outFn.append(outputFn)
os.close(outputH)
"""Write script to tempfile"""
scriptH, scriptFn = tempfile.mkstemp(suffix='.R', prefix='tmp-Rscript-', text=True)
with open(scriptFn, 'w') as fh:
fh.write(Rcmd)
os.close(scriptH)
"""Run the R script and collect output"""
try:
cmdList = ['Rscript', '--vanilla', scriptFn]
res = subprocess.check_output(cmdList, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
        # stderr was redirected to stdout above, so e.output holds both streams
        res = bytes('STDOUT:\n%s' % e.output, 'utf-8')
print('R process returned an error')
if removeTempFiles is None:
print('Leaving tempfiles for debugging.')
print(' '.join(cmdList))
            if inDf is not None:
print(inputFn)
for outputFn in outFn:
print(outputFn)
removeTempFiles = False
"""Read the ouptfile if required"""
outDf = []
for outputFn in outFn:
try:
tmp = pd.read_csv(outputFn)
outDf.append(tmp)
        except Exception:
print('Cannot read output CSV: reading as text (%s)' % outputFn)
with open(outputFn, 'r') as fh:
tmp = fh.read()
if len(tmp) == 0:
print('Output file is empty! (%s)' % outputFn)
tmp = None
outDf.append(tmp)
# outDf = [pd.read_csv(outputFn) for outputFn in outFn]
if len(outDf) == 0:
outDf = None
elif len(outDf) == 1:
outDf = outDf[0]
"""Cleanup the temporary files"""
if removeTempFiles is None or removeTempFiles:
os.remove(scriptFn)
        if inDf is not None:
            if not isinstance(inDf, list):
os.remove(inputFn)
else:
for inputFn in inputFilenames:
os.remove(inputFn)
else:
print('Leaving tempfiles for debugging.')
print(' '.join(cmdList))
        if inDf is not None:
print(inputFn)
for outputFn in outFn:
print(outputFn)
if outputFiles == 0:
return res.decode('utf-8')
else:
return res.decode('utf-8'), outDf
def _test_simple():
Rcmd = """ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
lm.D90 <- lm(weight ~ group - 1) # omitting intercept
anova(lm.D9)
summary(lm.D90)"""
res = runRscript(Rcmd)
print(res)
def _test_io():
ctrl = [4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14]
trt = [4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69]
inDf = pd.DataFrame({'weight':ctrl + trt,
'group': ['Ctl']*len(ctrl) + ['Trt']*len(trt)})
Rcmd = """print(head(INPUTDF))
lm.D9 <- lm(weight ~ group, data=INPUTDF)
lm.D90 <- lm(weight ~ group - 1, data=INPUTDF) # omitting intercept
anova(lm.D9)
summary(lm.D90)
write.csv(data.frame(summary(lm.D90)$coefficients), OUTPUTFN0)
"""
res, outputFile = runRscript(Rcmd, inDf=inDf, outputFiles=1)
print(res)
print(outputFile)
|
agartland/utils
|
.ipynb_checkpoints/quickr-checkpoint.py
|
Python
|
mit
| 5,563 | 0.007011 |
import unittest
from google.appengine.api import mail
from google.appengine.ext import testbed
class MailTestCase(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_mail_stub()
self.mail_stub = self.testbed.get_stub(testbed.MAIL_SERVICE_NAME)
def tearDown(self):
self.testbed.deactivate()
def testMailSent(self):
mail.send_mail(to='alice@example.com',
subject='This is a test',
sender='bob@example.com',
body='This is a test e-mail')
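        # The mail stub records outgoing messages for inspection instead of
        # actually sending them.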
messages = self.mail_stub.get_sent_messages(to='alice@example.com')
self.assertEqual(1, len(messages))
self.assertEqual('alice@example.com', messages[0].to)
|
ioGrow/iogrowCRM
|
crm/tests/test_mail.py
|
Python
|
agpl-3.0
| 811 | 0 |
# -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the multi project management functionality.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QFileInfo, QFile, \
QIODevice, QObject
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QMenu, QApplication, QDialog, QToolBar
from Globals import recentNameMultiProject
from E5Gui.E5Action import E5Action, createActionGroup
from E5Gui import E5FileDialog, E5MessageBox
import UI.PixmapCache
import Preferences
import Utilities
class MultiProject(QObject):
"""
Class implementing the project management functionality.
@signal dirty(bool) emitted when the dirty state changes
@signal newMultiProject() emitted after a new multi project was generated
@signal multiProjectOpened() emitted after a multi project file was read
@signal multiProjectClosed() emitted after a multi project was closed
@signal multiProjectPropertiesChanged() emitted after the multi project
properties were changed
@signal showMenu(string, QMenu) emitted when a menu is about to be shown.
The name of the menu and a reference to the menu are given.
@signal projectDataChanged(project data dict) emitted after a project entry
has been changed
@signal projectAdded(project data dict) emitted after a project entry
has been added
@signal projectRemoved(project data dict) emitted after a project entry
has been removed
@signal projectOpened(filename) emitted after the project has been opened
"""
dirty = pyqtSignal(bool)
newMultiProject = pyqtSignal()
multiProjectOpened = pyqtSignal()
multiProjectClosed = pyqtSignal()
multiProjectPropertiesChanged = pyqtSignal()
showMenu = pyqtSignal(str, QMenu)
projectDataChanged = pyqtSignal(dict)
projectAdded = pyqtSignal(dict)
projectRemoved = pyqtSignal(dict)
projectOpened = pyqtSignal(str)
def __init__(self, project, parent=None, filename=None):
"""
Constructor
@param project reference to the project object (Project.Project)
@param parent parent widget (usually the ui object) (QWidget)
@param filename optional filename of a multi project file to open
(string)
"""
super(MultiProject, self).__init__(parent)
self.ui = parent
self.projectObject = project
self.__initData()
self.recent = []
self.__loadRecent()
if filename is not None:
self.openMultiProject(filename)
def __initData(self):
"""
Private method to initialize the multi project data part.
"""
self.loaded = False # flag for the loaded status
self.__dirty = False # dirty flag
self.pfile = "" # name of the multi project file
self.ppath = "" # name of the multi project directory
self.description = "" # description of the multi project
self.name = ""
self.opened = False
self.projects = []
# list of project info; each info entry is a dictionary
# 'name' : name of the project
# 'file' : project file name
# 'master' : flag indicating the master
# project
# 'description' : description of the project
# 'category' : name of the group
# 'uid' : unique identifier
self.categories = []
def __loadRecent(self):
"""
Private method to load the recently opened multi project filenames.
"""
self.recent = []
Preferences.Prefs.rsettings.sync()
rp = Preferences.Prefs.rsettings.value(recentNameMultiProject)
if rp is not None:
for f in rp:
if QFileInfo(f).exists():
self.recent.append(f)
def __saveRecent(self):
"""
Private method to save the list of recently opened filenames.
"""
Preferences.Prefs.rsettings.setValue(
recentNameMultiProject, self.recent)
Preferences.Prefs.rsettings.sync()
def getMostRecent(self):
"""
Public method to get the most recently opened multiproject.
@return path of the most recently opened multiproject (string)
"""
if len(self.recent):
return self.recent[0]
else:
return None
def setDirty(self, b):
"""
Public method to set the dirty state.
        It emits the signal dirty(bool).
@param b dirty state (boolean)
"""
self.__dirty = b
self.saveAct.setEnabled(b)
self.dirty.emit(bool(b))
def isDirty(self):
"""
Public method to return the dirty state.
@return dirty state (boolean)
"""
return self.__dirty
def isOpen(self):
"""
Public method to return the opened state.
@return open state (boolean)
"""
return self.opened
def getMultiProjectPath(self):
"""
Public method to get the multi project path.
@return multi project path (string)
"""
return self.ppath
def getMultiProjectFile(self):
"""
Public method to get the path of the multi project file.
@return path of the multi project file (string)
"""
return self.pfile
def __checkFilesExist(self):
"""
        Private method to check if the files in a list exist.
        The project files are checked for existence in the
        filesystem. Non-existent projects are removed from the list and the
        dirty state of the multi project is changed accordingly.
"""
removelist = []
for project in self.projects:
if not os.path.exists(project['file']):
removelist.append(project)
if removelist:
for project in removelist:
self.projects.remove(project)
self.setDirty(True)
def __extractCategories(self):
"""
Private slot to extract the categories used in the project definitions.
"""
for project in self.projects:
if project['category'] and \
project['category'] not in self.categories:
self.categories.append(project['category'])
def getCategories(self):
"""
Public method to get the list of defined categories.
@return list of categories (list of string)
"""
return [c for c in self.categories if c]
def __readMultiProject(self, fn):
"""
Private method to read in a multi project (.e4m, .e5m) file.
@param fn filename of the multi project file to be read (string)
@return flag indicating success
"""
f = QFile(fn)
if f.open(QIODevice.ReadOnly):
from E5XML.MultiProjectReader import MultiProjectReader
reader = MultiProjectReader(f, self)
reader.readXML()
f.close()
if reader.hasError():
return False
else:
QApplication.restoreOverrideCursor()
E5MessageBox.critical(
self.ui,
self.tr("Read multiproject file"),
self.tr(
"<p>The multiproject file <b>{0}</b> could not be"
" read.</p>").format(fn))
return False
self.pfile = os.path.abspath(fn)
self.ppath = os.path.abspath(os.path.dirname(fn))
self.__extractCategories()
# insert filename into list of recently opened multi projects
self.__syncRecent()
self.name = os.path.splitext(os.path.basename(fn))[0]
# check, if the files of the multi project still exist
self.__checkFilesExist()
return True
def __writeMultiProject(self, fn=None):
"""
        Private method to save the multi project data to a multi project file.
@param fn optional filename of the multi project file to be written.
If fn is None, the filename stored in the multi project object
is used. This is the 'save' action. If fn is given, this filename
is used instead of the one in the multi project object. This is the
'save as' action.
@return flag indicating success
"""
if fn is None:
fn = self.pfile
f = QFile(fn)
if f.open(QIODevice.WriteOnly):
from E5XML.MultiProjectWriter import MultiProjectWriter
MultiProjectWriter(
f, self, os.path.splitext(os.path.basename(fn))[0])\
.writeXML()
res = True
else:
E5MessageBox.critical(
self.ui,
self.tr("Save multiproject file"),
self.tr(
"<p>The multiproject file <b>{0}</b> could not be "
"written.</p>").format(fn))
res = False
if res:
self.pfile = os.path.abspath(fn)
self.ppath = os.path.abspath(os.path.dirname(fn))
self.name = os.path.splitext(os.path.basename(fn))[0]
self.setDirty(False)
# insert filename into list of recently opened projects
self.__syncRecent()
return res
@pyqtSlot()
def addProject(self, startdir=None):
"""
Public slot used to add files to the project.
@param startdir start directory for the selection dialog (string)
"""
from .AddProjectDialog import AddProjectDialog
if not startdir:
startdir = self.ppath
if not startdir:
startdir = Preferences.getMultiProject("Workspace")
dlg = AddProjectDialog(self.ui, startdir=startdir,
categories=self.categories)
if dlg.exec_() == QDialog.Accepted:
name, filename, isMaster, description, category, uid = \
dlg.getData()
# step 1: check, if project was already added
for project in self.projects:
if project['file'] == filename:
return
# step 2: check, if master should be changed
if isMaster:
for project in self.projects:
if project['master']:
project['master'] = False
self.projectDataChanged.emit(project)
self.setDirty(True)
break
# step 3: add the project entry
project = {
'name': name,
'file': filename,
'master': isMaster,
'description': description,
'category': category,
'uid': uid,
}
self.projects.append(project)
if category not in self.categories:
self.categories.append(category)
self.projectAdded.emit(project)
self.setDirty(True)
def changeProjectProperties(self, pro):
"""
Public method to change the data of a project entry.
@param pro dictionary with the project data (string)
"""
# step 1: check, if master should be changed
if pro['master']:
for project in self.projects:
if project['master']:
if project['uid'] != pro['uid']:
project['master'] = False
self.projectDataChanged.emit(project)
self.setDirty(True)
break
# step 2: change the entry
for project in self.projects:
if project['uid'] == pro['uid']:
# project UID is not changeable via interface
project['file'] = pro['file']
project['name'] = pro['name']
project['master'] = pro['master']
project['description'] = pro['description']
project['category'] = pro['category']
if project['category'] not in self.categories:
self.categories.append(project['category'])
self.projectDataChanged.emit(project)
self.setDirty(True)
def getProjects(self):
"""
Public method to get all project entries.
@return list of all project entries (list of dictionaries)
"""
return self.projects
def getProject(self, fn):
"""
Public method to get a reference to a project entry.
@param fn filename of the project (string)
@return dictionary containing the project data
"""
for project in self.projects:
if project['file'] == fn:
return project
return None
def removeProject(self, fn):
"""
Public slot to remove a project from the multi project.
@param fn filename of the project to be removed from the multi
project (string)
"""
for project in self.projects:
if project['file'] == fn:
self.projects.remove(project)
self.projectRemoved.emit(project)
self.setDirty(True)
break
def __newMultiProject(self):
"""
Private slot to build a new multi project.
This method displays the new multi project dialog and initializes
the multi project object with the data entered.
"""
if not self.checkDirty():
return
from .PropertiesDialog import PropertiesDialog
dlg = PropertiesDialog(self, True)
if dlg.exec_() == QDialog.Accepted:
self.closeMultiProject()
dlg.storeData()
self.opened = True
self.setDirty(True)
self.closeAct.setEnabled(True)
self.saveasAct.setEnabled(True)
self.addProjectAct.setEnabled(True)
self.propsAct.setEnabled(True)
self.newMultiProject.emit()
def __showProperties(self):
"""
Private slot to display the properties dialog.
"""
from .PropertiesDialog import PropertiesDialog
dlg = PropertiesDialog(self, False)
if dlg.exec_() == QDialog.Accepted:
dlg.storeData()
self.setDirty(True)
self.multiProjectPropertiesChanged.emit()
@pyqtSlot()
@pyqtSlot(str)
def openMultiProject(self, fn=None, openMaster=True):
"""
Public slot to open a multi project.
@param fn optional filename of the multi project file to be
read (string)
@param openMaster flag indicating, that the master project
should be opened depending on the configuration (boolean)
"""
if not self.checkDirty():
return
if fn is None:
fn = E5FileDialog.getOpenFileName(
self.parent(),
self.tr("Open multiproject"),
Preferences.getMultiProject("Workspace") or
Utilities.getHomeDir(),
self.tr("Multiproject Files (*.e5m *.e4m)"))
if fn == "":
fn = None
QApplication.processEvents()
if fn is not None:
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
self.closeMultiProject()
if self.__readMultiProject(fn):
self.opened = True
QApplication.restoreOverrideCursor()
QApplication.processEvents()
self.closeAct.setEnabled(True)
self.saveasAct.setEnabled(True)
self.addProjectAct.setEnabled(True)
self.propsAct.setEnabled(True)
self.multiProjectOpened.emit()
if openMaster and Preferences.getMultiProject(
"OpenMasterAutomatically"):
self.__openMasterProject(False)
else:
QApplication.restoreOverrideCursor()
def saveMultiProject(self):
"""
Public slot to save the current multi project.
@return flag indicating success (boolean)
"""
if self.isDirty():
if len(self.pfile) > 0:
if self.pfile.endswith(".e4m"):
self.pfile = self.pfile.replace(".e4m", ".e5m")
self.__syncRecent()
ok = self.__writeMultiProject()
else:
ok = self.saveMultiProjectAs()
else:
ok = True
return ok
def saveMultiProjectAs(self):
"""
Public slot to save the current multi project to a different file.
@return flag indicating success (boolean)
"""
defaultFilter = self.tr("Multiproject Files (*.e5m)")
if self.ppath:
defaultPath = self.ppath
else:
defaultPath = Preferences.getMultiProject("Workspace") or \
Utilities.getHomeDir()
fn, selectedFilter = E5FileDialog.getSaveFileNameAndFilter(
self.parent(),
self.tr("Save multiproject as"),
defaultPath,
self.tr("Multiproject Files (*.e5m)"),
defaultFilter,
E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))
if fn:
ext = QFileInfo(fn).suffix()
if not ext:
ex = selectedFilter.split("(*")[1].split(")")[0]
if ex:
fn += ex
if QFileInfo(fn).exists():
res = E5MessageBox.yesNo(
self.parent(),
self.tr("Save File"),
self.tr("<p>The file <b>{0}</b> already exists."
" Overwrite it?</p>").format(fn),
icon=E5MessageBox.Warning)
if not res:
return False
self.name = QFileInfo(fn).baseName()
self.__writeMultiProject(fn)
self.multiProjectClosed.emit()
self.multiProjectOpened.emit()
return True
else:
return False
def checkDirty(self):
"""
Public method to check the dirty status and open a message window.
@return flag indicating whether this operation was successful (boolean)
"""
if self.isDirty():
res = E5MessageBox.okToClearData(
self.parent(),
self.tr("Close Multiproject"),
self.tr("The current multiproject has unsaved changes."),
self.saveMultiProject)
if res:
self.setDirty(False)
return res
return True
def closeMultiProject(self):
"""
Public slot to close the current multi project.
@return flag indicating success (boolean)
"""
# save the list of recently opened projects
self.__saveRecent()
if not self.isOpen():
return True
if not self.checkDirty():
return False
# now close the current project, if it belongs to the multi project
pfile = self.projectObject.getProjectFile()
if pfile:
for project in self.projects:
if project['file'] == pfile:
if not self.projectObject.closeProject():
return False
break
self.__initData()
self.closeAct.setEnabled(False)
self.saveasAct.setEnabled(False)
self.saveAct.setEnabled(False)
self.addProjectAct.setEnabled(False)
self.propsAct.setEnabled(False)
self.multiProjectClosed.emit()
return True
def initActions(self):
"""
Public slot to initialize the multi project related actions.
"""
self.actions = []
self.actGrp1 = createActionGroup(self)
act = E5Action(
self.tr('New multiproject'),
UI.PixmapCache.getIcon("multiProjectNew.png"),
self.tr('&New...'), 0, 0,
self.actGrp1, 'multi_project_new')
act.setStatusTip(self.tr('Generate a new multiproject'))
act.setWhatsThis(self.tr(
"""<b>New...</b>"""
"""<p>This opens a dialog for entering the info for a"""
""" new multiproject.</p>"""
))
act.triggered.connect(self.__newMultiProject)
self.actions.append(act)
act = E5Action(
self.tr('Open multiproject'),
UI.PixmapCache.getIcon("multiProjectOpen.png"),
self.tr('&Open...'), 0, 0,
self.actGrp1, 'multi_project_open')
act.setStatusTip(self.tr('Open an existing multiproject'))
act.setWhatsThis(self.tr(
"""<b>Open...</b>"""
"""<p>This opens an existing multiproject.</p>"""
))
act.triggered.connect(self.openMultiProject)
self.actions.append(act)
self.closeAct = E5Action(
self.tr('Close multiproject'),
UI.PixmapCache.getIcon("multiProjectClose.png"),
self.tr('&Close'), 0, 0, self, 'multi_project_close')
self.closeAct.setStatusTip(self.tr(
'Close the current multiproject'))
self.closeAct.setWhatsThis(self.tr(
"""<b>Close</b>"""
"""<p>This closes the current multiproject.</p>"""
))
self.closeAct.triggered.connect(self.closeMultiProject)
self.actions.append(self.closeAct)
self.saveAct = E5Action(
self.tr('Save multiproject'),
UI.PixmapCache.getIcon("multiProjectSave.png"),
self.tr('&Save'), 0, 0, self, 'multi_project_save')
self.saveAct.setStatusTip(self.tr('Save the current multiproject'))
self.saveAct.setWhatsThis(self.tr(
"""<b>Save</b>"""
"""<p>This saves the current multiproject.</p>"""
))
self.saveAct.triggered.connect(self.saveMultiProject)
self.actions.append(self.saveAct)
self.saveasAct = E5Action(
self.tr('Save multiproject as'),
UI.PixmapCache.getIcon("multiProjectSaveAs.png"),
self.tr('Save &as...'), 0, 0, self,
'multi_project_save_as')
self.saveasAct.setStatusTip(self.tr(
'Save the current multiproject to a new file'))
self.saveasAct.setWhatsThis(self.tr(
"""<b>Save as</b>"""
"""<p>This saves the current multiproject to a new file.</p>"""
))
self.saveasAct.triggered.connect(self.saveMultiProjectAs)
self.actions.append(self.saveasAct)
self.addProjectAct = E5Action(
self.tr('Add project to multiproject'),
UI.PixmapCache.getIcon("fileProject.png"),
self.tr('Add &project...'), 0, 0,
self, 'multi_project_add_project')
self.addProjectAct.setStatusTip(self.tr(
'Add a project to the current multiproject'))
self.addProjectAct.setWhatsThis(self.tr(
"""<b>Add project...</b>"""
"""<p>This opens a dialog for adding a project"""
""" to the current multiproject.</p>"""
))
self.addProjectAct.triggered.connect(self.addProject)
self.actions.append(self.addProjectAct)
self.propsAct = E5Action(
self.tr('Multiproject properties'),
UI.PixmapCache.getIcon("multiProjectProps.png"),
self.tr('&Properties...'), 0, 0, self,
'multi_project_properties')
self.propsAct.setStatusTip(self.tr(
'Show the multiproject properties'))
self.propsAct.setWhatsThis(self.tr(
"""<b>Properties...</b>"""
"""<p>This shows a dialog to edit the multiproject"""
""" properties.</p>"""
))
self.propsAct.triggered.connect(self.__showProperties)
self.actions.append(self.propsAct)
self.closeAct.setEnabled(False)
self.saveAct.setEnabled(False)
self.saveasAct.setEnabled(False)
self.addProjectAct.setEnabled(False)
self.propsAct.setEnabled(False)
def initMenu(self):
"""
Public slot to initialize the multi project menu.
@return the menu generated (QMenu)
"""
menu = QMenu(self.tr('&Multiproject'), self.parent())
self.recentMenu = QMenu(self.tr('Open &Recent Multiprojects'),
menu)
self.__menus = {
"Main": menu,
"Recent": self.recentMenu,
}
# connect the aboutToShow signals
self.recentMenu.aboutToShow.connect(self.__showContextMenuRecent)
self.recentMenu.triggered.connect(self.__openRecent)
menu.aboutToShow.connect(self.__showMenu)
# build the main menu
menu.setTearOffEnabled(True)
menu.addActions(self.actGrp1.actions())
self.menuRecentAct = menu.addMenu(self.recentMenu)
menu.addSeparator()
menu.addAction(self.closeAct)
menu.addSeparator()
menu.addAction(self.saveAct)
menu.addAction(self.saveasAct)
menu.addSeparator()
menu.addAction(self.addProjectAct)
menu.addSeparator()
menu.addAction(self.propsAct)
self.menu = menu
return menu
def initToolbar(self, toolbarManager):
"""
Public slot to initialize the multi project toolbar.
@param toolbarManager reference to a toolbar manager object
(E5ToolBarManager)
@return the toolbar generated (QToolBar)
"""
tb = QToolBar(self.tr("Multiproject"), self.ui)
tb.setIconSize(UI.Config.ToolBarIconSize)
tb.setObjectName("MultiProjectToolbar")
tb.setToolTip(self.tr('Multiproject'))
tb.addActions(self.actGrp1.actions())
tb.addAction(self.closeAct)
tb.addSeparator()
tb.addAction(self.saveAct)
tb.addAction(self.saveasAct)
toolbarManager.addToolBar(tb, tb.windowTitle())
toolbarManager.addAction(self.addProjectAct, tb.windowTitle())
toolbarManager.addAction(self.propsAct, tb.windowTitle())
return tb
def __showMenu(self):
"""
Private method to set up the multi project menu.
"""
self.menuRecentAct.setEnabled(len(self.recent) > 0)
self.showMenu.emit("Main", self.__menus["Main"])
def __syncRecent(self):
"""
Private method to synchronize the list of recently opened multi
projects with the central store.
"""
for recent in self.recent[:]:
if Utilities.samepath(self.pfile, recent):
self.recent.remove(recent)
self.recent.insert(0, self.pfile)
maxRecent = Preferences.getProject("RecentNumber")
if len(self.recent) > maxRecent:
self.recent = self.recent[:maxRecent]
self.__saveRecent()
def __showContextMenuRecent(self):
"""
Private method to set up the recent multi projects menu.
"""
self.__loadRecent()
self.recentMenu.clear()
idx = 1
for rp in self.recent:
if idx < 10:
formatStr = '&{0:d}. {1}'
else:
formatStr = '{0:d}. {1}'
act = self.recentMenu.addAction(
formatStr.format(
idx,
Utilities.compactPath(rp, self.ui.maxMenuFilePathLen)))
act.setData(rp)
act.setEnabled(QFileInfo(rp).exists())
idx += 1
self.recentMenu.addSeparator()
self.recentMenu.addAction(self.tr('&Clear'), self.__clearRecent)
def __openRecent(self, act):
"""
        Private method to open a multi project from the list of recently
        opened multi projects.
@param act reference to the action that triggered (QAction)
"""
file = act.data()
if file:
self.openMultiProject(file)
def __clearRecent(self):
"""
Private method to clear the recent multi projects menu.
"""
self.recent = []
def getActions(self):
"""
Public method to get a list of all actions.
@return list of all actions (list of E5Action)
"""
return self.actions[:]
def addE5Actions(self, actions):
"""
Public method to add actions to the list of actions.
@param actions list of actions (list of E5Action)
"""
self.actions.extend(actions)
def removeE5Actions(self, actions):
"""
Public method to remove actions from the list of actions.
@param actions list of actions (list of E5Action)
"""
for act in actions:
try:
self.actions.remove(act)
except ValueError:
pass
def getMenu(self, menuName):
"""
Public method to get a reference to the main menu or a submenu.
@param menuName name of the menu (string)
@return reference to the requested menu (QMenu) or None
"""
try:
return self.__menus[menuName]
except KeyError:
return None
def openProject(self, filename):
"""
Public slot to open a project.
@param filename filename of the project file (string)
"""
self.projectObject.openProject(filename)
self.projectOpened.emit(filename)
def __openMasterProject(self, reopen=True):
"""
Private slot to open the master project.
@param reopen flag indicating, that the master project should be
reopened, if it has been opened already (boolean)
"""
for project in self.projects:
if project['master']:
if reopen or \
not self.projectObject.isOpen() or \
self.projectObject.getProjectFile() != project['file']:
self.openProject(project['file'])
return
def getMasterProjectFile(self):
"""
Public method to get the filename of the master project.
@return name of the master project file (string)
"""
for project in self.projects:
if project['master']:
return project['file']
return None
def getDependantProjectFiles(self):
"""
Public method to get the filenames of the dependent projects.
@return names of the dependent project files (list of strings)
"""
files = []
for project in self.projects:
if not project['master']:
files.append(project['file'])
return files
|
davy39/eric
|
MultiProject/MultiProject.py
|
Python
|
gpl-3.0
| 32,000 | 0.004 |
import json, time
import glob
import timer
from utils import *
class Item(object):
__slots__ = ( 'type', 'cbid', 'dbid', 'wfid', 'step', 'when', 'meta', 'body', 'repo', 'path', 'size', 'id' )
def __init__( self, obj={}, **kwargs ):
# Three situations: Network streamed data (obj), Database data (obj), Newly created data (kwargs)
kwargs.update( obj )
self.type = kwargs['type']
self.cbid = kwargs['cbid'] if 'cbid' in kwargs else None
self.dbid = kwargs['dbid'] if 'dbid' in kwargs else None
self.wfid = kwargs['wfid'] if 'wfid' in kwargs else glob.workflow_id
self.step = kwargs['step'] if 'step' in kwargs else glob.workflow_step
self.when = kwargs['when'] if 'when' in kwargs else time.time()
self.id = kwargs['id'] if 'id' in kwargs else None
if 'meta' in kwargs:
if isinstance( kwargs['meta'], basestring ):
self.meta = json.loads( kwargs['meta'] )
else:
self.meta = kwargs['meta']
else:
self.meta = {}
self.body = None
self.repo = None
self.path = None
self.size = 0
        if 'body' in kwargs and kwargs['body'] is not None:
if isinstance( kwargs['body'], basestring ):
self.body = json.loads( kwargs['body'] )
tmp_str = kwargs['body']
else:
self.body = kwargs['body']
tmp_str = json.dumps( kwargs['body'], sort_keys=True )
self.size = len(tmp_str)
if not self.cbid:
self.cbid = hashstring(tmp_str)
        elif 'repo' in kwargs and kwargs['repo'] is not None:
self.repo = kwargs['repo']
if not self.cbid:
log.error("No cbid for an object in a remote repository. There is no way to obtain it.")
        elif 'path' in kwargs and kwargs['path'] is not None:
self.path = kwargs['path']
if not self.cbid:
if 'new_path' in kwargs:
self.cbid, self.size = hashfile_copy(self.path, kwargs['new_path'])
self.path = kwargs['new_path']
else:
self.cbid, self.size = hashfile(self.path)
            elif 'size' in kwargs and kwargs['size'] is not None:
self.size = int(kwargs['size'])
elif os.path.isfile(self.path):
statinfo = os.stat(self.path)
self.size = statinfo.st_size
elif os.path.isfile(glob.data_file_directory+self.path):
statinfo = os.stat(glob.data_file_directory+self.path)
self.size = statinfo.st_size
elif os.path.isfile(glob.cache_file_directory+self.path):
statinfo = os.stat(glob.cache_file_directory+self.path)
self.size = statinfo.st_size
else:
print "Can't find the file!!!"
def __str__( self ):
obj = dict(type=self.type, cbid=self.cbid, when=self.when)
if self.dbid: obj['dbid'] = self.dbid
if self.wfid: obj['wfid'] = self.wfid
if self.step: obj['step'] = self.step
if self.meta:
if isinstance( self.meta, basestring ):
#obj['meta'] = self.meta
obj['meta'] = json.loads(self.meta)
else:
#obj['meta'] = json.dumps(self.meta, sort_keys=True)
obj['meta'] = self.meta
if self.size:
obj['size'] = self.size
if self.body:
if isinstance( self.body, basestring ):
#obj['body'] = self.body[0:20]+' ... '+self.body[-20:]
obj['body'] = json.loads(self.body)
else:
#obj['body'] = json.dumps(self.body, sort_keys=True)
obj['body'] = self.body
elif self.repo:
obj['repo'] = self.repo
elif self.path:
obj['path'] = self.path
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')) + "\n"
def stream( self, active_stream ):
obj = dict(type=self.type, cbid=self.cbid, when=self.when)
summary = ''
if self.dbid: obj['dbid'] = self.dbid
if self.wfid: obj['wfid'] = self.wfid
if self.step: obj['step'] = self.step
if self.meta: obj['meta'] = self.meta
if self.size: obj['size'] = self.size
if self.body:
obj['body'] = self.body
active_stream.write( json.dumps(obj, sort_keys=True) + "\n" )
elif self.repo:
obj['repo'] = self.repo
active_stream.write( json.dumps(obj, sort_keys=True) + "\n" )
elif self.path:
active_stream.write( json.dumps(obj, sort_keys=True) + "\n" )
summary = self.stream_content( active_stream )
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')) + "\n" + summary + "\n"
def export( self ):
obj = dict(type=self.type, cbid=self.cbid, when=self.when)
if self.dbid: obj['dbid'] = self.dbid
if self.wfid: obj['wfid'] = self.wfid
if self.step: obj['step'] = self.step
if self.meta: obj['meta'] = self.meta
if self.size: obj['size'] = self.size
if self.body:
obj['body'] = self.body
elif self.repo:
obj['repo'] = self.repo
elif self.path:
obj['path'] = self.path
return obj
def dump( self ):
return json.dumps(self.export(), sort_keys=True)
def stream_content( self, active_stream ):
if self.body:
if isinstance( self.body, basestring ):
wstr = self.body
else:
wstr = json.dumps(self.body, sort_keys=True)
active_stream.write( wstr )
if len(wstr)>45:
return wstr[0:20] + ' ... ' + wstr[-20:]
else:
return wstr
elif self.repo:
print "Stream from repository not implemented..."
return None
elif self.path:
if self.type=='temp':
pathname = glob.cache_file_directory + self.path
else:
pathname = glob.data_file_directory + self.path
with open( pathname, 'r' ) as f:
buf = f.read(1024*1024)
lastbuf = ''
summary = buf[0:20] + ' ... ' if len(buf)>20 else buf
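            # Stream the file in 1 MB chunks while building a short
            # "first 20 ... last 20" summary of its contents.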
while buf:
active_stream.write( buf )
lastbuf = buf
buf = f.read(1024*1024)
summary = summary + buf[-20:] if len(lastbuf)>20 else summary + buf
return summary
def sqlite3_insert( self ):
keys = ['type','cbid','"when"']
vals = [self.type, self.cbid, self.when]
if self.id:
keys.append( 'id' )
vals.append( self.id )
if self.dbid:
keys.append( 'dbid' )
vals.append( self.dbid )
if self.wfid:
keys.append( 'wfid' )
vals.append( self.wfid )
if self.step:
keys.append( 'step' )
vals.append( self.step )
if self.meta:
keys.append( 'meta' )
vals.append( json.dumps(self.meta, sort_keys=True) )
if self.size:
keys.append( 'size' )
vals.append( self.size )
if self.body:
keys.append( 'body' )
vals.append( json.dumps(self.body, sort_keys=True) )
elif self.repo:
keys.append( 'repo' )
vals.append( self.repo )
elif self.path:
keys.append( 'path' )
vals.append( self.path )
qs = ['?'] * len(keys)
ins = 'INSERT INTO items (%s) VALUES (%s);' % (','.join(keys), ','.join(qs))
return ins, tuple(vals)
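    # Example use (sketch, assuming a sqlite3 cursor and an "items" table
    # matching the keys built above):
    #   cur.execute(*item.sqlite3_insert())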
#def __add__( self, other ):
# return str(self) + other
#def __radd__( self, other ):
# return other + str(self)
def __eq__(self, other):
return self.cbid == other.cbid
def __ne__(self, other):
return not self.__eq__(other)
def __len__( self ):
return len(str(self))
|
nkremerh/cctools
|
prune/src/prune/class_item.py
|
Python
|
gpl-2.0
| 6,649 | 0.048428 |
"""
XX. Generating HTML forms from models
This is mostly just a reworking of the ``form_for_model``/``form_for_instance``
tests to use ``ModelForm``. As such, the text may not make sense in all cases,
and the examples are probably a poor fit for the ``ModelForm`` syntax. In other
words, most of these tests should be rewritten.
"""
from __future__ import unicode_literals
import datetime
import os
import tempfile
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six.moves import range
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
ARTICLE_STATUS = (
(1, 'Draft'),
(2, 'Pending'),
(3, 'Live'),
)
ARTICLE_STATUS_CHAR = (
('d', 'Draft'),
('p', 'Pending'),
('l', 'Live'),
)
class Person(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=20)
slug = models.SlugField(max_length=20)
url = models.CharField('The URL', max_length=40)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
@python_2_unicode_compatible
class Writer(models.Model):
name = models.CharField(max_length=50, help_text='Use both first and last names.')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=50)
slug = models.SlugField()
pub_date = models.DateField()
created = models.DateField(editable=False)
writer = models.ForeignKey(Writer)
article = models.TextField()
categories = models.ManyToManyField(Category, blank=True)
status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)
def save(self, *args, **kwargs):
if not self.id:
self.created = datetime.date.today()
return super(Article, self).save(*args, **kwargs)
def __str__(self):
return self.headline
class ImprovedArticle(models.Model):
article = models.OneToOneField(Article)
class ImprovedArticleWithParentLink(models.Model):
article = models.OneToOneField(Article, parent_link=True)
class BetterWriter(Writer):
score = models.IntegerField()
@python_2_unicode_compatible
class Publication(models.Model):
title = models.CharField(max_length=30)
date_published = models.DateField()
def __str__(self):
return self.title
class Author(models.Model):
publication = models.OneToOneField(Publication, null=True, blank=True)
full_name = models.CharField(max_length=255)
class Author1(models.Model):
publication = models.OneToOneField(Publication, null=False)
full_name = models.CharField(max_length=255)
@python_2_unicode_compatible
class WriterProfile(models.Model):
writer = models.OneToOneField(Writer, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %s" % (self.writer, self.age)
class Document(models.Model):
myfile = models.FileField(upload_to='unused', blank=True)
@python_2_unicode_compatible
class TextFile(models.Model):
description = models.CharField(max_length=20)
file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)
def __str__(self):
return self.description
class CustomFileField(models.FileField):
def save_form_data(self, instance, data):
been_here = getattr(self, 'been_saved', False)
assert not been_here, "save_form_data called more than once"
setattr(self, 'been_saved', True)
class CustomFF(models.Model):
f = CustomFileField(upload_to='unused', blank=True)
class FilePathModel(models.Model):
    path = models.FilePathField(path=os.path.dirname(upath(__file__)), match=r".*\.py$", blank=True)
try:
from PIL import Image # NOQA: detect if Pillow is installed
test_images = True
@python_2_unicode_compatible
class ImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
# Deliberately put the image field *after* the width/height fields to
# trigger the bug in #10404 with width/height not getting assigned.
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height')
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
@python_2_unicode_compatible
class OptionalImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height',
blank=True, null=True)
width = models.IntegerField(editable=False, null=True)
height = models.IntegerField(editable=False, null=True)
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
except ImportError:
test_images = False
@python_2_unicode_compatible
class CommaSeparatedInteger(models.Model):
field = models.CommaSeparatedIntegerField(max_length=20)
def __str__(self):
return self.field
class Homepage(models.Model):
url = models.URLField()
@python_2_unicode_compatible
class Product(models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
@python_2_unicode_compatible
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __str__(self):
return "%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class Triple(models.Model):
left = models.IntegerField()
middle = models.IntegerField()
right = models.IntegerField()
class Meta:
unique_together = (('left', 'middle'), ('middle', 'right'))
class ArticleStatus(models.Model):
status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
@python_2_unicode_compatible
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
class Book(models.Model):
title = models.CharField(max_length=40)
author = models.ForeignKey(Writer, blank=True, null=True)
special_id = models.IntegerField(blank=True, null=True, unique=True)
class Meta:
unique_together = ('title', 'author')
class BookXtra(models.Model):
isbn = models.CharField(max_length=16, unique=True)
suffix1 = models.IntegerField(blank=True, default=0)
suffix2 = models.IntegerField(blank=True, default=0)
class Meta:
unique_together = (('suffix1', 'suffix2'))
abstract = True
class DerivedBook(Book, BookXtra):
pass
@python_2_unicode_compatible
class ExplicitPK(models.Model):
key = models.CharField(max_length=20, primary_key=True)
desc = models.CharField(max_length=20, blank=True, unique=True)
class Meta:
unique_together = ('key', 'desc')
def __str__(self):
return self.key
@python_2_unicode_compatible
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __str__(self):
return self.title
@python_2_unicode_compatible
class DateTimePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateTimeField(editable=False)
def __str__(self):
return self.title
class DerivedPost(Post):
pass
@python_2_unicode_compatible
class BigInt(models.Model):
biggie = models.BigIntegerField()
def __str__(self):
return six.text_type(self.biggie)
class MarkupField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 20
super(MarkupField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
        # don't allow this field to be used in a form (a real use-case might be
        # that you know the markup will always be X, but the field lives in an
        # app that allows the user to say it could be something else)
        # regressed at r10062
return None
class CustomFieldForExclusionModel(models.Model):
name = models.CharField(max_length=10)
markup = MarkupField()
class FlexibleDatePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField(blank=True, null=True)
@python_2_unicode_compatible
class Colour(models.Model):
name = models.CharField(max_length=50)
def __iter__(self):
for number in range(5):
yield number
def __str__(self):
return self.name
class ColourfulItem(models.Model):
name = models.CharField(max_length=50)
colours = models.ManyToManyField(Colour)
class CustomErrorMessage(models.Model):
name1 = models.CharField(max_length=50,
validators=[validators.validate_slug],
error_messages={'invalid': 'Model custom error message.'})
name2 = models.CharField(max_length=50,
validators=[validators.validate_slug],
error_messages={'invalid': 'Model custom error message.'})
def clean(self):
if self.name1 == 'FORBIDDEN_VALUE':
raise ValidationError({'name1': [ValidationError('Model.clean() error messages.')]})
elif self.name1 == 'FORBIDDEN_VALUE2':
raise ValidationError({'name1': 'Model.clean() error messages (simpler syntax).'})
elif self.name1 == 'GLOBAL_ERROR':
raise ValidationError("Global error message.")
def today_callable_dict():
return {"last_action__gte": datetime.datetime.today()}
def today_callable_q():
return models.Q(last_action__gte=datetime.datetime.today())
class Character(models.Model):
username = models.CharField(max_length=100)
last_action = models.DateTimeField()
class StumpJoke(models.Model):
most_recently_fooled = models.ForeignKey(Character, limit_choices_to=today_callable_dict, related_name="+")
has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+")
# Model for #13776
class Student(models.Model):
character = models.ForeignKey(Character)
study = models.CharField(max_length=30)
# Model for #639
class Photo(models.Model):
title = models.CharField(max_length=30)
image = models.FileField(storage=temp_storage, upload_to='tests')
# Support code for the tests; this keeps track of how many times save()
# gets called on each instance.
def __init__(self, *args, **kwargs):
super(Photo, self).__init__(*args, **kwargs)
self._savecount = 0
def save(self, force_insert=False, force_update=False):
super(Photo, self).save(force_insert, force_update)
self._savecount += 1
|
h4r5h1t/django-hauthy
|
tests/model_forms/models.py
|
Python
|
bsd-3-clause
| 12,503 | 0.00192 |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from pypom import Region
from selenium.webdriver.common.by import By
from pages.base_page import CrashStatsBasePage
class CrashReport(CrashStatsBasePage):
_reports_tab_locator = (By.ID, 'reports')
_results_count_locator = (By.CSS_SELECTOR, 'span.totalItems')
_reports_row_locator = (By.CSS_SELECTOR, '#reports-list tbody tr')
_report_tab_button_locator = (By.CSS_SELECTOR, '#panels-nav .reports')
_summary_table_locator = (By.CSS_SELECTOR, '.content')
def wait_for_page_to_load(self):
super(CrashReport, self).wait_for_page_to_load()
self.wait.until(lambda s: self.is_element_displayed(*self._summary_table_locator))
return self
@property
def reports(self):
return [self.Report(self, el) for el in self.find_elements(*self._reports_row_locator)]
@property
def results_count_total(self):
return int(self.find_element(*self._results_count_locator).text.replace(",", ""))
def click_reports_tab(self):
self.find_element(*self._report_tab_button_locator).click()
self.wait.until(lambda s: len(self.reports))
class Report(Region):
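        # Each Report is a pypom Region whose root element is one row of the
        # reports table; its locators are resolved relative to that root.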
_product_locator = (By.CSS_SELECTOR, 'td:nth-of-type(3)')
_version_locator = (By.CSS_SELECTOR, 'td:nth-of-type(4)')
_report_date_link_locator = (By.CSS_SELECTOR, '#reports-list a.external-link')
@property
def product(self):
return self.find_element(*self._product_locator).text
@property
def version(self):
return self.find_element(*self._version_locator).text
def click_report_date(self):
self.find_element(*self._report_date_link_locator).click()
from uuid_report import UUIDReport
return UUIDReport(self.selenium, self.page.base_url).wait_for_page_to_load()
|
Tayamarn/socorro
|
e2e-tests/pages/crash_report_page.py
|
Python
|
mpl-2.0
| 2,049 | 0.00244 |
'''
Copyright 2015 - 2018 University College London.
This file is part of Nammu.
Nammu is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Nammu is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Nammu. If not, see <http://www.gnu.org/licenses/>.
'''
import filecmp
import os
import yaml
from python.nammu.controller.NammuController import NammuController
from ..utils import get_home_env_var, update_yaml_config
def test_update_yaml_config():
"""
Ensure that, upon updating yaml settings files from jar, a user's
default project settings are not overwritten.
"""
pth = "resources/test/"
local_file = os.path.join(pth, "user_settings.yaml")
jar_file = os.path.join(pth, "jar_settings.yaml")
new_config = update_yaml_config(path_to_jar=jar_file,
yaml_path=local_file,
path_to_config=local_file,
test_mode=True)
with open(local_file, "r") as f:
orig_config = yaml.safe_load(f)
# Make sure the user (project) setting is not overwritten
assert (new_config["projects"]["default"] ==
orig_config["projects"]["default"])
def test_settings_copied_correctly(monkeypatch, tmpdir):
"""
Check that the settings are initialised correctly at first launch.
More specifically, this test ensures that, if the user starts Nammu without
already having any configuration files, then local configuration files with
the correct content will be created, without affecting the original files.
"""
# Mock the user's home directory
home_env_var = get_home_env_var() # will vary depending on OS
monkeypatch.setitem(os.environ, home_env_var, str(tmpdir))
assert os.listdir(str(tmpdir)) == [] # sanity check!
NammuController() # start up Nammu, but don't do anything with it
settings_dir = os.path.join(os.environ[home_env_var], '.nammu')
for filename in ['settings.yaml', 'logging.yaml']:
target_file = os.path.join(settings_dir, filename)
original_file = os.path.join('resources', 'config', filename)
assert os.path.isfile(target_file)
assert filecmp.cmp(target_file, original_file)
# Check that the original config files have not been emptied (see #347)
with open(original_file, 'r') as orig:
assert orig.readlines()
|
oracc/nammu
|
python/nammu/test/test_yaml_update.py
|
Python
|
gpl-3.0
| 2,813 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Eike Frost <ei@kefro.st>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: keycloak_client
short_description: Allows administration of Keycloak clients via Keycloak API
version_added: "2.5"
description:
- This module allows the administration of Keycloak clients via the Keycloak REST API. It
requires access to the REST API via OpenID Connect; the user connecting and the client being
used must have the requisite access rights. In a default Keycloak installation, admin-cli
and an admin user would work, as would a separate client definition with the scope tailored
to your needs and a user having the expected roles.
- The names of module options are snake_cased versions of the camelCase ones found in the
Keycloak API and its documentation at U(http://www.keycloak.org/docs-api/3.3/rest-api/).
Aliases are provided so camelCased versions can be used as well.
    - The Keycloak API does not always sanity check inputs, e.g. you can set
      SAML-specific settings on an OpenID Connect client and vice versa. Be careful.
      If you do not specify a setting, usually a sensible default is chosen.
options:
state:
description:
- State of the client
- On C(present), the client will be created (or updated if it exists already).
- On C(absent), the client will be removed if it exists
choices: ['present', 'absent']
default: 'present'
realm:
description:
- The realm to create the client in.
client_id:
description:
- Client id of client to be worked on. This is usually an alphanumeric name chosen by
you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
This is 'clientId' in the Keycloak REST API.
aliases:
- clientId
id:
description:
- Id of client to be worked on. This is usually an UUID. Either this or I(client_id)
is required. If you specify both, this takes precedence.
name:
description:
- Name of the client (this is not the same as I(client_id))
description:
description:
- Description of the client in Keycloak
root_url:
description:
- Root URL appended to relative URLs for this client
This is 'rootUrl' in the Keycloak REST API.
aliases:
- rootUrl
admin_url:
description:
- URL to the admin interface of the client
This is 'adminUrl' in the Keycloak REST API.
aliases:
- adminUrl
base_url:
description:
- Default URL to use when the auth server needs to redirect or link back to the client
This is 'baseUrl' in the Keycloak REST API.
aliases:
- baseUrl
enabled:
description:
- Is this client enabled or not?
type: bool
client_authenticator_type:
description:
- How do clients authenticate with the auth server? Either C(client-secret) or
C(client-jwt) can be chosen. When using C(client-secret), the module parameter
I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
to configure its behavior.
This is 'clientAuthenticatorType' in the Keycloak REST API.
choices: ['client-secret', 'client-jwt']
aliases:
- clientAuthenticatorType
secret:
description:
- When using I(client_authenticator_type) C(client-secret) (the default), you can
                specify a secret here (otherwise one will be generated if it does not exist). If
changing this secret, the module will not register a change currently (but the
changed secret will be saved).
registration_access_token:
description:
- The registration access token provides access for clients to the client registration
service.
This is 'registrationAccessToken' in the Keycloak REST API.
aliases:
- registrationAccessToken
default_roles:
description:
- list of default roles for this client. If the client roles referenced do not exist
yet, they will be created.
This is 'defaultRoles' in the Keycloak REST API.
aliases:
- defaultRoles
redirect_uris:
description:
- Acceptable redirect URIs for this client.
This is 'redirectUris' in the Keycloak REST API.
aliases:
- redirectUris
web_origins:
description:
- List of allowed CORS origins.
This is 'webOrigins' in the Keycloak REST API.
aliases:
- webOrigins
not_before:
description:
- Revoke any tokens issued before this date for this client (this is a UNIX timestamp).
This is 'notBefore' in the Keycloak REST API.
aliases:
- notBefore
bearer_only:
description:
- The access type of this client is bearer-only.
This is 'bearerOnly' in the Keycloak REST API.
aliases:
- bearerOnly
type: bool
consent_required:
description:
- If enabled, users have to consent to client access.
This is 'consentRequired' in the Keycloak REST API.
aliases:
- consentRequired
type: bool
standard_flow_enabled:
description:
- Enable standard flow for this client or not (OpenID connect).
This is 'standardFlowEnabled' in the Keycloak REST API.
aliases:
- standardFlowEnabled
type: bool
implicit_flow_enabled:
description:
- Enable implicit flow for this client or not (OpenID connect).
This is 'implicitFlowEnabled' in the Keycloak REST API.
aliases:
- implicitFlowEnabled
type: bool
direct_access_grants_enabled:
description:
- Are direct access grants enabled for this client or not (OpenID connect).
This is 'directAccessGrantsEnabled' in the Keycloak REST API.
aliases:
- directAccessGrantsEnabled
type: bool
service_accounts_enabled:
description:
- Are service accounts enabled for this client or not (OpenID connect).
This is 'serviceAccountsEnabled' in the Keycloak REST API.
aliases:
- serviceAccountsEnabled
type: bool
authorization_services_enabled:
description:
- Are authorization services enabled for this client or not (OpenID connect).
This is 'authorizationServicesEnabled' in the Keycloak REST API.
aliases:
- authorizationServicesEnabled
type: bool
public_client:
description:
- Is the access type for this client public or not.
This is 'publicClient' in the Keycloak REST API.
aliases:
- publicClient
type: bool
frontchannel_logout:
description:
- Is frontchannel logout enabled for this client or not.
This is 'frontchannelLogout' in the Keycloak REST API.
aliases:
- frontchannelLogout
type: bool
protocol:
description:
            - Type of client (either C(openid-connect) or C(saml)).
choices: ['openid-connect', 'saml']
full_scope_allowed:
description:
- Is the "Full Scope Allowed" feature set for this client or not.
This is 'fullScopeAllowed' in the Keycloak REST API.
aliases:
- fullScopeAllowed
type: bool
node_re_registration_timeout:
description:
- Cluster node re-registration timeout for this client.
This is 'nodeReRegistrationTimeout' in the Keycloak REST API.
aliases:
- nodeReRegistrationTimeout
registered_nodes:
description:
- dict of registered cluster nodes (with C(nodename) as the key and last registration
time as the value).
This is 'registeredNodes' in the Keycloak REST API.
aliases:
- registeredNodes
client_template:
description:
- Client template to use for this client. If it does not exist, this field will silently
be dropped.
This is 'clientTemplate' in the Keycloak REST API.
aliases:
- clientTemplate
use_template_config:
description:
- Whether or not to use configuration from the I(client_template).
This is 'useTemplateConfig' in the Keycloak REST API.
aliases:
- useTemplateConfig
type: bool
use_template_scope:
description:
- Whether or not to use scope configuration from the I(client_template).
This is 'useTemplateScope' in the Keycloak REST API.
aliases:
- useTemplateScope
type: bool
use_template_mappers:
description:
- Whether or not to use mapper configuration from the I(client_template).
This is 'useTemplateMappers' in the Keycloak REST API.
aliases:
- useTemplateMappers
type: bool
surrogate_auth_required:
description:
- Whether or not surrogate auth is required.
This is 'surrogateAuthRequired' in the Keycloak REST API.
aliases:
- surrogateAuthRequired
type: bool
authorization_settings:
description:
- A data structure defining the authorization settings for this client. For reference,
please see the Keycloak API docs at U(http://www.keycloak.org/docs-api/3.3/rest-api/index.html#_resourceserverrepresentation).
This is 'authorizationSettings' in the Keycloak REST API.
aliases:
- authorizationSettings
protocol_mappers:
description:
- A list of dicts defining protocol mappers for this client.
This is 'protocolMappers' in the Keycloak REST API.
aliases:
- protocolMappers
suboptions:
consentRequired:
description:
- Specifies whether a user needs to provide consent to a client for this mapper to be active.
consentText:
description:
- The human-readable name of the consent that the user is asked to accept.
id:
description:
- Usually a UUID specifying the internal ID of this protocol mapper instance.
name:
description:
- The name of this protocol mapper.
protocol:
description:
- This is either C(openid-connect) or C(saml); it specifies for which protocol this
protocol mapper is active.
choices: ['openid-connect', 'saml']
protocolMapper:
description:
- The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
impossible to provide since this may be extended through SPIs by the user of Keycloak,
by default Keycloak as of 3.4 ships with at least
- C(docker-v2-allow-all-mapper)
- C(oidc-address-mapper)
- C(oidc-full-name-mapper)
- C(oidc-group-membership-mapper)
- C(oidc-hardcoded-claim-mapper)
- C(oidc-hardcoded-role-mapper)
- C(oidc-role-name-mapper)
- C(oidc-script-based-protocol-mapper)
- C(oidc-sha256-pairwise-sub-mapper)
- C(oidc-usermodel-attribute-mapper)
- C(oidc-usermodel-client-role-mapper)
- C(oidc-usermodel-property-mapper)
- C(oidc-usermodel-realm-role-mapper)
- C(oidc-usersessionmodel-note-mapper)
- C(saml-group-membership-mapper)
- C(saml-hardcode-attribute-mapper)
- C(saml-hardcode-role-mapper)
- C(saml-role-list-mapper)
- C(saml-role-name-mapper)
- C(saml-user-attribute-mapper)
- C(saml-user-property-mapper)
- C(saml-user-session-note-mapper)
- An exhaustive list of available mappers on your installation can be obtained on
the admin console by going to Server Info -> Providers and looking under
'protocol-mapper'.
config:
description:
- Dict specifying the configuration options for the protocol mapper; the
contents differ depending on the value of I(protocolMapper) and are not documented
other than by the source of the mappers and their parent class(es). An example is given
below. It is easiest to obtain valid config values by dumping an already-existing
protocol mapper configuration through check-mode in the I(existing) field.
attributes:
description:
- A dict of further attributes for this client. This can contain various configuration
settings; an example is given in the examples section. While an exhaustive list of
permissible options is not available, possible options as of Keycloak 3.4 are listed below. The Keycloak
API does not validate whether a given option is appropriate for the protocol used; if specified
anyway, Keycloak will simply not use it.
suboptions:
saml.authnstatement:
description:
- For SAML clients, boolean specifying whether or not a statement containing method and timestamp
should be included in the login response.
saml.client.signature:
description:
- For SAML clients, boolean specifying whether a client signature is required and validated.
saml.encrypt:
description:
- Boolean specifying whether SAML assertions should be encrypted with the client's public key.
saml.force.post.binding:
description:
- For SAML clients, boolean specifying whether always to use POST binding for responses.
saml.onetimeuse.condition:
description:
- For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses.
saml.server.signature:
description:
- Boolean specifying whether SAML documents should be signed by the realm.
saml.server.signature.keyinfo.ext:
description:
- For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion
of the signing key id in the SAML Extensions element.
saml.signature.algorithm:
description:
- Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1).
saml.signing.certificate:
description:
- SAML signing key certificate, base64-encoded.
saml.signing.private.key:
description:
- SAML signing key private key, base64-encoded.
saml_assertion_consumer_url_post:
description:
- SAML POST Binding URL for the client's assertion consumer service (login responses).
saml_assertion_consumer_url_redirect:
description:
- SAML Redirect Binding URL for the client's assertion consumer service (login responses).
saml_force_name_id_format:
description:
- For SAML clients, boolean specifying whether to ignore the requested NameID subject format and use the configured one instead.
saml_name_id_format:
description:
- For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent)).
saml_signature_canonicalization_method:
description:
- SAML signature canonicalization method. This is one of four values, namely
C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and
C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
saml_single_logout_service_url_post:
description:
- SAML POST Binding URL for the client's single logout service.
saml_single_logout_service_url_redirect:
description:
- SAML Redirect Binding URL for the client's single logout service.
user.info.response.signature.alg:
description:
- For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned).
request.object.signature.alg:
description:
- For OpenID-Connect clients, JWA algorithm which the client needs to use when sending
an OIDC request object. One of C(any), C(none), or C(RS256).
use.jwks.url:
description:
- For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client
public keys.
jwks.url:
description:
- For OpenID-Connect clients, URL where client keys in JWK are stored.
jwt.credential.certificate:
description:
- For OpenID-Connect clients, client certificate for validating JWT issued by
client and signed by its key, base64-encoded.
extends_documentation_fragment:
- keycloak
author:
- Eike Frost (@eikef)
'''
EXAMPLES = '''
- name: Create or update Keycloak client (minimal example)
local_action:
module: keycloak_client
auth_client_id: admin-cli
auth_keycloak_url: https://auth.example.com/auth
auth_realm: master
auth_username: USERNAME
auth_password: PASSWORD
client_id: test
state: present
- name: Delete a Keycloak client
local_action:
module: keycloak_client
auth_client_id: admin-cli
auth_keycloak_url: https://auth.example.com/auth
auth_realm: master
auth_username: USERNAME
auth_password: PASSWORD
client_id: test
state: absent
- name: Create or update a Keycloak client (with all the bells and whistles)
local_action:
module: keycloak_client
auth_client_id: admin-cli
auth_keycloak_url: https://auth.example.com/auth
auth_realm: master
auth_username: USERNAME
auth_password: PASSWORD
state: present
realm: master
client_id: test
id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95
name: this_is_a_test
description: Description of this wonderful client
root_url: https://www.example.com/
admin_url: https://www.example.com/admin_url
base_url: basepath
enabled: True
client_authenticator_type: client-secret
secret: REALLYWELLKEPTSECRET
redirect_uris:
- https://www.example.com/*
- http://localhost:8888/
web_origins:
- https://www.example.com/*
not_before: 1507825725
bearer_only: False
consent_required: False
standard_flow_enabled: True
implicit_flow_enabled: False
direct_access_grants_enabled: False
service_accounts_enabled: False
authorization_services_enabled: False
public_client: False
frontchannel_logout: False
protocol: openid-connect
full_scope_allowed: false
node_re_registration_timeout: -1
client_template: test
use_template_config: False
use_template_scope: false
use_template_mappers: no
registered_nodes:
node01.example.com: 1507828202
registration_access_token: eyJWT_TOKEN
surrogate_auth_required: false
default_roles:
- test01
- test02
protocol_mappers:
- config:
access.token.claim: True
claim.name: "family_name"
id.token.claim: True
jsonType.label: String
user.attribute: lastName
userinfo.token.claim: True
consentRequired: True
consentText: "${familyName}"
name: family name
protocol: openid-connect
protocolMapper: oidc-usermodel-property-mapper
- config:
attribute.name: Role
attribute.nameformat: Basic
single: false
consentRequired: false
name: role list
protocol: saml
protocolMapper: saml-role-list-mapper
attributes:
saml.authnstatement: True
saml.client.signature: True
saml.force.post.binding: True
saml.server.signature: True
saml.signature.algorithm: RSA_SHA256
saml.signing.certificate: CERTIFICATEHERE
saml.signing.private.key: PRIVATEKEYHERE
saml_force_name_id_format: False
saml_name_id_format: username
saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#"
user.info.response.signature.alg: RS256
request.object.signature.alg: RS256
use.jwks.url: true
jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
'''
RETURN = '''
msg:
description: Message as to what action was taken
returned: always
type: str
sample: "Client testclient has been updated"
proposed:
description: client representation of proposed changes to client
returned: always
type: dict
sample: {
clientId: "test"
}
existing:
description: client representation of existing client (sample is truncated)
returned: always
type: dict
sample: {
"adminUrl": "http://www.example.com/admin_url",
"attributes": {
"request.object.signature.alg": "RS256",
}
}
end_state:
description: client representation of client after module execution (sample is truncated)
returned: always
type: dict
sample: {
"adminUrl": "http://www.example.com/admin_url",
"attributes": {
"request.object.signature.alg": "RS256",
}
}
'''
from ansible.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
keycloak_argument_spec, get_token, KeycloakError
from ansible.module_utils.basic import AnsibleModule
def sanitize_cr(clientrep):
""" Removes probably sensitive details from a client representation
:param clientrep: the clientrep dict to be sanitized
:return: sanitized clientrep dict
"""
result = clientrep.copy()
if 'secret' in result:
result['secret'] = 'no_log'
if 'attributes' in result:
if 'saml.signing.private.key' in result['attributes']:
result['attributes']['saml.signing.private.key'] = 'no_log'
return result
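# Note: clientrep.copy() is a shallow copy, so masking the nested 'attributes'
# dict above also mutates the caller's dict. Illustrative example:
# sanitize_cr({'secret': 'hunter2', 'attributes': {'saml.signing.private.key': 'PEM'}})
# returns {'secret': 'no_log', 'attributes': {'saml.signing.private.key': 'no_log'}}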
def main():
"""
Module execution
:return:
"""
argument_spec = keycloak_argument_spec()
protmapper_spec = dict(
consentRequired=dict(type='bool'),
consentText=dict(type='str'),
id=dict(type='str'),
name=dict(type='str'),
protocol=dict(type='str', choices=['openid-connect', 'saml']),
protocolMapper=dict(type='str'),
config=dict(type='dict'),
)
meta_args = dict(
state=dict(default='present', choices=['present', 'absent']),
realm=dict(type='str', default='master'),
id=dict(type='str'),
client_id=dict(type='str', aliases=['clientId']),
name=dict(type='str'),
description=dict(type='str'),
root_url=dict(type='str', aliases=['rootUrl']),
admin_url=dict(type='str', aliases=['adminUrl']),
base_url=dict(type='str', aliases=['baseUrl']),
surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']),
enabled=dict(type='bool'),
client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']),
secret=dict(type='str', no_log=True),
registration_access_token=dict(type='str', aliases=['registrationAccessToken']),
default_roles=dict(type='list', aliases=['defaultRoles']),
redirect_uris=dict(type='list', aliases=['redirectUris']),
web_origins=dict(type='list', aliases=['webOrigins']),
not_before=dict(type='int', aliases=['notBefore']),
bearer_only=dict(type='bool', aliases=['bearerOnly']),
consent_required=dict(type='bool', aliases=['consentRequired']),
standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']),
implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']),
direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']),
service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']),
authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
public_client=dict(type='bool', aliases=['publicClient']),
frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
protocol=dict(type='str', choices=['openid-connect', 'saml']),
attributes=dict(type='dict'),
full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
registered_nodes=dict(type='dict', aliases=['registeredNodes']),
client_template=dict(type='str', aliases=['clientTemplate']),
use_template_config=dict(type='bool', aliases=['useTemplateConfig']),
use_template_scope=dict(type='bool', aliases=['useTemplateScope']),
use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']),
protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
authorization_settings=dict(type='dict', aliases=['authorizationSettings']),
)
argument_spec.update(meta_args)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['client_id', 'id']]))
result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
# Obtain access token, initialize API
try:
connection_header = get_token(
base_url=module.params.get('auth_keycloak_url'),
validate_certs=module.params.get('validate_certs'),
auth_realm=module.params.get('auth_realm'),
client_id=module.params.get('auth_client_id'),
auth_username=module.params.get('auth_username'),
auth_password=module.params.get('auth_password'),
client_secret=module.params.get('auth_client_secret'),
)
except KeycloakError as e:
module.fail_json(msg=str(e))
kc = KeycloakAPI(module, connection_header)
realm = module.params.get('realm')
cid = module.params.get('id')
state = module.params.get('state')
# convert module parameters to client representation parameters (if they belong in there)
client_params = [x for x in module.params
if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
module.params.get(x) is not None]
# See whether the client already exists in Keycloak
if cid is None:
before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm)
if before_client is not None:
cid = before_client['id']
else:
before_client = kc.get_client_by_id(cid, realm=realm)
if before_client is None:
before_client = dict()
# Build a proposed changeset from parameters given to this module
changeset = dict()
for client_param in client_params:
new_param_value = module.params.get(client_param)
# some lists in the Keycloak API are sorted, some are not.
if isinstance(new_param_value, list):
if client_param in ['attributes']:
try:
new_param_value = sorted(new_param_value)
except TypeError:
pass
# Unfortunately, the ansible argument spec checker introduces variables with null values when
# they are not specified
if client_param == 'protocol_mappers':
new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
changeset[camel(client_param)] = new_param_value
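# camel() maps the module's snake_case option names onto the camelCase keys of
# the Keycloak client representation, e.g. 'redirect_uris' -> 'redirectUris',
# matching the aliases documented above.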
# Whether creating or updating a client, take the before-state and merge the changeset into it
updated_client = before_client.copy()
updated_client.update(changeset)
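# For example, merging before_client == {'clientId': 'test', 'enabled': False}
# with changeset == {'enabled': True} yields {'clientId': 'test', 'enabled': True};
# keys absent from the changeset keep their existing server-side values.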
result['proposed'] = sanitize_cr(changeset)
result['existing'] = sanitize_cr(before_client)
# If the client does not exist yet, before_client is still empty
if before_client == dict():
if state == 'absent':
# do nothing and exit
if module._diff:
result['diff'] = dict(before='', after='')
result['msg'] = 'Client does not exist, doing nothing.'
module.exit_json(**result)
# create new client
result['changed'] = True
if 'clientId' not in updated_client:
module.fail_json(msg='client_id needs to be specified when creating a new client')
if module._diff:
result['diff'] = dict(before='', after=sanitize_cr(updated_client))
if module.check_mode:
module.exit_json(**result)
kc.create_client(updated_client, realm=realm)
after_client = kc.get_client_by_clientid(updated_client['clientId'], realm=realm)
result['end_state'] = sanitize_cr(after_client)
result['msg'] = 'Client %s has been created.' % updated_client['clientId']
module.exit_json(**result)
else:
if state == 'present':
# update existing client
result['changed'] = True
if module.check_mode:
# We can only compare the current client with the proposed updates we have
if module._diff:
result['diff'] = dict(before=sanitize_cr(before_client),
after=sanitize_cr(updated_client))
result['changed'] = (before_client != updated_client)
module.exit_json(**result)
kc.update_client(cid, updated_client, realm=realm)
after_client = kc.get_client_by_id(cid, realm=realm)
if before_client == after_client:
result['changed'] = False
if module._diff:
result['diff'] = dict(before=sanitize_cr(before_client),
after=sanitize_cr(after_client))
result['end_state'] = sanitize_cr(after_client)
result['msg'] = 'Client %s has been updated.' % updated_client['clientId']
module.exit_json(**result)
else:
# Delete existing client
result['changed'] = True
if module._diff:
result['diff']['before'] = sanitize_cr(before_client)
result['diff']['after'] = ''
if module.check_mode:
module.exit_json(**result)
kc.delete_client(cid, realm=realm)
result['proposed'] = dict()
result['end_state'] = dict()
result['msg'] = 'Client %s has been deleted.' % before_client['clientId']
module.exit_json(**result)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
resmo/ansible
|
lib/ansible/modules/identity/keycloak/keycloak_client.py
|
Python
|
gpl-3.0
| 33,175 | 0.003044 |
# Find the name with the maximum number of distinct letters (ignoring spaces,
# case, and duplicate letters); if there is a tie, choose the alphabetically first name.
# import sys
# text = "".join(sys.stdin.readlines())
# name_list = text.split("\n")
inputList = ["kylan charles", "raymond strickland", "julissa shepard", "andrea meza", "destiny alvarado"]
inputList2 = ["maria garcia", "smith hernandez", "hernandez smith", "mary martinez", "james johnson"]
inputList3 = ["Sheldon Cooper", "Howord Wolowitz", "Amy Farrah Fowler", "Leonard Hofstadter", "Bernadette R"]
name_store = {}
for name in inputList3:
name_store[name] = len(set(name.lower().replace(" ", ""))) # Remove spaces using replace and remove duplicates using set
res = []
maxLen = -float("inf")
for name in name_store.keys():
if name_store.get(name) > maxLen:
res.clear()
res.append(name)
maxLen = name_store.get(name)
elif name_store.get(name) == maxLen:
res.append(name)
res.sort()
print(res[0])
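# With inputList3 above, this prints "Leonard Hofstadter", whose name contains
# 11 distinct letters once spaces are removed and case is folded.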
|
saisankargochhayat/algo_quest
|
Company-Based/SAP/social_sabatical_name.py
|
Python
|
apache-2.0
| 938 | 0.007463 |
import os
from spinspy import local_data
def isdim(dim):
return os.path.isfile('{0:s}{1:s}grid'.format(local_data.path, dim))
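# Illustrative usage (the path below is hypothetical): with local_data.path set
# to '/data/run/', isdim('x') returns whether the file '/data/run/xgrid' exists.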
|
bastorer/SPINSpy
|
spinspy/isdim.py
|
Python
|
mit
| 178 | 0.011236 |
from django.db.models import Q
from links.models import Post
from comments.models import ThreadedComment as comments
from django.utils import timezone
from datetime import datetime, timedelta
from django.contrib import messages
KARMA_LOW = 100
KARMA_MEDIUM = 1000
KARMA_HIGH = 5000
INTERVAL_LOW = 3600
INTERVAL_MEDIUM = 360
INTERVAL_HIGH = 36
COMMENT_PER_INTERVAL = 20
COMMENT_MAX = 80
def allowed_to_comment(user):
karma = user.userprofile.karma
now = timezone.now()
time_threshold = now - timedelta(seconds=3600)
comments_number = comments.objects.filter(Q(user=user) & Q(submit_date__gt=time_threshold)).count()
if karma < KARMA_HIGH:
if comments_number > COMMENT_PER_INTERVAL:
return False
else:
return True
else:
if comments_number > COMMENT_MAX:
return False
else:
return True
def allowed_to_post(request, user):
karma = user.userprofile.karma
print karma
now = timezone.now()
try:
posted = Post.objects.filter(post__submitter__exact=user).latest('submit_date')
diff = now - posted.submit_date
diff = diff.total_seconds()
except:
diff = INTERVAL_LOW + 1
print diff
if karma < KARMA_LOW:
result = diff > INTERVAL_LOW
if not result:
messages.success(request, 'Please try in an hour!')
return result
elif KARMA_LOW <= karma < KARMA_HIGH:
result = diff > INTERVAL_MEDIUM
if not result:
messages.success(request, 'Please try in ten minutes!')
return result
else:
result = diff > INTERVAL_HIGH
if not result:
messages.warning(request, 'Please try in 30 sec')
return result
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
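# Illustrative example: with request.META['HTTP_X_FORWARDED_FOR'] set to
# '203.0.113.7, 198.51.100.1' (client, then proxy), this returns '203.0.113.7';
# without the header it falls back to REMOTE_ADDR.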
|
sheshkovsky/jaryan
|
links/utils.py
|
Python
|
apache-2.0
| 1,792 | 0.029576 |
from decimal import Decimal
from customers.models import Customer
from django.contrib import auth
from products.models import *
from django_quickblocks.models import *
import re
from smartmin.views import *
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from orders.models import Order
from locales.models import Country, Currency
from locales.widgets import CoordinatesPickerField
from customers.models import Location
from transactions.models import Credit, Debit
from django.db.models import Sum
# check if the user is trusted
def is_trusted(request):
return request.session.get('trusted', False)
# gate keeper to verify for visitors with/out secret pass
def has_secret_pass(request):
SECRET_PASS = 'iamhungry'
return request.GET.get('password') == SECRET_PASS
class LoginForm (forms.Form):
phone_number = forms.CharField()
def clean_phone_number(self):
phone_number = self.cleaned_data['phone_number']
phone_number = re.sub("[^0-9]", "", phone_number)
if len(phone_number) != 10:
raise forms.ValidationError("Please enter a phone number with 10 digits, e.g. 0788 55 55 55")
return phone_number
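# e.g. an input of '0788 55 55 55' is normalized to '0788555555'; anything that
# does not reduce to exactly 10 digits raises a ValidationError.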
def home(request):
country = Country.objects.get(country_code='RW')
request.session['currency'] = country.currency
# populate favorite stores; for now we load all stores in Rwanda
favorite_stores = []
for store in Store.objects.filter(country=country):
favorite_stores.append(store)
context = dict(product_list=Product.objects.filter(is_active=True), country=country, favorite_stores=favorite_stores, currency=country.currency)
if has_secret_pass(request) or is_trusted(request):
request.session['trusted'] = True
return render_to_response('public/home.html', context, context_instance=RequestContext(request))
else:
return render_to_response('public/home_login.html', context, context_instance=RequestContext(request))
def cart(request):
order = Order.from_request(request)
country = Country.objects.get(country_code='RW')
if request.method == 'POST':
if 'add_product' in request.REQUEST:
product = Product.objects.get(id=request.REQUEST['add_product'], is_active=True)
order.add_single(product)
if set(('update', 'checkout', 'shop')).intersection(set(request.REQUEST.keys())):
for item in order.items.all():
if 'remove_%d' % item.id in request.REQUEST:
order.items.filter(pk=item.id).update(is_active=False)
for addon in item.product.addons.all():
exists_in_order = item.addons.filter(addon=addon)
form_name = 'addon_%d_%d' % (item.id, addon.id)
exists_in_form = form_name in request.REQUEST
if exists_in_order and not exists_in_form:
exists_in_order.update(is_active=False)
elif exists_in_form and not exists_in_order:
item.add_on(addon)
if 'checkout' in request.REQUEST:
return HttpResponseRedirect(reverse('public_checkout'))
elif 'shop' in request.REQUEST:
return HttpResponseRedirect("%s?password=iamhungry" % reverse('public_home'))
context = dict(cart=True, order=order, country=country, currency=country.currency)
return render_to_response('public/cart.html', context, context_instance=RequestContext(request))
def checkout(request):
order = Order.from_request(request)
country = Country.objects.get(country_code='RW')
initial_data = dict()
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
phone_number = form.cleaned_data['phone_number']
phone_number = country.derive_international_number(phone_number)
customer = Customer.get_or_create_customer(phone_number)
customer.send_password()
request.session['customer'] = customer
return HttpResponseRedirect(reverse('public_login'))
else:
form=LoginForm(initial_data)
context = dict(order=order, country=country, currency=country.currency, form=form)
return render_to_response('public/checkout.html', context, context_instance=RequestContext(request))
class PasswordForm(forms.Form):
password = forms.CharField()
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordForm, self).__init__(*args, **kwargs)
def clean_password(self):
password = self.cleaned_data['password']
password = re.sub("[^0-9]", "", password)
if not self.user.check_password(password):
raise forms.ValidationError("Sorry, that password doesn't match, try again.")
return password
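# e.g. a password entered as '12-34-56' is reduced to '123456' before being
# checked against the user's stored password.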
class CustomerForm(forms.Form):
first_name = forms.CharField()
last_name = forms.CharField()
email = forms.EmailField()
def login(request):
user = request.session['customer']
if request.method == 'POST':
password_form = PasswordForm(request.POST, user=user)
customer_form = CustomerForm(request.POST)
if password_form.is_valid() and customer_form.is_valid():
customer = request.session['customer']
customer.first_name = customer_form.cleaned_data['first_name']
customer.last_name = customer_form.cleaned_data['last_name']
customer.email = customer_form.cleaned_data['email']
customer.save()
user = auth.authenticate(username=customer.username, password=password_form.cleaned_data['password'])
auth.login(request, user)
order = Order.from_request(request)
order.user = user
order.save()
if 'location' in request.session:
location = request.session['location']
return HttpResponseRedirect("%s?lat=%s&lng=%s" % (reverse('public.location_create'), location.lat, location.lng))
else:
return HttpResponseRedirect(reverse('public.location_create'))
else:
password_form = PasswordForm(user=user)
customer_form = CustomerForm(initial={'first_name':user.first_name, 'last_name':user.last_name, 'email': user.email})
context = dict(password_form=password_form, customer_form=customer_form, user=user)
return render_to_response('public/login.html', context, context_instance=RequestContext(request))
class LocationForm(forms.ModelForm):
coordinates = CoordinatesPickerField(required=True)
def clean(self):
clean = self.cleaned_data
if 'coordinates' in clean and self.instance:
self.instance.lat = clean['coordinates']['lat']
self.instance.lng = clean['coordinates']['lng']
return clean
class Meta:
model = Location
fields = ('building', 'business', 'hints', 'coordinates')
class LocationCRUDL(SmartCRUDL):
model = Location
actions = ('create',)
permissions = False
class Create(SmartCreateView):
form_class = LocationForm
fields = ('building', 'business', 'hints', 'coordinates')
def derive_initial(self):
if self.object and self.object.lat and self.object.lng:
return dict(coordinates=(dict(lat=self.object.lat, lng=self.object.lng))) #pragma: no cover
else:
country = Country.objects.get(country_code='RW')
return dict(coordinates=(dict(lat=country.bounds_lat, lng=country.bounds_lng)))
def get_context_data(self, **kwargs):
context = super(LocationCRUDL.Create, self).get_context_data(**kwargs)
context['display_fields'] = ['hints', 'nickname']
context['order'] = Order.from_request(self.request)
# add our country and it's root locations
context['country'] = Country.objects.get(country_code='RW')
# set the country on our form's location picker
self.form.fields['coordinates'].set_country(context['country'])
return context
def pre_save(self, obj):
obj = super(LocationCRUDL.Create, self).pre_save(obj)
obj.customer = self.request.user
return obj
def post_save(self, obj):
obj = super(LocationCRUDL.Create, self).post_save(obj)
self.order = Order.from_request(self.request)
self.order.location = obj
self.order.stage = 'L'
self.order.save()
self.request.session['location'] = obj
return obj
def get_success_url(self):
return reverse('public_pay', args=[self.order.id])
def pay(request, id):
order = Order.objects.get(id=id)
country = Country.objects.get(country_code='RW')
if not Credit.objects.filter(creditor=order.user):
Credit.objects.create(phone=order.user.username, creditor=order.user, amount=Decimal('0'), created_by=order.user, modified_by=order.user)
# get the sum of all credit associated with this user
credit_amount = Credit.objects.filter(creditor=order.user).aggregate(Sum('amount'))
# calculate balance
balance = credit_amount['amount__sum']
debit_amount = Debit.objects.filter(customer=order.user).aggregate(Sum('amount'))
if debit_amount['amount__sum']:
balance = credit_amount['amount__sum'] - debit_amount['amount__sum'] #pragma: no cover
context = dict(order=order, user=order.user, country=country, currency=country.currency)
# check if the sum of amount is equal or more that the total value on this order
if balance - order.total_with_delivery >= 0:
# create debit of order total value hence the balance will change on refresh
debit = Debit.objects.create(amount=order.total_with_delivery, order=order, customer=order.user, created_by=order.user, modified_by=order.user)
debit_amount = Debit.objects.filter(customer=order.user).aggregate(Sum('amount'))
# mark the order as paid
order.stage = 'P'
# clear all but trusted and currency
for sesskey in request.session.keys():
if sesskey not in ('trusted', 'currency'):
del request.session[sesskey]
# send message to "code@nyaruka.com"
from django.core.mail import EmailMessage
from django.core.mail import send_mail
from django.conf import settings
mail_subject = "Motome Order on the line. Right now!"
mail_message = "Hello, Motome team %s just send an order job, please make sure you follow up until he/she receive all ordered items" % order.user.username
send_mail(mail_subject, mail_message, settings.DEFAULT_FROM_EMAIL, ['code@nyaruka.com'])
# deduct the order total from the running balance
balance = balance - order.total_with_delivery
return HttpResponseRedirect(reverse('public_success'))
context['balance'] = balance
return render_to_response('public/pay.html', context, context_instance=RequestContext(request))
def success(request):
context = dict()
return render_to_response('public/success.html', context, context_instance=RequestContext(request))
def confirm(request, id):
order = Order.objects.get(id=id)
country = Country.objects.get(country_code='RW')
context = dict(order=order, user=order.user, country=country, currency=country.currency)
return render_to_response('public/confirm.html', context, context_instance=RequestContext(request))
|
nyaruka/motome
|
motome/public/views.py
|
Python
|
bsd-3-clause
| 11,780 | 0.005772 |
import os
import subprocess
import yaml
from django.views.generic import View
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from django.core.urlresolvers import reverse
from django.core import serializers
from django.conf import settings
from django.template.loader import render_to_string
from braces.views import JSONResponseMixin, AjaxResponseMixin
from config.forms import ClusterSettingsForm, UCSMSettingsForm, OSSettingsForm, HostSettingsForm, NodeSettingsForm, NetworkSettingsForm, OpenstackSettingsForm
from config.models import OpenstackSettings, NodeSettings
from config.helpers import construct_conf_file
def traverse_tree(dictionary):
try:
for key, value in dictionary.items():
if value == None:
dictionary[key] = u""
traverse_tree(value)
except Exception, e:
pass
return
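# Illustrative example: traverse_tree({'a': None, 'b': {'c': None}}) mutates the
# dict in place to {'a': u'', 'b': {'c': u''}}, so the yaml.safe_dump calls
# below emit empty strings instead of 'null'.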
class HomePageView(TemplateView):
template_name = "home.html"
class SettingsTextView(TemplateView):
template_name = "config.template"
def get_context_data(self, **kwargs):
context = super(SettingsTextView, self).get_context_data(**kwargs)
try:
context['nodes'] = serializers.serialize('python', NodeSettings.objects.all())
context['settings'] = serializers.serialize('python', OpenstackSettings.objects.all())
except IndexError:
pass
return context
class SettingsView(TemplateView):
template_name = "os_template.html"
def get_context_data(self, **kwargs):
context = super(SettingsView, self).get_context_data(**kwargs)
context['cluster_form'] = ClusterSettingsForm()
context['ucsm_form'] = UCSMSettingsForm()
context['os_form'] = OSSettingsForm()
context['network_form'] = NetworkSettingsForm
context['host_form'] = HostSettingsForm()
context['node_form'] = NodeSettingsForm()
context['settings_form'] = OpenstackSettingsForm()
context['nodes'] = NodeSettings.objects.all()
context['settings'] = {}
try:
context['settings'] = OpenstackSettings.objects.all()[0]
context['settings_form'] = OpenstackSettingsForm(instance=context['settings'])
except IndexError:
pass
scenario_list = []
print settings.PROJECT_PATH
for filename in os.listdir(os.path.join(settings.PROJECT_PATH, 'static-raw', 'scenarios')):
if filename.endswith(".yaml"):
scenario_list.append(filename.split('.')[0])
context['scenario_list'] = scenario_list
return context
class SubmitSettingsView(FormView):
template_name = "os_template.html"
form_class = OpenstackSettingsForm
# # add the request to the kwargs
# def get_form_kwargs(self):
# kwargs = super(RegisterView, self).get_form_kwargs()
# kwargs['request'] = self.request
# return kwargs
def form_invalid(self, form):
return super(SubmitSettingsView, self).form_valid(form)
def form_valid(self, form):
OpenstackSettings.objects.all().delete()
config = form.save()
if self.request.POST.get('summary-table-settings', 0) == 'scenario':
try:
iplist_file_path = os.path.join(settings.IPLIST_DESTINATION, 'iplist.yaml')
iplist_content = ""
processed_iplist_content = {}
if os.path.isfile(iplist_file_path):
with open(iplist_file_path, 'r') as content_file:
iplist_content = content_file.read()
processed_iplist_content = yaml.load(iplist_content)
nodes = int(self.request.POST.get('scenario_node_number', 0))
iplist = {}
for x in range(nodes):
hostname = self.request.POST.get('scenario_hostname__'+str(x), "")
ip = self.request.POST.get('scenario_ip__'+str(x), "")
role = self.request.POST.get('role-'+str(x), "")
pndn = 'sys/chassis-'+self.request.POST.get('chassis_number__'+str(x), 0)+'/blade-'+self.request.POST.get('blade_number__'+str(x), 0)
if hostname and ip and role:
iplist[pndn] = {'name': hostname, 'ip':ip, 'role':role, 'type':role}
processed_iplist_content['iplist'] = iplist
traverse_tree(processed_iplist_content)
with open(iplist_file_path, 'w') as content_file:
content_file.write( yaml.safe_dump(processed_iplist_content, default_flow_style=False))
cobbler_file_path = os.path.join(settings.COBBLER_DESTINATION, 'cobbler.yaml')
cobbler_content = ""
processed_cobbler_content = {}
if os.path.isfile(cobbler_file_path):
with open(cobbler_file_path, 'r') as content_file:
cobbler_content = content_file.read()
processed_cobbler_content = yaml.load(cobbler_content)
for x in range(nodes):
hostname = self.request.POST.get('scenario_hostname__'+str(x), "")
ip = self.request.POST.get('scenario_ip__'+str(x), "")
role = self.request.POST.get('role-'+str(x), "")
if hostname and ip and role:
if hostname in processed_cobbler_content:
processed_cobbler_content[hostname]['hostname'] = hostname
processed_cobbler_content[hostname]['power_address'] =ip
else:
processed_cobbler_content[hostname] = {'hostname': hostname, 'power_address':ip}
traverse_tree(processed_cobbler_content)
# with open(cobbler_file_path, 'w') as content_file:
# content_file.write( yaml.safe_dump(processed_cobbler_content, default_flow_style=False))
except Exception, e:
pass
else:
NodeSettings.objects.all().delete()
nodes = int(self.request.POST.get('node_number', 0))
for x in range(nodes):
node_name = self.request.POST.get('node_name__'+str(x), "")
node_number = x
chassis_number = int(self.request.POST.get('chassis_number__'+str(x), 0))
blade_number = int(self.request.POST.get('blade_number__'+str(x), 0))
aio = (x == int(self.request.POST.get('aio', 0)))
compute = ('compute__' + str(x) ) in self.request.POST
network = (x == int(self.request.POST.get('network', 0)))
swift = ('swift__' + str(x) ) in self.request.POST
cinder = ('cinder__' + str(x) ) in self.request.POST
NodeSettings(node_name=node_name, node_number=node_number, aio=aio, compute=compute, network=network,
swift=swift, cinder=cinder, chassis_number=chassis_number, blade_number=blade_number).save()
config_nodes = serializers.serialize('python', NodeSettings.objects.all())
config_settings = serializers.serialize('python', OpenstackSettings.objects.all())
config_text = render_to_string('config.template', {'nodes': config_nodes, 'settings':config_settings})
config_file_path = os.path.join(settings.PROJECT_PATH, 'openstack_settings.txt')
config_file = open(config_file_path, 'w')
config_file.write(config_text)
config_file.close()
construct_conf_file(config=config, query_str_dict = self.request.POST)
return super(SubmitSettingsView, self).form_valid(form)
def get_success_url(self):
return reverse('settings')
class NodeDiscoveryView(JSONResponseMixin, AjaxResponseMixin, View):
def post_ajax(self, request, *args, **kwargs):
hostname = request.POST.get('hostname', '')
username = request.POST.get('username', '')
password = request.POST.get('password', '')
script_path = os.path.join(settings.PROJECT_PATH, 'static-raw', 'scripts', 'NodeInventory.py')
try:
subprocess.call(['python', script_path , '-i' , hostname , '-u' , username , '-p', password,])
except Exception, e:
pass
#file_path = os.path.join('.', hostname+'_inventory.yaml')
#file_path = os.path.join(settings.PROJECT_PATH, '..', hostname+'_invenotry.yaml')
#file_path = os.path.join(settings.PROJECT_PATH, 'static-raw', 'scripts', '10.1.1.130_invenotry.yaml') #debug file
file_path = os.path.join(settings.PROJECT_PATH, 'static-raw', 'scripts', hostname+'_inventory.yaml')
content = ""
with open(file_path, 'r') as content_file:
content = content_file.read()
#print content
processed_content = yaml.load(content)
#print processed_content
json_list = []
for chassis, chassis_dict in processed_content.iteritems():
for node, node_dict in chassis_dict.iteritems():
html_result = ''
text_result = ''
cpu_type = ''
adaptor_type = ''
if len(node_dict['ProcessorUnits']) >0:
for cpu, cpu_dict in node_dict['ProcessorUnits'].iteritems():
cpu_type = cpu_dict['model']
if len(node_dict['AdaptorUnits']) >0:
for adaptor, adaptor_dict in node_dict['AdaptorUnits'].iteritems():
adaptor_type = adaptor_dict['model']
html_result += chassis + '/' + node + '<br>'
if len(node_dict['assignedToDn']) >0:
html_result += 'Service Profile Associated: ' + node_dict['assignedToDn'] + '<br>'
html_result += 'CPU: ' + node_dict['numOfCpus'] + '<br>'
html_result += 'CPU Type: ' + cpu_type + '<br>'
html_result += 'Mem: ' + node_dict['availableMemory'] + '<br>'
html_result += 'Disks: ' + str(len(node_dict['StorageUnits'])) + '<br>'
html_result += 'Adaptors: ' + node_dict['numOfAdaptors'] + '<br>'
html_result += 'Adaptor Type: ' + adaptor_type + '<br>'
text_result += chassis + '/' + node + '\n'
if len(node_dict['assignedToDn']) >0:
text_result += 'Service Profile: ' + node_dict['assignedToDn'] + '\n'
text_result += 'CPU: ' + node_dict['numOfCpus'] + '\n'
text_result += 'CPU Type: ' + cpu_type + '\n'
text_result += 'Mem: ' + node_dict['availableMemory'] + '\n'
text_result += 'Disks: ' + str(len(node_dict['StorageUnits'])) + '\n'
text_result += 'Adaptors: ' + node_dict['numOfAdaptors'] + '\n'
text_result += 'Adaptor Type: ' + adaptor_type + '\n'
json_list.append([html_result, text_result, chassis.split('-')[1], node.split('-')[1], ], )
return self.render_json_response(json_list)
class ScenarioDiscoveryView(JSONResponseMixin, AjaxResponseMixin, View):
def post_ajax(self, request, *args, **kwargs):
hostname = request.POST.get('hostname', '')
username = request.POST.get('username', '')
password = request.POST.get('password', '')
scenario_name = request.POST.get('scenario_name', '')
script_path = os.path.join(settings.PROJECT_PATH, 'static-raw', 'scripts', 'NodeInventory.py')
# try:
# subprocess.call(['python', script_path , '-i' , hostname , '-u' , username , '-p', password,])
# except Exception, e:
# print e
# print script_path
# pass
#file_path = os.path.join('.', hostname+'_inventory.yaml')
#file_path = os.path.join(settings.PROJECT_PATH, '..', hostname+'_invenotry.yaml')
#file_path = os.path.join(settings.PROJECT_PATH, 'static-raw', 'scripts', '10.1.1.130_invenotry.yaml') #debug file
file_path = os.path.join(settings.PROJECT_PATH, 'static-raw', 'scripts', hostname+'_inventory.yaml')
content = ""
with open(file_path, 'r') as content_file:
content = content_file.read()
#print content
processed_content = yaml.load(content)
#print processed_content
scenario_path = os.path.join(settings.PROJECT_PATH, 'static-raw', 'scenarios', scenario_name+'.yaml')
scenario_content = ""
with open(scenario_path, 'r') as content_file:
scenario_content = content_file.read()
processed_scenario_content = yaml.load(scenario_content)
json_list = []
for chassis, chassis_dict in processed_content.iteritems():
for node, node_dict in chassis_dict.iteritems():
html_result = ''
text_result = ''
cpu_type = ''
adaptor_type = ''
if len(node_dict['ProcessorUnits']) >0:
for cpu, cpu_dict in node_dict['ProcessorUnits'].iteritems():
cpu_type = cpu_dict['model']
if len(node_dict['AdaptorUnits']) >0:
for adaptor, adaptor_dict in node_dict['AdaptorUnits'].iteritems():
adaptor_type = adaptor_dict['model']
html_result += chassis + '/' + node + '<br>'
if len(node_dict['assignedToDn']) >0:
html_result += 'Service Profile Associated: ' + node_dict['assignedToDn'] + '<br>'
html_result += 'CPU: ' + node_dict['numOfCpus'] + '<br>'
html_result += 'CPU Type: ' + cpu_type + '<br>'
html_result += 'Mem: ' + node_dict['availableMemory'] + '<br>'
html_result += 'Disks: ' + str(len(node_dict['StorageUnits'])) + '<br>'
html_result += 'Adaptors: ' + node_dict['numOfAdaptors'] + '<br>'
html_result += 'Adaptor Type: ' + adaptor_type + '<br>'
text_result += chassis + '/' + node + '\n'
if len(node_dict['assignedToDn']) >0:
text_result += 'Service Profile: ' + node_dict['assignedToDn'] + '\n'
text_result += 'CPU: ' + node_dict['numOfCpus'] + '\n'
text_result += 'CPU Type: ' + cpu_type + '\n'
text_result += 'Mem: ' + node_dict['availableMemory'] + '\n'
text_result += 'Disks: ' + str(len(node_dict['StorageUnits'])) + '\n'
text_result += 'Adaptors: ' + node_dict['numOfAdaptors'] + '\n'
text_result += 'Adaptor Type: ' + adaptor_type + '\n'
json_list.append([html_result, text_result, chassis.split('-')[1], node.split('-')[1], ], )
role_list = []
for role, role_dict in processed_scenario_content['roles'].iteritems():
role_list.append(role)
return self.render_json_response({'nodes':json_list, 'roles': role_list})
|
Havate/havate-openstack
|
proto-build/gui/horizon/openstack-dashboard/config/views.py
|
Python
|
apache-2.0
| 15,052 | 0.007109 |
"""Suite CodeWarrior suite: Terms for scripting the CodeWarrior IDE
Level 0, version 0
Generated from /Volumes/Sap/Applications (Mac OS 9)/Metrowerks CodeWarrior 7.0/Metrowerks CodeWarrior/CodeWarrior IDE 4.2.5
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'CWIE'
class CodeWarrior_suite_Events:
_argmap_add = {
'new' : 'kocl',
'with_data' : 'data',
'to_targets' : 'TTGT',
'to_group' : 'TGRP',
}
def add(self, _object, _attributes={}, **_arguments):
"""add: add elements to a project or target
Required argument: an AE object reference
Keyword argument new: the class of the new element or elements to add
Keyword argument with_data: the initial data for the element or elements
Keyword argument to_targets: the targets to which the new element or elements will be added
Keyword argument to_group: the group to which the new element or elements will be added
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'ADDF'
aetools.keysubst(_arguments, self._argmap_add)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def build(self, _no_object=None, _attributes={}, **_arguments):
"""build: build a project or target (equivalent of the Make menu command)
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'MAKE'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def check(self, _object=None, _attributes={}, **_arguments):
"""check: check the syntax of a file in a project or target
Required argument: the file or files to be checked
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'CHEK'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def compile_file(self, _object=None, _attributes={}, **_arguments):
"""compile file: compile a file in a project or target
Required argument: the file or files to be compiled
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'COMP'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def disassemble_file(self, _object=None, _attributes={}, **_arguments):
"""disassemble file: disassemble a file in a project or target
Required argument: the file or files to be disassembled
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'DASM'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_export = {
'in_' : 'kfil',
}
def export(self, _no_object=None, _attributes={}, **_arguments):
"""export: Export the project file as an XML file
Keyword argument in_: the XML file in which to export the project
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'EXPT'
aetools.keysubst(_arguments, self._argmap_export)
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def remove_object_code(self, _no_object=None, _attributes={}, **_arguments):
"""remove object code: remove object code from a project or target
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'RMOB'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def remove_target_files(self, _object, _attributes={}, **_arguments):
"""remove target files: remove files from a target
Required argument: an AE object reference
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'RMFL'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def run_target(self, _no_object=None, _attributes={}, **_arguments):
"""run target: run a project or target
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'RUN '
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def touch_file(self, _object=None, _attributes={}, **_arguments):
"""touch file: touch a file in a project or target for compilation
Required argument: the file or files to be touched
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'TOCH'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def update(self, _no_object=None, _attributes={}, **_arguments):
"""update: bring a project or target up to date
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'UP2D'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
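# Illustrative sketch (not part of the generated output): event suites like this
# are normally mixed into an aetools.TalkTo subclass to script the application, e.g.
#   class CodeWarrior(CodeWarrior_suite_Events, aetools.TalkTo):
#       pass
#   ide = CodeWarrior('CWIE', start=1)
#   ide.update()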
class single_class_browser(aetools.ComponentItem):
"""single class browser - a single class browser """
want = '1BRW'
class _Prop_inherits(aetools.NProperty):
"""inherits - all properties and elements of the given class are inherited by this class. """
which = 'c@#^'
want = 'TXTD'
single_class_browsers = single_class_browser
class single_class_hierarchy(aetools.ComponentItem):
"""single class hierarchy - a single class hierarchy document """
want = '1HIR'
single_class_hierarchies = single_class_hierarchy
class class_browser(aetools.ComponentItem):
"""class browser - a class browser """
want = 'BROW'
class_browsers = class_browser
class file_compare_document(aetools.ComponentItem):
"""file compare document - a file compare document """
want = 'COMP'
file_compare_documents = file_compare_document
class catalog_document(aetools.ComponentItem):
"""catalog document - a browser catalog document """
want = 'CTLG'
catalog_documents = catalog_document
class editor_document(aetools.ComponentItem):
"""editor document - an editor document """
want = 'EDIT'
editor_documents = editor_document
class class_hierarchy(aetools.ComponentItem):
"""class hierarchy - a class hierarchy document """
want = 'HIER'
class_hierarchies = class_hierarchy
class project_inspector(aetools.ComponentItem):
"""project inspector - the project inspector """
want = 'INSP'
project_inspectors = project_inspector
class message_document(aetools.ComponentItem):
"""message document - a message document """
want = 'MSSG'
message_documents = message_document
class build_progress_document(aetools.ComponentItem):
"""build progress document - a build progress document """
want = 'PRGS'
build_progress_documents = build_progress_document
class project_document(aetools.ComponentItem):
"""project document - a project document """
want = 'PRJD'
class _Prop_current_target(aetools.NProperty):
"""current target - the current target """
which = 'CURT'
want = 'TRGT'
# element 'TRGT' as ['indx', 'name', 'test', 'rang']
project_documents = project_document
class subtarget(aetools.ComponentItem):
"""subtarget - a target that is prerequisite for another target """
want = 'SBTG'
class _Prop_link_against_output(aetools.NProperty):
"""link against output - is the output of this subtarget linked into its dependent target? """
which = 'LNKO'
want = 'bool'
class _Prop_target(aetools.NProperty):
"""target - the target that is dependent on this subtarget """
which = 'TrgT'
want = 'TRGT'
subtargets = subtarget
class target_file(aetools.ComponentItem):
"""target file - a source or header file in a target """
want = 'SRCF'
class _Prop_code_size(aetools.NProperty):
"""code size - the size of the code (in bytes) produced by compiling this source file """
which = 'CSZE'
want = 'long'
class _Prop_compiled_date(aetools.NProperty):
"""compiled date - the date and this source file was last compiled """
which = 'CMPD'
want = 'ldt '
class _Prop_data_size(aetools.NProperty):
"""data size - the size of the date (in bytes) produced by compiling this source file """
which = 'DSZE'
want = 'long'
class _Prop_debug(aetools.NProperty):
"""debug - is debugging information generated for this source file? """
which = 'DBUG'
want = 'bool'
class _Prop_dependents(aetools.NProperty):
"""dependents - the source files that need this source file in order to build """
which = 'DPND'
want = 'list'
class _Prop_id(aetools.NProperty):
"""id - the unique ID number of the target file """
which = 'ID '
want = 'long'
class _Prop_init_before(aetools.NProperty):
"""init before - is the \xd4initialize before\xd5 flag set for this shared library? """
which = 'INIT'
want = 'bool'
class _Prop_link_index(aetools.NProperty):
"""link index - the index of the source file in its target\xd5s link order (-1 if source file is not in link order) """
which = 'LIDX'
want = 'long'
class _Prop_linked(aetools.NProperty):
"""linked - is the source file in the link order of its target? """
which = 'LINK'
want = 'bool'
class _Prop_location(aetools.NProperty):
"""location - the location of the target file on disk """
which = 'FILE'
want = 'fss '
class _Prop_merge_output(aetools.NProperty):
"""merge output - is this shared library merged into another code fragment? """
which = 'MRGE'
want = 'bool'
class _Prop_modified_date(aetools.NProperty):
"""modified date - the date and time this source file was last modified """
which = 'MODD'
want = 'ldt '
class _Prop_path(aetools.NProperty):
"""path - the path of the source file on disk """
which = 'Path'
want = 'itxt'
class _Prop_prerequisites(aetools.NProperty):
"""prerequisites - the source files needed to build this source file """
which = 'PRER'
want = 'list'
class _Prop_type(aetools.NProperty):
"""type - the type of source file """
which = 'FTYP'
want = 'FTYP'
class _Prop_weak_link(aetools.NProperty):
"""weak link - is this shared library linked weakly? """
which = 'WEAK'
want = 'bool'
target_files = target_file
class symbol_browser(aetools.ComponentItem):
"""symbol browser - a symbol browser """
want = 'SYMB'
symbol_browsers = symbol_browser
class ToolServer_worksheet(aetools.ComponentItem):
"""ToolServer worksheet - a ToolServer worksheet """
want = 'TOOL'
ToolServer_worksheets = ToolServer_worksheet
class target(aetools.ComponentItem):
"""target - a target in a project """
want = 'TRGT'
class _Prop_name(aetools.NProperty):
"""name - """
which = 'pnam'
want = 'itxt'
class _Prop_project_document(aetools.NProperty):
"""project document - the project document that contains this target """
which = 'PrjD'
want = 'PRJD'
# element 'SBTG' as ['indx', 'test', 'rang']
# element 'SRCF' as ['indx', 'test', 'rang']
targets = target
class text_document(aetools.ComponentItem):
"""text document - a document that contains text """
want = 'TXTD'
class _Prop_modified(aetools.NProperty):
"""modified - Has the document been modified since the last save? """
which = 'imod'
want = 'bool'
class _Prop_selection(aetools.NProperty):
"""selection - the selection visible to the user """
which = 'sele'
want = 'csel'
# element 'cha ' as ['indx', 'rele', 'rang', 'test']
# element 'cins' as ['rele']
# element 'clin' as ['indx', 'rang', 'rele']
# element 'ctxt' as ['rang']
text_documents = text_document
single_class_browser._superclassnames = ['text_document']
single_class_browser._privpropdict = {
'inherits' : _Prop_inherits,
}
single_class_browser._privelemdict = {
}
import Standard_Suite
single_class_hierarchy._superclassnames = ['document']
single_class_hierarchy._privpropdict = {
'inherits' : _Prop_inherits,
}
single_class_hierarchy._privelemdict = {
}
class_browser._superclassnames = ['text_document']
class_browser._privpropdict = {
'inherits' : _Prop_inherits,
}
class_browser._privelemdict = {
}
file_compare_document._superclassnames = ['text_document']
file_compare_document._privpropdict = {
'inherits' : _Prop_inherits,
}
file_compare_document._privelemdict = {
}
catalog_document._superclassnames = ['text_document']
catalog_document._privpropdict = {
'inherits' : _Prop_inherits,
}
catalog_document._privelemdict = {
}
editor_document._superclassnames = ['text_document']
editor_document._privpropdict = {
'inherits' : _Prop_inherits,
}
editor_document._privelemdict = {
}
class_hierarchy._superclassnames = ['document']
class_hierarchy._privpropdict = {
'inherits' : _Prop_inherits,
}
class_hierarchy._privelemdict = {
}
project_inspector._superclassnames = ['document']
project_inspector._privpropdict = {
'inherits' : _Prop_inherits,
}
project_inspector._privelemdict = {
}
message_document._superclassnames = ['text_document']
message_document._privpropdict = {
'inherits' : _Prop_inherits,
}
message_document._privelemdict = {
}
build_progress_document._superclassnames = ['document']
build_progress_document._privpropdict = {
'inherits' : _Prop_inherits,
}
build_progress_document._privelemdict = {
}
project_document._superclassnames = ['document']
project_document._privpropdict = {
'current_target' : _Prop_current_target,
'inherits' : _Prop_inherits,
}
project_document._privelemdict = {
'target' : target,
}
subtarget._superclassnames = ['target']
subtarget._privpropdict = {
'inherits' : _Prop_inherits,
'link_against_output' : _Prop_link_against_output,
'target' : _Prop_target,
}
subtarget._privelemdict = {
}
target_file._superclassnames = []
target_file._privpropdict = {
'code_size' : _Prop_code_size,
'compiled_date' : _Prop_compiled_date,
'data_size' : _Prop_data_size,
'debug' : _Prop_debug,
'dependents' : _Prop_dependents,
'id' : _Prop_id,
'init_before' : _Prop_init_before,
'link_index' : _Prop_link_index,
'linked' : _Prop_linked,
'location' : _Prop_location,
'merge_output' : _Prop_merge_output,
'modified_date' : _Prop_modified_date,
'path' : _Prop_path,
'prerequisites' : _Prop_prerequisites,
'type' : _Prop_type,
'weak_link' : _Prop_weak_link,
}
target_file._privelemdict = {
}
symbol_browser._superclassnames = ['text_document']
symbol_browser._privpropdict = {
'inherits' : _Prop_inherits,
}
symbol_browser._privelemdict = {
}
ToolServer_worksheet._superclassnames = ['text_document']
ToolServer_worksheet._privpropdict = {
'inherits' : _Prop_inherits,
}
ToolServer_worksheet._privelemdict = {
}
target._superclassnames = []
target._privpropdict = {
'name' : _Prop_name,
'project_document' : _Prop_project_document,
}
target._privelemdict = {
'subtarget' : subtarget,
'target_file' : target_file,
}
text_document._superclassnames = ['document']
text_document._privpropdict = {
'inherits' : _Prop_inherits,
'modified' : _Prop_modified,
'selection' : _Prop_selection,
}
text_document._privelemdict = {
'character' : Standard_Suite.character,
'insertion_point' : Standard_Suite.insertion_point,
'line' : Standard_Suite.line,
'text' : Standard_Suite.text,
}
_Enum_DKND = {
'project' : 'PRJD', # a project document
'editor_document' : 'EDIT', # an editor document
'message' : 'MSSG', # a message document
'file_compare' : 'COMP', # a file compare document
'catalog_document' : 'CTLG', # a browser catalog
'class_browser' : 'BROW', # a class browser document
'single_class_browser' : '1BRW', # a single class browser document
'symbol_browser' : 'SYMB', # a symbol browser document
'class_hierarchy' : 'HIER', # a class hierarchy document
'single_class_hierarchy' : '1HIR', # a single class hierarchy document
'project_inspector' : 'INSP', # a project inspector
'ToolServer_worksheet' : 'TOOL', # the ToolServer worksheet
'build_progress_document' : 'PRGS', # the build progress window
}
_Enum_FTYP = {
'library_file' : 'LIBF', # a library file
'project_file' : 'PRJF', # a project file
'resource_file' : 'RESF', # a resource file
'text_file' : 'TXTF', # a text file
'unknown_file' : 'UNKN', # unknown file type
}
_Enum_Inte = {
'never_interact' : 'eNvr', # never allow user interactions
'interact_with_self' : 'eInS', # allow user interaction only when an AppleEvent is sent from within CodeWarrior
'interact_with_local' : 'eInL', # allow user interaction when AppleEvents are sent from applications on the same machine (default)
'interact_with_all' : 'eInA', # allow user interaction from both local and remote AppleEvents
}
_Enum_PERM = {
'read_write' : 'RdWr', # the file is open with read/write permission
'read_only' : 'Read', # the file is open with read/only permission
'checked_out_read_write' : 'CkRW', # the file is checked out with read/write permission
'checked_out_read_only' : 'CkRO', # the file is checked out with read/only permission
'checked_out_read_modify' : 'CkRM', # the file is checked out with read/modify permission
'locked' : 'Lock', # the file is locked on disk
'none' : 'LNNO', # the file is new
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'1BRW' : single_class_browser,
'1HIR' : single_class_hierarchy,
'BROW' : class_browser,
'COMP' : file_compare_document,
'CTLG' : catalog_document,
'EDIT' : editor_document,
'HIER' : class_hierarchy,
'INSP' : project_inspector,
'MSSG' : message_document,
'PRGS' : build_progress_document,
'PRJD' : project_document,
'SBTG' : subtarget,
'SRCF' : target_file,
'SYMB' : symbol_browser,
'TOOL' : ToolServer_worksheet,
'TRGT' : target,
'TXTD' : text_document,
}
_propdeclarations = {
'CMPD' : _Prop_compiled_date,
'CSZE' : _Prop_code_size,
'CURT' : _Prop_current_target,
'DBUG' : _Prop_debug,
'DPND' : _Prop_dependents,
'DSZE' : _Prop_data_size,
'FILE' : _Prop_location,
'FTYP' : _Prop_type,
'ID ' : _Prop_id,
'INIT' : _Prop_init_before,
'LIDX' : _Prop_link_index,
'LINK' : _Prop_linked,
'LNKO' : _Prop_link_against_output,
'MODD' : _Prop_modified_date,
'MRGE' : _Prop_merge_output,
'PRER' : _Prop_prerequisites,
'Path' : _Prop_path,
'PrjD' : _Prop_project_document,
'TrgT' : _Prop_target,
'WEAK' : _Prop_weak_link,
'c@#^' : _Prop_inherits,
'imod' : _Prop_modified,
'pnam' : _Prop_name,
'sele' : _Prop_selection,
}
_compdeclarations = {
}
_enumdeclarations = {
'DKND' : _Enum_DKND,
'FTYP' : _Enum_FTYP,
'Inte' : _Enum_Inte,
'PERM' : _Enum_PERM,
}
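#
# Illustrative sketch, not part of the generated suite: the tables above
# map four-character Apple Event codes to the classes and properties
# declared in this module, so a dispatcher built on aetools can resolve
# incoming codes.  For example (codes taken from the tables above):
#
#     klass = _classdeclarations['PRJD']    # -> project_document
#     prop = _propdeclarations['CURT']      # -> _Prop_current_target
#
# Going the other way, each class carries its code in 'want' and each
# property carries its code in 'which'.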
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/lib-scriptpackages/CodeWarrior/CodeWarrior_suite.py
|
Python
|
mit
| 23,097 | 0.012296 |
#! /usr/bin/env python
#edited on c9
import os
import sys
if len(sys.argv) == 1:
    print "usage >>seqCheck.py [fileRootName, path ('.') for cwd]"
    sys.exit()
elif len(sys.argv) == 2:
    fileRootName = sys.argv[1]
    rootpath = os.getcwd()
elif len(sys.argv) == 3:
    fileRootName = sys.argv[1]
    # join rather than concatenate, so both '.' and a bare folder name work
    rootpath = os.path.join(os.getcwd(), sys.argv[2])
    print rootpath
else:
    print "usage >>seqCheck.py [fileRootName, path ('.') for cwd]"
    sys.exit()
# note: fileRootName is parsed but not yet used to filter the file list
'''
mypath = os.getcwd()
print("Path at terminal when executing this file")
print(os.getcwd() + "\n")
print("This file path, relative to os.getcwd()")
print(__file__ + "\n")
print("This file full path (following symlinks)")
full_path = os.path.realpath(__file__)
print(full_path + "\n")
print("This file directory and name")
path, file = os.path.split(full_path)
print(path + ' --> ' + file + "\n")
print("This file directory only")
print(os.path.dirname(full_path))
'''
f = []
for (dirpath, dirnames, filenames) in os.walk(rootpath):
    # only the top level of rootpath is wanted, so stop after one pass
    f.extend(filenames)
    break
print len(f)
num = []
for x in f:
    # expect names like "name.0001.ext"; the sequence number is the
    # second dot-separated field
    y = x.split(".")
    if len(y) > 1:
        try:
            num.append(int(y[1]))
        except ValueError:
            # not a numbered file (int() raises ValueError); skip it
            pass
mylist = sorted(num)
if not mylist:
    print "no numbered files found"
    sys.exit()
end = len(mylist)
start = mylist[0]
print("start is " + str(start))
# walk the sorted numbers; the first mismatch marks the first gap
for x in mylist:
    if x != start:
        print(str(start) + " is missing")
        break
    start += 1
print("There are " + str(end) + " numbered files")
#x = os.walk(mypath)
#for y in x:
# print y
|
bkonersman/utils
|
seqCheck.py
|
Python
|
artistic-2.0
| 1,631 | 0.010423 |