text (stringlengths, 6-947k) | repo_name (stringlengths, 5-100) | path (stringlengths, 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---
"""
Serializers and ModelSerializers are similar to Forms and ModelForms.
Unlike forms, they are not constrained to dealing with HTML output and
form-encoded input.

Serialization in REST framework is a two-phase process:

1. Serializers marshal between complex types like model instances and
   Python primitives.
2. The process of marshalling between Python primitives and request and
   response content is handled by parsers and renderers.
"""
from __future__ import unicode_literals
import copy
import datetime
import inspect
import types
import warnings
from decimal import Decimal
from django.contrib.contenttypes.generic import GenericForeignKey
from django.core.paginator import Page
from django.db import models
from django.forms import widgets
from django.utils.datastructures import SortedDict
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.compat import get_concrete_model, six
from rest_framework.settings import api_settings
# Note: We do the following so that users of the framework can use this style:
#
# example_field = serializers.CharField(...)
#
# This helps keep the separation between model fields, form fields, and
# serializer fields more explicit.
from rest_framework.relations import * # NOQA
from rest_framework.fields import * # NOQA
def _resolve_model(obj):
"""
Resolve supplied `obj` to a Django model class.
`obj` must be a Django model class itself, or a string
    representation of one. Useful in situations like GH #1225 where
Django may not have resolved a string-based reference to a model in
another model's foreign key definition.
String representations should have the format:
'appname.ModelName'
"""
if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
app_name, model_name = obj.split('.')
return models.get_model(app_name, model_name)
elif inspect.isclass(obj) and issubclass(obj, models.Model):
return obj
else:
raise ValueError("{0} is not a Django model".format(obj))
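# Illustrative example (assumes Django's bundled auth app): both
# `_resolve_model('auth.User')` and `_resolve_model(User)` return the User
# model class; any other input raises ValueError.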
def pretty_name(name):
"""Converts 'first_name' to 'First name'"""
if not name:
return ''
return name.replace('_', ' ').capitalize()
class RelationsList(list):
_deleted = []
class NestedValidationError(ValidationError):
"""
The default ValidationError behavior is to stringify each item in the list
if the messages are a list of error messages.
    In the case of nested serializers, where the parent has many children,
    the child's `serializer.errors` will be a list of dicts. In the case
    of a single child, `serializer.errors` will be a dict.
We need to override the default behavior to get properly nested error dicts.
"""
def __init__(self, message):
if isinstance(message, dict):
self._messages = [message]
else:
self._messages = message
@property
def messages(self):
return self._messages
class DictWithMetadata(dict):
"""
    A dict-like object that can have additional properties attached.
"""
def __getstate__(self):
"""
Used by pickle (e.g., caching).
Overridden to remove the metadata from the dict, since it shouldn't be
pickled and may in some instances be unpickleable.
"""
return dict(self)
class SortedDictWithMetadata(SortedDict):
"""
    A sorted dict-like object that can have additional properties attached.
"""
def __getstate__(self):
"""
Used by pickle (e.g., caching).
        Overridden to remove the metadata from the dict, since it shouldn't be
        pickled and may in some instances be unpickleable.
"""
return SortedDict(self).__dict__
def _is_protected_type(obj):
"""
True if the object is a native datatype that does not need to
be serialized further.
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal,
basestring)
)
def _get_declared_fields(bases, attrs):
"""
    Create an ordered dict of serializer field instances from the passed-in
    'attrs', plus any fields on the base classes (in 'bases').
Note that all fields from the base classes are used.
"""
fields = [(field_name, attrs.pop(field_name))
for field_name, obj in list(six.iteritems(attrs))
if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1].creation_counter)
# If this class is subclassing another Serializer, add that Serializer's
# fields. Note that we loop over the bases in *reverse*. This is necessary
# in order to maintain the correct order of fields.
for base in bases[::-1]:
if hasattr(base, 'base_fields'):
fields = list(base.base_fields.items()) + fields
return SortedDict(fields)
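# Illustrative sketch of the resulting order (names are hypothetical): given
#   class Base(Serializer): a = CharField()
#   class Child(Base): b = CharField()
# Child.base_fields is a SortedDict ordered as [('a', ...), ('b', ...)] --
# fields inherited from the bases come first, followed by the fields declared
# on the class itself in declaration order.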
class SerializerMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = _get_declared_fields(bases, attrs)
return super(SerializerMetaclass, cls).__new__(cls, name, bases, attrs)
class SerializerOptions(object):
"""
Meta class options for Serializer
"""
def __init__(self, meta):
self.depth = getattr(meta, 'depth', 0)
self.fields = getattr(meta, 'fields', ())
self.exclude = getattr(meta, 'exclude', ())
class BaseSerializer(WritableField):
"""
This is the Serializer implementation.
We need to implement it as `BaseSerializer` due to metaclass magicks.
"""
class Meta(object):
pass
_options_class = SerializerOptions
_dict_class = SortedDictWithMetadata
def __init__(self, instance=None, data=None, files=None,
context=None, partial=False, many=None,
allow_add_remove=False, **kwargs):
super(BaseSerializer, self).__init__(**kwargs)
self.opts = self._options_class(self.Meta)
self.parent = None
self.root = None
self.partial = partial
self.many = many
self.allow_add_remove = allow_add_remove
self.context = context or {}
self.init_data = data
self.init_files = files
self.object = instance
self.fields = self.get_fields()
self._data = None
self._files = None
self._errors = None
if many and instance is not None and not hasattr(instance, '__iter__'):
raise ValueError('instance should be a queryset or other iterable with many=True')
if allow_add_remove and not many:
raise ValueError('allow_add_remove should only be used for bulk updates, but you have not set many=True')
#####
# Methods to determine which fields to use when (de)serializing objects.
def get_default_fields(self):
"""
Return the complete set of default fields for the object, as a dict.
"""
return {}
def get_fields(self):
"""
Returns the complete set of fields for the object as a dict.
This will be the set of any explicitly declared fields,
plus the set of fields returned by get_default_fields().
"""
ret = SortedDict()
# Get the explicitly declared fields
base_fields = copy.deepcopy(self.base_fields)
for key, field in base_fields.items():
ret[key] = field
# Add in the default fields
default_fields = self.get_default_fields()
for key, val in default_fields.items():
if key not in ret:
ret[key] = val
# If 'fields' is specified, use those fields, in that order.
if self.opts.fields:
assert isinstance(self.opts.fields, (list, tuple)), '`fields` must be a list or tuple'
new = SortedDict()
for key in self.opts.fields:
new[key] = ret[key]
ret = new
# Remove anything in 'exclude'
if self.opts.exclude:
assert isinstance(self.opts.exclude, (list, tuple)), '`exclude` must be a list or tuple'
for key in self.opts.exclude:
ret.pop(key, None)
for key, field in ret.items():
field.initialize(parent=self, field_name=key)
return ret
#####
# Methods to convert or revert from objects <--> primitive representations.
def get_field_key(self, field_name):
"""
Return the key that should be used for a given field.
"""
return field_name
def restore_fields(self, data, files):
"""
Core of deserialization, together with `restore_object`.
Converts a dictionary of data into a dictionary of deserialized fields.
"""
reverted_data = {}
if data is not None and not isinstance(data, dict):
self._errors['non_field_errors'] = ['Invalid data']
return None
for field_name, field in self.fields.items():
field.initialize(parent=self, field_name=field_name)
try:
field.field_from_native(data, files, field_name, reverted_data)
except ValidationError as err:
self._errors[field_name] = list(err.messages)
return reverted_data
def perform_validation(self, attrs):
"""
Run `validate_<fieldname>()` and `validate()` methods on the serializer
"""
for field_name, field in self.fields.items():
if field_name in self._errors:
continue
source = field.source or field_name
if self.partial and source not in attrs:
continue
try:
validate_method = getattr(self, 'validate_%s' % field_name, None)
if validate_method:
attrs = validate_method(attrs, source)
except ValidationError as err:
self._errors[field_name] = self._errors.get(field_name, []) + list(err.messages)
# If there are already errors, we don't run .validate() because
        # field validation failed and thus `attrs` may not be complete,
        # which in turn can cause inconsistent validation errors.
if not self._errors:
try:
attrs = self.validate(attrs)
except ValidationError as err:
if hasattr(err, 'message_dict'):
for field_name, error_messages in err.message_dict.items():
self._errors[field_name] = self._errors.get(field_name, []) + list(error_messages)
elif hasattr(err, 'messages'):
self._errors['non_field_errors'] = err.messages
return attrs
def validate(self, attrs):
"""
Stub method, to be overridden in Serializer subclasses
"""
return attrs
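    # Illustrative sketch of the validation hooks run by `perform_validation`
    # (serializer and field names are hypothetical):
    #
    #   class EventSerializer(Serializer):
    #       start = DateField()
    #       finish = DateField()
    #
    #       def validate_start(self, attrs, source):
    #           if attrs[source] < datetime.date.today():
    #               raise ValidationError('start may not be in the past')
    #           return attrs
    #
    #       def validate(self, attrs):
    #           if attrs['finish'] < attrs['start']:
    #               raise ValidationError('finish must not precede start')
    #           return attrs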
def restore_object(self, attrs, instance=None):
"""
Deserialize a dictionary of attributes into an object instance.
You should override this method to control how deserialized objects
are instantiated.
"""
if instance is not None:
instance.update(attrs)
return instance
return attrs
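    # Illustrative override (hypothetical `Comment` class): a plain Serializer
    # can restore a real object instead of returning the attrs dict.
    #
    #   def restore_object(self, attrs, instance=None):
    #       if instance is not None:
    #           instance.content = attrs.get('content', instance.content)
    #           return instance
    #       return Comment(**attrs)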
def to_native(self, obj):
"""
Serialize objects -> primitives.
"""
ret = self._dict_class()
ret.fields = self._dict_class()
for field_name, field in self.fields.items():
if field.read_only and obj is None:
continue
field.initialize(parent=self, field_name=field_name)
key = self.get_field_key(field_name)
value = field.field_to_native(obj, field_name)
method = getattr(self, 'transform_%s' % field_name, None)
if callable(method):
value = method(obj, value)
if not getattr(field, 'write_only', False):
ret[key] = value
ret.fields[key] = self.augment_field(field, field_name, key, value)
return ret
def from_native(self, data, files=None):
"""
Deserialize primitives -> objects.
"""
self._errors = {}
if data is not None or files is not None:
attrs = self.restore_fields(data, files)
if attrs is not None:
attrs = self.perform_validation(attrs)
else:
self._errors['non_field_errors'] = ['No input provided']
if not self._errors:
return self.restore_object(attrs, instance=getattr(self, 'object', None))
def augment_field(self, field, field_name, key, value):
# This horrible stuff is to manage serializers rendering to HTML
field._errors = self._errors.get(key) if self._errors else None
field._name = field_name
field._value = self.init_data.get(key) if self._errors and self.init_data else value
if not field.label:
field.label = pretty_name(key)
return field
def field_to_native(self, obj, field_name):
"""
Override default so that the serializer can be used as a nested field
across relationships.
"""
if self.write_only:
return None
if self.source == '*':
return self.to_native(obj)
# Get the raw field value
try:
source = self.source or field_name
value = obj
for component in source.split('.'):
if value is None:
break
value = get_component(value, component)
except ObjectDoesNotExist:
return None
if is_simple_callable(getattr(value, 'all', None)):
return [self.to_native(item) for item in value.all()]
if value is None:
return None
if self.many is not None:
many = self.many
else:
many = hasattr(value, '__iter__') and not isinstance(value, (Page, dict, six.text_type))
if many:
return [self.to_native(item) for item in value]
return self.to_native(value)
def field_from_native(self, data, files, field_name, into):
"""
Override default so that the serializer can be used as a writable
nested field across relationships.
"""
if self.read_only:
return
try:
value = data[field_name]
except KeyError:
if self.default is not None and not self.partial:
# Note: partial updates shouldn't set defaults
value = copy.deepcopy(self.default)
else:
if self.required:
raise ValidationError(self.error_messages['required'])
return
if self.source == '*':
if value:
reverted_data = self.restore_fields(value, {})
if not self._errors:
into.update(reverted_data)
else:
if value in (None, ''):
into[(self.source or field_name)] = None
else:
# Set the serializer object if it exists
obj = get_component(self.parent.object, self.source or field_name) if self.parent.object else None
# If we have a model manager or similar object then we need
# to iterate through each instance.
if (self.many and
not hasattr(obj, '__iter__') and
is_simple_callable(getattr(obj, 'all', None))):
obj = obj.all()
kwargs = {
'instance': obj,
'data': value,
'context': self.context,
'partial': self.partial,
'many': self.many,
'allow_add_remove': self.allow_add_remove
}
serializer = self.__class__(**kwargs)
if serializer.is_valid():
into[self.source or field_name] = serializer.object
else:
# Propagate errors up to our parent
raise NestedValidationError(serializer.errors)
def get_identity(self, data):
"""
This hook is required for bulk update.
It is used to determine the canonical identity of a given object.
Note that the data has not been validated at this point, so we need
to make sure that we catch any cases of incorrect datatypes being
passed to this method.
"""
try:
return data.get('id', None)
except AttributeError:
return None
@property
def errors(self):
"""
Run deserialization and return error data,
setting self.object if no errors occurred.
"""
if self._errors is None:
data, files = self.init_data, self.init_files
if self.many is not None:
many = self.many
else:
many = hasattr(data, '__iter__') and not isinstance(data, (Page, dict, six.text_type))
if many:
warnings.warn('Implicit list/queryset serialization is deprecated. '
'Use the `many=True` flag when instantiating the serializer.',
DeprecationWarning, stacklevel=3)
if many:
ret = RelationsList()
errors = []
update = self.object is not None
if update:
# If this is a bulk update we need to map all the objects
# to a canonical identity so we can determine which
# individual object is being updated for each item in the
# incoming data
objects = self.object
identities = [self.get_identity(self.to_native(obj)) for obj in objects]
identity_to_objects = dict(zip(identities, objects))
if hasattr(data, '__iter__') and not isinstance(data, (dict, six.text_type)):
for item in data:
if update:
# Determine which object we're updating
identity = self.get_identity(item)
self.object = identity_to_objects.pop(identity, None)
if self.object is None and not self.allow_add_remove:
ret.append(None)
errors.append({'non_field_errors': ['Cannot create a new item, only existing items may be updated.']})
continue
ret.append(self.from_native(item, None))
errors.append(self._errors)
if update and self.allow_add_remove:
ret._deleted = identity_to_objects.values()
self._errors = any(errors) and errors or []
else:
self._errors = {'non_field_errors': ['Expected a list of items.']}
else:
ret = self.from_native(data, files)
if not self._errors:
self.object = ret
return self._errors
def is_valid(self):
return not self.errors
@property
def data(self):
"""
Returns the serialized data on the serializer.
"""
if self._data is None:
obj = self.object
if self.many is not None:
many = self.many
else:
many = hasattr(obj, '__iter__') and not isinstance(obj, (Page, dict))
if many:
warnings.warn('Implicit list/queryset serialization is deprecated. '
'Use the `many=True` flag when instantiating the serializer.',
DeprecationWarning, stacklevel=2)
if many:
self._data = [self.to_native(item) for item in obj]
else:
self._data = self.to_native(obj)
return self._data
def save_object(self, obj, **kwargs):
obj.save(**kwargs)
def delete_object(self, obj):
obj.delete()
def save(self, **kwargs):
"""
Save the deserialized object and return it.
"""
# Clear cached _data, which may be invalidated by `save()`
self._data = None
if isinstance(self.object, list):
[self.save_object(item, **kwargs) for item in self.object]
if self.object._deleted:
[self.delete_object(item) for item in self.object._deleted]
else:
self.save_object(self.object, **kwargs)
return self.object
def metadata(self):
"""
Return a dictionary of metadata about the fields on the serializer.
Useful for things like responding to OPTIONS requests, or generating
API schemas for auto-documentation.
"""
return SortedDict(
[(field_name, field.metadata())
for field_name, field in six.iteritems(self.fields)]
)
class Serializer(six.with_metaclass(SerializerMetaclass, BaseSerializer)):
pass
class ModelSerializerOptions(SerializerOptions):
"""
Meta class options for ModelSerializer
"""
def __init__(self, meta):
super(ModelSerializerOptions, self).__init__(meta)
self.model = getattr(meta, 'model', None)
self.read_only_fields = getattr(meta, 'read_only_fields', ())
self.write_only_fields = getattr(meta, 'write_only_fields', ())
class ModelSerializer(Serializer):
"""
A serializer that deals with model instances and querysets.
"""
_options_class = ModelSerializerOptions
field_mapping = {
models.AutoField: IntegerField,
models.FloatField: FloatField,
models.IntegerField: IntegerField,
models.PositiveIntegerField: IntegerField,
models.SmallIntegerField: IntegerField,
models.PositiveSmallIntegerField: IntegerField,
models.DateTimeField: DateTimeField,
models.DateField: DateField,
models.TimeField: TimeField,
models.DecimalField: DecimalField,
models.EmailField: EmailField,
models.CharField: CharField,
models.URLField: URLField,
models.SlugField: SlugField,
models.TextField: CharField,
models.CommaSeparatedIntegerField: CharField,
models.BooleanField: BooleanField,
models.NullBooleanField: BooleanField,
models.FileField: FileField,
models.ImageField: ImageField,
}
def get_default_fields(self):
"""
Return all the fields that should be serialized for the model.
"""
cls = self.opts.model
assert cls is not None, \
"Serializer class '%s' is missing 'model' Meta option" % self.__class__.__name__
opts = get_concrete_model(cls)._meta
ret = SortedDict()
nested = bool(self.opts.depth)
# Deal with adding the primary key field
pk_field = opts.pk
while pk_field.rel and pk_field.rel.parent_link:
# If model is a child via multitable inheritance, use parent's pk
pk_field = pk_field.rel.to._meta.pk
field = self.get_pk_field(pk_field)
if field:
ret[pk_field.name] = field
# Deal with forward relationships
forward_rels = [field for field in opts.fields if field.serialize]
forward_rels += [field for field in opts.many_to_many if field.serialize]
for model_field in forward_rels:
has_through_model = False
if model_field.rel:
to_many = isinstance(model_field,
models.fields.related.ManyToManyField)
related_model = _resolve_model(model_field.rel.to)
if to_many and not model_field.rel.through._meta.auto_created:
has_through_model = True
if model_field.rel and nested:
if len(inspect.getargspec(self.get_nested_field).args) == 2:
warnings.warn(
'The `get_nested_field(model_field)` call signature '
'is due to be deprecated. '
'Use `get_nested_field(model_field, related_model, '
                        'to_many)` instead',
PendingDeprecationWarning
)
field = self.get_nested_field(model_field)
else:
field = self.get_nested_field(model_field, related_model, to_many)
elif model_field.rel:
if len(inspect.getargspec(self.get_nested_field).args) == 3:
warnings.warn(
'The `get_related_field(model_field, to_many)` call '
'signature is due to be deprecated. '
'Use `get_related_field(model_field, related_model, '
                        'to_many)` instead',
PendingDeprecationWarning
)
field = self.get_related_field(model_field, to_many=to_many)
else:
field = self.get_related_field(model_field, related_model, to_many)
else:
field = self.get_field(model_field)
if field:
if has_through_model:
field.read_only = True
ret[model_field.name] = field
# Deal with reverse relationships
if not self.opts.fields:
reverse_rels = []
else:
# Reverse relationships are only included if they are explicitly
# present in the `fields` option on the serializer
reverse_rels = opts.get_all_related_objects()
reverse_rels += opts.get_all_related_many_to_many_objects()
for relation in reverse_rels:
accessor_name = relation.get_accessor_name()
if not self.opts.fields or accessor_name not in self.opts.fields:
continue
related_model = relation.model
to_many = relation.field.rel.multiple
has_through_model = False
is_m2m = isinstance(relation.field,
models.fields.related.ManyToManyField)
if (is_m2m and
hasattr(relation.field.rel, 'through') and
not relation.field.rel.through._meta.auto_created):
has_through_model = True
if nested:
field = self.get_nested_field(None, related_model, to_many)
else:
field = self.get_related_field(None, related_model, to_many)
if field:
if has_through_model:
field.read_only = True
ret[accessor_name] = field
# Ensure that 'read_only_fields' is an iterable
assert isinstance(self.opts.read_only_fields, (list, tuple)), '`read_only_fields` must be a list or tuple'
# Add the `read_only` flag to any fields that have been specified
# in the `read_only_fields` option
for field_name in self.opts.read_only_fields:
assert field_name not in self.base_fields.keys(), (
"field '%s' on serializer '%s' specified in "
"`read_only_fields`, but also added "
"as an explicit field. Remove it from `read_only_fields`." %
(field_name, self.__class__.__name__))
assert field_name in ret, (
"Non-existant field '%s' specified in `read_only_fields` "
"on serializer '%s'." %
(field_name, self.__class__.__name__))
ret[field_name].read_only = True
# Ensure that 'write_only_fields' is an iterable
assert isinstance(self.opts.write_only_fields, (list, tuple)), '`write_only_fields` must be a list or tuple'
for field_name in self.opts.write_only_fields:
assert field_name not in self.base_fields.keys(), (
"field '%s' on serializer '%s' specified in "
"`write_only_fields`, but also added "
"as an explicit field. Remove it from `write_only_fields`." %
(field_name, self.__class__.__name__))
assert field_name in ret, (
"Non-existant field '%s' specified in `write_only_fields` "
"on serializer '%s'." %
(field_name, self.__class__.__name__))
ret[field_name].write_only = True
return ret
def get_pk_field(self, model_field):
"""
Returns a default instance of the pk field.
"""
return self.get_field(model_field)
def get_nested_field(self, model_field, related_model, to_many):
"""
Creates a default instance of a nested relational field.
Note that model_field will be `None` for reverse relationships.
"""
class NestedModelSerializer(ModelSerializer):
class Meta:
model = related_model
depth = self.opts.depth - 1
return NestedModelSerializer(many=to_many)
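    # Illustrative sketch (hypothetical model): with `depth = 1` in Meta, a
    # forward relation such as an `author` ForeignKey is rendered by a nested
    # serializer built here instead of a flat primary-key field.
    #
    #   class BlogPostSerializer(ModelSerializer):
    #       class Meta:
    #           model = BlogPost
    #           depth = 1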
def get_related_field(self, model_field, related_model, to_many):
"""
Creates a default instance of a flat relational field.
Note that model_field will be `None` for reverse relationships.
"""
# TODO: filter queryset using:
# .using(db).complex_filter(self.rel.limit_choices_to)
kwargs = {
'queryset': related_model._default_manager,
'many': to_many
}
if model_field:
kwargs['required'] = not(model_field.null or model_field.blank)
if model_field.help_text is not None:
kwargs['help_text'] = model_field.help_text
if model_field.verbose_name is not None:
kwargs['label'] = model_field.verbose_name
if not model_field.editable:
kwargs['read_only'] = True
return PrimaryKeyRelatedField(**kwargs)
def get_field(self, model_field):
"""
Creates a default instance of a basic non-relational field.
"""
kwargs = {}
if model_field.null or model_field.blank:
kwargs['required'] = False
if isinstance(model_field, models.AutoField) or not model_field.editable:
kwargs['read_only'] = True
if model_field.has_default():
kwargs['default'] = model_field.get_default()
if issubclass(model_field.__class__, models.TextField):
kwargs['widget'] = widgets.Textarea
if model_field.verbose_name is not None:
kwargs['label'] = model_field.verbose_name
if model_field.help_text is not None:
kwargs['help_text'] = model_field.help_text
# TODO: TypedChoiceField?
if model_field.flatchoices: # This ModelField contains choices
kwargs['choices'] = model_field.flatchoices
if model_field.null:
kwargs['empty'] = None
return ChoiceField(**kwargs)
# put this below the ChoiceField because min_value isn't a valid initializer
if issubclass(model_field.__class__, models.PositiveIntegerField) or\
issubclass(model_field.__class__, models.PositiveSmallIntegerField):
kwargs['min_value'] = 0
attribute_dict = {
models.CharField: ['max_length'],
models.CommaSeparatedIntegerField: ['max_length'],
models.DecimalField: ['max_digits', 'decimal_places'],
models.EmailField: ['max_length'],
models.FileField: ['max_length'],
models.ImageField: ['max_length'],
models.SlugField: ['max_length'],
models.URLField: ['max_length'],
}
if model_field.__class__ in attribute_dict:
attributes = attribute_dict[model_field.__class__]
for attribute in attributes:
kwargs.update({attribute: getattr(model_field, attribute)})
try:
return self.field_mapping[model_field.__class__](**kwargs)
except KeyError:
return ModelField(model_field=model_field, **kwargs)
def get_validation_exclusions(self, instance=None):
"""
Return a list of field names to exclude from model validation.
"""
cls = self.opts.model
opts = get_concrete_model(cls)._meta
exclusions = [field.name for field in opts.fields + opts.many_to_many]
for field_name, field in self.fields.items():
field_name = field.source or field_name
if field_name in exclusions \
and not field.read_only \
and (field.required or hasattr(instance, field_name)) \
and not isinstance(field, Serializer):
exclusions.remove(field_name)
return exclusions
def full_clean(self, instance):
"""
Perform Django's full_clean, and populate the `errors` dictionary
if any validation errors occur.
Note that we don't perform this inside the `.restore_object()` method,
so that subclasses can override `.restore_object()`, and still get
the full_clean validation checking.
"""
try:
instance.full_clean(exclude=self.get_validation_exclusions(instance))
except ValidationError as err:
self._errors = err.message_dict
return None
return instance
def restore_object(self, attrs, instance=None):
"""
Restore the model instance.
"""
m2m_data = {}
related_data = {}
nested_forward_relations = {}
meta = self.opts.model._meta
# Reverse fk or one-to-one relations
for (obj, model) in meta.get_all_related_objects_with_model():
field_name = obj.get_accessor_name()
if field_name in attrs:
related_data[field_name] = attrs.pop(field_name)
# Reverse m2m relations
for (obj, model) in meta.get_all_related_m2m_objects_with_model():
field_name = obj.get_accessor_name()
if field_name in attrs:
m2m_data[field_name] = attrs.pop(field_name)
# Forward m2m relations
for field in meta.many_to_many + meta.virtual_fields:
if isinstance(field, GenericForeignKey):
continue
if field.name in attrs:
m2m_data[field.name] = attrs.pop(field.name)
# Nested forward relations - These need to be marked so we can save
# them before saving the parent model instance.
for field_name in attrs.keys():
if isinstance(self.fields.get(field_name, None), Serializer):
nested_forward_relations[field_name] = attrs[field_name]
# Create an empty instance of the model
if instance is None:
instance = self.opts.model()
for key, val in attrs.items():
try:
setattr(instance, key, val)
except ValueError:
self._errors[key] = self.error_messages['required']
# Any relations that cannot be set until we've
# saved the model get hidden away on these
# private attributes, so we can deal with them
# at the point of save.
instance._related_data = related_data
instance._m2m_data = m2m_data
instance._nested_forward_relations = nested_forward_relations
return instance
def from_native(self, data, files):
"""
Override the default method to also include model field validation.
"""
instance = super(ModelSerializer, self).from_native(data, files)
if not self._errors:
return self.full_clean(instance)
def save_object(self, obj, **kwargs):
"""
Save the deserialized object.
"""
if getattr(obj, '_nested_forward_relations', None):
# Nested relationships need to be saved before we can save the
# parent instance.
for field_name, sub_object in obj._nested_forward_relations.items():
if sub_object:
self.save_object(sub_object)
setattr(obj, field_name, sub_object)
obj.save(**kwargs)
if getattr(obj, '_m2m_data', None):
for accessor_name, object_list in obj._m2m_data.items():
setattr(obj, accessor_name, object_list)
del(obj._m2m_data)
if getattr(obj, '_related_data', None):
related_fields = dict([
(field.get_accessor_name(), field)
for field, model
in obj._meta.get_all_related_objects_with_model()
])
for accessor_name, related in obj._related_data.items():
if isinstance(related, RelationsList):
# Nested reverse fk relationship
for related_item in related:
fk_field = related_fields[accessor_name].field.name
setattr(related_item, fk_field, obj)
self.save_object(related_item)
# Delete any removed objects
if related._deleted:
[self.delete_object(item) for item in related._deleted]
elif isinstance(related, models.Model):
# Nested reverse one-one relationship
fk_field = obj._meta.get_field_by_name(accessor_name)[0].field.name
setattr(related, fk_field, obj)
self.save_object(related)
else:
# Reverse FK or reverse one-one
setattr(obj, accessor_name, related)
del(obj._related_data)
class HyperlinkedModelSerializerOptions(ModelSerializerOptions):
"""
Options for HyperlinkedModelSerializer
"""
def __init__(self, meta):
super(HyperlinkedModelSerializerOptions, self).__init__(meta)
self.view_name = getattr(meta, 'view_name', None)
self.lookup_field = getattr(meta, 'lookup_field', None)
self.url_field_name = getattr(meta, 'url_field_name', api_settings.URL_FIELD_NAME)
class HyperlinkedModelSerializer(ModelSerializer):
"""
A subclass of ModelSerializer that uses hyperlinked relationships,
instead of primary key relationships.
"""
_options_class = HyperlinkedModelSerializerOptions
_default_view_name = '%(model_name)s-detail'
_hyperlink_field_class = HyperlinkedRelatedField
_hyperlink_identify_field_class = HyperlinkedIdentityField
def get_default_fields(self):
fields = super(HyperlinkedModelSerializer, self).get_default_fields()
if self.opts.view_name is None:
self.opts.view_name = self._get_default_view_name(self.opts.model)
if self.opts.url_field_name not in fields:
url_field = self._hyperlink_identify_field_class(
view_name=self.opts.view_name,
lookup_field=self.opts.lookup_field
)
ret = self._dict_class()
ret[self.opts.url_field_name] = url_field
ret.update(fields)
fields = ret
return fields
def get_pk_field(self, model_field):
if self.opts.fields and model_field.name in self.opts.fields:
return self.get_field(model_field)
def get_related_field(self, model_field, related_model, to_many):
"""
Creates a default instance of a flat relational field.
"""
# TODO: filter queryset using:
# .using(db).complex_filter(self.rel.limit_choices_to)
kwargs = {
'queryset': related_model._default_manager,
'view_name': self._get_default_view_name(related_model),
'many': to_many
}
if model_field:
kwargs['required'] = not(model_field.null or model_field.blank)
if model_field.help_text is not None:
kwargs['help_text'] = model_field.help_text
if model_field.verbose_name is not None:
kwargs['label'] = model_field.verbose_name
if self.opts.lookup_field:
kwargs['lookup_field'] = self.opts.lookup_field
return self._hyperlink_field_class(**kwargs)
def get_identity(self, data):
"""
This hook is required for bulk update.
We need to override the default, to use the url as the identity.
"""
try:
return data.get(self.opts.url_field_name, None)
except AttributeError:
return None
def _get_default_view_name(self, model):
"""
Return the view name to use if 'view_name' is not specified in 'Meta'
"""
model_meta = model._meta
format_kwargs = {
'app_label': model_meta.app_label,
'model_name': model_meta.object_name.lower()
}
return self._default_view_name % format_kwargs
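# Illustrative usage sketch (hypothetical model and URL configuration): with
# an 'account-detail' view registered, the serializer below exposes a 'url'
# identity field in place of the primary key and renders relations as
# hyperlinks.
#
#   class AccountSerializer(HyperlinkedModelSerializer):
#       class Meta:
#           model = Account
#           fields = ('url', 'owner', 'created')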
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/rest_framework/serializers.py | Python | agpl-3.0 | 41,575 | 0.001034 |
# Copyright (c) 2016, Matt Layman
import unittest
from tap.tests.factory import Factory
class TestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(TestCase, self).__init__(methodName)
self.factory = Factory()
| Mark-E-Hamilton/tappy | tap/tests/testcase.py | Python | bsd-2-clause | 258 | 0 |
#!/usr/bin/env python2.7
import json
import argparse
import codecs
import sys
def main(args):
data = args.in_lyapas.read()
data = json.dumps(data, ensure_ascii=False, encoding='utf-8')
json_data = '{"file": "' + args.in_lyapas.name + '",' + ' "source": ' + data +'}'
args.out_filename.write(json_data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Getting json from lyapas sources')
parser.add_argument('in_lyapas', help='Path in filesystem for input lyapas-file', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('-out_filename', help='Name of output file', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
main(args)
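# Example invocation (file names are hypothetical):
#   python2.7 lyapas_to_json.py program.lyapas -out_filename program.json
# This reads the LYaPAS source, wraps it as {"file": "program.lyapas",
# "source": "..."} and writes the result to program.json; stdin and stdout
# are used when the arguments are omitted.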
| tsu-iscd/lyapas-lcc | lyapas_to_json.py | Python | bsd-3-clause | 753 | 0.010624 |
"""
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
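# Illustrative note (not part of the upstream source): _solve_svd implements
# the closed-form ridge solution via the SVD X = U * diag(s) * V^T,
#   w = V * diag(s / (s**2 + alpha)) * U^T * y,
# which matches solving (X^T X + alpha*I) w = X^T y directly but stays stable
# when X is singular or ill-conditioned.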
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        The last four solvers support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
.. versionadded:: 0.17
return_intercept : boolean, default False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or array, shape = [n_targets]
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
if return_intercept and sparse.issparse(X) and solver != 'sag':
if solver != 'auto':
warnings.warn("In Ridge, only 'sag' solver can currently fit the "
"intercept when X is sparse. Solver has been "
"automatically changed into 'sag'.")
solver = 'sag'
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ))
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
init)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
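# Minimal usage sketch (illustrative, synthetic data):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X, y = rng.randn(20, 5), rng.randn(20)
#   coef = ridge_regression(X, y, alpha=1.0)                    # shape (5,)
#   coef, n_iter = ridge_regression(X, y, alpha=1.0,
#                                   solver='lsqr', return_n_iter=True)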
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
# temporary fix for fitting the intercept with sparse data using 'sag'
if sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=True)
self.intercept_ += y_offset
else:
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        The last four solvers support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
.. versionadded:: 0.17
*random_state* to support Stochastic Average Gradient.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
See also
--------
RidgeClassifier, RidgeCV, :class:`sklearn.kernel_ridge.KernelRidge`
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification" % (
self.__class__.__name__))
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y, centered_kernel=True):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
# the following emulates an additional constant regressor
# corresponding to fit_intercept=True
# but this is done only when the features have been centered
if centered_kernel:
K += np.ones_like(K)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
"""Helper function to avoid code duplication between self._errors and
self._values.
Notes
-----
We don't construct matrix G, instead compute action on y & diagonal.
"""
w = 1. / (v + alpha)
constant_column = np.var(Q, 0) < 1.e-12
# detect constant columns
w[constant_column] = 0 # cancel the regularization for the intercept
w[v == 0] = 0
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y, centered_kernel=True):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
if centered_kernel:
X = np.hstack((X, np.ones((X.shape[0], 1))))
            # to emulate the fit_intercept=True situation, add a column of ones
# Note that by centering, the other columns are orthogonal to that one
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
"""Helper function to avoid code duplication between self._errors_svd
and self._values_svd.
"""
constant_column = np.var(U, 0) < 1.e-12
# detect columns colinear to ones
w = ((v + alpha) ** -1) - (alpha ** -1)
w[constant_column] = - (alpha ** -1)
# cancel the regularization for the intercept
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
centered_kernel = not sparse.issparse(X) and self.fit_intercept
v, Q, QT_y = _pre_compute(X, y, centered_kernel)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = _errors(alpha, y, v, Q, QT_y)
else:
out, c = _values(alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions, but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them.
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
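# ---------------------------------------------------------------------------
# Illustrative sketch (not part of scikit-learn): a small, self-contained
# check of the leave-one-out identity quoted in the _RidgeGCV notes,
# looe = c / diag(G) with G = (K + alpha*I)^-1 and c = G y.  It assumes plain
# ridge regression with no intercept and no sample weights; all names below
# are illustrative only.
def _loo_identity_demo(n_samples=20, n_features=5, alpha=1.0, seed=0):
    import numpy as np  # repeated locally so the sketch stands alone

    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)

    # Dual quantities used by _RidgeGCV.
    K = X.dot(X.T)
    G = np.linalg.inv(K + alpha * np.eye(n_samples))
    c = G.dot(y)
    looe_fast = c / np.diag(G)

    # Brute-force leave-one-out ridge (primal form, no intercept).
    looe_slow = np.empty(n_samples)
    for i in range(n_samples):
        mask = np.arange(n_samples) != i
        A = X[mask].T.dot(X[mask]) + alpha * np.eye(n_features)
        w = np.linalg.solve(A, X[mask].T.dot(y[mask]))
        looe_slow[i] = y[i] - X[i].dot(w)

    # Both routes should agree up to numerical precision.
    assert np.allclose(looe_fast, looe_slow)
    return looe_fast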
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
                raise ValueError("cv!=None and store_cv_values=True "
                                 "are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv,
scoring=self.scoring)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used, else,
:class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features or when X is a sparse
matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
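# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of scikit-learn): how the two code paths
# in _BaseRidgeCV.fit are exercised.  With cv=None the efficient generalized
# cross-validation (_RidgeGCV) selects alpha_; with an integer cv a
# GridSearchCV over `alphas` is run instead.  Data and alpha values below are
# made up for illustration.
def _ridgecv_usage_demo():
    import numpy as np  # repeated locally so the sketch stands alone

    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    y = X.dot(np.array([1.0, -2.0, 0.5, 0.0])) + 0.1 * rng.randn(50)

    gcv_model = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)          # GCV / LOO path
    kfold_model = RidgeCV(alphas=(0.1, 1.0, 10.0), cv=5).fit(X, y)  # GridSearchCV path
    return gcv_model.alpha_, kfold_model.alpha_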
| waterponey/scikit-learn | sklearn/linear_model/ridge.py | Python | bsd-3-clause | 51,357 | 0.000156 |
import unittest
from os import path
from API.directoryscanner import find_runs_in_directory
path_to_module = path.abspath(path.dirname(__file__))
class TestDirectoryScanner(unittest.TestCase):
def test_sample_names_spaces(self):
runs = find_runs_in_directory(path.join(path_to_module, "sample-names-with-spaces"))
self.assertEqual(1, len(runs))
samples = runs[0].sample_list
self.assertEqual(3, len(samples))
for sample in samples:
self.assertEqual(sample.get_id(), sample.get_id().strip())
def test_single_end(self):
runs = find_runs_in_directory(path.join(path_to_module, "single_end"))
self.assertEqual(1, len(runs))
self.assertEqual("SINGLE_END", runs[0].metadata["layoutType"])
samples = runs[0].sample_list
self.assertEqual(3, len(samples))
for sample in samples:
self.assertFalse(sample.is_paired_end())
def test_completed_upload(self):
runs = find_runs_in_directory(path.join(path_to_module, "completed"))
self.assertEqual(0, len(runs))
def test_find_sample_sheet_name_variations(self):
runs = find_runs_in_directory(path.join(path_to_module, "sample-sheet-name-variations"))
self.assertEqual(1, len(runs))
| phac-nml/irida-miseq-uploader | Tests/unitTests/test_directoryscanner.py | Python | apache-2.0 | 1,282 | 0.00234 |
from selenium import webdriver
from time import sleep
driver=webdriver.Firefox()
# Open the 51zxw.net home page and take a screenshot
driver.get("http://www.51zxw.net/")
driver.get_screenshot_as_file(r'E:\0python_script\four\Webdriver\zxw.jpg')
sleep(2)
# Open the Baidu home page and take a screenshot
driver.get("http://www.baidu.com")
driver.get_screenshot_as_file(r'E:\0python_script\four\Webdriver\baidu.png')
sleep(2)
driver.quit()
| 1065865483/0python_script | four/Webdriver/screenshot.py | Python | mit | 405 | 0.008264 |
import random
N = 600851475143
def gcd(a, b):
while b > 0:
a, b = b, a % b
return a
def factorize(N):
    "Find the prime factorization of N (Pollard's rho algorithm)"
factors = []
while N >= 2:
d = 1
while d == 1:
x = random.randint(1, N)
y = random.randint(1, N)
d = gcd(abs(x-y), N)
d = int(d)
if d < N:
factors.append(d)
            N //= d  # integer division keeps N an int for randint/gcd
elif d == N:
factors.append(d)
break
return factors
factors = list(sorted(factorize(N)))
print(factors[-1])
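# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original solution): the routine above
# finds factors by drawing random pairs and taking gcds.  The cycle-finding
# variant usually meant by "Pollard's rho" looks like the function below,
# using f(x) = x^2 + c mod n with Floyd's tortoise-and-hare detection; it
# reuses the gcd helper and the random module imported above.
def pollard_rho(n):
    if n % 2 == 0:
        return 2
    while True:
        x = random.randint(2, n - 1)
        y = x
        c = random.randint(1, n - 1)
        d = 1
        while d == 1:
            x = (x * x + c) % n      # tortoise advances one step
            y = (y * y + c) % n
            y = (y * y + c) % n      # hare advances two steps
            d = gcd(abs(x - y), n)
        if d != n:                   # d == n means the walk failed; retry
            return d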
| ys-nuem/project-euler | 003/003.py | Python | mit | 603 | 0.006861 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeJpegOp."""
import os
import time
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class DecodeJpegBenchmark(test.Benchmark):
"""Evaluate tensorflow DecodeJpegOp performance."""
def _evalDecodeJpeg(self,
image_name,
parallelism,
num_iters,
crop_during_decode=None,
crop_window=None,
tile=None):
"""Evaluate DecodeJpegOp for the given image.
TODO(tanmingxing): add decoding+cropping as well.
Args:
image_name: a string of image file name (without suffix).
parallelism: the number of concurrent decode_jpeg ops to be run.
num_iters: number of iterations for evaluation.
crop_during_decode: If true, use fused DecodeAndCropJpeg instead of
separate decode and crop ops. It is ignored if crop_window is None.
crop_window: if not None, crop the decoded image. Depending on
crop_during_decode, cropping could happen during or after decoding.
tile: if not None, tile the image to composite a larger fake image.
Returns:
The duration of the run in seconds.
"""
ops.reset_default_graph()
image_file_path = resource_loader.get_path_to_datafile(
os.path.join('core', 'lib', 'jpeg', 'testdata', image_name))
# resource_loader does not seem to work well under benchmark runners.
# So if the above path is not available, try another way to access the file:
if not os.path.exists(image_file_path):
image_file_path = resource_loader.get_path_to_datafile(
os.path.join(
'..', '..', 'core', 'lib', 'jpeg', 'testdata', image_name))
if tile is None:
image_content = variable_scope.get_variable(
'image_%s' % image_name,
initializer=io_ops.read_file(image_file_path))
else:
single_image = image_ops.decode_jpeg(
io_ops.read_file(image_file_path), channels=3, name='single_image')
# Tile the image to composite a new larger image.
tiled_image = array_ops.tile(single_image, tile)
image_content = variable_scope.get_variable(
'tiled_image_%s' % image_name,
initializer=image_ops.encode_jpeg(tiled_image))
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
images = []
for _ in range(parallelism):
if crop_window is None:
# No crop.
image = image_ops.decode_jpeg(image_content, channels=3)
elif crop_during_decode:
# combined decode and crop.
image = image_ops.decode_and_crop_jpeg(
image_content, crop_window, channels=3)
else:
# separate decode and crop.
image = image_ops.decode_jpeg(image_content, channels=3)
image = image_ops.crop_to_bounding_box(
image,
offset_height=crop_window[0],
offset_width=crop_window[1],
target_height=crop_window[2],
target_width=crop_window[3])
images.append(image)
r = control_flow_ops.group(*images)
for _ in range(3):
# Skip warm up time.
self.evaluate(r)
start_time = time.time()
for _ in range(num_iters):
self.evaluate(r)
end_time = time.time()
return end_time - start_time
def benchmarkDecodeJpegSmall(self):
"""Evaluate single DecodeImageOp for small size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
for parallelism in [1, 100]:
duration_decode = self._evalDecodeJpeg('small.jpg', parallelism,
num_iters)
duration_decode_crop = self._evalDecodeJpeg('small.jpg', parallelism,
num_iters, False, crop_window)
duration_decode_after_crop = self._evalDecodeJpeg(
'small.jpg', parallelism, num_iters, True, crop_window)
self.report_benchmark(
name='decode_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
def benchmarkDecodeJpegMedium(self):
"""Evaluate single DecodeImageOp for medium size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
for parallelism in [1, 100]:
duration_decode = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters)
duration_decode_crop = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters, False, crop_window)
duration_decode_after_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, True, crop_window)
self.report_benchmark(
name='decode_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
def benchmarkDecodeJpegLarge(self):
"""Evaluate single DecodeImageOp for large size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
tile = [4, 4, 1]
for parallelism in [1, 100]:
# Tile the medium size image to composite a larger fake image.
duration_decode = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters, tile)
duration_decode_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, False, crop_window, tile)
duration_decode_after_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, True, crop_window, tile)
self.report_benchmark(
name='decode_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
if __name__ == '__main__':
test.main()
| tensorflow/tensorflow | tensorflow/python/kernel_tests/image_ops/decode_jpeg_op_test.py | Python | apache-2.0 | 7,835 | 0.006254 |
# -*- encoding: utf-8 -*-
def offset_happens_before_timespan_stops(
timespan=None,
offset=None,
hold=False,
):
r'''Makes time relation indicating that `offset` happens
before `timespan` stops.
::
>>> relation = timespantools.offset_happens_before_timespan_stops()
>>> print(format(relation))
timespantools.OffsetTimespanTimeRelation(
inequality=timespantools.CompoundInequality(
[
timespantools.SimpleInequality('offset < timespan.stop'),
],
logical_operator='and',
),
)
Returns time relation or boolean.
'''
from abjad.tools import timespantools
inequality = timespantools.CompoundInequality([
'offset < timespan.stop',
])
time_relation = timespantools.OffsetTimespanTimeRelation(
inequality,
timespan=timespan,
offset=offset)
if time_relation.is_fully_loaded and not hold:
return time_relation()
else:
return time_relation
| mscuthbert/abjad | abjad/tools/timespantools/offset_happens_before_timespan_stops.py | Python | gpl-3.0 | 1,081 | 0.000925 |
# -*- coding: utf-8 -*-
import aaargh
from app import Negi
app = aaargh.App(description="Jinja2+JSON powered static HTML build tool")
@app.cmd(help='Parse JSON and build HTML')
@app.cmd_arg('-d','--data_dir',default='./data',help='JSON data directory (default: ./data)')
@app.cmd_arg('-t','--tmpl_dir',default='./templates',help='Jinja2 template directory (default: ./templates)')
@app.cmd_arg('-o','--out_dir',default='./dist',help='Output directory (default: ./dist)')
@app.cmd_arg('-v','--verbose',nargs='?',const=True,default=False)
def build(data_dir,tmpl_dir,out_dir,verbose):
builder = Negi(
data_dir= data_dir,
tmpl_dir = tmpl_dir,
out_dir = out_dir,
verbose = verbose
)
builder.build()
def main():
app.run()
if __name__ == '__main__':
main()
| zk33/negi | negi/main.py | Python | mit | 798 | 0.035088 |
import re
from autotest.client.shared import error
from autotest.client import utils
from virttest import virsh
from virttest import utils_libvirtd
def run(test, params, env):
"""
Test the command virsh nodecpustats
    (1) Call the virsh nodecpustats command for each host cpu
        separately
(2) Get the output
    (3) Check against the /proc/stat output (o) for the respective cpu
user: o[0] + o[1]
system: o[2] + o[5] + o[6]
idle: o[3]
iowait: o[4]
(4) Call the virsh nodecpustats command with an unexpected option
    (5) Call the virsh nodecpustats command with the libvirtd service stopped
"""
def virsh_check_nodecpustats_percpu(actual_stats):
"""
        Check the actual nodecpustats output value
total time <= system uptime
"""
# Normalise to seconds from nano seconds
total = float((actual_stats['system'] + actual_stats['user'] +
actual_stats['idle'] + actual_stats['iowait']) / (10 ** 9))
uptime = float(utils.get_uptime())
if not total <= uptime:
raise error.TestFail("Commands 'virsh nodecpustats' not succeeded"
" as total time: %f is more"
" than uptime: %f" % (total, uptime))
return True
def virsh_check_nodecpustats(actual_stats, cpu_count):
"""
        Check the actual nodecpustats output value
total time <= system uptime
"""
# Normalise to seconds from nano seconds and get for one cpu
total = float(((actual_stats['system'] + actual_stats['user'] +
actual_stats['idle'] + actual_stats['iowait']) / (10 ** 9)) / (
cpu_count))
uptime = float(utils.get_uptime())
if not total <= uptime:
raise error.TestFail("Commands 'virsh nodecpustats' not succeeded"
" as total time: %f is more"
" than uptime: %f" % (total, uptime))
return True
def virsh_check_nodecpustats_percentage(actual_per):
"""
        Check that the actual nodecpustats percentages add up to 100%
"""
total = int(round(actual_per['user'] + actual_per['system'] +
actual_per['idle'] + actual_per['iowait']))
if not total == 100:
raise error.TestFail("Commands 'virsh nodecpustats' not succeeded"
" as the total percentage value: %d"
" is not equal 100" % total)
def parse_output(output):
"""
To get the output parsed into a dictionary
        :param output: virsh command output
:return: dict of user,system,idle,iowait times
"""
# From the beginning of a line, group 1 is one or more word-characters,
# followed by zero or more whitespace characters and a ':',
# then one or more whitespace characters,
# followed by group 2, which is one or more digit characters,
# e.g as below
# user: 6163690000000
#
regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)")
actual = {}
for line in output.stdout.split('\n'):
match_obj = regex_obj.search(line)
# Due to the extra space in the list
if match_obj is not None:
name = match_obj.group(1)
value = match_obj.group(2)
actual[name] = int(value)
return actual
def parse_percentage_output(output):
"""
To get the output parsed into a dictionary
        :param output: virsh command output
:return: dict of user,system,idle,iowait times
"""
# From the beginning of a line, group 1 is one or more word-characters,
# followed by zero or more whitespace characters and a ':',
# then one or more whitespace characters,
# followed by group 2, which is one or more digit characters,
# e.g as below
# user: 1.5%
#
regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+.\d+)")
actual_percentage = {}
for line in output.stdout.split('\n'):
match_obj = regex_obj.search(line)
# Due to the extra space in the list
if match_obj is not None:
name = match_obj.group(1)
value = match_obj.group(2)
actual_percentage[name] = float(value)
return actual_percentage
# Initialize the variables
itr = int(params.get("inner_test_iterations"))
option = params.get("virsh_cpunodestats_options")
invalid_cpunum = params.get("invalid_cpunum")
status_error = params.get("status_error")
libvirtd = params.get("libvirtd", "on")
# Prepare libvirtd service
if libvirtd == "off":
utils_libvirtd.libvirtd_stop()
# Get the host cpu list
host_cpus_list = utils.cpu_online_map()
    # Run the test case for 5 iterations; the default can be changed in the
    # subtests.cfg file
for i in range(itr):
if status_error == "yes":
if invalid_cpunum == "yes":
option = "--cpu %s" % (len(host_cpus_list) + 1)
output = virsh.nodecpustats(ignore_status=True, option=option)
status = output.exit_status
if status == 0:
if libvirtd == "off":
utils_libvirtd.libvirtd_start()
raise error.TestFail("Command 'virsh nodecpustats' "
"succeeded with libvirtd service "
"stopped, incorrect")
else:
raise error.TestFail("Command 'virsh nodecpustats %s' "
"succeeded (incorrect command)" % option)
elif status_error == "no":
# Run the testcase for each cpu to get the cpu stats
for cpu in host_cpus_list:
option = "--cpu %s" % cpu
output = virsh.nodecpustats(ignore_status=True, option=option)
status = output.exit_status
if status == 0:
actual_value = parse_output(output)
virsh_check_nodecpustats_percpu(actual_value)
else:
raise error.TestFail("Command 'virsh nodecpustats %s'"
"not succeeded" % option)
# Run the test case for each cpu to get the cpu stats in percentage
for cpu in host_cpus_list:
option = "--cpu %s --percent" % cpu
output = virsh.nodecpustats(ignore_status=True, option=option)
status = output.exit_status
if status == 0:
actual_value = parse_percentage_output(output)
virsh_check_nodecpustats_percentage(actual_value)
else:
raise error.TestFail("Command 'virsh nodecpustats %s'"
" not succeeded" % option)
option = ''
# Run the test case for total cpus to get the cpus stats
output = virsh.nodecpustats(ignore_status=True, option=option)
status = output.exit_status
if status == 0:
actual_value = parse_output(output)
virsh_check_nodecpustats(actual_value, len(host_cpus_list))
else:
raise error.TestFail("Command 'virsh nodecpustats %s'"
" not succeeded" % option)
# Run the test case for the total cpus to get the stats in
# percentage
option = "--percent"
output = virsh.nodecpustats(ignore_status=True, option=option)
status = output.exit_status
if status == 0:
actual_value = parse_percentage_output(output)
virsh_check_nodecpustats_percentage(actual_value)
else:
raise error.TestFail("Command 'virsh nodecpustats %s'"
" not succeeded" % option)
# Recover libvirtd service start
if libvirtd == "off":
utils_libvirtd.libvirtd_start()
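# ---------------------------------------------------------------------------
# Illustrative helper (not used by the test above): one way to read the
# /proc/stat comparison described in the docstring for a given cpu line.
# Field order is the usual /proc/stat layout:
#   user nice system idle iowait irq softirq ...
def read_proc_stat(cpu="cpu"):
    with open("/proc/stat") as stat_file:
        for line in stat_file:
            fields = line.split()
            if fields[0] == cpu:
                o = [int(value) for value in fields[1:8]]
                return {"user": o[0] + o[1],            # user + nice
                        "system": o[2] + o[5] + o[6],   # system + irq + softirq
                        "idle": o[3],
                        "iowait": o[4]}
    return None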
| svirt/tp-libvirt | libvirt/tests/src/virsh_cmd/host/virsh_nodecpustats.py | Python | gpl-2.0 | 8,299 | 0.000361 |
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
from numpy.f2py.f2py2e import main
main()
| ryfeus/lambda-packs | pytorch/source/numpy/f2py/__main__.py | Python | mit | 134 | 0 |
from django.db import models
from annoying.fields import AutoOneToOneField
class SuperVillain(models.Model):
    name = models.CharField(max_length=20, default="Dr Horrible")
class SuperHero(models.Model):
    name = models.CharField(max_length=20, default="Captain Hammer")
mortal_enemy = AutoOneToOneField(SuperVillain, related_name='mortal_enemy')
| YPCrumble/django-annoying | annoying/tests/models.py | Python | bsd-3-clause | 363 | 0 |
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class BRAINSPosteriorToContinuousClassInputSpec(CommandLineInputSpec):
inputWhiteVolume = File(desc="White Matter Posterior Volume", exists=True, argstr="--inputWhiteVolume %s")
inputBasalGmVolume = File(desc="Basal Grey Matter Posterior Volume", exists=True, argstr="--inputBasalGmVolume %s")
inputSurfaceGmVolume = File(desc="Surface Grey Matter Posterior Volume", exists=True, argstr="--inputSurfaceGmVolume %s")
inputCsfVolume = File(desc="CSF Posterior Volume", exists=True, argstr="--inputCsfVolume %s")
inputVbVolume = File(desc="Venous Blood Posterior Volume", exists=True, argstr="--inputVbVolume %s")
inputCrblGmVolume = File(desc="Cerebellum Grey Matter Posterior Volume", exists=True, argstr="--inputCrblGmVolume %s")
inputCrblWmVolume = File(desc="Cerebellum White Matter Posterior Volume", exists=True, argstr="--inputCrblWmVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Output Continuous Tissue Classified Image", argstr="--outputVolume %s")
class BRAINSPosteriorToContinuousClassOutputSpec(TraitedSpec):
outputVolume = File(desc="Output Continuous Tissue Classified Image", exists=True)
class BRAINSPosteriorToContinuousClass(SEMLikeCommandLine):
"""title: Tissue Classification
category: BRAINS.Classify
description: This program will generate an 8-bit continuous tissue classified image based on BRAINSABC posterior images.
version: 3.0
documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSClassify
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Vincent A. Magnotta
acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568
"""
input_spec = BRAINSPosteriorToContinuousClassInputSpec
output_spec = BRAINSPosteriorToContinuousClassOutputSpec
_cmd = " BRAINSPosteriorToContinuousClass "
_outputs_filenames = {'outputVolume': 'outputVolume'}
_redirect_x = False
| grlee77/nipype | nipype/interfaces/semtools/brains/classify.py | Python | bsd-3-clause | 2,306 | 0.006071 |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import json
import logging
import time
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
class APIKey(ndb.Model):
key = ndb.StringProperty(indexed=True,required=True)
class Importer:
def load(self, toonlist, data):
q = APIKey.query()
apikey = q.fetch()[0].key
# Request all of the toon data from the blizzard API and determine the
# group's ilvls, armor type counts and token type counts. subs are not
# included in the counts, since they're not really part of the main
# group.
for toon in toonlist:
try:
# TODO: this object can probably be a class instead of another dict
newdata = dict()
data.append(newdata)
url = 'https://us.api.battle.net/wow/character/aerie-peak/%s?fields=progression,items&locale=en_US&apikey=%s' % (toon, apikey)
# create the rpc object for the fetch method. the deadline
# defaults to 5 seconds, but that seems to be too short for the
# Blizzard API site sometimes. setting it to 10 helps a little
# but it makes page loads a little slower.
rpc = urlfetch.create_rpc(10)
rpc.callback = self.create_callback(rpc, toon, newdata)
urlfetch.make_fetch_call(rpc, url)
newdata['rpc'] = rpc
newdata['toon'] = toon
# The Blizzard API has a limit of 10 calls per second. Sleep here
# for a very brief time to avoid hitting that limit.
time.sleep(0.1)
except:
logging.error('Failed to create rpc for %s' % toon)
# Now that all of the RPC calls have been created, loop through the data
# dictionary one more time and wait for each fetch to be completed. Once
# all of the waits finish, then we have all of the data from the
# Blizzard API and can loop through all of it and build the page.
start = time.time()
for d in data:
try:
d['rpc'].wait()
except:
logging.error('Waiting for rpc failed')
end = time.time()
logging.info("Time spent retrieving data: %f seconds" % (end-start))
# Callback that handles the result of the call to the Blizzard API. This will fill in
# the toondata dict for the requested toon with either data from Battle.net or with an
# error message to display on the page.
def handle_result(self, rpc, name, toondata):
try:
response = rpc.get_result()
except urlfetch_errors.DeadlineExceededError:
logging.error('urlfetch threw DeadlineExceededError on toon %s' % name.encode('ascii','ignore'))
toondata['toon'] = name
toondata['status'] = 'nok'
toondata['reason'] = 'Timeout retrieving data from Battle.net for %s. Refresh page to try again.' % name
return
except urlfetch_errors.DownloadError:
logging.error('urlfetch threw DownloadError on toon %s' % name.encode('ascii','ignore'))
toondata['toon'] = name
toondata['status'] = 'nok'
toondata['reason'] = 'Network error retrieving data from Battle.net for toon %s. Refresh page to try again.' % name
return
except:
logging.error('urlfetch threw unknown exception on toon %s' % name.encode('ascii','ignore'))
toondata['toon'] = name
toondata['status'] = 'nok'
toondata['reason'] = 'Unknown error retrieving data from Battle.net for toon %s. Refresh page to try again.' % name
return
# change the json from the response into a dict of data and store it
# into the toondata object that was passed in.
jsondata = json.loads(response.content)
toondata.update(jsondata);
# Blizzard's API will return an error if it couldn't retrieve the data
# for some reason. Check for this and log it if it fails. Note that
# this response doesn't contain the toon's name so it has to be added
# in afterwards.
if 'status' in jsondata and jsondata['status'] == 'nok':
logging.error('Blizzard API failed to find toon %s for reason: %s' %
(name.encode('ascii','ignore'), jsondata['reason']))
toondata['toon'] = name
toondata['reason'] = "Error retrieving data for %s from Blizzard API: %s" % (name, jsondata['reason'])
return
# we get all of the data here, but we want to filter out just the raids
# we care about so that it's not so much data returned from the importer
validraids = ['Highmaul','Blackrock Foundry']
if toondata['progression'] != None:
toondata['progression']['raids'] = [r for r in toondata['progression']['raids'] if r['name'] in validraids]
del toondata['rpc']
def create_callback(self, rpc, name, toondata):
return lambda: self.handle_result(rpc, name, toondata)
class Setup:
# The new Battle.net Mashery API requires an API key when using it. This
    # method stores an API key in the datastore so it can be used in later page requests.
def setkey(self,apikey):
# Delete all of the entities out of the apikey datastore so fresh entities
# can be loaded.
q = APIKey.query()
result = q.fetch();
if (len(result) == 0):
k = APIKey(key = apikey)
k.put()
else:
k = result[0]
k.key = apikey
k.put()
| AndyHannon/ctrprogress | wowapi.py | Python | mit | 5,784 | 0.007089 |
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from .views import UploadBlackListView, DemoView, UdateBlackListView
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^upload-blacklist$', login_required(UploadBlackListView.as_view()), name='upload-blacklist'),
url(r'^update-blacklist$', UdateBlackListView.as_view(), name='update-blacklist'),
url(r'^profile/', include('n_profile.urls')),
url(r'^demo$', DemoView.as_view(), name='demo'),
]
| nirvaris/nirvaris-djangofence | djangofence/urls.py | Python | mit | 566 | 0.003534 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Segment Hook
which allows you to connect to your Segment account,
retrieve data from it or write to that file.
NOTE: this hook also relies on the Segment analytics package:
https://github.com/segmentio/analytics-python
"""
import analytics
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
class SegmentHook(BaseHook, LoggingMixin):
def __init__(
self,
segment_conn_id='segment_default',
segment_debug_mode=False,
*args,
**kwargs
):
"""
Create new connection to Segment
and allows you to pull data out of Segment or write to it.
You can then use that file with other
Airflow operators to move the data around or interact with segment.
:param segment_conn_id: the name of the connection that has the parameters
we need to connect to Segment.
The connection should be type `json` and include a
write_key security token in the `Extras` field.
:type segment_conn_id: str
:param segment_debug_mode: Determines whether Segment should run in debug mode.
Defaults to False
:type segment_debug_mode: boolean
.. note::
You must include a JSON structure in the `Extras` field.
We need a user's security token to connect to Segment.
So we define it in the `Extras` field as:
`{"write_key":"YOUR_SECURITY_TOKEN"}`
"""
self.segment_conn_id = segment_conn_id
self.segment_debug_mode = segment_debug_mode
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(self.segment_conn_id)
self.extras = self.connection.extra_dejson
self.write_key = self.extras.get('write_key')
if self.write_key is None:
raise AirflowException('No Segment write key provided')
def get_conn(self):
self.log.info('Setting write key for Segment analytics connection')
analytics.debug = self.segment_debug_mode
if self.segment_debug_mode:
self.log.info('Setting Segment analytics connection to debug mode')
analytics.on_error = self.on_error
analytics.write_key = self.write_key
return analytics
def on_error(self, error, items):
"""
Handles error callbacks when using Segment with segment_debug_mode set to True
"""
self.log.error('Encountered Segment error: {segment_error} with '
'items: {with_items}'.format(segment_error=error,
with_items=items))
raise AirflowException('Segment error: {}'.format(error))
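# ---------------------------------------------------------------------------
# Illustrative usage sketch (not shipped with Airflow): sending one tracking
# event through the hook.  It assumes an Airflow connection named
# 'segment_default' already exists with {"write_key": "..."} in its Extras;
# the event and user id below are made up.
if __name__ == '__main__':
    hook = SegmentHook(segment_conn_id='segment_default')
    conn = hook.get_conn()   # returns the configured `analytics` module
    conn.track(user_id='example-user', event='segment_hook_smoke_test')
    conn.flush()             # push the queued event before the process exits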
| subodhchhabra/airflow | airflow/contrib/hooks/segment_hook.py | Python | apache-2.0 | 3,748 | 0.0008 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/firestore_v1beta1/proto/event_flow_document_change.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.firestore_v1beta1.proto import common_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2
from google.cloud.firestore_v1beta1.proto import document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/firestore_v1beta1/proto/event_flow_document_change.proto',
package='google.firestore.v1beta1',
syntax='proto3',
serialized_pb=_b('\nEgoogle/cloud/firestore_v1beta1/proto/event_flow_document_change.proto\x12\x18google.firestore.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x31google/cloud/firestore_v1beta1/proto/common.proto\x1a\x33google/cloud/firestore_v1beta1/proto/document.protoB\xa2\x01\n\x1c\x63om.google.firestore.v1beta1B\x1c\x45ventFlowDocumentChangeProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1b\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2.DESCRIPTOR,google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.firestore.v1beta1B\034EventFlowDocumentChangeProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\252\002\036Google.Cloud.Firestore.V1Beta1'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| jonparrott/gcloud-python | firestore/google/cloud/firestore_v1beta1/proto/event_flow_document_change_pb2.py | Python | apache-2.0 | 2,565 | 0.011696 |
# Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`sciond` --- Reference endhost SCION Daemon
================================================
"""
# Stdlib
import logging
import os
import errno
import threading
import time
from itertools import product
# External
from external.expiring_dict import ExpiringDict
# SCION
from lib.app.sciond import get_default_sciond_path
from lib.defines import (
GEN_CACHE_PATH,
PATH_FLAG_SIBRA,
PATH_REQ_TOUT,
SCIOND_API_SOCKDIR,
)
from lib.errors import SCIONBaseError, SCIONParseError, SCIONServiceLookupError
from lib.log import log_exception
from lib.msg_meta import SockOnlyMetadata
from lib.path_seg_meta import PathSegMeta
from lib.packet.ctrl_pld import CtrlPayload, mk_ctrl_req_id
from lib.packet.path import SCIONPath
from lib.packet.path_mgmt.base import PathMgmt
from lib.packet.path_mgmt.rev_info import (
SignedRevInfoCertFetchError,
RevInfoExpiredError,
RevInfoValidationError,
RevocationInfo,
SignedRevInfo,
SignedRevInfoVerificationError
)
from lib.packet.path_mgmt.seg_req import PathSegmentReply, PathSegmentReq
from lib.packet.scion_addr import ISD_AS
from lib.packet.scmp.types import SCMPClass, SCMPPathClass
from lib.path_combinator import build_shortcut_paths, tuples_to_full_paths
from lib.path_db import DBResult, PathSegmentDB
from lib.rev_cache import RevCache
from lib.sciond_api.as_req import SCIONDASInfoReply, SCIONDASInfoReplyEntry, SCIONDASInfoRequest
from lib.sciond_api.revocation import SCIONDRevReply, SCIONDRevReplyStatus
from lib.sciond_api.host_info import HostInfo
from lib.sciond_api.if_req import SCIONDIFInfoReply, SCIONDIFInfoReplyEntry, SCIONDIFInfoRequest
from lib.sciond_api.base import SCIONDMsg
from lib.sciond_api.path_meta import FwdPathMeta, PathInterface
from lib.sciond_api.path_req import (
SCIONDPathRequest,
SCIONDPathReplyError,
SCIONDPathReply,
SCIONDPathReplyEntry,
)
from lib.sciond_api.revocation import SCIONDRevNotification
from lib.sciond_api.segment_req import (
SCIONDSegTypeHopReply,
SCIONDSegTypeHopReplyEntry,
SCIONDSegTypeHopRequest,
)
from lib.sciond_api.service_req import (
SCIONDServiceInfoReply,
SCIONDServiceInfoReplyEntry,
SCIONDServiceInfoRequest,
)
from lib.sibra.ext.resv import ResvBlockSteady
from lib.socket import ReliableSocket
from lib.thread import thread_safety_net
from lib.types import (
CertMgmtType,
PathMgmtType as PMT,
PathSegmentType as PST,
PayloadClass,
LinkType,
SCIONDMsgType as SMT,
ServiceType,
TypeBase,
)
from lib.util import SCIONTime
from sciond.req import RequestState
from scion_elem.scion_elem import SCIONElement
_FLUSH_FLAG = "FLUSH"
class SCIONDaemon(SCIONElement):
"""
The SCION Daemon used for retrieving and combining paths.
"""
MAX_REQS = 1024
# Time a path segment is cached at a host (in seconds).
SEGMENT_TTL = 300
# Empty Path TTL
EMPTY_PATH_TTL = SEGMENT_TTL
def __init__(self, conf_dir, addr, api_addr, run_local_api=False,
port=None, spki_cache_dir=GEN_CACHE_PATH, prom_export=None, delete_sock=False):
"""
Initialize an instance of the class SCIONDaemon.
"""
super().__init__("sciond", conf_dir, spki_cache_dir=spki_cache_dir,
prom_export=prom_export, public=(addr, port))
up_labels = {**self._labels, "type": "up"} if self._labels else None
down_labels = {**self._labels, "type": "down"} if self._labels else None
core_labels = {**self._labels, "type": "core"} if self._labels else None
self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=up_labels)
self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=down_labels)
self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=core_labels)
self.rev_cache = RevCache()
# Keep track of requested paths.
self.requested_paths = ExpiringDict(self.MAX_REQS, PATH_REQ_TOUT)
self.req_path_lock = threading.Lock()
self._api_sock = None
self.daemon_thread = None
os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True)
self.api_addr = (api_addr or get_default_sciond_path())
if delete_sock:
try:
os.remove(self.api_addr)
except OSError as e:
if e.errno != errno.ENOENT:
logging.error("Could not delete socket %s: %s" % (self.api_addr, e))
self.CTRL_PLD_CLASS_MAP = {
PayloadClass.PATH: {
PMT.REPLY: self.handle_path_reply,
PMT.REVOCATION: self.handle_revocation,
},
PayloadClass.CERT: {
CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
CertMgmtType.TRC_REPLY: self.process_trc_reply,
CertMgmtType.TRC_REQ: self.process_trc_request,
},
}
self.SCMP_PLD_CLASS_MAP = {
SCMPClass.PATH:
{SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation},
}
if run_local_api:
self._api_sock = ReliableSocket(bind_unix=(self.api_addr, "sciond"))
self._socks.add(self._api_sock, self.handle_accept)
@classmethod
def start(cls, conf_dir, addr, api_addr=None, run_local_api=False, port=0):
"""
Initializes and starts a SCIOND instance.
"""
inst = cls(conf_dir, addr, api_addr, run_local_api, port)
name = "SCIONDaemon.run %s" % inst.addr.isd_as
inst.daemon_thread = threading.Thread(
target=thread_safety_net, args=(inst.run,), name=name, daemon=True)
inst.daemon_thread.start()
logging.debug("sciond started with api_addr = %s", inst.api_addr)
def _get_msg_meta(self, packet, addr, sock):
if sock != self._udp_sock:
return packet, SockOnlyMetadata.from_values(sock) # API socket
else:
return super()._get_msg_meta(packet, addr, sock)
def handle_msg_meta(self, msg, meta):
"""
Main routine to handle incoming SCION messages.
"""
if isinstance(meta, SockOnlyMetadata): # From SCIOND API
try:
sciond_msg = SCIONDMsg.from_raw(msg)
except SCIONParseError as err:
logging.error(str(err))
return
self.api_handle_request(sciond_msg, meta)
return
super().handle_msg_meta(msg, meta)
def handle_path_reply(self, cpld, meta):
"""
Handle path reply from local path server.
"""
pmgt = cpld.union
path_reply = pmgt.union
assert isinstance(path_reply, PathSegmentReply), type(path_reply)
recs = path_reply.recs()
for srev_info in recs.iter_srev_infos():
self.check_revocation(srev_info, lambda x: self.continue_revocation_processing(
srev_info) if not x else False, meta)
req = path_reply.req()
key = req.dst_ia(), req.flags()
with self.req_path_lock:
r = self.requested_paths.get(key)
if r:
r.notify_reply(path_reply)
else:
logging.warning("No outstanding request found for %s", key)
for type_, pcb in recs.iter_pcbs():
seg_meta = PathSegMeta(pcb, self.continue_seg_processing,
meta, type_, params=(r,))
self._process_path_seg(seg_meta, cpld.req_id)
def continue_revocation_processing(self, srev_info):
self.rev_cache.add(srev_info)
self.remove_revoked_segments(srev_info.rev_info())
def continue_seg_processing(self, seg_meta):
"""
        For every path segment (that can be verified) received from the path
        server, this function gets called to continue processing the
        segment.
The segment is added to pathdb and pending requests are checked.
"""
pcb = seg_meta.seg
type_ = seg_meta.type
# Check that segment does not contain a revoked interface.
if not self.check_revoked_interface(pcb, self.rev_cache):
return
map_ = {
PST.UP: self._handle_up_seg,
PST.DOWN: self._handle_down_seg,
PST.CORE: self._handle_core_seg,
}
map_[type_](pcb)
r = seg_meta.params[0]
if r:
r.verified_segment()
def _handle_up_seg(self, pcb):
if self.addr.isd_as != pcb.last_ia():
return None
if self.up_segments.update(pcb) == DBResult.ENTRY_ADDED:
logging.debug("Up segment added: %s", pcb.short_desc())
return pcb.first_ia()
return None
def _handle_down_seg(self, pcb):
last_ia = pcb.last_ia()
if self.addr.isd_as == last_ia:
return None
if self.down_segments.update(pcb) == DBResult.ENTRY_ADDED:
logging.debug("Down segment added: %s", pcb.short_desc())
return last_ia
return None
def _handle_core_seg(self, pcb):
if self.core_segments.update(pcb) == DBResult.ENTRY_ADDED:
logging.debug("Core segment added: %s", pcb.short_desc())
return pcb.first_ia()
return None
def api_handle_request(self, msg, meta):
"""
Handle local API's requests.
"""
mtype = msg.type()
if mtype == SMT.PATH_REQUEST:
threading.Thread(
target=thread_safety_net,
args=(self._api_handle_path_request, msg, meta),
daemon=True).start()
elif mtype == SMT.REVOCATION:
self._api_handle_rev_notification(msg, meta)
elif mtype == SMT.AS_REQUEST:
self._api_handle_as_request(msg, meta)
elif mtype == SMT.IF_REQUEST:
self._api_handle_if_request(msg, meta)
elif mtype == SMT.SERVICE_REQUEST:
self._api_handle_service_request(msg, meta)
elif mtype == SMT.SEGTYPEHOP_REQUEST:
self._api_handle_seg_type_request(msg, meta)
else:
logging.warning(
"API: type %s not supported.", TypeBase.to_str(mtype))
def _api_handle_path_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDPathRequest), type(request)
req_id = pld.id
dst_ia = request.dst_ia()
src_ia = request.src_ia()
if not src_ia:
src_ia = self.addr.isd_as
thread = threading.current_thread()
thread.name = "SCIONDaemon API id:%s %s -> %s" % (
thread.ident, src_ia, dst_ia)
paths, error = self.get_paths(dst_ia, flush=request.p.flags.refresh)
if request.p.maxPaths:
paths = paths[:request.p.maxPaths]
reply_entries = []
for path_meta in paths:
fwd_if = path_meta.fwd_path().get_fwd_if()
# Set dummy host addr if path is empty.
haddr, port = None, None
if fwd_if:
br = self.ifid2br[fwd_if]
haddr, port = br.int_addrs.public
addrs = [haddr] if haddr else []
first_hop = HostInfo.from_values(addrs, port)
reply_entry = SCIONDPathReplyEntry.from_values(
path_meta, first_hop)
reply_entries.append(reply_entry)
logging.debug("Replying to api request for %s with %d paths:\n%s",
dst_ia, len(paths), "\n".join([p.short_desc() for p in paths]))
self._send_path_reply(req_id, reply_entries, error, meta)
def _send_path_reply(self, req_id, reply_entries, error, meta):
path_reply = SCIONDMsg(SCIONDPathReply.from_values(reply_entries, error), req_id)
self.send_meta(path_reply.pack(), meta)
def _api_handle_as_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDASInfoRequest), type(request)
req_ia = request.isd_as()
if not req_ia or req_ia.is_zero() or req_ia == self.addr.isd_as:
# Request is for the local AS.
reply_entry = SCIONDASInfoReplyEntry.from_values(
self.addr.isd_as, self.is_core_as(), self.topology.mtu)
else:
# Request is for a remote AS.
reply_entry = SCIONDASInfoReplyEntry.from_values(req_ia, self.is_core_as(req_ia))
as_reply = SCIONDMsg(SCIONDASInfoReply.from_values([reply_entry]), pld.id)
self.send_meta(as_reply.pack(), meta)
def _api_handle_if_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDIFInfoRequest), type(request)
all_brs = request.all_brs()
if_list = []
if not all_brs:
if_list = list(request.iter_ids())
if_entries = []
for if_id, br in self.ifid2br.items():
if all_brs or if_id in if_list:
br_addr, br_port = br.int_addrs.public
info = HostInfo.from_values([br_addr], br_port)
reply_entry = SCIONDIFInfoReplyEntry.from_values(if_id, info)
if_entries.append(reply_entry)
if_reply = SCIONDMsg(SCIONDIFInfoReply.from_values(if_entries), pld.id)
self.send_meta(if_reply.pack(), meta)
def _api_handle_service_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDServiceInfoRequest), type(request)
all_svcs = request.all_services()
svc_list = []
if not all_svcs:
svc_list = list(request.iter_service_types())
svc_entries = []
for svc_type in ServiceType.all():
if all_svcs or svc_type in svc_list:
lookup_res = self.dns_query_topo(svc_type)
host_infos = []
for addr, port in lookup_res:
host_infos.append(HostInfo.from_values([addr], port))
reply_entry = SCIONDServiceInfoReplyEntry.from_values(
svc_type, host_infos)
svc_entries.append(reply_entry)
svc_reply = SCIONDMsg(SCIONDServiceInfoReply.from_values(svc_entries), pld.id)
self.send_meta(svc_reply.pack(), meta)
def _api_handle_rev_notification(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDRevNotification), type(request)
self.handle_revocation(CtrlPayload(PathMgmt(request.srev_info())), meta, pld)
def _api_handle_seg_type_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDSegTypeHopRequest), type(request)
segmentType = request.p.type
        db = None
        if segmentType == PST.CORE:
            db = self.core_segments
        elif segmentType == PST.UP:
            db = self.up_segments
        elif segmentType == PST.DOWN:
            db = self.down_segments
        else:
            logging.error("Requested segment type %s unrecognized.", segmentType)
        seg_entries = []
        # Guard against an unrecognized segment type, where no DB was selected.
        for segment in (db(full=True) if db is not None else []):
if_list = []
for asm in segment.iter_asms():
isd_as = asm.isd_as()
hof = asm.pcbm(0).hof()
egress = hof.egress_if
ingress = hof.ingress_if
if ingress:
if_list.append(PathInterface.from_values(isd_as, ingress))
if egress:
if_list.append(PathInterface.from_values(isd_as, egress))
reply_entry = SCIONDSegTypeHopReplyEntry.from_values(
if_list, segment.get_timestamp(), segment.get_expiration_time())
seg_entries.append(reply_entry)
seg_reply = SCIONDMsg(
SCIONDSegTypeHopReply.from_values(seg_entries), pld.id)
self.send_meta(seg_reply.pack(), meta)
def handle_scmp_revocation(self, pld, meta):
srev_info = SignedRevInfo.from_raw(pld.info.srev_info)
self.handle_revocation(CtrlPayload(PathMgmt(srev_info)), meta)
def handle_revocation(self, cpld, meta, pld=None):
pmgt = cpld.union
srev_info = pmgt.union
rev_info = srev_info.rev_info()
assert isinstance(rev_info, RevocationInfo), type(rev_info)
logging.debug("Received revocation: %s from %s", srev_info.short_desc(), meta)
self.check_revocation(srev_info,
lambda e: self.process_revocation(e, srev_info, meta, pld), meta)
def process_revocation(self, error, srev_info, meta, pld):
rev_info = srev_info.rev_info()
status = None
if error is None:
status = SCIONDRevReplyStatus.VALID
self.rev_cache.add(srev_info)
self.remove_revoked_segments(rev_info)
else:
if type(error) == RevInfoValidationError:
logging.error("Failed to validate RevInfo %s from %s: %s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.INVALID
if type(error) == RevInfoExpiredError:
logging.info("Ignoring expired Revinfo, %s from %s", srev_info.short_desc(), meta)
status = SCIONDRevReplyStatus.STALE
if type(error) == SignedRevInfoCertFetchError:
logging.error("Failed to fetch certificate for SignedRevInfo %s from %s: %s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.UNKNOWN
if type(error) == SignedRevInfoVerificationError:
logging.error("Failed to verify SRevInfo %s from %s: %s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.SIGFAIL
if type(error) == SCIONBaseError:
logging.error("Revocation check failed for %s from %s:\n%s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.UNKNOWN
if pld:
rev_reply = SCIONDMsg(SCIONDRevReply.from_values(status), pld.id)
self.send_meta(rev_reply.pack(), meta)
def remove_revoked_segments(self, rev_info):
# Go through all segment databases and remove affected segments.
removed_up = removed_core = removed_down = 0
if rev_info.p.linkType == LinkType.CORE:
removed_core = self._remove_revoked_pcbs(self.core_segments, rev_info)
elif rev_info.p.linkType in [LinkType.PARENT, LinkType.CHILD]:
removed_up = self._remove_revoked_pcbs(self.up_segments, rev_info)
removed_down = self._remove_revoked_pcbs(self.down_segments, rev_info)
elif rev_info.p.linkType != LinkType.PEER:
logging.error("Bad RevInfo link type: %s", rev_info.p.linkType)
logging.info("Removed %d UP- %d CORE- and %d DOWN-Segments." %
(removed_up, removed_core, removed_down))
def _remove_revoked_pcbs(self, db, rev_info):
"""
Removes all segments from 'db' that have a revoked upstream PCBMarking.
:param db: The PathSegmentDB.
:type db: :class:`lib.path_db.PathSegmentDB`
:param rev_info: The revocation info
:type rev_info: RevocationInfo
:returns: The number of deletions.
:rtype: int
"""
to_remove = []
for segment in db(full=True):
for asm in segment.iter_asms():
if self._check_revocation_for_asm(rev_info, asm, verify_all=False):
logging.debug("Removing segment: %s" % segment.short_desc())
to_remove.append(segment.get_hops_hash())
return db.delete_all(to_remove)
def _flush_path_dbs(self):
self.core_segments.flush()
self.down_segments.flush()
self.up_segments.flush()
def get_paths(self, dst_ia, flags=(), flush=False):
"""Return a list of paths."""
logging.debug("Paths requested for ISDAS=%s, flags=%s, flush=%s",
dst_ia, flags, flush)
if flush:
logging.info("Flushing PathDBs.")
self._flush_path_dbs()
if self.addr.isd_as == dst_ia or (
self.addr.isd_as.any_as() == dst_ia and
self.topology.is_core_as):
# Either the destination is the local AS, or the destination is any
# core AS in this ISD, and the local AS is in the core
empty = SCIONPath()
exp_time = int(time.time()) + self.EMPTY_PATH_TTL
empty_meta = FwdPathMeta.from_values(empty, [], self.topology.mtu, exp_time)
return [empty_meta], SCIONDPathReplyError.OK
paths = self.path_resolution(dst_ia, flags=flags)
if not paths:
key = dst_ia, flags
with self.req_path_lock:
r = self.requested_paths.get(key)
if r is None:
# No previous outstanding request
req = PathSegmentReq.from_values(self.addr.isd_as, dst_ia, flags=flags)
r = RequestState(req.copy())
self.requested_paths[key] = r
self._fetch_segments(req)
# Wait until event gets set.
timeout = not r.e.wait(PATH_REQ_TOUT)
with self.req_path_lock:
if timeout:
r.done()
if key in self.requested_paths:
del self.requested_paths[key]
if timeout:
logging.error("Query timed out for %s", dst_ia)
return [], SCIONDPathReplyError.PS_TIMEOUT
# Check if we can fulfill the path request.
paths = self.path_resolution(dst_ia, flags=flags)
if not paths:
logging.error("No paths found for %s", dst_ia)
return [], SCIONDPathReplyError.NO_PATHS
return paths, SCIONDPathReplyError.OK
def path_resolution(self, dst_ia, flags=()):
# dst as == 0 means any core AS in the specified ISD.
dst_is_core = self.is_core_as(dst_ia) or dst_ia[1] == 0
sibra = PATH_FLAG_SIBRA in flags
if self.topology.is_core_as:
if dst_is_core:
ret = self._resolve_core_core(dst_ia, sibra=sibra)
else:
ret = self._resolve_core_not_core(dst_ia, sibra=sibra)
elif dst_is_core:
ret = self._resolve_not_core_core(dst_ia, sibra=sibra)
elif sibra:
ret = self._resolve_not_core_not_core_sibra(dst_ia)
else:
ret = self._resolve_not_core_not_core_scion(dst_ia)
if not sibra:
return ret
# FIXME(kormat): Strip off PCBs, and just return sibra reservation
# blocks
return self._sibra_strip_pcbs(self._strip_nones(ret))
def _resolve_core_core(self, dst_ia, sibra=False):
"""Resolve path from core to core."""
res = set()
for cseg in self.core_segments(last_ia=self.addr.isd_as, sibra=sibra,
**dst_ia.params()):
res.add((None, cseg, None))
if sibra:
return res
return tuples_to_full_paths(res)
def _resolve_core_not_core(self, dst_ia, sibra=False):
"""Resolve path from core to non-core."""
res = set()
# First check whether there is a direct path.
for dseg in self.down_segments(
first_ia=self.addr.isd_as, last_ia=dst_ia, sibra=sibra):
res.add((None, None, dseg))
# Check core-down combination.
for dseg in self.down_segments(last_ia=dst_ia, sibra=sibra):
dseg_ia = dseg.first_ia()
            if self.addr.isd_as == dseg_ia:
                # Already covered by the direct down-segment check above.
                continue
for cseg in self.core_segments(
first_ia=dseg_ia, last_ia=self.addr.isd_as, sibra=sibra):
res.add((None, cseg, dseg))
if sibra:
return res
return tuples_to_full_paths(res)
def _resolve_not_core_core(self, dst_ia, sibra=False):
"""Resolve path from non-core to core."""
res = set()
params = dst_ia.params()
params["sibra"] = sibra
if dst_ia[0] == self.addr.isd_as[0]:
# Dst in local ISD. First check whether DST is a (super)-parent.
for useg in self.up_segments(**params):
res.add((useg, None, None))
# Check whether dst is known core AS.
for cseg in self.core_segments(**params):
# Check do we have an up-seg that is connected to core_seg.
for useg in self.up_segments(first_ia=cseg.last_ia(), sibra=sibra):
res.add((useg, cseg, None))
if sibra:
return res
return tuples_to_full_paths(res)
def _resolve_not_core_not_core_scion(self, dst_ia):
"""Resolve SCION path from non-core to non-core."""
up_segs = self.up_segments()
down_segs = self.down_segments(last_ia=dst_ia)
core_segs = self._calc_core_segs(dst_ia[0], up_segs, down_segs)
full_paths = build_shortcut_paths(
up_segs, down_segs, self.rev_cache)
tuples = []
for up_seg in up_segs:
for down_seg in down_segs:
tuples.append((up_seg, None, down_seg))
for core_seg in core_segs:
tuples.append((up_seg, core_seg, down_seg))
full_paths.extend(tuples_to_full_paths(tuples))
return full_paths
def _resolve_not_core_not_core_sibra(self, dst_ia):
"""Resolve SIBRA path from non-core to non-core."""
res = set()
up_segs = set(self.up_segments(sibra=True))
down_segs = set(self.down_segments(last_ia=dst_ia, sibra=True))
for up_seg, down_seg in product(up_segs, down_segs):
src_core_ia = up_seg.first_ia()
dst_core_ia = down_seg.first_ia()
if src_core_ia == dst_core_ia:
res.add((up_seg, down_seg))
continue
for core_seg in self.core_segments(first_ia=dst_core_ia,
last_ia=src_core_ia, sibra=True):
res.add((up_seg, core_seg, down_seg))
return res
def _strip_nones(self, set_):
"""Strip None entries from a set of tuples"""
res = []
for tup in set_:
res.append(tuple(filter(None, tup)))
return res
def _sibra_strip_pcbs(self, paths):
ret = []
for pcbs in paths:
resvs = []
for pcb in pcbs:
resvs.append(self._sibra_strip_pcb(pcb))
ret.append(resvs)
return ret
def _sibra_strip_pcb(self, pcb):
assert pcb.is_sibra()
pcb_ext = pcb.sibra_ext
resv_info = pcb_ext.info
resv = ResvBlockSteady.from_values(resv_info, pcb.get_n_hops())
asms = pcb.iter_asms()
if pcb_ext.p.up:
asms = reversed(list(asms))
iflist = []
for sof, asm in zip(pcb_ext.iter_sofs(), asms):
resv.sofs.append(sof)
iflist.extend(self._sibra_add_ifs(
asm.isd_as(), sof, resv_info.fwd_dir))
assert resv.num_hops == len(resv.sofs)
return pcb_ext.p.id, resv, iflist
def _sibra_add_ifs(self, isd_as, sof, fwd):
def _add(ifid):
if ifid:
ret.append((isd_as, ifid))
ret = []
if fwd:
_add(sof.ingress)
_add(sof.egress)
else:
_add(sof.egress)
_add(sof.ingress)
return ret
def _wait_for_events(self, events, deadline):
"""
Wait on a set of events, but only until the specified deadline. Returns
the number of events that happened while waiting.
"""
count = 0
for e in events:
if e.wait(max(0, deadline - SCIONTime.get_time())):
count += 1
return count
def _fetch_segments(self, req):
"""
Called to fetch the requested path.
"""
try:
addr, port = self.dns_query_topo(ServiceType.PS)[0]
except SCIONServiceLookupError:
log_exception("Error querying path service:")
return
req_id = mk_ctrl_req_id()
logging.debug("Sending path request (%s) to [%s]:%s [id: %016x]",
req.short_desc(), addr, port, req_id)
meta = self._build_meta(host=addr, port=port)
self.send_meta(CtrlPayload(PathMgmt(req), req_id=req_id), meta)
def _calc_core_segs(self, dst_isd, up_segs, down_segs):
"""
Calculate all possible core segments joining the provided up and down
        segments. Returns a list of all known core segments connecting the
        resulting core AS pairs.
"""
src_core_ases = set()
dst_core_ases = set()
for seg in up_segs:
src_core_ases.add(seg.first_ia()[1])
for seg in down_segs:
dst_core_ases.add(seg.first_ia()[1])
# Generate all possible AS pairs
as_pairs = list(product(src_core_ases, dst_core_ases))
return self._find_core_segs(self.addr.isd_as[0], dst_isd, as_pairs)
def _find_core_segs(self, src_isd, dst_isd, as_pairs):
"""
Given a set of AS pairs across 2 ISDs, return the core segments
connecting those pairs
"""
core_segs = []
for src_core_as, dst_core_as in as_pairs:
src_ia = ISD_AS.from_values(src_isd, src_core_as)
dst_ia = ISD_AS.from_values(dst_isd, dst_core_as)
if src_ia == dst_ia:
continue
seg = self.core_segments(first_ia=dst_ia, last_ia=src_ia)
if seg:
core_segs.extend(seg)
return core_segs
def run(self):
"""
Run an instance of the SCION daemon.
"""
threading.Thread(
target=thread_safety_net, args=(self._check_trc_cert_reqs,),
name="Elem.check_trc_cert_reqs", daemon=True).start()
super().run()
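# Illustrative sketch (not part of the original module): SCIONDaemon.start() is
# the intended entry point. It constructs the daemon, runs its main loop on a
# background daemon thread and, when run_local_api is set, opens the local API
# socket that applications use to request paths. `conf_dir` and `addr` are
# hypothetical placeholders for a valid AS configuration directory and the
# local host's SCIONAddr.
#
#   SCIONDaemon.start(conf_dir, addr, run_local_api=True)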
| klausman/scion | python/sciond/sciond.py | Python | apache-2.0 | 30,793 | 0.000974 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from flask import url_for
class Hateoas(object):
def link(self, rel, title, href):
return "<link rel='%s' title='%s' href='%s'/>" % (rel, title, href)
def create_link(self, item, rel='self'):
title = item.__class__.__name__.lower()
method = ".api_%s" % title
href = url_for(method, id=item.id, _external=True)
return self.link(rel, title, href)
def create_links(self, item):
cls = item.__class__.__name__.lower()
if cls == 'taskrun':
link = self.create_link(item)
links = []
if item.app_id is not None:
links.append(self.create_link(item.app, rel='parent'))
if item.task_id is not None:
links.append(self.create_link(item.task, rel='parent'))
return links, link
elif cls == 'task':
link = self.create_link(item)
links = []
if item.app_id is not None:
links = [self.create_link(item.app, rel='parent')]
return links, link
elif cls == 'category':
return None, self.create_link(item)
elif cls == 'app':
link = self.create_link(item)
links = []
if item.category_id is not None:
links.append(self.create_link(item.category, rel='category'))
return links, link
else:
return False
def remove_links(self, item):
"""Remove HATEOAS link and links from item"""
if item.get('link'):
item.pop('link')
if item.get('links'):
item.pop('links')
return item
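# --- Illustrative usage sketch (not part of the original module) ---
# link() is plain string formatting, so it can be exercised without a Flask
# application; the URL below is a made-up example. create_link()/create_links()
# additionally require an application and request context plus registered
# ".api_<model>" endpoints, so they are only hinted at here.
if __name__ == '__main__':
    hateoas = Hateoas()
    print(hateoas.link(rel='self', title='task',
                       href='http://example.com/api/task/1'))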
| geotagx/geotagx-pybossa-archive | pybossa/hateoas.py | Python | agpl-3.0 | 2,393 | 0 |
#!/opt/local/bin/python
import string
import os
def header(n) :
return "//\n\
// BAGEL - Brilliantly Advanced General Electronic Structure Library\n\
// Filename: SPCASPT2_gen" + str(n) + ".cc\n\
// Copyright (C) 2014 Toru Shiozaki\n\
//\n\
// Author: Toru Shiozaki <shiozaki@northwestern.edu>\n\
// Maintainer: Shiozaki group\n\
//\n\
// This file is part of the BAGEL package.\n\
//\n\
// This program is free software: you can redistribute it and/or modify\n\
// it under the terms of the GNU General Public License as published by\n\
// the Free Software Foundation, either version 3 of the License, or\n\
// (at your option) any later version.\n\
//\n\
// This program is distributed in the hope that it will be useful,\n\
// but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
// GNU General Public License for more details.\n\
//\n\
// You should have received a copy of the GNU General Public License\n\
// along with this program. If not, see <http://www.gnu.org/licenses/>.\n\
//\n\
\n\
#include <bagel_config.h>\n\
#ifdef COMPILE_SMITH\n\
\n\
#include <src/smith/caspt2/SPCASPT2_tasks" + str(n) + ".h>\n\
\n\
using namespace std;\n\
using namespace bagel;\n\
using namespace bagel::SMITH;\n\
using namespace bagel::SMITH::SPCASPT2;\n\
\n\
"
footer = "#endif\n"
f = open('SPCASPT2_gen.cc', 'r')
lines = f.read().split("\n")[32:]
tasks = []
tmp = ""
for line in lines:
if (line[0:4] == "Task"):
if (tmp != ""):
tasks.append(tmp)
tmp = ""
if (line != ""):
tmp += line + "\n"
if (line == "}"):
tmp += "\n"
tasks.append(tmp)
tmp = ""
num = 0
chunk = 50
for i in range(len(tasks)):
if (num != 0 and num % chunk == 0):
n = num / chunk
fout = open("SPCASPT2_gen" + str(n) + ".cc", "w")
out = header(n) + tmp + footer
fout.write(out)
fout.close()
tmp = ""
num = num+1
tmp = tmp + tasks[i];
n = (num-1) / chunk + 1
fout = open("SPCASPT2_gen" + str(n) + ".cc", "w")
out = header(n) + tmp + footer
fout.write(out)
fout.close()
os.remove("SPCASPT2_gen.cc")
| nubakery/smith3 | python/spcaspt2/gen_split.py | Python | gpl-2.0 | 2,179 | 0.001377 |
#!/usr/bin/env python
# coding:utf-8
__author__ = 'lixin'
'''
Installing MySQL

Download the latest Community Server 5.6.x release directly from the official
MySQL website. MySQL is cross-platform; pick the installer for your platform
and install it.

During installation MySQL will ask for a password for the root user -- make
sure to remember it. If you are afraid of forgetting it, just set it to
"password".

On Windows, choose the UTF-8 encoding during installation so that Chinese text
is handled correctly.

On Mac or Linux, edit the MySQL configuration file and change the database's
default encoding to UTF-8. The configuration file is located at /etc/my.cnf or
/etc/mysql/my.cnf by default:

[client]
default-character-set = utf8
[mysqld]
default-storage-engine = INNODB
character-set-server = utf8
collation-server = utf8_general_ci

After restarting MySQL, check the encoding from the MySQL command-line client:

$ mysql -u root -p
Enter password:
Welcome to the MySQL monitor...
...
mysql> show variables like '%char%';
+--------------------------+--------------------------------------------------------+
| Variable_name            | Value                                                  |
+--------------------------+--------------------------------------------------------+
| character_set_client     | utf8                                                   |
| character_set_connection | utf8                                                   |
| character_set_database   | utf8                                                   |
| character_set_filesystem | binary                                                 |
| character_set_results    | utf8                                                   |
| character_set_server     | utf8                                                   |
| character_set_system     | utf8                                                   |
| character_sets_dir       | /usr/local/mysql-5.1.65-osx10.6-x86_64/share/charsets/ |
+--------------------------+--------------------------------------------------------+
8 rows in set (0.00 sec)

Seeing "utf8" here means the encoding is set up correctly.
'''
# The MySQL driver must be installed first; the command is:
# $ pip install mysql-connector-python --allow-external mysql-connector-python
# Imports:
import uuid
from datetime import datetime
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Create the base class for model objects:
Base = declarative_base()
# Initialize the database connection:
engine = create_engine('mysql+mysqlconnector://root:Duanx1234@localhost:3306/test')
# Create the DBSession class:
DBSession = sessionmaker(bind=engine)
class Sourcedata(Base):
    # Name of the table:
__tablename__ = 'sourcedata'
    # Structure of the table
id = Column(String(50), primary_key=True)
name = Column(String(500))
url = Column(String(500))
sharetime = Column(String(20))
createtime = Column(String(20))
class SourcedataDao:
def batchInsert(self, flist):
try:
            # Create a session object:
session = DBSession()
for sd in flist:
                # Create a new Sourcedata object:
new_sourcedata = Sourcedata(id=str(uuid.uuid4()), name=sd.name, url=sd.url, sharetime=sd.sharetime, createtime=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
                # Add it to the session:
session.add(new_sourcedata)
print "insert a new_sourcedata"
            # Commit, i.e. save to the database:
session.commit()
except Exception,e:
print e.message
finally:
            # Close the session:
session.close()
class sdata:
def __init__(self,n,u):
self.name=n
self.url=u
self.sharetime=datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if __name__ == "__main__":
flist = []
sdDao = SourcedataDao()
for i in range(10):
flist.append(sdata("file" + str(i), "pan.baidu.com/file" + str(i)))
sdDao.batchInsert(flist)
| duanx/bdcspider | bdmysqlDB.py | Python | gpl-2.0 | 3,758 | 0.00612 |
__author__ = 'yuxiang'
import os
import datasets
import datasets.rgbd_scenes
import datasets.imdb
import numpy as np
import subprocess
import cPickle
class rgbd_scenes(datasets.imdb):
def __init__(self, image_set, rgbd_scenes_path=None):
datasets.imdb.__init__(self, 'rgbd_scenes_' + image_set)
self._image_set = image_set
self._rgbd_scenes_path = self._get_default_path() if rgbd_scenes_path is None \
else rgbd_scenes_path
self._data_path = os.path.join(self._rgbd_scenes_path, 'imgs')
self._classes = ('__background__', 'bowl', 'cap', 'cereal_box', 'coffee_mug', 'coffee_table', 'office_chair', 'soda_can', 'sofa', 'table')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.png'
self._image_index = self._load_image_set_index()
self._roidb_handler = self.gt_roidb
assert os.path.exists(self._rgbd_scenes_path), \
'rgbd_scenes path does not exist: {}'.format(self._rgbd_scenes_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self.image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, index + '-color' + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def depth_path_at(self, i):
"""
        Return the absolute path to depth image i in the image sequence.
"""
return self.depth_path_from_index(self.image_index[i])
def depth_path_from_index(self, index):
"""
        Construct a depth path from the image's "index" identifier.
"""
depth_path = os.path.join(self._data_path, index + '-depth' + self._image_ext)
assert os.path.exists(depth_path), \
'Path does not exist: {}'.format(depth_path)
return depth_path
def metadata_path_at(self, i):
"""
Return the absolute path to metadata i in the image sequence.
"""
return self.metadata_path_from_index(self.image_index[i])
def metadata_path_from_index(self, index):
"""
        Construct a metadata path from the image's "index" identifier.
"""
metadata_path = os.path.join(self._data_path, index + '-meta.mat')
return metadata_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
image_set_file = os.path.join(self._rgbd_scenes_path, self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.rstrip('\n') for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where KITTI is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'RGBD_Scenes', 'rgbd-scenes-v2')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_rgbd_scenes_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def _load_rgbd_scenes_annotation(self, index):
"""
Load class name and meta data
"""
# image path
image_path = self.image_path_from_index(index)
# depth path
depth_path = self.depth_path_from_index(index)
# metadata path
metadata_path = self.metadata_path_from_index(index)
boxes = []
gt_class = []
return {'image': image_path,
'depth': depth_path,
'meta_data': metadata_path,
'boxes': boxes,
'gt_classes': gt_class,
'flipped' : False}
if __name__ == '__main__':
d = datasets.rgbd_scenes('val')
res = d.roidb
from IPython import embed; embed()
| yuxng/Deep_ISM | ISM/lib/datasets/rgbd_scenes.py | Python | mit | 4,957 | 0.004035 |
"""
Resolve unspecified dates and date strings to datetimes.
"""
import datetime as dt
from dateutil.parser import parse as parse_date
import pytz
class InvalidDateFormat(Exception):
"""
The date string could not be parsed.
"""
pass
class DateValidationError(Exception):
"""
Dates are not semantically valid.
"""
pass
DISTANT_PAST = dt.datetime(dt.MINYEAR, 1, 1, tzinfo=pytz.utc)
DISTANT_FUTURE = dt.datetime(dt.MAXYEAR, 1, 1, tzinfo=pytz.utc)
def _parse_date(value, _):
"""
Parse an ISO formatted datestring into a datetime object with timezone set to UTC.
Args:
value (str or datetime): The ISO formatted date string or datetime object.
_ (function): The i18n service function used to get the appropriate
text for a message.
Returns:
datetime.datetime
Raises:
InvalidDateFormat: The date string could not be parsed.
"""
if isinstance(value, dt.datetime):
return value.replace(tzinfo=pytz.utc)
elif isinstance(value, basestring):
try:
return parse_date(value).replace(tzinfo=pytz.utc)
except ValueError:
raise InvalidDateFormat(
_("'{date}' is an invalid date format. Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.").format(
date=value
)
)
else:
raise InvalidDateFormat(_("'{date}' must be a date string or datetime").format(date=value))
def parse_date_value(date, _):
""" Public method for _parse_date """
return _parse_date(date, _)
def resolve_dates(start, end, date_ranges, _):
"""
Resolve date strings (including "default" dates) to datetimes.
The basic rules are:
1) Unset problem start dates default to the distant past.
2) Unset problem end dates default to the distant future.
3) Unset start dates default to the start date of the previous assessment/submission.
(The first submission defaults to the problem start date.)
4) Unset end dates default to the end date of the following assessment/submission.
(The last assessment defaults to the problem end date.)
5) `start` resolves to the earliest start date.
6) `end` resolves to the latest end date.
7) Ensure that `start` is before `end`.
8) Ensure that `start` is before the earliest due date.
9) Ensure that `end` is after the latest start date.
Overriding start/end dates:
* Rules 5-9 may seem strange, but they're necessary. Unlike `date_ranges`,
the `start` and `end` values are inherited by the XBlock from the LMS.
This means that you can set `start` and `end` in Studio, effectively bypassing
our validation rules.
* On the other hand, we *need* the start/due dates so we can resolve unspecified
date ranges to an actual date. For example,
if the problem closes on April 15th, 2014, but the course author hasn't specified
a due date for a submission, we need ensure the submission closes on April 15th.
* For this reason, we use `start` and `end` only if they satisfy our validation
rules. If not (because a course author has changed them to something invalid in Studio),
we use the dates that the course author specified in the problem definition,
which (a) MUST satisfy our ordering constraints, and (b) are probably
what the author intended.
Example:
Suppose I have a problem with a submission and two assessments:
| |
        | |== submission ==| |== peer-assessment ==| |== self-assessment ==| |
| |
and I set start/due dates for the submission and self-assessment, but not for peer-assessment.
Then by default, peer-assessment will "expand":
| |
| |== submission ==| |== self-assessment ==| |
| |============================ peer-assessment ==========================| |
| |
If I then remove the due date for the submission, but add a due date for peer-assessment:
| |
| |== submission =============================| |== self-assessment ==| |
| |============== peer-assessment ============| |
| |
If no dates are set, start dates default to the distant past and end dates default
to the distant future:
| |
| |================= submission ==============| |
| |============== self-assessment ============| |
| |============== peer-assessment ============| |
| |
Args:
start (str, ISO date format, or datetime): When the problem opens.
A value of None indicates that the problem is always open.
end (str, ISO date format, or datetime): When the problem closes.
A value of None indicates that the problem never closes.
date_ranges (list of tuples): list of (start, end) ISO date string tuples indicating
the start/end timestamps (date string or datetime) of each submission/assessment.
_ (function): An i18n service function to use for retrieving the
proper text.
Returns:
start (datetime): The resolved start date
end (datetime): The resolved end date.
list of (start, end) tuples, where both elements are datetime objects.
Raises:
DateValidationError
InvalidDateFormat
"""
# Resolve problem start and end dates to minimum and maximum dates
start = _parse_date(start, _) if start is not None else DISTANT_PAST
end = _parse_date(end, _) if end is not None else DISTANT_FUTURE
resolved_starts = []
resolved_ends = []
# Amazingly, Studio allows the release date to be after the due date!
# This can cause a problem if the course author has configured:
#
# 1) Problem start >= problem due, and
# 2) Start/due dates that resolve to the problem start/due date.
#
# In this case, all submission/assessment start dates
# could default to the problem start while
# due dates default to the problem due date, violating
# the constraint that start dates always precede due dates.
# If we detect that the author has done this,
# we set the start date to just before
    # the due date, so we (just barely) satisfy the validation rules.
if start >= end:
start = end - dt.timedelta(milliseconds=1)
# Override start/end dates if they fail to satisfy our validation rules
# These are the only parameters a course author can change in Studio
# without triggering our validation rules, so we need to use sensible
# defaults. See the docstring above for a more detailed justification.
for step_start, step_end in date_ranges:
if step_start is not None:
parsed_start = _parse_date(step_start, _)
start = min(start, parsed_start)
end = max(end, parsed_start + dt.timedelta(milliseconds=1))
if step_end is not None:
parsed_end = _parse_date(step_end, _)
end = max(end, parsed_end)
start = min(start, parsed_end - dt.timedelta(milliseconds=1))
# Iterate through the list forwards and backwards simultaneously
# As we iterate forwards, resolve start dates.
# As we iterate backwards, resolve end dates.
prev_start = start
prev_end = end
for index in range(len(date_ranges)):
reverse_index = len(date_ranges) - index - 1
# Resolve "default" start dates to the previous start date.
# If I set a start date for peer-assessment, but don't set a start date for the following self-assessment,
# then the self-assessment should default to the same start date as the peer-assessment.
step_start, __ = date_ranges[index]
step_start = _parse_date(step_start, _) if step_start is not None else prev_start
# Resolve "default" end dates to the following end date.
# If I set a due date for self-assessment, but don't set a due date for the previous peer-assessment,
# then the peer-assessment should default to the same due date as the self-assessment.
__, step_end = date_ranges[reverse_index]
step_end = _parse_date(step_end, _) if step_end is not None else prev_end
if step_start < prev_start:
msg = _(
u"This step's start date '{start}' cannot be earlier than the previous step's start date '{prev}'."
).format(
start=step_start,
prev=prev_start,
)
raise DateValidationError(msg)
if step_end > prev_end:
msg = _(u"This step's due date '{due}' cannot be later than the next step's due date '{prev}'.").format(
due=step_end, prev=prev_end
)
raise DateValidationError(msg)
resolved_starts.append(step_start)
resolved_ends.insert(0, step_end)
prev_start = step_start
prev_end = step_end
# Combine the resolved dates back into a list of tuples
resolved_ranges = zip(resolved_starts, resolved_ends)
# Now that we have resolved both start and end dates, we can safely compare them
for resolved_start, resolved_end in resolved_ranges:
if resolved_start >= resolved_end:
msg = _(u"The start date '{start}' cannot be later than the due date '{due}'").format(
start=resolved_start, due=resolved_end
)
raise DateValidationError(msg)
return start, end, resolved_ranges
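# --- Illustrative usage sketch (not part of the original module) ---
# Shows how unset bounds "expand" to the neighbouring step's bounds; the
# identity lambda stands in for the i18n service function.
if __name__ == '__main__':
    _identity = lambda text: text
    demo_start, demo_end, demo_ranges = resolve_dates(
        "2014-04-01T00:00:00",              # problem start
        "2014-04-15T00:00:00",              # problem due
        [
            ("2014-04-01T00:00:00", None),  # submission: due date unset
            (None, None),                   # peer-assessment: fully unset
            (None, "2014-04-15T00:00:00"),  # self-assessment: start unset
        ],
        _identity,
    )
    for demo_range in demo_ranges:
        # Every step resolves to an explicit, correctly ordered (start, end) pair.
        print(demo_range)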
| Stanford-Online/edx-ora2 | openassessment/xblock/resolve_dates.py | Python | agpl-3.0 | 10,291 | 0.003984 |
# This script is actually for Cyber Security on Windows 7. Should mostly work
# for Windows 8 and 10 too. I just absolutely hate using Windows 8 and refuse
# to test it on any Windows 8 machine.
from __future__ import print_function
from subprocess import call
from subprocess import check_output
import os
############################# User Management #############################
# Get username
username = os.getenv('username')
# Make alphanumeric variable
alpha = 'abcdefghijklmnopqrstuvwxyz'
numbers = '1234567890'
alpha_numeric = alpha + alpha.upper() + numbers
registry_commands = open("commands.txt", "r")
# Initialize important variables
users = []
incoming_user = ''
times_through = 1
temp_users = str(check_output('net user'))
for not_allowed_characters in '"/\[]:;|=,+*?<>':
    temp_users = temp_users.replace(not_allowed_characters, '')
temp_users = temp_users.replace("\r\n", "")
temp_users = temp_users.replace("\r", "")
temp_users = temp_users.replace("\n", "")
# " / \ [ ] : ; | = , + * ? < > are the characters not allowed in usernames
# Get a list of all users on the system
for character in temp_users:
if character in alpha_numeric or character in "-#\'.!@$%^&()}{":
incoming_user += character
elif len(incoming_user) > 0:
if times_through > 5:
users.append(incoming_user)
incoming_user = ''
times_through += 1
# Remove unnecessary stuff at end
users = users[0:len(users)-4]
# Print all users
print('All the users currently on this computer are ' + str(users))
def user_management(users):
def should_be_admin(user):
# Should the user be an admin
should_be_admin = raw_input(user + " is an administrator. Should they be? y/n. ")
if should_be_admin == 'y':
return True
if should_be_admin == 'n':
return False
def should_be_user(user):
# Should the user be a user
should_be_user = raw_input(user + " is a user. Should they be? y/n. ")
if should_be_user == 'y':
return True
if should_be_user == 'n':
return False
for user in users:
# Iterate through user list
if user in check_output('net localgroup Administrators'):
# If user is in the Administrators localgroup
if not should_be_admin(user):
print('Removing ' + user + ' from the Administrators group')
os.system('net localgroup Administrators ' + user + ' /delete')
else:
print('OK. We are keeping ' + user + ' in the Administrators group.')
        else:
            should_be_user_answer = should_be_user(user)
            if not should_be_user_answer:
                print('Removing ' + user)
                os.system('net user ' + user + ' /delete')
            elif should_be_admin(user):
                # The account is being kept and is not currently an administrator.
                print('Adding ' + user + ' to the Administrators group')
                os.system('net localgroup Administrators ' + user + ' /add')
# Ask if we should do user management stuff.
do_user_management = raw_input("Shall we manage users? y/n. ")
if do_user_management == 'y':
user_management(users)
############################# Registry keys and such #############################
if raw_input("Shall we change some registry stuff? y/n. ") == 'y':
# Password policy automagic
print('Chaning password policies and such...')
os.system('net accounts /FORCELOGOFF:30 /MINPWLEN:8 /MAXPWAGE:30 /MINPWAGE:10 /UNIQUEPW:5')
# Clean DNS cache, cause why not
print('Bro, I cleaned your DNS cache. Deal with it.')
os.system('ipconfig /flushdns')
# Disable built-in accounts
print('I really hope you weren\'t the default Administrator account')
os.system('net user Guest /active:NO')
os.system('net user Administrator /active:NO')
# Make auditing great again.
print('Auditing now on! Yay!!!!')
os.system('auditpol /set /category:* /success:enable')
os.system('auditpol /set /category:* /failure:enable')
# Enable firewall
print('The firewall torch has been passed on to you')
os.system('netsh advfirewall set allprofiles state on')
os.system('echo You\'re going to have to type exit')
#I have no idea what I was doing here....
os.system('secedit /import /db secedit.sdb /cfg cyber.inf /overwrite /log MyLog.txt')
reg_dir = '"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System\\ '
for command in (('FilterAdministratorToken"','1'),('ConsentPromptBehaviorAdmin"','1'),('ConsentPromptBehaviorUser"','1'),('EnableInstallerDetection"','1'),('ValidateAdminCodeSignatures"','1'),('EnableLUA"','1'),('PromptOnSecureDesktop"','1'),('EnableVirtualization"','1'),):
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\\'
for command in (('AUOptions"', '4'),('ElevateNonAdmins"', '1'),('IncludeRecommendedUpdates"', '1'),('ScheduledInstallTime"', '22')):
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Terminal Server\\'
for command in (('fDenyTSConnections"', '1'),('AllowRemoteRPC"', '0')):
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\SYSTEM\ControlSet001\Control\Remote Assistance\\'
for command in (('fAllowFullControl"','0'),('fAllowToGetHelp"','0')):
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Terminal Server\WinStations\RDP-Tcp\\'
command = ('UserAuthentication"','1')
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\SYSTEM\ControlSet001\Control\Remote Assistance\\'
command = ('CreateEncryptedOnlyTickets"','1')
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
reg_dir = '"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Terminal Server\WinStations\RDP-Tcp\\'
command = ('fDisableEncryption"','0')
os.system('reg add ' + reg_dir + ' /v ' + command[0] + ' /t REG_DWORD /d ' + command[1] + ' /f')
# I have found additional commands. This one 'might' fix the host file.
os.system('attrib -r -s C:\WINDOWS\system32\drivers\etc\hosts')
os.system('echo > C:\Windows\System32\drivers\etc\hosts')
# This isn't really appropriate for this option, but...
os.system('net start > started_services.txt')
# Remote registry
os.system('net stop RemoteRegistry')
os.system('sc config RemoteRegistry start=disabled')
for service in ('RemoteAccess', 'Telephony', 'tlntsvr', 'p2pimsvc', 'simptcp', 'fax', 'msftpsvc'):
os.system('net stop ' + service)
os.system('sc config ' + service + ' start = disabled')
for command in registry_commands.readlines():
os.system(command)
############################# Search for media files #############################
if raw_input("Shall we search for media files? y/n. ") == 'y':
file_list = []
# Ask for directory to be scanned.
directory_to_scan = input('What directory would you like to scan for media files? Remember to enclose your directory in \'s or "s, and use two \s if your directory ends in a \. ')
# Inefficient but I spent too much time looking how to do this to delete it.
'''for root, dirs, files in os.walk(directory_to_scan):
for f_name in files:
file_path = os.path.join(root, f_name)
# If the file ends with common media extension, add file path to text_file
for extension in ('.mp3','.wav','.png','wmv','.jpg','.jpeg','.mp4','.avi','.mov','.aif','.iff','.php','.m3u','.m4a','.wma','.m4v','.mpg','.bmp','.gif','.bat','.exe','.zip','.7z'):
if root in file_list:
pass
else:
file_list.append(root)'''
os.system('dir /s /b ' + directory_to_scan + ' > allfiles.txt')
input_file = open('allfiles.txt', 'r')
text_file = open('media_files.txt','w')
for line in input_file:
for extension in ('.mp3','.wav','.png','wmv','.jpg','.jpeg','.mp4','.avi','.mov','.aif','.iff','.m3u','.m4a','.wma','.m4v','.mpg','.bmp','.gif','.bat','.txt','.exe','.zip','.7z','.php','.html'):
if line.endswith(extension + '\n'):
text_file.write(line)
    # The first pass consumed the file iterator; rewind before scanning again.
    input_file.seek(0)
    for line in input_file.readlines():
for bad_stuff in ['cain','able','nmap','keylogger','armitage','metasploit','shellter','clean']:
if bad_stuff in line:
text_file.write(line)
text_file.close()
print('Available commands are addUser, passwords, and exit.')
command = raw_input('What would you like to do? ')
if command == 'addUser':
username = raw_input('What is the desired username? ')
os.system('net user ' + username + ' P@55w0rd /ADD')
if command == 'passwords':
    users_string = str(users).replace('[','').replace(']','')
    username = raw_input('The current users on the machine are ' + users_string + '. Whose password would you like to change? ')
    new_password = raw_input('What shall the password be? ')
    os.system('net user ' + username + ' ' + new_password)
if command == 'exit':
os.system('pause')
| road2ge/cyber-defense-scripts | main-for-windows.py | Python | gpl-3.0 | 9,651 | 0.017615 |
# -*- coding: utf-8 -*-
import os
from fuel import config
from fuel.datasets import H5PYDataset
from fuel.transformers.defaults import uint8_pixels_to_floatX
class SVHN(H5PYDataset):
"""The Street View House Numbers (SVHN) dataset.
SVHN [SVHN] is a real-world image dataset for developing machine
learning and object recognition algorithms with minimal requirement
on data preprocessing and formatting. It can be seen as similar in
flavor to MNIST [LBBH] (e.g., the images are of small cropped
digits), but incorporates an order of magnitude more labeled data
(over 600,000 digit images) and comes from a significantly harder,
unsolved, real world problem (recognizing digits and numbers in
natural scene images). SVHN is obtained from house numbers in
Google Street View images.
.. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco,
Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with
Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
and Unsupervised Feature Learning, 2011.
.. [LBBH] Yann LeCun, Léon Bottou, Yoshua Bengio, and Patrick Haffner,
*Gradient-based learning applied to document recognition*,
Proceedings of the IEEE, November 1998, 86(11):2278-2324.
Parameters
----------
which_format : {1, 2}
SVHN format 1 contains the full numbers, whereas SVHN format 2
contains cropped digits.
which_set : {'train', 'test', 'extra'}
Whether to load the training set (73,257 examples), the test
set (26,032 examples) or the extra set (531,131 examples).
Note that SVHN does not have a validation set; usually you
will create your own training/validation split
using the `subset` argument.
"""
filename = 'svhn_format_{}.hdf5'
default_transformers = uint8_pixels_to_floatX(('features',))
def __init__(self, which_format, which_set, **kwargs):
self.which_format = which_format
super(SVHN, self).__init__(self.data_path, which_set, **kwargs)
@property
def data_path(self):
return os.path.join(
config.data_path, self.filename.format(self.which_format))
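# --- Illustrative usage sketch (not part of the original module) ---
# Assumes svhn_format_2.hdf5 has already been downloaded and converted (e.g.
# via the fuel-download / fuel-convert tools) and is reachable through fuel's
# configured data_path; otherwise constructing the dataset raises an error.
if __name__ == '__main__':
    train_set = SVHN(which_format=2, which_set='train',
                     sources=('features', 'targets'))
    state = train_set.open()
    features, targets = train_set.get_data(state, request=slice(0, 8))
    print('first batch: features {0}, targets {1}'.format(features.shape,
                                                          targets.shape))
    train_set.close(state)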
| EderSantana/fuel | fuel/datasets/svhn.py | Python | mit | 2,213 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> "_models.LocalNetworkGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.LocalNetworkGateway"]
"""Creates or updates a local network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local network gateway operation.
:type parameters: ~azure.mgmt.network.v2018_01_01.models.LocalNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
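    # Illustrative client-side sketch (not part of the generated code): callers
    # normally drive this long-running operation through the returned poller,
    # e.g.
    #
    #   poller = network_client.local_network_gateways.begin_create_or_update(
    #       "my_resource_group", "my_gateway",
    #       {
    #           "location": "westus",
    #           "gateway_ip_address": "11.12.13.14",
    #           "local_network_address_space": {"address_prefixes": ["10.1.0.0/16"]},
    #       },
    #   )
    #   gateway = poller.result()  # blocks until provisioning completes
    #
    # `network_client` is a hypothetical NetworkManagementClient configured for
    # this API version; all parameter values above are placeholders.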
def get(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.LocalNetworkGateway"
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocalNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_01_01.models.LocalNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass False to disable polling for this operation, or pass your own initialized polling object to use a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.LocalNetworkGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.LocalNetworkGateway"]
"""Updates a local network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to update local network gateway tags.
:type parameters: ~azure.mgmt.network.v2018_01_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass False to disable polling for this operation, or pass your own initialized polling object to use a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LocalNetworkGatewayListResult"]
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either LocalNetworkGatewayListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.LocalNetworkGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'} # type: ignore
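    # Illustrative usage sketch (an assumption; the client variable is a placeholder,
    # not part of the generated module). ItemPaged follows next_link lazily, so a
    # single loop may issue several requests:
    #
    #     for gateway in client.local_network_gateways.list('my-resource-group'):
    #         print(gateway.name)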
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_01_01/operations/_local_network_gateways_operations.py | Python | mit | 27,633 | 0.005139 |
#!/usr/bin/python
"""Test of Dojo combo box presentation."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(PauseAction(5000))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Tab to the first combo box",
["BRAILLE LINE: 'US State test 1 (200% Courier font): California $l'",
" VISIBLE: '(200% Courier font): California ', cursor=32",
"BRAILLE LINE: 'Focus mode'",
" VISIBLE: 'Focus mode', cursor=0",
"BRAILLE LINE: 'US State test 1 (200% Courier font): California $l'",
" VISIBLE: '(200% Courier font): California ', cursor=32",
"SPEECH OUTPUT: 'collapsed'",
"SPEECH OUTPUT: 'US State test 1 (200% Courier font): entry California selected'",
"SPEECH OUTPUT: 'Focus mode' voice=system"]))
sequence.append(utils.StartRecordingAction())
sequence.append(TypeAction("C"))
sequence.append(utils.AssertPresentationAction(
"2. Replace existing text with a 'C'",
["KNOWN ISSUE: The braille line is not quite right",
"BRAILLE LINE: 'US State test 1 (200% Courier font): C $l'",
" VISIBLE: '(200% Courier font): C $l', cursor=23",
"BRAILLE LINE: 'US State test 1 (200% Courier font): US State test 1 (200% Courier font): combo box'",
" VISIBLE: 'te test 1 (200% Courier font): U', cursor=32",
"SPEECH OUTPUT: 'expanded'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Down Arrow",
["BRAILLE LINE: 'C alifornia (CA)'",
" VISIBLE: 'C alifornia (CA)', cursor=1",
"SPEECH OUTPUT: 'California menu'",
"SPEECH OUTPUT: 'C alifornia (CA).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. Down Arrow",
["BRAILLE LINE: 'C olorado (CO)'",
" VISIBLE: 'C olorado (CO)', cursor=1",
"SPEECH OUTPUT: 'C olorado (CO).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"5. Down Arrow",
["BRAILLE LINE: 'C onnecticut (CT)'",
" VISIBLE: 'C onnecticut (CT)', cursor=1",
"SPEECH OUTPUT: 'C onnecticut (CT).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"6. Down Arrow",
["BRAILLE LINE: 'C alifornia (CA)'",
" VISIBLE: 'C alifornia (CA)', cursor=1",
"SPEECH OUTPUT: 'C alifornia (CA).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"7. Up Arrow",
["BRAILLE LINE: 'C onnecticut (CT)'",
" VISIBLE: 'C onnecticut (CT)', cursor=1",
"SPEECH OUTPUT: 'C onnecticut (CT).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"8. Up Arrow",
["BRAILLE LINE: 'C olorado (CO)'",
" VISIBLE: 'C olorado (CO)', cursor=1",
"SPEECH OUTPUT: 'C olorado (CO).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"9. Up Arrow",
["BRAILLE LINE: 'C alifornia (CA)'",
" VISIBLE: 'C alifornia (CA)', cursor=1",
"SPEECH OUTPUT: 'C alifornia (CA).'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"10. Basic Where Am I - Combo box expanded",
["BRAILLE LINE: 'C alifornia (CA)'",
" VISIBLE: 'C alifornia (CA)', cursor=1",
"SPEECH OUTPUT: 'California menu'",
"SPEECH OUTPUT: 'C alifornia (CA).'",
"SPEECH OUTPUT: '1 of 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Escape"))
sequence.append(utils.AssertPresentationAction(
"11. Escape",
["BRAILLE LINE: 'US State test 1 (200% Courier font): US State test 1 (200% Courier font): combo box'",
" VISIBLE: 'te test 1 (200% Courier font): U', cursor=32",
"BRAILLE LINE: 'US State test 1 (200% Courier font): California $l'",
" VISIBLE: '(200% Courier font): California ', cursor=32",
"SPEECH OUTPUT: 'collapsed'",
"SPEECH OUTPUT: 'US State test 1 (200% Courier font): entry California selected'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| pvagner/orca | test/keystrokes/firefox/aria_combobox_dojo.py | Python | lgpl-2.1 | 4,695 | 0.000852 |
"""
Student Views
"""
import datetime
import logging
import uuid
from collections import namedtuple
from bulk_email.models import Optout
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.views import password_reset_confirm
from django.contrib.sites.models import Site
from django.core import mail
from django.urls import reverse
from django.core.validators import ValidationError, validate_email
from django.db import transaction
from django.db.models.signals import post_save
from django.dispatch import Signal, receiver
from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import redirect
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.utils.encoding import force_bytes, force_text
from django.utils.http import base36_to_int, urlsafe_base64_encode
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_GET, require_POST, require_http_methods
from edx_ace import ace
from edx_ace.recipient import Recipient
from edx_django_utils import monitoring as monitoring_utils
from eventtracking import tracker
from ipware.ip import get_ip
# Note that this lives in LMS, so this dependency should be refactored.
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from six import text_type
from xmodule.modulestore.django import modulestore
import track.views
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_response, render_to_string
from entitlements.models import CourseEntitlement
from openedx.core.djangoapps.ace_common.template_context import get_base_template_context
from openedx.core.djangoapps.catalog.utils import get_programs_with_type
from openedx.core.djangoapps.embargo import api as embargo_api
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.oauth_dispatch.api import destroy_oauth_tokens
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from openedx.core.djangoapps.theming.helpers import get_current_site
from openedx.core.djangoapps.user_api.config.waffle import (
PASSWORD_UNICODE_NORMALIZE_FLAG, PREVENT_AUTH_USER_WRITES, SYSTEM_MAINTENANCE_MSG, waffle
)
from openedx.core.djangoapps.user_api.errors import UserNotFound, UserAPIInternalError
from openedx.core.djangoapps.user_api.models import UserRetirementRequest
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangolib.markup import HTML, Text
from openedx.features.journals.api import get_journals_context
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from student.helpers import (
DISABLE_UNENROLL_CERT_STATES,
auth_pipeline_urls,
cert_info,
create_or_set_user_attribute_created_on_site,
do_create_account,
generate_activation_email_context,
get_next_url_for_login_page
)
from student.message_types import EmailChange, PasswordReset
from student.models import (
CourseEnrollment,
PasswordHistory,
PendingEmailChange,
Registration,
RegistrationCookieConfiguration,
UserAttribute,
UserProfile,
UserSignupSource,
UserStanding,
create_comments_service_user,
email_exists_or_retired,
)
from student.signals import REFUND_ORDER
from student.tasks import send_activation_email
from student.text_me_the_app import TextMeTheAppFragmentView
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.password_policy_validators import normalize_password, validate_password
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple(
'ReverifyInfo',
'course_id course_name course_number date status display'
)
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Used as the name of the user attribute for tracking affiliate registrations
REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id'
REGISTRATION_UTM_PARAMETERS = {
'utm_source': 'registration_utm_source',
'utm_medium': 'registration_utm_medium',
'utm_campaign': 'registration_utm_campaign',
'utm_term': 'registration_utm_term',
'utm_content': 'registration_utm_content',
}
REGISTRATION_UTM_CREATED_AT = 'registration_utm_created_at'
# used to announce a registration
REGISTER_USER = Signal(providing_args=["user", "registration"])
def csrf_token(context):
"""
A csrf token that can be included in a form.
"""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="{}" /></div>'.format(token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
    extra_context is used to allow immediate display of certain modal windows, e.g. signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
courses = get_courses(user)
if configuration_helpers.get_value(
"ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html')
# This appears to be an unused context parameter, at least for the master templates...
context['show_partners'] = configuration_helpers.get_value('show_partners', True)
# TO DISPLAY A YOUTUBE WELCOME VIDEO
# 1) Change False to True
context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False)
# Maximum number of courses to display on the homepage.
context['homepage_course_max'] = configuration_helpers.get_value(
'HOMEPAGE_COURSE_MAX', settings.HOMEPAGE_COURSE_MAX
)
# 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration
# Note: This value should be moved into a configuration setting and plumbed-through to the
# context via the site configuration workflow, versus living here
youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
context['homepage_promo_video_youtube_id'] = youtube_video_id
# allow for theme override of the courses list
context['courses_list'] = theming_helpers.get_template_path('courses_list.html')
# Insert additional context for use in the template
context.update(extra_context)
# Add marketable programs to the context.
context['programs_list'] = get_programs_with_type(request.site, include_hidden=False)
# TODO: Course Listing Plugin required
context['journal_info'] = get_journals_context(request)
return render_to_response('index.html', context)
def compose_and_send_activation_email(user, profile, user_registration=None):
"""
Construct all the required params and send the activation email
through celery task
Arguments:
user: current logged-in user
profile: profile object of the current logged-in user
user_registration: registration of the current logged-in user
"""
dest_addr = user.email
if user_registration is None:
user_registration = Registration.objects.get(user=user)
context = generate_activation_email_context(user, user_registration)
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message_for_activation = render_to_string('emails/activation_email.txt', context)
from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
from_address = configuration_helpers.get_value('ACTIVATION_EMAIL_FROM_ADDRESS', from_address)
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message_for_activation = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message_for_activation)
send_activation_email.delay(subject, message_for_activation, from_address, dest_addr)
def send_reactivation_email_for_user(user):
try:
registration = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('No inactive user with this e-mail exists'),
})
try:
context = generate_activation_email_context(user, registration)
except ObjectDoesNotExist:
log.error(
u'Unable to send reactivation email due to unavailable profile for the user "%s"',
user.username,
exc_info=True
)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
})
subject = render_to_string('emails/activation_email_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
from_address = configuration_helpers.get_value('ACTIVATION_EMAIL_FROM_ADDRESS', from_address)
try:
user.email_user(subject, message, from_address)
except Exception: # pylint: disable=broad-except
log.error(
u'Unable to send reactivation email from "%s" to "%s"',
from_address,
user.email,
exc_info=True
)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
})
return JsonResponse({"success": True})
@login_required
def course_run_refund_status(request, course_id):
"""
Get Refundable status for a course.
Arguments:
request: The request object.
course_id (str): The unique identifier for the course.
Returns:
Json response.
"""
try:
course_key = CourseKey.from_string(course_id)
course_enrollment = CourseEnrollment.get_enrollment(request.user, course_key)
except InvalidKeyError:
logging.exception("The course key used to get refund status caused InvalidKeyError during look up.")
return JsonResponse({'course_refundable_status': ''}, status=406)
refundable_status = course_enrollment.refundable()
logging.info("Course refund status for course {0} is {1}".format(course_id, refundable_status))
return JsonResponse({'course_refundable_status': refundable_status}, status=200)
def _update_email_opt_in(request, org):
"""
Helper function used to hit the profile API if email opt-in is enabled.
"""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
TODO: This is lms specific and does not belong in common code.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request, so
the error messages in the responses should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
The default is set to False to avoid breaking legacy code or
code with non-standard flows (ex. beta tester invitations), but
for any standard enrollment flow you probably want this to be True.
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated:
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = CourseKey.from_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
# Allow us to monitor performance of this transaction on a per-course basis since we often roll-out features
# on a per-course basis.
monitoring_utils.set_custom_metric('course_id', text_type(course_id))
if action == "enroll":
# Make sure the course exists
        # We don't do this check on unenroll, otherwise a bad course id could never be unenrolled from
if not modulestore().has_course(course_id):
log.warning(
u"User %s tried to enroll in non-existent course %s",
user.username,
course_id
)
return HttpResponseBadRequest(_("Course id is invalid"))
# Record the user's email opt-in preference
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
_update_email_opt_in(request, course_id.org)
available_modes = CourseMode.modes_for_course_dict(course_id)
# Check whether the user is blocked from enrolling in this course
# This can occur if the user's IP is on a global blacklist
# or if the user is enrolling in a country in which the course
# is not available.
redirect_url = embargo_api.redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return HttpResponse(redirect_url)
if CourseEntitlement.check_for_existing_entitlement_and_enroll(user=user, course_run_key=course_id):
            return HttpResponse(reverse('courseware', args=[text_type(course_id)]))
# Check that auto enrollment is allowed for this course
# (= the course is NOT behind a paywall)
if CourseMode.can_auto_enroll(course_id):
# Enroll the user using the default mode (audit)
# We're assuming that users of the course enrollment table
# will NOT try to look up the course enrollment model
# by its slug. If they do, it's possible (based on the state of the database)
# for no such model to exist, even though we've set the enrollment type
# to "audit".
try:
enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
if enroll_mode:
CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
except Exception: # pylint: disable=broad-except
return HttpResponseBadRequest(_("Could not enroll"))
# If we have more than one course mode or professional ed is enabled,
# then send the user to the choose your track page.
# (In the case of no-id-professional/professional ed, this will redirect to a page that
# funnels users directly into the verification / payment flow)
if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
return HttpResponse(
reverse("course_modes_choose", kwargs={'course_id': text_type(course_id)})
)
# Otherwise, there is only one mode available (the default)
return HttpResponse()
elif action == "unenroll":
enrollment = CourseEnrollment.get_enrollment(user, course_id)
if not enrollment:
return HttpResponseBadRequest(_("You are not enrolled in this course"))
certificate_info = cert_info(user, enrollment.course_overview)
if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
CourseEnrollment.unenroll(user, course_id)
REFUND_ORDER.send(sender=None, course_enrollment=enrollment)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""
    JSON call to change a profile setting. Currently only location is supported.
"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
Handler that saves the user Signup Source when the user is created
"""
if 'created' in kwargs and kwargs['created']:
site = configuration_helpers.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
@ensure_csrf_cookie
def activate_account(request, key):
"""
When link in activation e-mail is clicked
"""
# If request is in Studio call the appropriate view
if theming_helpers.get_project_root_name().lower() == u'cms':
return activate_account_studio(request, key)
try:
registration = Registration.objects.get(activation_key=key)
except (Registration.DoesNotExist, Registration.MultipleObjectsReturned):
messages.error(
request,
HTML(_(
'{html_start}Your account could not be activated{html_end}'
'Something went wrong, please <a href="{support_url}">contact support</a> to resolve this issue.'
)).format(
support_url=configuration_helpers.get_value('SUPPORT_SITE_LINK', settings.SUPPORT_SITE_LINK),
html_start=HTML('<p class="message-title">'),
html_end=HTML('</p>'),
),
extra_tags='account-activation aa-icon'
)
else:
if registration.user.is_active:
messages.info(
request,
HTML(_('{html_start}This account has already been activated.{html_end}')).format(
html_start=HTML('<p class="message-title">'),
html_end=HTML('</p>'),
),
extra_tags='account-activation aa-icon',
)
elif waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
messages.error(
request,
HTML(u'{html_start}{message}{html_end}').format(
message=Text(SYSTEM_MAINTENANCE_MSG),
html_start=HTML('<p class="message-title">'),
html_end=HTML('</p>'),
),
extra_tags='account-activation aa-icon',
)
else:
registration.activate()
# Success message for logged in users.
message = _('{html_start}Success{html_end} You have activated your account.')
if not request.user.is_authenticated:
# Success message for logged out users
message = _(
'{html_start}Success! You have activated your account.{html_end}'
'You will now receive email updates and alerts from us related to'
' the courses you are enrolled in. Sign In to continue.'
)
# Add message for later use.
messages.success(
request,
HTML(message).format(
html_start=HTML('<p class="message-title">'),
html_end=HTML('</p>'),
),
extra_tags='account-activation aa-icon',
)
return redirect('dashboard')
@ensure_csrf_cookie
def activate_account_studio(request, key):
"""
When link in activation e-mail is clicked and the link belongs to studio.
"""
try:
registration = Registration.objects.get(activation_key=key)
except (Registration.DoesNotExist, Registration.MultipleObjectsReturned):
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
else:
user_logged_in = request.user.is_authenticated
already_active = True
if not registration.user.is_active:
if waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
return render_to_response('registration/activation_invalid.html',
{'csrf': csrf(request)['csrf_token']})
registration.activate()
already_active = False
return render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
@require_http_methods(['POST'])
def password_change_request_handler(request):
"""Handle password change requests originating from the account page.
Uses the Account API to email the user a link to the password reset page.
Note:
The next step in the password reset process (confirmation) is currently handled
by student.views.password_reset_confirm_wrapper, a custom wrapper around Django's
password reset confirmation view.
Args:
request (HttpRequest)
Returns:
HttpResponse: 200 if the email was sent successfully
HttpResponse: 400 if there is no 'email' POST parameter
HttpResponse: 403 if the client has been rate limited
HttpResponse: 405 if using an unsupported HTTP method
Example usage:
POST /account/password
"""
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Password reset rate limit exceeded")
return HttpResponseForbidden()
user = request.user
# Prefer logged-in user's email
email = user.email if user.is_authenticated else request.POST.get('email')
if email:
try:
from openedx.core.djangoapps.user_api.accounts.api import request_password_change
request_password_change(email, request.is_secure())
user = user if user.is_authenticated else User.objects.get(email=email)
destroy_oauth_tokens(user)
except UserNotFound:
AUDIT_LOG.info("Invalid password reset attempt")
# Increment the rate limit counter
limiter.tick_bad_request_counter(request)
# If enabled, send an email saying that a password reset was attempted, but that there is
# no user associated with the email
if configuration_helpers.get_value('ENABLE_PASSWORD_RESET_FAILURE_EMAIL',
settings.FEATURES['ENABLE_PASSWORD_RESET_FAILURE_EMAIL']):
site = get_current_site()
message_context = get_base_template_context(site)
message_context.update({
'failed': True,
'request': request, # Used by google_analytics_tracking_pixel
'email_address': email,
})
msg = PasswordReset().personalize(
recipient=Recipient(username='', email_address=email),
language=settings.LANGUAGE_CODE,
user_context=message_context,
)
ace.send(msg)
except UserAPIInternalError as err:
            log.exception('Error occurred during password change for user {email}: {error}'
.format(email=email, error=err))
return HttpResponse(_("Some error occured during password change. Please try again"), status=500)
return HttpResponse(status=200)
else:
return HttpResponseBadRequest(_("No email address provided."))
@csrf_exempt
@require_POST
def password_reset(request):
"""
Attempts to send a password reset e-mail.
"""
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
request=request)
# When password change is complete, a "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
destroy_oauth_tokens(request.user)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def uidb36_to_uidb64(uidb36):
"""
Needed to support old password reset URLs that use base36-encoded user IDs
https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
Args:
uidb36: base36-encoded user ID
Returns: base64-encoded user ID. Otherwise returns a dummy, invalid ID
"""
try:
uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
except ValueError:
uidb64 = '1' # dummy invalid ID (incorrect padding for base64)
return uidb64
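# Illustrative example (the concrete values are assumptions): for user id 3,
# uidb36_to_uidb64('3') base64-encodes b'3' and returns 'Mw', while an unparsable
# value such as 'not*base36' hits the ValueError branch and yields the dummy '1'.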
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
"""
A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
We also optionally do some additional password policy checks.
"""
# convert old-style base36-encoded user id to base64
uidb64 = uidb36_to_uidb64(uidb36)
platform_name = {
"platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
}
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
except (ValueError, User.DoesNotExist):
# if there's any error getting a user, just let django's
# password_reset_confirm function handle it.
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
if UserRetirementRequest.has_user_requested_retirement(user):
# Refuse to reset the password of any user that has requested retirement.
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': _('Error in resetting your password.'),
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
if waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
context = {
'validlink': False,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': SYSTEM_MAINTENANCE_MSG,
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
if request.method == 'POST':
if PASSWORD_UNICODE_NORMALIZE_FLAG.is_enabled():
# We have to make a copy of request.POST because it is a QueryDict object which is immutable until copied.
# We have to use request.POST because the password_reset_confirm method takes in the request and a user's
# password is set to the request.POST['new_password1'] field. We have to also normalize the new_password2
# field so it passes the equivalence check that new_password1 == new_password2
# In order to switch out of having to do this copy, we would want to move the normalize_password code into
# a custom User model's set_password method to ensure it is always happening upon calling set_password.
request.POST = request.POST.copy()
request.POST['new_password1'] = normalize_password(request.POST['new_password1'])
request.POST['new_password2'] = normalize_password(request.POST['new_password2'])
password = request.POST['new_password1']
try:
validate_password(password, user=user)
except ValidationError as err:
# We have a password reset attempt which violates some security
# policy, or any other validation. Use the existing Django template to communicate that
# back to the user.
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': ' '.join(err.messages),
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
# remember what the old password hash is before we call down
old_password_hash = user.password
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
        # If the password reset was unsuccessful, a template response is returned (status_code 200).
        # If the form is invalid, show an error to the user.
        # Note that a successful password reset returns a redirect response (status_code 302).
if response.status_code == 200:
form_valid = response.context_data['form'].is_valid() if response.context_data['form'] else False
if not form_valid:
log.warning(
u'Unable to reset password for user [%s] because form is not valid. '
u'A possible cause is that the user had an invalid reset token',
user.username,
)
response.context_data['err_msg'] = _('Error in resetting your password. Please try again.')
return response
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
else:
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
response_was_successful = response.context_data.get('validlink')
if response_was_successful and not user.is_active:
user.is_active = True
user.save()
return response
def validate_new_email(user, new_email):
"""
    Given a new email for a user, does some basic verification of the new address. If any issues are
    encountered with verification, a ValueError will be thrown.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
to the new address. If any issues are encountered with verification or sending the message, a ValueError will
be thrown.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
# if activation_key is not passing as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
use_https = theming_helpers.get_current_request().is_secure()
site = Site.objects.get_current()
message_context = get_base_template_context(site)
message_context.update({
'old_email': user.email,
'new_email': pec.new_email,
'confirm_link': '{protocol}://{site}{link}'.format(
protocol='https' if use_https else 'http',
site=configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME),
link=reverse('confirm_email_change', kwargs={
'key': pec.activation_key,
}),
),
})
msg = EmailChange().personalize(
recipient=Recipient(user.username, pec.new_email),
language=preferences_api.get_user_preference(user, LANGUAGE_KEY),
user_context=message_context,
)
try:
ace.send(msg)
except Exception:
from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
# When the email address change is complete, a "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": message_context['old_email'],
"new": message_context['new_email'],
"user_id": user.id,
}
)
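# Illustrative sketch (an assumption, not part of this module): callers are expected
# to surface the ValueError raised by validate_new_email/do_email_change_request as
# a user-facing error, e.g.
#
#     try:
#         validate_new_email(request.user, new_email)
#         do_email_change_request(request.user, new_email)
#     except ValueError as err:
#         return JsonResponse({'success': False, 'error': text_type(err)}, status=400)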
@ensure_csrf_cookie
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update the address to the new one.
"""
if waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
return render_to_response('email_change_failed.html', {'err_msg': SYSTEM_MAINTENANCE_MSG})
with transaction.atomic():
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.set_rollback(True)
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.set_rollback(True)
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(
subject,
message,
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.set_rollback(True)
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(
subject,
message,
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.set_rollback(True)
return response
response = render_to_response("email_change_successful.html", address_context)
return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""
Modify logged-in user's setting for receiving emails from a course.
"""
user = request.user
course_id = request.POST.get("course_id")
course_key = CourseKey.from_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "yes", "course": course_id},
page='dashboard',
)
else:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "no", "course": course_id},
page='dashboard',
)
return JsonResponse({"success": True})
@ensure_csrf_cookie
def text_me_the_app(request):
"""
Text me the app view.
"""
text_me_fragment = TextMeTheAppFragmentView().render_to_fragment(request)
context = {
'nav_hidden': True,
'show_dashboard_tabs': True,
'show_program_listing': ProgramsApiConfig.is_enabled(),
'fragment': text_me_fragment
}
return render_to_response('text-me-the-app.html', context)
| teltek/edx-platform | common/djangoapps/student/views/management.py | Python | agpl-3.0 | 44,012 | 0.002545 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ConfigParser import ConfigParser
CONFIG_FILE = 'config.conf'
config = ConfigParser()
config.read(CONFIG_FILE)
print config.get('DEMO', 'STR_VAL')
print config.getint('DEMO', 'INT_VAL')
print config.getfloat('DEMO', 'FLOAT_VAL')
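# A minimal sketch of the config.conf this script assumes; the [DEMO] section
# and key names come from the reads above, the values are placeholders only:
#
#   [DEMO]
#   STR_VAL = hello
#   INT_VAL = 42
#   FLOAT_VAL = 3.14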
| ASMlover/study | python/src/config_parser.py | Python | bsd-2-clause | 1,635 | 0 |
"""
Create python docs for dash easily.
"""
__version__ = '0.2.1'
__author__ = 'whtsky'
__license__ = 'MIT'
| whtsky/Dash.py | dash_py/__init__.py | Python | mit | 109 | 0 |
# -*- coding: utf-8 -*-
"""
##########
# Fields #
##########
Each Field class does some sort of validation. Each Field has a clean() method,
which either raises django.forms.ValidationError or returns the "clean"
data -- usually a Unicode object, but, in some rare cases, a list.
Each Field's __init__() takes at least these parameters:
required -- Boolean that specifies whether the field is required.
True by default.
widget -- A Widget class, or instance of a Widget class, that should be
used for this Field when displaying it. Each Field has a default
Widget that it'll use if you don't specify this. In most cases,
the default widget is TextInput.
label -- A verbose name for this field, for use in displaying this field in
a form. By default, Django will use a "pretty" version of the form
field name, if the Field is part of a Form.
initial -- A value to use in this Field's initial display. This value is
*not* used as a fallback if data isn't given.
Other than that, the Field subclasses have class-specific options for
__init__(). For example, CharField has a max_length option.
"""
from __future__ import unicode_literals
import datetime
import os
import pickle
import re
import uuid
from decimal import Decimal
from unittest import skipIf
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, ComboField, DateField, DateTimeField,
DecimalField, DurationField, EmailField, Field, FileField, FilePathField,
FloatField, Form, GenericIPAddressField, HiddenInput, ImageField,
IntegerField, MultipleChoiceField, NullBooleanField, NumberInput,
PasswordInput, RadioSelect, RegexField, SlugField, SplitDateTimeField,
Textarea, TextInput, TimeField, TypedChoiceField, TypedMultipleChoiceField,
URLField, UUIDField, ValidationError, Widget, forms,
)
from django.test import SimpleTestCase, ignore_warnings
from django.utils import formats, six, translation
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.duration import duration_string
try:
from PIL import Image
except ImportError:
Image = None
def fix_os_paths(x):
if isinstance(x, six.string_types):
return x.replace('\\', '/')
elif isinstance(x, tuple):
return tuple(fix_os_paths(list(x)))
elif isinstance(x, list):
return [fix_os_paths(y) for y in x]
else:
return x
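# A minimal sketch of the clean() contract described in the module docstring:
# clean() returns the cleaned value for valid input and raises ValidationError
# otherwise.  Only names imported above are used; the helper itself is
# illustrative and is not exercised by the tests below.
def _sketch_field_clean_contract():
    f = CharField(max_length=3, required=True)
    assert f.clean('abc') == 'abc'  # valid input comes back cleaned
    try:
        f.clean('')  # empty input on a required field
    except ValidationError as e:
        assert e.messages == ['This field is required.']
    else:
        raise AssertionError('required CharField accepted empty input')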
class FieldsTests(SimpleTestCase):
def assertWidgetRendersTo(self, field, to):
class _Form(Form):
f = field
self.assertHTMLEqual(str(_Form()['f']), to)
def test_field_sets_widget_is_required(self):
self.assertTrue(Field(required=True).widget.is_required)
self.assertFalse(Field(required=False).widget.is_required)
def test_cooperative_multiple_inheritance(self):
class A(object):
def __init__(self):
self.class_a_var = True
super(A, self).__init__()
class ComplexField(Field, A):
def __init__(self):
super(ComplexField, self).__init__()
f = ComplexField()
self.assertTrue(f.class_a_var)
# CharField ###################################################################
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '1234567890a')
self.assertEqual(f.max_length, 10)
self.assertEqual(f.min_length, None)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Ensure that setting min_length or max_length to something that is not a
        number raises an exception.
"""
self.assertRaises(ValueError, CharField, min_length='a')
self.assertRaises(ValueError, CharField, max_length='a')
self.assertRaises(ValueError, CharField, 'a')
def test_charfield_widget_attrs(self):
"""
Ensure that CharField.widget_attrs() always returns a dictionary.
Refs #15912
"""
# Return an empty dictionary if max_length is None
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Otherwise, return a maxlength attribute equal to max_length
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
# IntegerField ################################################################
def test_integerfield_1(self):
f = IntegerField()
self.assertWidgetRendersTo(f, '<input type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean('1'))
self.assertEqual(True, isinstance(f.clean('1'), int))
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(42, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 3.14)
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_2(self):
f = IntegerField(required=False)
self.assertEqual(None, f.clean(''))
self.assertEqual('None', repr(f.clean('')))
self.assertEqual(None, f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(1, f.clean('1'))
self.assertEqual(True, isinstance(f.clean('1'), int))
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_3(self):
f = IntegerField(max_value=10)
self.assertWidgetRendersTo(f, '<input max="10" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean(1))
self.assertEqual(10, f.clean(10))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, 11)
self.assertEqual(10, f.clean('10'))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, '11')
self.assertEqual(f.max_value, 10)
self.assertEqual(f.min_value, None)
def test_integerfield_4(self):
f = IntegerField(min_value=10)
self.assertWidgetRendersTo(f, '<input id="id_f" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, 10)
def test_integerfield_5(self):
f = IntegerField(min_value=10, max_value=20)
self.assertWidgetRendersTo(f, '<input id="id_f" max="20" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(20, f.clean(20))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 20.'", f.clean, 21)
self.assertEqual(f.max_value, 20)
self.assertEqual(f.min_value, 10)
def test_integerfield_localized(self):
"""
Make sure localized IntegerField's widget renders to a text input with
no number input specific attributes.
"""
f1 = IntegerField(localize=True)
self.assertWidgetRendersTo(f1, '<input id="id_f" name="f" type="text" />')
def test_integerfield_subclass(self):
"""
Test that class-defined widget is not overwritten by __init__ (#22245).
"""
class MyIntegerField(IntegerField):
widget = Textarea
f = MyIntegerField()
self.assertEqual(f.widget.__class__, Textarea)
f = MyIntegerField(localize=True)
self.assertEqual(f.widget.__class__, Textarea)
# FloatField ##################################################################
def test_floatfield_1(self):
f = FloatField()
self.assertWidgetRendersTo(f, '<input step="any" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1.0, f.clean('1'))
self.assertEqual(True, isinstance(f.clean('1'), float))
self.assertEqual(23.0, f.clean('23'))
self.assertEqual(3.1400000000000001, f.clean('3.14'))
self.assertEqual(3.1400000000000001, f.clean(3.14))
self.assertEqual(42.0, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertEqual(1.0, f.clean('1.0 '))
self.assertEqual(1.0, f.clean(' 1.0'))
self.assertEqual(1.0, f.clean(' 1.0 '))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Infinity')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
def test_floatfield_2(self):
f = FloatField(required=False)
self.assertEqual(None, f.clean(''))
self.assertEqual(None, f.clean(None))
self.assertEqual(1.0, f.clean('1'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_floatfield_3(self):
f = FloatField(max_value=1.5, min_value=0.5)
self.assertWidgetRendersTo(f, '<input step="any" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(1.5, f.clean('1.5'))
self.assertEqual(0.5, f.clean('0.5'))
self.assertEqual(f.max_value, 1.5)
self.assertEqual(f.min_value, 0.5)
def test_floatfield_widget_attrs(self):
f = FloatField(widget=NumberInput(attrs={'step': 0.01, 'max': 1.0, 'min': 0.0}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.0" max="1.0" type="number" id="id_f" />')
def test_floatfield_localized(self):
"""
Make sure localized FloatField's widget renders to a text input with
no number input specific attributes.
"""
f = FloatField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_floatfield_changed(self):
f = FloatField()
n = 4.35
self.assertFalse(f.has_changed(n, '4.3500'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = FloatField(localize=True)
localized_n = formats.localize_input(n) # -> '4,35' in French
self.assertFalse(f.has_changed(n, localized_n))
# DecimalField ################################################################
def test_decimalfield_1(self):
f = DecimalField(max_digits=4, decimal_places=2)
self.assertWidgetRendersTo(f, '<input id="id_f" step="0.01" type="number" name="f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertEqual(True, isinstance(f.clean('1'), Decimal))
self.assertEqual(f.clean('23'), Decimal("23"))
self.assertEqual(f.clean('3.14'), Decimal("3.14"))
self.assertEqual(f.clean(3.14), Decimal("3.14"))
self.assertEqual(f.clean(Decimal('3.14')), Decimal("3.14"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'łąść')
self.assertEqual(f.clean('1.0 '), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0'), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0 '), Decimal("1.0"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '123.45')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '1.234')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 digits before the decimal point.'", f.clean, '123.4')
self.assertEqual(f.clean('-12.34'), Decimal("-12.34"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-123.45')
self.assertEqual(f.clean('-.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-00.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-000.12'), Decimal("-0.12"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '-000.123')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-000.12345')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '--0.12')
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_2(self):
f = DecimalField(max_digits=4, decimal_places=2, required=False)
self.assertEqual(None, f.clean(''))
self.assertEqual(None, f.clean(None))
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_3(self):
f = DecimalField(max_digits=4, decimal_places=2, max_value=Decimal('1.5'), min_value=Decimal('0.5'))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(f.clean('1.5'), Decimal("1.5"))
self.assertEqual(f.clean('0.5'), Decimal("0.5"))
self.assertEqual(f.clean('.5'), Decimal("0.5"))
self.assertEqual(f.clean('00.50'), Decimal("0.50"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, Decimal('1.5'))
self.assertEqual(f.min_value, Decimal('0.5'))
def test_decimalfield_4(self):
f = DecimalField(decimal_places=2)
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '0.00000001')
def test_decimalfield_5(self):
f = DecimalField(max_digits=3)
# Leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('0000000.10'), Decimal("0.1"))
# But a leading 0 before the . doesn't count towards max_digits
self.assertEqual(f.clean('0000000.100'), Decimal("0.100"))
# Only leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('000000.02'), Decimal('0.02'))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 3 digits in total.'", f.clean, '000000.0002')
self.assertEqual(f.clean('.002'), Decimal("0.002"))
def test_decimalfield_6(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('.01'), Decimal(".01"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 0 digits before the decimal point.'", f.clean, '1.1')
def test_decimalfield_scientific(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('1E+2'), Decimal('1E+2'))
self.assertEqual(f.clean('1e+2'), Decimal('1E+2'))
with self.assertRaisesMessage(ValidationError, "Ensure that there are no more"):
f.clean('0.546e+2')
def test_decimalfield_widget_attrs(self):
f = DecimalField(max_digits=6, decimal_places=2)
self.assertEqual(f.widget_attrs(Widget()), {})
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '0.01'})
f = DecimalField(max_digits=10, decimal_places=0)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1'})
f = DecimalField(max_digits=19, decimal_places=19)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1e-19'})
f = DecimalField(max_digits=20)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': 'any'})
f = DecimalField(max_digits=6, widget=NumberInput(attrs={'step': '0.01'}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" type="number" id="id_f" />')
def test_decimalfield_localized(self):
"""
Make sure localized DecimalField's widget renders to a text input with
no number input specific attributes.
"""
f = DecimalField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_decimalfield_changed(self):
f = DecimalField(max_digits=2, decimal_places=2)
d = Decimal("0.1")
self.assertFalse(f.has_changed(d, '0.10'))
self.assertTrue(f.has_changed(d, '0.101'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = DecimalField(max_digits=2, decimal_places=2, localize=True)
localized_d = formats.localize_input(d) # -> '0,1' in French
self.assertFalse(f.has_changed(d, localized_d))
# DateField ###################################################################
def test_datefield_1(self):
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006-10-25'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/06'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('Oct 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25, 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October, 2006'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-4-31')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '200a-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '25/10/06')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertEqual(None, f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(None, f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datefield_3(self):
f = DateField(input_formats=['%Y %m %d'])
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006 10 25'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/2006')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/06')
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/06 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' Oct 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25, 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 25 October 2006 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ' ')
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, 'a\x00b')
@ignore_warnings(category=RemovedInDjango20Warning) # for _has_changed
def test_datefield_changed(self):
format = '%d/%m/%Y'
f = DateField(input_formats=[format])
d = datetime.date(2007, 9, 17)
self.assertFalse(f.has_changed(d, '17/09/2007'))
# Test for deprecated behavior _has_changed
self.assertFalse(f._has_changed(d, '17/09/2007'))
def test_datefield_strptime(self):
"""Test that field.strptime doesn't raise an UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime('31 мая 2011', '%d-%b-%y')
except Exception as e:
# assertIsInstance or assertRaises cannot be used because UnicodeEncodeError
# is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
# TimeField ###################################################################
def test_timefield_1(self):
f = TimeField()
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(14, 25), f.clean('14:25'))
self.assertEqual(datetime.time(14, 25, 59), f.clean('14:25:59'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '1:24 p.m.')
def test_timefield_2(self):
f = TimeField(input_formats=['%I:%M %p'])
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(4, 25), f.clean('4:25 AM'))
self.assertEqual(datetime.time(16, 25), f.clean('4:25 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '14:30:45')
def test_timefield_3(self):
f = TimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.time(14, 25), f.clean(' 14:25 '))
self.assertEqual(datetime.time(14, 25, 59), f.clean(' 14:25:59 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ' ')
def test_timefield_changed(self):
t1 = datetime.time(12, 51, 34, 482548)
t2 = datetime.time(12, 51)
f = TimeField(input_formats=['%H:%M', '%H:%M %p'])
self.assertTrue(f.has_changed(t1, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51 PM'))
# DateTimeField ###############################################################
def test_datetimefield_1(self):
f = DateTimeField()
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.0002'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('2006-10-25 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('2006-10-25'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/2006 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/2006 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/2006'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/06 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/06 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/06'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 4:30 p.m.')
def test_datetimefield_2(self):
f = DateTimeField(input_formats=['%Y %m %d %I:%M %p'])
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006 10 25 2:30 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 14:30:45')
def test_datetimefield_3(self):
f = DateTimeField(required=False)
self.assertEqual(None, f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(None, f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datetimefield_4(self):
f = DateTimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 2006-10-25 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 2006-10-25 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/2006 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(' 10/25/2006 14:30 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/06 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/06 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, ' ')
def test_datetimefield_5(self):
f = DateTimeField(input_formats=['%Y.%m.%d %H:%M:%S.%f'])
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006.10.25 14:30:45.0002'))
def test_datetimefield_changed(self):
format = '%Y %m %d %I:%M %p'
f = DateTimeField(input_formats=[format])
d = datetime.datetime(2006, 9, 17, 14, 30, 0)
self.assertFalse(f.has_changed(d, '2006 09 17 2:30 PM'))
# DurationField ###########################################################
def test_durationfield_1(self):
f = DurationField()
self.assertEqual(datetime.timedelta(seconds=30), f.clean('30'))
self.assertEqual(
datetime.timedelta(minutes=15, seconds=30),
f.clean('15:30')
)
self.assertEqual(
datetime.timedelta(hours=1, minutes=15, seconds=30),
f.clean('1:15:30')
)
self.assertEqual(
datetime.timedelta(
days=1, hours=1, minutes=15, seconds=30, milliseconds=300),
f.clean('1 1:15:30.3')
)
def test_durationfield_2(self):
class DurationForm(Form):
duration = DurationField(initial=datetime.timedelta(hours=1))
f = DurationForm()
self.assertHTMLEqual(
'<input id="id_duration" type="text" name="duration" value="01:00:00">',
str(f['duration'])
)
def test_durationfield_prepare_value(self):
field = DurationField()
td = datetime.timedelta(minutes=15, seconds=30)
self.assertEqual(field.prepare_value(td), duration_string(td))
self.assertEqual(field.prepare_value('arbitrary'), 'arbitrary')
self.assertIsNone(field.prepare_value(None))
# RegexField ##################################################################
def test_regexfield_1(self):
f = RegexField('^[0-9][A-F][0-9]$')
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_regexfield_2(self):
f = RegexField('^[0-9][A-F][0-9]$', required=False)
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertEqual('', f.clean(''))
def test_regexfield_3(self):
f = RegexField(re.compile('^[0-9][A-F][0-9]$'))
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
@ignore_warnings(category=RemovedInDjango20Warning) # error_message deprecation
def test_regexfield_4(self):
f = RegexField('^[0-9][0-9][0-9][0-9]$', error_message='Enter a four-digit number.')
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, '123')
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, 'abcd')
def test_regexfield_5(self):
f = RegexField('^[0-9]+$', min_length=5, max_length=10)
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 5 characters (it has 3).'", f.clean, '123')
six.assertRaisesRegex(self, ValidationError, "'Ensure this value has at least 5 characters \(it has 3\)\.', u?'Enter a valid value\.'", f.clean, 'abc')
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '12345678901')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '12345a')
def test_regexfield_6(self):
"""
Ensure that it works with unicode characters.
Refs #.
"""
f = RegexField('^\w+$')
self.assertEqual('éèøçÎÎ你好', f.clean('éèøçÎÎ你好'))
def test_change_regex_after_init(self):
f = RegexField('^[a-z]+$')
f.regex = '^[0-9]+$'
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, 'abcd')
# EmailField ##################################################################
# See also validators tests for validate_email specific tests
def test_emailfield_1(self):
f = EmailField()
self.assertWidgetRendersTo(f, '<input type="email" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('person@example.com', f.clean('person@example.com'))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
self.assertEqual('local@domain.with.idn.xyz\xe4\xf6\xfc\xdfabc.part.com',
f.clean('local@domain.with.idn.xyzäöüßabc.part.com'))
def test_email_regexp_for_performance(self):
f = EmailField()
# Check for runaway regex security problem. This will take for-freeking-ever
# if the security fix isn't in place.
addr = 'viewx3dtextx26qx3d@yahoo.comx26latlngx3d15854521645943074058'
self.assertEqual(addr, f.clean(addr))
def test_emailfield_not_required(self):
f = EmailField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('person@example.com', f.clean('person@example.com'))
self.assertEqual('example@example.com', f.clean(' example@example.com \t \t '))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
def test_emailfield_min_max_length(self):
f = EmailField(min_length=10, max_length=15)
self.assertWidgetRendersTo(f, '<input id="id_f" type="email" name="f" maxlength="15" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 9).'", f.clean, 'a@foo.com')
self.assertEqual('alf@foo.com', f.clean('alf@foo.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 15 characters (it has 20).'", f.clean, 'alf123456788@foo.com')
# FileField ##################################################################
def test_filefield_1(self):
f = FileField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '', '')
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None, '')
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''), '')
self.assertEqual('files/test3.pdf', f.clean(None, 'files/test3.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, 'some content that is not a file')
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', None))
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', b''))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8')))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'), 'files/test4.pdf')))
def test_filefield_2(self):
f = FileField(max_length=5)
self.assertRaisesMessage(ValidationError, "'Ensure this filename has at most 5 characters (it has 18).'", f.clean, SimpleUploadedFile('test_maxlength.txt', b'hello world'))
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
def test_filefield_3(self):
f = FileField(allow_empty_file=True)
self.assertEqual(SimpleUploadedFile,
type(f.clean(SimpleUploadedFile('name', b''))))
def test_filefield_changed(self):
'''
Test for the behavior of has_changed for FileField. The value of data will
more than likely come from request.FILES. The value of initial data will
likely be a filename stored in the database. Since its value is of no use to
a FileField it is ignored.
'''
f = FileField()
# No file was uploaded and no initial data.
self.assertFalse(f.has_changed('', None))
# A file was uploaded and no initial data.
self.assertTrue(f.has_changed('', {'filename': 'resume.txt', 'content': 'My resume'}))
# A file was not uploaded, but there is initial data
self.assertFalse(f.has_changed('resume.txt', None))
# A file was uploaded and there is initial data (file identity is not dealt
# with here)
self.assertTrue(f.has_changed('resume.txt', {'filename': 'resume.txt', 'content': 'My resume'}))
# ImageField ##################################################################
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_image_after_clean(self):
f = ImageField()
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.png'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.png', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('PNG', uploaded_file.image.format)
self.assertEqual('image/png', uploaded_file.content_type)
# URLField ##################################################################
def test_urlfield_1(self):
f = URLField()
self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('http://localhost', f.clean('http://localhost'))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com.', f.clean('http://example.com.'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertEqual('http://www.example.com:8000/test', f.clean('http://www.example.com:8000/test'))
self.assertEqual('http://valid-with-hyphens.com', f.clean('valid-with-hyphens.com'))
self.assertEqual('http://subdomain.domain.com', f.clean('subdomain.domain.com'))
self.assertEqual('http://200.8.9.10', f.clean('http://200.8.9.10'))
self.assertEqual('http://200.8.9.10:8000/test', f.clean('http://200.8.9.10:8000/test'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'com.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://invalid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://-invalid.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.alid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.-alid.com')
self.assertEqual('http://valid-----hyphens.com', f.clean('http://valid-----hyphens.com'))
self.assertEqual('http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah', f.clean('http://some.idn.xyzäöüßabc.domain.com:123/blah'))
self.assertEqual('http://www.example.com/s/http://code.djangoproject.com/ticket/13804', f.clean('www.example.com/s/http://code.djangoproject.com/ticket/13804'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '[a')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://[a')
def test_url_regex_ticket11198(self):
f = URLField()
# hangs "forever" if catastrophic backtracking in ticket:#11198 not fixed
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 200,))
# a second test, to make sure the problem is really addressed, even on
# domains that don't fail the domain label length check in the regex
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 60,))
def test_urlfield_2(self):
f = URLField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
def test_urlfield_5(self):
f = URLField(min_length=15, max_length=20)
self.assertWidgetRendersTo(f, '<input id="id_f" type="url" name="f" maxlength="20" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 15 characters (it has 12).'", f.clean, 'http://f.com')
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 37).'", f.clean, 'http://abcdefghijklmnopqrstuvwxyz.com')
def test_urlfield_6(self):
f = URLField(required=False)
self.assertEqual('http://example.com', f.clean('example.com'))
self.assertEqual('', f.clean(''))
self.assertEqual('https://example.com', f.clean('https://example.com'))
def test_urlfield_7(self):
f = URLField()
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com/test', f.clean('http://example.com/test'))
self.assertEqual('http://example.com?some_param=some_value',
f.clean('http://example.com?some_param=some_value'))
def test_urlfield_9(self):
f = URLField()
urls = (
'http://עברית.idn.icann.org/',
'http://sãopaulo.com/',
'http://sãopaulo.com.br/',
'http://пример.испытание/',
'http://مثال.إختبار/',
'http://例子.测试/',
'http://例子.測試/',
'http://उदाहरण.परीक्षा/',
'http://例え.テスト/',
'http://مثال.آزمایشی/',
'http://실례.테스트/',
'http://العربية.idn.icann.org/',
)
for url in urls:
# Valid IDN
self.assertEqual(url, f.clean(url))
def test_urlfield_10(self):
"""Test URLField correctly validates IPv6 (#18779)."""
f = URLField()
urls = (
'http://[12:34::3a53]/',
'http://[a34:9238::]:8080/',
)
for url in urls:
self.assertEqual(url, f.clean(url))
def test_urlfield_not_string(self):
f = URLField(required=False)
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 23)
def test_urlfield_normalization(self):
f = URLField()
self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/')
# BooleanField ################################################################
def test_booleanfield_1(self):
f = BooleanField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(True, f.clean(True))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, False)
self.assertEqual(True, f.clean(1))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 0)
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(True, f.clean('True'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 'False')
def test_booleanfield_2(self):
f = BooleanField(required=False)
self.assertEqual(False, f.clean(''))
self.assertEqual(False, f.clean(None))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertEqual(True, f.clean(1))
self.assertEqual(False, f.clean(0))
self.assertEqual(True, f.clean('1'))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(False, f.clean('False'))
self.assertEqual(False, f.clean('false'))
self.assertEqual(False, f.clean('FaLsE'))
def test_boolean_picklable(self):
self.assertIsInstance(pickle.loads(pickle.dumps(BooleanField())), BooleanField)
def test_booleanfield_changed(self):
f = BooleanField()
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed('', None))
self.assertFalse(f.has_changed('', ''))
self.assertTrue(f.has_changed(False, 'on'))
self.assertFalse(f.has_changed(True, 'on'))
self.assertTrue(f.has_changed(True, ''))
# Initial value may have mutated to a string due to show_hidden_initial (#19537)
self.assertTrue(f.has_changed('False', 'on'))
# ChoiceField #################################################################
def test_choicefield_1(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_2(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_3(self):
f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')])
self.assertEqual('J', f.clean('J'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. John is not one of the available choices.'", f.clean, 'John')
def test_choicefield_4(self):
f = ChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')])
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertEqual('3', f.clean(3))
self.assertEqual('3', f.clean('3'))
self.assertEqual('5', f.clean(5))
self.assertEqual('5', f.clean('5'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, '6')
def test_choicefield_callable(self):
choices = lambda: [('J', 'John'), ('P', 'Paul')]
f = ChoiceField(choices=choices)
self.assertEqual('J', f.clean('J'))
def test_choicefield_callable_may_evaluate_to_different_values(self):
choices = []
def choices_as_callable():
return choices
class ChoiceFieldForm(Form):
choicefield = ChoiceField(choices=choices_as_callable)
choices = [('J', 'John')]
form = ChoiceFieldForm()
self.assertEqual([('J', 'John')], list(form.fields['choicefield'].choices))
choices = [('P', 'Paul')]
form = ChoiceFieldForm()
self.assertEqual([('P', 'Paul')], list(form.fields['choicefield'].choices))
# TypedChoiceField ############################################################
# TypedChoiceField is just like ChoiceField, except that coerced types will
# be returned:
def test_typedchoicefield_1(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual(1, f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, '2')
def test_typedchoicefield_2(self):
# Different coercion, same validation.
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual(1.0, f.clean('1'))
def test_typedchoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual(True, f.clean('-1'))
def test_typedchoicefield_4(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, 'B')
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_typedchoicefield_5(self):
# Non-required fields aren't required
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual('', f.clean(''))
# If you want cleaning an empty value to return a different type, tell the field
def test_typedchoicefield_6(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertEqual(None, f.clean(''))
def test_typedchoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed(1, '1'))
def test_typedchoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedChoiceField(choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual(Decimal('1.2'), f.clean('2'))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, '3')
# NullBooleanField ############################################################
def test_nullbooleanfield_1(self):
f = NullBooleanField()
self.assertEqual(None, f.clean(''))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertEqual(None, f.clean(None))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('1'))
self.assertEqual(None, f.clean('2'))
self.assertEqual(None, f.clean('3'))
self.assertEqual(None, f.clean('hello'))
self.assertEqual(True, f.clean('true'))
self.assertEqual(False, f.clean('false'))
def test_nullbooleanfield_2(self):
# Make sure that the internal value is preserved if using HiddenInput (#7753)
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm()
self.assertHTMLEqual('<input type="hidden" name="hidden_nullbool1" value="True" id="id_hidden_nullbool1" /><input type="hidden" name="hidden_nullbool2" value="False" id="id_hidden_nullbool2" />', str(f))
def test_nullbooleanfield_3(self):
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm({'hidden_nullbool1': 'True', 'hidden_nullbool2': 'False'})
self.assertEqual(None, f.full_clean())
self.assertEqual(True, f.cleaned_data['hidden_nullbool1'])
self.assertEqual(False, f.cleaned_data['hidden_nullbool2'])
def test_nullbooleanfield_4(self):
# Make sure we're compatible with MySQL, which uses 0 and 1 for its boolean
# values. (#9609)
NULLBOOL_CHOICES = (('1', 'Yes'), ('0', 'No'), ('', 'Unknown'))
class MySQLNullBooleanForm(Form):
nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
f = MySQLNullBooleanForm({'nullbool0': '1', 'nullbool1': '0', 'nullbool2': ''})
self.assertEqual(None, f.full_clean())
self.assertEqual(True, f.cleaned_data['nullbool0'])
self.assertEqual(False, f.cleaned_data['nullbool1'])
self.assertEqual(None, f.cleaned_data['nullbool2'])
def test_nullbooleanfield_changed(self):
f = NullBooleanField()
self.assertTrue(f.has_changed(False, None))
self.assertTrue(f.has_changed(None, False))
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(False, False))
self.assertTrue(f.has_changed(True, False))
self.assertTrue(f.has_changed(True, None))
self.assertTrue(f.has_changed(True, False))
# MultipleChoiceField #########################################################
def test_multiplechoicefield_1(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ())
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_2(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual([], f.clean(''))
self.assertEqual([], f.clean(None))
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertEqual([], f.clean([]))
self.assertEqual([], f.clean(()))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_3(self):
f = MultipleChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')])
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '5'], f.clean([1, 5]))
self.assertEqual(['1', '5'], f.clean([1, '5']))
self.assertEqual(['1', '5'], f.clean(['1', 5]))
self.assertEqual(['1', '5'], f.clean(['1', '5']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['6'])
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['1', '6'])
def test_multiplechoicefield_changed(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two'), ('3', 'Three')])
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed([], None))
self.assertTrue(f.has_changed(None, ['1']))
self.assertFalse(f.has_changed([1, 2], ['1', '2']))
self.assertFalse(f.has_changed([2, 1], ['1', '2']))
self.assertTrue(f.has_changed([1, 2], ['1']))
self.assertTrue(f.has_changed([1, 2], ['1', '3']))
# TypedMultipleChoiceField ############################################################
# TypedMultipleChoiceField is just like MultipleChoiceField, except that coerced types
# will be returned:
def test_typedmultiplechoicefield_1(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1], f.clean(['1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['2'])
def test_typedmultiplechoicefield_2(self):
# Different coercion, same validation.
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual([1.0], f.clean(['1']))
def test_typedmultiplechoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual([True], f.clean(['-1']))
def test_typedmultiplechoicefield_4(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1, -1], f.clean(['1', '-1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['1', '2'])
def test_typedmultiplechoicefield_5(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedMultipleChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, ['B'])
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
def test_typedmultiplechoicefield_6(self):
# Non-required fields aren't required
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual([], f.clean([]))
def test_typedmultiplechoicefield_7(self):
# If you want cleaning an empty value to return a different type, tell the field
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertEqual(None, f.clean([]))
def test_typedmultiplechoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
def test_typedmultiplechoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedMultipleChoiceField(
choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual([Decimal('1.2')], f.clean(['2']))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, ['3'])
# ComboField ##################################################################
def test_combofield_1(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()])
self.assertEqual('test@example.com', f.clean('test@example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, 'longemailaddress@example.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_combofield_2(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()], required=False)
self.assertEqual('test@example.com', f.clean('test@example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, 'longemailaddress@example.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
# FilePathField ###############################################################
def test_filepathfield_1(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
self.assertTrue(fix_os_paths(path).endswith('/django/forms/'))
def test_filepathfield_2(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path)
f.choices = [p for p in f.choices if p[0].endswith('.py')]
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. fields.py is not one of the available choices.'", f.clean, 'fields.py')
assert fix_os_paths(f.clean(path + 'fields.py')).endswith('/django/forms/fields.py')
def test_filepathfield_3(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path, match='^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_4(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
f = FilePathField(path=path, recursive=True, match='^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/extras/__init__.py', 'extras/__init__.py'),
('/django/forms/extras/widgets.py', 'extras/widgets.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_folders(self):
path = os.path.dirname(upath(__file__)) + '/filepath_test_files/'
f = FilePathField(path=path, allow_folders=True, allow_files=False)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
f = FilePathField(path=path, allow_folders=True, allow_files=True)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/.dot-file', '.dot-file'),
('/tests/forms_tests/tests/filepath_test_files/1x1.png', '1x1.png'),
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
('/tests/forms_tests/tests/filepath_test_files/fake-image.jpg', 'fake-image.jpg'),
('/tests/forms_tests/tests/filepath_test_files/real-text-file.txt', 'real-text-file.txt'),
]
actual = fix_os_paths(f.choices)
self.assertEqual(len(expected), len(actual))
for exp, got in zip(expected, actual):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
# SplitDateTimeField ##########################################################
def test_splitdatetimefield_1(self):
from django.forms.widgets import SplitDateTimeWidget
f = SplitDateTimeField()
assert isinstance(f.widget, SplitDateTimeWidget)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(self, ValidationError, "'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
def test_splitdatetimefield_2(self):
f = SplitDateTimeField(required=False)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean(['2006-01-10', '07:30']))
self.assertEqual(None, f.clean(None))
self.assertEqual(None, f.clean(''))
self.assertEqual(None, f.clean(['']))
self.assertEqual(None, f.clean(['', '']))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(self, ValidationError, "'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', ''])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['', '07:30'])
def test_splitdatetimefield_changed(self):
f = SplitDateTimeField(input_date_formats=['%d/%m/%Y'])
self.assertFalse(f.has_changed(['11/01/2012', '09:18:15'], ['11/01/2012', '09:18:15']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['2008-05-06', '12:40:00']))
self.assertFalse(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:40']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:41']))
# GenericIPAddressField #######################################################
def test_generic_ipaddress_invalid_arguments(self):
self.assertRaises(ValueError, GenericIPAddressField, protocol="hamster")
self.assertRaises(ValueError, GenericIPAddressField, protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
# The edge cases of the IPv6 validation code are not deeply tested
# here, they are covered in the tests for django.utils.ipv6
f = GenericIPAddressField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_ipv4_only(self):
f = GenericIPAddressField(protocol="IPv4")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '256.125.1.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'fe80::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '2a02::223:6cff:fe8a:2e8a')
def test_generic_ipaddress_as_ipv6_only(self):
f = GenericIPAddressField(protocol="IPv6")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_generic_not_required(self):
f = GenericIPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_normalization(self):
# Test the normalizing code
f = GenericIPAddressField()
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' ::ffff:10.10.10.10 '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' 2001:000:a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
self.assertEqual(f.clean(' 2001::a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
f = GenericIPAddressField(unpack_ipv4=True)
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
# SlugField ###################################################################
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')
# UUIDField ###################################################################
def test_uuidfield_1(self):
field = UUIDField()
value = field.clean('550e8400e29b41d4a716446655440000')
self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_uuidfield_2(self):
field = UUIDField(required=False)
value = field.clean('')
self.assertEqual(value, None)
def test_uuidfield_3(self):
field = UUIDField()
with self.assertRaises(ValidationError) as cm:
field.clean('550e8400')
self.assertEqual(cm.exception.messages[0], 'Enter a valid UUID.')
def test_uuidfield_4(self):
field = UUIDField()
value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000'))
self.assertEqual(value, '550e8400e29b41d4a716446655440000')
| digimarc/django | tests/forms_tests/tests/test_fields.py | Python | bsd-3-clause | 83,991 | 0.005026 |
import logging
from django.db.models import DateTimeField, Model, Manager
from django.db.models.query import QuerySet
from django.db.models.fields.related import \
OneToOneField, ManyToManyField, ManyToManyRel
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.core.exceptions import ObjectDoesNotExist
LOGGER = logging.getLogger(__name__)
def _unset_related_one_to_one(obj, field):
old_value = getattr(obj, field.column)
if old_value is not None:
LOGGER.debug(
'Setting %s.%s to None on object %s (old value: %s)',
obj._meta.model.__name__, field.column, obj.pk, old_value)
# Unset the fk field (e.g. Foo.baz_id)
setattr(obj, field.column, None)
# Unset the related object field (e.g. Foo.baz)
setattr(obj, field.name, None)
def _unset_related_many_to_many(obj, field):
manager = getattr(obj, field.name)
old_values = manager.values_list('pk', flat=True)
LOGGER.debug(
'Removing all objects from %s.%s on object %s (old values: %s)',
obj._meta.model.__name__, field.name, obj.pk,
', '.join(str(pk) for pk in old_values))
manager.remove(*manager.all())
def _unset_related_objects_relations(obj):
LOGGER.debug('Soft-deleting object %s %s',
obj._meta.model.__name__, obj.pk)
for field in obj._meta.get_fields():
field_type = type(field)
if field_type is OneToOneField:
_unset_related_one_to_one(obj, field)
elif field_type in (ManyToManyRel, ManyToManyField):
_unset_related_many_to_many(obj, field)
for related in obj._meta.get_all_related_objects():
# Unset related objects' relation
rel_name = related.get_accessor_name()
if related.one_to_one:
# Handle one-to-one relations.
try:
related_object = getattr(obj, rel_name)
except ObjectDoesNotExist:
pass
else:
_unset_related_one_to_one(related_object, related.field)
related_object.save()
else:
# Handle one-to-many and many-to-many relations.
related_objects = getattr(obj, rel_name)
if related_objects.count():
affected_objects_id = ', '.join(
str(pk) for pk in related_objects.values_list(
'pk', flat=True))
old_values = ', '.join(
str(val) for val in related_objects.values_list(
related.field.name, flat=True))
LOGGER.debug(
'Setting %s.%s to None on objects %s (old values: %s)',
related_objects.model.__name__, related.field.name,
affected_objects_id, old_values)
related_objects.update(**{related.field.name: None})
class SoftDeleteQuerySet(QuerySet):
"""This QuerySet subclass implements soft deletion of objects.
"""
def delete(self):
"""Soft delete all objects included in this queryset.
"""
for obj in self:
_unset_related_objects_relations(obj)
self.update(deleted=now())
def undelete(self):
"""Soft undelete all objects included in this queryset.
"""
objects = self.filter(deleted__isnull=False)
if objects.count():
LOGGER.debug(
'Soft undeleting %s objects: %s', self.model.__name__,
', '.join(str(pk)
for pk in objects.values_list('pk', flat=True)))
objects.update(deleted=None)
class SoftDeleteManager(Manager.from_queryset(SoftDeleteQuerySet)):
"""This Manager hides soft deleted objects by default,
and exposes methods to access them.
"""
def _get_base_queryset(self):
return super(SoftDeleteManager, self).get_queryset()
def get_queryset(self):
"""Return NOT DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=True)
def deleted(self):
"""Return DELETED objects.
"""
return self._get_base_queryset().filter(deleted__isnull=False)
def with_deleted(self):
"""Return ALL objects.
"""
return self._get_base_queryset()
class SoftDeleteModel(Model):
"""Simply inherit this class to enable soft deletion on a model.
"""
class Meta:
abstract = True
objects = SoftDeleteManager()
deleted = DateTimeField(verbose_name=_('deleted'), null=True, blank=True)
def delete(self):
"""Soft delete this object.
"""
_unset_related_objects_relations(self)
self.deleted = now()
self.save()
return self
def undelete(self):
"""Undelete this soft-deleted object.
"""
if self.deleted is not None:
LOGGER.debug('Soft-undeleting object %s %s',
self._meta.model.__name__, self.pk)
self.deleted = None
self.save()
return self
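# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how SoftDeleteModel and SoftDeleteManager are meant to be
# used together. `Article` and its `title` field are hypothetical and assume a
# configured Django project with `from django.db.models import CharField`;
# only the API defined above is taken as given.
#
# class Article(SoftDeleteModel):
#     title = CharField(max_length=100)
#
# article = Article.objects.create(title='hello')
# article.delete()                         # stamps `deleted`, unsets relations
# Article.objects.count()                  # 0 -- default manager hides deleted rows
# Article.objects.deleted().count()        # 1 -- soft-deleted rows remain queryable
# Article.objects.with_deleted().count()   # 1 -- all rows, deleted or not
# article.undelete()                       # clears `deleted`; visible again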
| pmuller/django-softdeletion | django_softdeletion/models.py | Python | mit | 5,119 | 0 |
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# This script gets run on the first day of the following month, and that month's URL is
# what goes here. E.g. If this directory is the directory for February, this script gets
# run on March 1, and this URL is the URL for the March challenge page.
nextMonthURL = "https://www.reddit.com/r/pornfree/comments/ex6nis/stay_clean_february_this_thread_updated_daily/"
# If this directory is the directory for November, this script gets run on December 1,
# and currentMonthIndex gets the index of November, i.e. 11.
currentMonthIndex = datetime.date.today().month - 1
if currentMonthIndex == 0:
currentMonthIndex = 12
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateToUse():
answer = ""
answer += "The Stay Clean CURRENT_MONTH_NAME challenge is now over. Join us for **[the NEXT_MONTH_NAME challenge](NEXT_MONTH_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('NEXT_MONTH_URL', nextMonthURL, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| foobarbazblarg/stayclean | stayclean-2020-january/display-final-after-month-is-over.py | Python | mit | 3,056 | 0.004254 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
)
class JWPlatformBaseIE(InfoExtractor):
@staticmethod
def _find_jwplayer_data(webpage):
# TODO: Merge this with JWPlayer-related codes in generic.py
mobj = re.search(
            r'jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)\.setup\((?P<options>[^)]+)\)',
webpage)
if mobj:
return mobj.group('options')
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._parse_json(
self._find_jwplayer_data(webpage), video_id)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id, require_title=True, m3u8_id=None, rtmp_params=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
video_data = jwplayer_data['playlist'][0]
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
formats = []
for source in video_data['sources']:
source_url = self._proto_relative_url(source['file'])
source_type = source.get('type') or ''
if source_type in ('application/vnd.apple.mpegurl', 'hls') or determine_ext(source_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', 'm3u8_native', m3u8_id=m3u8_id, fatal=False))
elif source_type.startswith('audio'):
formats.append({
'url': source_url,
'vcodec': 'none',
})
else:
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': int_or_none(source.get('height')),
}
if source_url.startswith('rtmp'):
                    a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
self._sort_formats(formats)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if track.get('file') and track.get('kind') == 'captions':
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track['file'])
})
return {
'id': video_id,
'title': video_data['title'] if require_title else video_data.get('title'),
'description': video_data.get('description'),
'thumbnail': self._proto_relative_url(video_data.get('image')),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration')),
'subtitles': subtitles,
'formats': formats,
}
class JWPlatformIE(JWPlatformBaseIE):
_VALID_URL = r'(?:https?://content\.jwplatform\.com/(?:feeds|players|jw6)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'
_TEST = {
'url': 'http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js',
'md5': 'fa8899fa601eb7c83a64e9d568bdf325',
'info_dict': {
'id': 'nPripu9l',
'ext': 'mov',
'title': 'Big Buck Bunny Trailer',
'description': 'Big Buck Bunny is a short animated film by the Blender Institute. It is made using free and open source software.',
'upload_date': '20081127',
'timestamp': 1227796140,
}
}
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<script[^>]+?src=["\'](?P<url>(?:https?:)?//content.jwplatform.com/players/[a-zA-Z0-9]{8})',
webpage)
if mobj:
return mobj.group('url')
def _real_extract(self, url):
video_id = self._match_id(url)
json_data = self._download_json('http://content.jwplatform.com/feeds/%s.json' % video_id, video_id)
return self._parse_jwplayer_data(json_data, video_id)
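# --- Illustrative note (not part of the original module) ---
# A sketch of the two jwplayer setup() shapes handled by _parse_jwplayer_data
# above: a flattened single-item config and the canonical playlist/sources
# layout. The URLs and titles are placeholders, not real JW Platform data.
#
# flattened = {'title': 'Example', 'file': 'http://example.com/video.mp4'}
# canonical = {'playlist': [{
#     'title': 'Example',
#     'sources': [{'file': 'http://example.com/video.mp4', 'type': 'video/mp4'}],
# }]}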
| dntt1/youtube-dl | youtube_dl/extractor/jwplatform.py | Python | unlicense | 5,161 | 0.002906 |
from .utils import PyKEArgumentHelpFormatter
import numpy as np
from astropy.io import fits as pyfits
from matplotlib import pyplot as plt
from tqdm import tqdm
from . import kepio, kepmsg, kepkey, kepfit, kepstat, kepfunc
__all__ = ['kepoutlier']
def kepoutlier(infile, outfile=None, datacol='SAP_FLUX', nsig=3.0, stepsize=1.0,
npoly=3, niter=1, operation='remove', ranges='0,0', plot=False,
plotfit=False, overwrite=False, verbose=False,
logfile='kepoutlier.log'):
"""
kepoutlier -- Remove or replace statistical outliers from time series data
kepoutlier identifies data outliers relative to piecemeal best-fit
polynomials. Outliers are either removed from the output time series or
replaced by a noise-treated value defined by the polynomial fit. Identified
outliers and the best fit functions are optionally plotted for inspection
purposes.
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing a Kepler light
curve within the first data extension.
outfile : str
The name of the output FITS file. ``outfile`` will be direct copy of
infile with either data outliers removed (i.e. the table will have
fewer rows) or the outliers will be corrected according to a best-fit
function and a noise model.
datacol : str
The column name containing data stored within extension 1 of infile.
This data will be searched for outliers. Typically this name is
SAP_FLUX (Simple Aperture Photometry fluxes) or PDCSAP_FLUX (Pre-search
Data Conditioning fluxes).
nsig : float
The sigma clipping threshold. Data deviating from a best fit function
by more than the threshold will be either removed or corrected
according to the user selection of operation.
stepsize : float
The data within datacol is unlikely to be well represented by a single
polynomial function. stepsize splits the data up into a series of time
blocks, each is fit independently by a separate function. The user can
provide an informed choice of stepsize after inspecting the data with
the kepdraw tool. Units are days.
npoly : int
The polynomial order of each best-fit function.
niter : int
If outliers are found in a particular data section, that data will be
removed temporarily and the time series fit again. This will be
iterated niter times before freezing upon the best available fit.
operation : str
        * ``remove`` throws away outliers. The output data table will be smaller
          than or equal in size to the input table.
* ``replace`` replaces outliers with a value that is consistent with
the best-fit polynomial function and a random component defined by the
rms of the data relative to the fit and calculated using the inverse
normal cumulative function and a random number generator.
ranges : str
The user can choose specific time ranges of data on which to work. This
could, for example, avoid removing known stellar flares from a dataset.
Time ranges are supplied as comma-separated pairs of Barycentric Julian
Dates (BJDs). Multiple ranges are separated by a semi-colon. An example
containing two time ranges is::
'2455012.48517,2455014.50072;2455022.63487,2455025.08231'
If the user wants to correct the entire time series then providing
``ranges = '0,0'`` will tell the task to operate on the whole time series.
plot : bool
Plot the data and outliers?
plotfit : bool
Overlay the polynomial fits upon the plot?
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ kepoutlier kplr002437329-2010355172524_llc.fits --datacol SAP_FLUX
--nsig 4 --stepsize 5 --npoly 2 --niter 10 --operation replace
--verbose --plot --plotfit
.. image:: ../_static/images/api/kepoutlier.png
:align: center
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPOUTLIER -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' datacol={}'.format(datacol)
+ ' nsig={}'.format(nsig)
+ ' stepsize={}'.format(stepsize)
+ ' npoly={}'.format(npoly)
+ ' niter={}'.format(niter)
+ ' operation={}'.format(operation)
+ ' ranges={}'.format(ranges)
+ ' plot={}'.format(plot)
+ ' plotfit={}'.format(plotfit)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPOUTLIER started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = ('ERROR -- KEPOUTLIER: {} exists. Use overwrite=True'
.format(outfile))
kepmsg.err(logfile, errmsg, verbose)
# open input file
instr = pyfits.open(infile)
tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
verbose)
try:
work = instr[0].header['FILEVER']
cadenom = 1.0
except:
cadenom = cadence
# fudge non-compliant FITS keywords with no values
instr = kepkey.emptykeys(instr, infile, logfile, verbose)
# read table structure
table = kepio.readfitstab(infile, instr[1], logfile, verbose)
# filter input data table
try:
nanclean = instr[1].header['NANCLEAN']
except:
time = kepio.readtimecol(infile, table, logfile, verbose)
flux = kepio.readfitscol(infile, table, datacol, logfile, verbose)
finite_data_mask = np.isfinite(time) & np.isfinite(flux) & (flux != 0)
table = table[finite_data_mask]
instr[1].data = table
comment = 'NaN cadences removed from data'
kepkey.new('NANCLEAN', True, comment, instr[1], outfile, logfile,
verbose)
# read table columns
try:
intime = instr[1].data.field('barytime') + 2.4e6
except:
intime = kepio.readfitscol(infile, instr[1].data, 'time', logfile,
verbose)
indata = kepio.readfitscol(infile, instr[1].data, datacol, logfile,
verbose)
intime = intime + bjdref
indata = indata / cadenom
# time ranges for region to be corrected
t1, t2 = kepio.timeranges(ranges, logfile, verbose)
cadencelis = kepstat.filterOnRange(intime, t1, t2)
# find limits of each time step
tstep1, tstep2 = [], []
work = intime[0]
while work < intime[-1]:
tstep1.append(work)
tstep2.append(np.array([work + stepsize, intime[-1]],
dtype='float64').min())
work += stepsize
# find cadence limits of each time step
cstep1, cstep2 = [], []
work1 = 0
work2 = 0
for i in range(len(intime)):
if intime[i] >= intime[work1] and intime[i] < intime[work1] + stepsize:
work2 = i
else:
cstep1.append(work1)
cstep2.append(work2)
work1 = i
work2 = i
cstep1.append(work1)
cstep2.append(work2)
outdata = indata * 1.0
# comment keyword in output file
kepkey.history(call, instr[0], outfile, logfile, verbose)
# clean up x-axis unit
intime0 = (tstart // 100) * 100.0
ptime = intime - intime0
xlab = 'BJD $-$ {}'.format(intime0)
# clean up y-axis units
pout = indata * 1.0
nrm = len(str(int(pout.max())))-1
pout = pout / 10**nrm
ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm
# data limits
xmin = ptime.min()
xmax = ptime.max()
ymin = pout.min()
ymax = pout.max()
xr = xmax - xmin
yr = ymax - ymin
ptime = np.insert(ptime, [0], [ptime[0]])
ptime = np.append(ptime, [ptime[-1]])
pout = np.insert(pout, [0], [0.0])
pout = np.append(pout, 0.0)
# plot light curve
if plot:
plt.figure()
plt.clf()
# plot data
ax = plt.axes([0.06, 0.1, 0.93, 0.87])
# force tick labels to be absolute rather than relative
plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.plot(ptime, pout, color='#0000ff', linestyle='-', linewidth=1.0)
plt.fill(ptime, pout, color='#ffff00', linewidth=0.0, alpha=0.2)
plt.xlabel(xlab, {'color' : 'k'})
plt.ylabel(ylab, {'color' : 'k'})
plt.grid()
# loop over each time step, fit data, determine rms
masterfit = indata * 0.0
mastersigma = np.zeros(len(masterfit))
functype = getattr(kepfunc, 'poly' + str(npoly))
for i in range(len(cstep1)):
pinit = [indata[cstep1[i]:cstep2[i]+1].mean()]
if npoly > 0:
for j in range(npoly):
pinit.append(0.0)
pinit = np.array(pinit, dtype='float32')
try:
coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty = \
kepfit.lsqclip(functype, pinit,
intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]],
indata[cstep1[i]:cstep2[i]+1], None, nsig,
nsig, niter, logfile, verbose)
for j in range(len(coeffs)):
masterfit[cstep1[i]: cstep2[i] + 1] += (coeffs[j]
* (intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]]) ** j)
for j in range(cstep1[i], cstep2[i] + 1):
mastersigma[j] = sigma
if plotfit:
plt.plot(plotx + intime[cstep1[i]] - intime0, ploty / 10 ** nrm,
'g', lw=3)
except:
for j in range(cstep1[i], cstep2[i] + 1):
masterfit[j] = indata[j]
mastersigma[j] = 1.0e10
message = ('WARNING -- KEPOUTLIER: could not fit range '
+ str(intime[cstep1[i]]) + '-' + str(intime[cstep2[i]]))
kepmsg.warn(logfile, message, verbose)
# reject outliers
rejtime, rejdata = [], []
naxis2 = 0
for i in tqdm(range(len(masterfit))):
if (abs(indata[i] - masterfit[i]) > nsig * mastersigma[i]
and i in cadencelis):
rejtime.append(intime[i])
rejdata.append(indata[i])
if operation == 'replace':
[rnd] = kepstat.randarray([masterfit[i]], [mastersigma[i]])
table[naxis2] = table[i]
table.field(datacol)[naxis2] = rnd
naxis2 += 1
else:
table[naxis2] = table[i]
naxis2 += 1
instr[1].data = table[:naxis2]
if plot:
rejtime = np.array(rejtime, dtype='float64')
rejdata = np.array(rejdata, dtype='float32')
plt.plot(rejtime - intime0, rejdata / 10 ** nrm, 'ro')
# plot ranges
plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
if ymin >= 0.0:
plt.ylim(ymin - yr * 0.01, ymax + yr * 0.01)
else:
plt.ylim(1.0e-10, ymax + yr * 0.01)
# render plot
plt.show()
# write output file
print("Writing output file {}...".format(outfile))
instr.writeto(outfile)
# close input file
instr.close()
kepmsg.clock('KEPOUTLIER completed at', logfile, verbose)
def kepoutlier_main():
import argparse
parser = argparse.ArgumentParser(
description='Remove or replace data outliers from a time series',
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input file', type=str)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-kepoutlier.'),
default=None)
parser.add_argument('--datacol', default='SAP_FLUX',
help='Name of data column to plot', type=str)
parser.add_argument('--nsig', default=3.,
help='Sigma clipping threshold for outliers',
type=float)
parser.add_argument('--stepsize', default=1.0,
help='Stepsize on which to fit data [days]',
type=float)
parser.add_argument('--npoly', default=3,
help='Polynomial order for each fit', type=int)
parser.add_argument('--niter', default=1,
help='Maximum number of clipping iterations', type=int)
parser.add_argument('--operation', default='remove',
help='Remove or replace outliers?', type=str,
choices=['replace','remove'])
parser.add_argument('--ranges', default='0,0',
help='Time ranges of regions to filter', type=str)
parser.add_argument('--plot', action='store_true', help='Plot result?')
parser.add_argument('--plotfit', action='store_true',
help='Plot fit over results?')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='kepoutlier.log', dest='logfile', type=str)
args = parser.parse_args()
kepoutlier(args.infile, args.outfile, args.datacol, args.nsig,
               args.stepsize, args.npoly, args.niter, args.operation,
args.ranges, args.plot, args.plotfit, args.overwrite,
args.verbose, args.logfile)
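# --- Illustrative usage sketch (not part of the original module) ---
# Calling kepoutlier() from Python rather than the command line. The FITS file
# name is hypothetical; the keyword arguments mirror the shell example in the
# docstring above.
#
# from pyke.kepoutlier import kepoutlier
# kepoutlier('kplr002437329-2010355172524_llc.fits', datacol='SAP_FLUX',
#            nsig=4.0, stepsize=5.0, npoly=2, niter=10, operation='replace',
#            plot=True, plotfit=True, verbose=True)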
| gully/PyKE | pyke/kepoutlier.py | Python | mit | 14,393 | 0.00139 |
#!/usr/bin/env python
import urllib2
from bs4 import BeautifulSoup as BS
import re
import time
def getAgenciesList():
agenciesList_req = urllib2.Request('''http://services.my511.org/Transit2.0/GetAgencies.aspx?token=aeeb38de-5385-482a-abde-692dfb2769e3''')
xml_resp = urllib2.urlopen(agenciesList_req)
soup = BS(xml_resp.read(),'lxml')
print soup.prettify()
agencies = soup.find_all('agency')
for a in agencies:
print a['name']
def getBusList(busCodes):
api_url = '''http://services.my511.org/Transit2.0/GetRoutesForAgencies.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&agencyNames=SF-MUNI'''
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
routes = soup.find_all('route')
for route in routes:
if route['code'] in busCodes:
print route.prettify()
def getBusStopsList():
api_url = '''http://services.my511.org/Transit2.0/GetStopsForRoute.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&routeIDF=SF-MUNI~8X~Inbound'''
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
print soup.prettify()
def getNextDepartures(stopcode,buscode):
api_url = '''http://services.my511.org/Transit2.0/
GetNextDeparturesByStopCode.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&stopcode=%s'''%stopcode
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
# print soup.prettify()
route = soup.find('route',{'code':buscode})
l = route.departuretimelist.getText().split()
if l:
print '-- %s\t%s (mins)'%(buscode,', '.join(l))
else:
print '-- %s\tUnavailable'%buscode
return l
class busTime:
    def __init__(self, busCode, busTime=None):
        self.busCode = busCode  # String
        self.busTime = busTime if busTime is not None else []  # List of String
def __str__(self):
return self.busCode
class busStopStatus:
    def __init__(self, stopcode, description="", departureList=None):
        self.stopcode = stopcode
        self.description = description
        self.departureList = departureList if departureList is not None else []
def getBusStopStatus(stopcode):
api_url = '''http://services.my511.org/Transit2.0/
GetNextDeparturesByStopCode.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&stopcode=%s'''%stopcode
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
description = soup.find('stop')['name']
status = busStopStatus(stopcode,description,[])
for bus in soup.find_all('route'):
departtime = busTime(bus['code'],[])
timeList = bus.departuretimelist.getText().split()
if timeList:
print '-- %s\t%s (mins)'%(bus['code'],', '.join(timeList))
for t in timeList:
departtime.busTime.append(t)
status.departureList.append(departtime)
else:
print '-- %s\tUnavailable'%bus['code']
return status
if __name__ == '__main__':
print 'BUS TIMING... :D\n'
print time.ctime(time.time())
getBusStopStatus(16367) | trthanhquang/bus-assistant | webApp/getBusTiming.py | Python | mit | 2,827 | 0.038557 |
"""
The GeometryProxy object, allows for lazy-geometries. The proxy uses
Python descriptors for instantiating and setting Geometry objects
corresponding to geographic model fields.
Thanks to Robert Coup for providing this functionality (see #4322).
"""
from django.contrib.gis import memoryview
from django.utils import six
class GeometryProxy(object):
def __init__(self, klass, field):
"""
Proxy initializes on the given Geometry class (not an instance) and
the GeometryField.
"""
self._field = field
self._klass = klass
def __get__(self, obj, type=None):
"""
This accessor retrieves the geometry, initializing it using the geometry
class specified during initialization and the HEXEWKB value of the field.
Currently, only GEOS or OGR geometries are supported.
"""
if obj is None:
# Accessed on a class, not an instance
return self
# Getting the value of the field.
geom_value = obj.__dict__[self._field.attname]
if isinstance(geom_value, self._klass):
geom = geom_value
elif (geom_value is None) or (geom_value == ''):
geom = None
else:
# Otherwise, a Geometry object is built using the field's contents,
# and the model's corresponding attribute is set.
geom = self._klass(geom_value)
setattr(obj, self._field.attname, geom)
return geom
def __set__(self, obj, value):
"""
This accessor sets the proxied geometry with the geometry class
specified during initialization. Values of None, HEXEWKB, or WKT may
be used to set the geometry as well.
"""
# The OGC Geometry type of the field.
gtype = self._field.geom_type
# The geometry type must match that of the field -- unless the
# general GeometryField is used.
if isinstance(value, self._klass) and (str(value.geom_type).upper() == gtype or gtype == 'GEOMETRY'):
# Assigning the SRID to the geometry.
if value.srid is None:
value.srid = self._field.srid
elif value is None or isinstance(value, six.string_types + (memoryview,)):
# Set with None, WKT, HEX, or WKB
pass
else:
raise TypeError('Cannot set %s GeometryProxy (%s) with value of type: %s' % (
obj.__class__.__name__, gtype, type(value)))
# Setting the objects dictionary with the value, and returning.
obj.__dict__[self._field.attname] = value
return value
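# --- Illustrative sketch (not part of the original module) ---
# How a geometry field is expected to attach this descriptor to a model class so
# that attribute access is routed through GeometryProxy.__get__/__set__. The
# field and geometry classes below are stand-ins, not the real GeoDjango ones.
#
# class MyGeometryField(Field):
#     def contribute_to_class(self, cls, name):
#         super(MyGeometryField, self).contribute_to_class(cls, name)
#         # Instances of `cls` now build geometry objects lazily from the
#         # stored HEXEWKB/WKT value on first attribute access.
#         setattr(cls, self.attname, GeometryProxy(SomeGeometryClass, self))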
| 912/M-new | virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/gis/db/models/proxy.py | Python | gpl-2.0 | 2,643 | 0.001892 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BiRNN model with attention."""
from kws_streaming.layers import modes
from kws_streaming.layers import speech_features
from kws_streaming.layers.compat import tf
import kws_streaming.models.model_utils as utils
def model_parameters(parser_nn):
"""BiRNN attention model parameters."""
parser_nn.add_argument(
'--cnn_filters',
type=str,
default='10,1',
help='Number of output filters in the convolution layers',
)
parser_nn.add_argument(
'--cnn_kernel_size',
type=str,
default='(5,1),(5,1)',
help='Heights and widths of the 2D convolution window',
)
parser_nn.add_argument(
'--cnn_act',
type=str,
default="'relu','relu'",
help='Activation function in the convolution layers',
)
parser_nn.add_argument(
'--cnn_dilation_rate',
type=str,
default='(1,1),(1,1)',
help='Dilation rate to use for dilated convolutions',
)
parser_nn.add_argument(
'--cnn_strides',
type=str,
default='(1,1),(1,1)',
help='Strides of the convolution layers along the height and width',
)
parser_nn.add_argument(
'--rnn_layers',
type=int,
default=2,
help='Number of RNN layers (each RNN is wrapped by Bidirectional)',
)
parser_nn.add_argument(
'--rnn_type',
type=str,
default='gru',
help='RNN type: it can be gru or lstm',
)
parser_nn.add_argument(
'--rnn_units',
type=int,
default=128,
help='Units number in RNN cell',
)
parser_nn.add_argument(
'--dropout1',
type=float,
default=0.1,
help='Percentage of data dropped',
)
parser_nn.add_argument(
'--units2',
type=str,
default='64,32',
help='Number of units in the last set of hidden layers',
)
parser_nn.add_argument(
'--act2',
type=str,
default="'relu','linear'",
help='Activation function of the last set of hidden layers',
)
def model(flags):
"""BiRNN attention model.
It is based on paper:
A neural attention model for speech command recognition
https://arxiv.org/pdf/1808.08929.pdf
  Depending on the rnn_type parameter, the model can be a biLSTM or a biGRU.
Args:
flags: data/model parameters
Returns:
Keras model for training
"""
rnn_types = {'lstm': tf.keras.layers.LSTM, 'gru': tf.keras.layers.GRU}
if flags.rnn_type not in rnn_types:
    raise ValueError('not supported RNN type ', flags.rnn_type)
rnn = rnn_types[flags.rnn_type]
input_audio = tf.keras.layers.Input(
shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),
batch_size=flags.batch_size)
net = input_audio
if flags.preprocess == 'raw':
# it is a self contained model, user need to feed raw audio only
net = speech_features.SpeechFeatures(
speech_features.SpeechFeatures.get_params(flags))(
net)
net = tf.keras.backend.expand_dims(net)
for filters, kernel_size, activation, dilation_rate, strides in zip(
utils.parse(flags.cnn_filters), utils.parse(flags.cnn_kernel_size),
utils.parse(flags.cnn_act), utils.parse(flags.cnn_dilation_rate),
utils.parse(flags.cnn_strides)):
net = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
activation=activation,
dilation_rate=dilation_rate,
strides=strides,
padding='same')(
net)
net = tf.keras.layers.BatchNormalization()(net)
shape = net.shape
# input net dimension: [batch, time, feature, channels]
# reshape dimension: [batch, time, feature * channels]
# so that GRU/RNN can process it
net = tf.keras.layers.Reshape((-1, shape[2] * shape[3]))(net)
# dims: [batch, time, feature]
for _ in range(flags.rnn_layers):
net = tf.keras.layers.Bidirectional(
rnn(flags.rnn_units, return_sequences=True, unroll=True))(
net)
feature_dim = net.shape[-1]
middle = net.shape[1] // 2 # index of middle point of sequence
# feature vector at middle point [batch, feature]
mid_feature = net[:, middle, :]
# apply one projection layer with the same dim as input feature
query = tf.keras.layers.Dense(feature_dim)(mid_feature)
# attention weights [batch, time]
att_weights = tf.keras.layers.Dot(axes=[1, 2])([query, net])
att_weights = tf.keras.layers.Softmax(name='attSoftmax')(att_weights)
# apply attention weights [batch, feature]
net = tf.keras.layers.Dot(axes=[1, 1])([att_weights, net])
net = tf.keras.layers.Dropout(rate=flags.dropout1)(net)
for units, activation in zip(
utils.parse(flags.units2), utils.parse(flags.act2)):
net = tf.keras.layers.Dense(units=units, activation=activation)(net)
net = tf.keras.layers.Dense(units=flags.label_count)(net)
if flags.return_softmax:
net = tf.keras.layers.Activation('softmax')(net)
return tf.keras.Model(input_audio, net)
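# --- Illustrative sketch (not part of the original module) ---
# The attention pooling used in model() above, isolated on a dummy tensor so the
# shapes are easy to follow. The tensor sizes are arbitrary; only the Keras ops
# mirror the code above.
#
# import tensorflow as tf
# net = tf.random.normal([2, 16, 8])               # [batch, time, feature]
# mid_feature = net[:, net.shape[1] // 2, :]       # middle frame, [batch, feature]
# query = tf.keras.layers.Dense(8)(mid_feature)    # projected query, [batch, feature]
# att_weights = tf.keras.layers.Dot(axes=[1, 2])([query, net])   # [batch, time]
# att_weights = tf.keras.layers.Softmax()(att_weights)
# pooled = tf.keras.layers.Dot(axes=[1, 1])([att_weights, net])  # [batch, feature]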
| google-research/google-research | kws_streaming/models/att_rnn.py | Python | apache-2.0 | 5,484 | 0.008388 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Providers sub-commands"""
import re
from airflow.cli.simple_table import AirflowConsole
from airflow.providers_manager import ProvidersManager
from airflow.utils.cli import suppress_logs_and_warning
def _remove_rst_syntax(value: str) -> str:
return re.sub("[`_<>]", "", value.strip(" \n."))
@suppress_logs_and_warning
def provider_get(args):
"""Get a provider info."""
providers = ProvidersManager().providers
if args.provider_name in providers:
provider_version = providers[args.provider_name].version
provider_info = providers[args.provider_name].provider_info
if args.full:
provider_info["description"] = _remove_rst_syntax(provider_info["description"])
AirflowConsole().print_as(
data=[provider_info],
output=args.output,
)
else:
print(f"Provider: {args.provider_name}")
print(f"Version: {provider_version}")
else:
raise SystemExit(f"No such provider installed: {args.provider_name}")
@suppress_logs_and_warning
def providers_list(args):
"""Lists all providers at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().providers.values()),
output=args.output,
mapper=lambda x: {
"package_name": x[1]["package-name"],
"description": _remove_rst_syntax(x[1]["description"]),
"version": x[0],
},
)
@suppress_logs_and_warning
def hooks_list(args):
"""Lists all hooks at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().hooks.items()),
output=args.output,
mapper=lambda x: {
"connection_type": x[0],
"class": x[1].connection_class,
"conn_id_attribute_name": x[1].connection_id_attribute_name,
'package_name': x[1].package_name,
'hook_name': x[1].hook_name,
},
)
@suppress_logs_and_warning
def connection_form_widget_list(args):
"""Lists all custom connection form fields at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().connection_form_widgets.items()),
output=args.output,
mapper=lambda x: {
"connection_parameter_name": x[0],
"class": x[1].connection_class,
'package_name': x[1].package_name,
'field_type': x[1].field.field_class.__name__,
},
)
@suppress_logs_and_warning
def connection_field_behaviours(args):
"""Lists field behaviours"""
AirflowConsole().print_as(
data=list(ProvidersManager().field_behaviours.keys()),
output=args.output,
mapper=lambda x: {
"field_behaviours": x,
},
)
@suppress_logs_and_warning
def extra_links_list(args):
"""Lists all extra links at the command line"""
AirflowConsole().print_as(
data=ProvidersManager().extra_links_class_names,
output=args.output,
mapper=lambda x: {
"extra_link_class_name": x,
},
)
| nathanielvarona/airflow | airflow/cli/commands/provider_command.py | Python | apache-2.0 | 3,862 | 0.000259 |
from build.management.commands.build_statistics_trees import Command as BuildStatisticsTrees
class Command(BuildStatisticsTrees):
pass | cmunk/protwis | build_gpcr/management/commands/build_statistics_trees.py | Python | apache-2.0 | 142 | 0.021429 |
import os
import sys
import shutil
import glob
import time
import multiprocessing as mp
if len(sys.argv)!=5:
print("Usage: ")
print("python extract_features_for_merlin.py <path_to_merlin_dir> <path_to_wav_dir> <path_to_feat_dir> <sampling rate>")
sys.exit(1)
# top merlin directory
merlin_dir = sys.argv[1]
# input audio directory
wav_dir = sys.argv[2]
# Output features directory
out_dir = sys.argv[3]
# initializations
fs = int(sys.argv[4])
# tools directory
world = os.path.join(merlin_dir, "tools/bin/WORLD")
sptk = os.path.join(merlin_dir, "tools/bin/SPTK-3.9")
sp_dir = os.path.join(out_dir, 'sp' )
mgc_dir = os.path.join(out_dir, 'mgc')
ap_dir = os.path.join(out_dir, 'ap' )
bap_dir = os.path.join(out_dir, 'bap')
f0_dir = os.path.join(out_dir, 'f0' )
lf0_dir = os.path.join(out_dir, 'lf0')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(sp_dir):
os.mkdir(sp_dir)
if not os.path.exists(mgc_dir):
os.mkdir(mgc_dir)
if not os.path.exists(bap_dir):
os.mkdir(bap_dir)
if not os.path.exists(f0_dir):
os.mkdir(f0_dir)
if not os.path.exists(lf0_dir):
os.mkdir(lf0_dir)
if fs == 16000:
nFFTHalf = 1024
alpha = 0.58
elif fs == 48000:
nFFTHalf = 2048
alpha = 0.77
else:
print("As of now, we don't support %d Hz sampling rate." %(fs))
print("Please consider either downsampling to 16000 Hz or upsampling to 48000 Hz")
sys.exit(1)
#bap order depends on sampling rate.
mcsize=59
def get_wav_filelist(wav_dir):
wav_files = []
for file in os.listdir(wav_dir):
whole_filepath = os.path.join(wav_dir,file)
if os.path.isfile(whole_filepath) and str(whole_filepath).endswith(".wav"):
wav_files.append(whole_filepath)
elif os.path.isdir(whole_filepath):
wav_files += get_wav_filelist(whole_filepath)
wav_files.sort()
return wav_files
def process(filename):
'''
The function decomposes a wav file into F0, mel-cepstral coefficients, and aperiodicity
:param filename: path to wav file
:return: .lf0, .mgc and .bap files
'''
file_id = os.path.basename(filename).split(".")[0]
print('\n' + file_id)
### WORLD ANALYSIS -- extract vocoder parameters ###
### extract f0, sp, ap ###
world_analysis_cmd = "%s %s %s %s %s" % (os.path.join(world, 'analysis'), \
filename,
os.path.join(f0_dir, file_id + '.f0'), \
os.path.join(sp_dir, file_id + '.sp'), \
os.path.join(bap_dir, file_id + '.bapd'))
os.system(world_analysis_cmd)
### convert f0 to lf0 ###
sptk_x2x_da_cmd = "%s +da %s > %s" % (os.path.join(sptk, 'x2x'), \
os.path.join(f0_dir, file_id + '.f0'), \
os.path.join(f0_dir, file_id + '.f0a'))
os.system(sptk_x2x_da_cmd)
sptk_x2x_af_cmd = "%s +af %s | %s > %s " % (os.path.join(sptk, 'x2x'), \
os.path.join(f0_dir, file_id + '.f0a'), \
os.path.join(sptk, 'sopr') + ' -magic 0.0 -LN -MAGIC -1.0E+10', \
os.path.join(lf0_dir, file_id + '.lf0'))
os.system(sptk_x2x_af_cmd)
### convert sp to mgc ###
sptk_x2x_df_cmd1 = "%s +df %s | %s | %s >%s" % (os.path.join(sptk, 'x2x'), \
os.path.join(sp_dir, file_id + '.sp'), \
os.path.join(sptk, 'sopr') + ' -R -m 32768.0', \
os.path.join(sptk, 'mcep') + ' -a ' + str(alpha) + ' -m ' + str(
mcsize) + ' -l ' + str(
nFFTHalf) + ' -e 1.0E-8 -j 0 -f 0.0 -q 3 ', \
os.path.join(mgc_dir, file_id + '.mgc'))
os.system(sptk_x2x_df_cmd1)
### convert bapd to bap ###
sptk_x2x_df_cmd2 = "%s +df %s > %s " % (os.path.join(sptk, "x2x"), \
os.path.join(bap_dir, file_id + ".bapd"), \
os.path.join(bap_dir, file_id + '.bap'))
os.system(sptk_x2x_df_cmd2)
print("--- Feature extraction started ---")
start_time = time.time()
# get wav files list
wav_files = get_wav_filelist(wav_dir)
# do multi-processing
pool = mp.Pool(mp.cpu_count())
pool.map(process, wav_files)
# clean temporal files
shutil.rmtree(sp_dir, ignore_errors=True)
shutil.rmtree(f0_dir, ignore_errors=True)
for zippath in glob.iglob(os.path.join(bap_dir, '*.bapd')):
os.remove(zippath)
print("You should have your features ready in: "+out_dir)
(m, s) = divmod(int(time.time() - start_time), 60)
print(("--- Feature extraction completion time: %d min. %d sec ---" % (m, s)))
| bajibabu/merlin | misc/scripts/vocoder/world/extract_features_for_merlin.py | Python | apache-2.0 | 5,044 | 0.011102 |
import aaf
import os
from optparse import OptionParser
parser = OptionParser()
(options, args) = parser.parse_args()
if not args:
parser.error("not enough argements")
path = args[0]
name, ext = os.path.splitext(path)
f = aaf.open(path, 'r')
f.save(name + ".xml")
f.close()
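# Example invocation (illustrative; the input path is hypothetical):
#   python aaf2xml.py /path/to/project.aaf
# This writes /path/to/project.xml next to the input file.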
| markreidvfx/pyaaf | example/aaf2xml.py | Python | mit | 281 | 0 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pprof_profiler."""
import gzip
from proto import profile_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.profiler import pprof_profiler
class PprofProfilerTest(test.TestCase):
def testDataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
graph.get_operations.return_value = []
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(0, len(profile_files))
def testRunMetadataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [('a/b/file1', 10, 'some_var')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(0, len(profile_files))
def testValidProfile(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
node1 = step_stats_pb2.NodeExecStats(
node_name='Add/123',
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = 'deviceA'
device1.node_stats.extend([node1])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [
('a/b/file1', 10, 'apply_op', 'abc'), ('a/c/file2', 12, 'my_op', 'def')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
expected_proto = """sample_type {
type: 5
unit: 5
}
sample_type {
type: 6
unit: 7
}
sample_type {
type: 8
unit: 7
}
sample {
value: 1
value: 4
value: 2
label {
key: 1
str: 2
}
label {
key: 3
str: 4
}
}
string_table: ""
string_table: "node_name"
string_table: "Add/123"
string_table: "op_type"
string_table: "add"
string_table: "count"
string_table: "all_time"
string_table: "nanoseconds"
string_table: "op_time"
string_table: "Device 1 of 1: deviceA"
comment: 9
"""
# Test with protos
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(1, len(profiles))
self.assertTrue('deviceA' in profiles)
self.assertEqual(expected_proto, str(profiles['deviceA']))
# Test with files
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(1, len(profile_files))
with gzip.open(profile_files[0]) as profile_file:
profile_contents = profile_file.read()
profile = profile_pb2.Profile()
profile.ParseFromString(profile_contents)
self.assertEqual(expected_proto, str(profile))
@test_util.run_v1_only('b/120545219')
def testProfileWithWhileLoop(self):
options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
num_iters = 5
with self.cached_session() as sess:
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, num_iters)
b = lambda i: math_ops.add(i, 1)
r = control_flow_ops.while_loop(c, b, [i])
sess.run(r, options=options, run_metadata=run_metadata)
profiles = pprof_profiler.get_profiles(sess.graph, run_metadata)
self.assertEqual(1, len(profiles))
profile = next(iter(profiles.values()))
add_samples = [] # Samples for the while/Add node
for sample in profile.sample:
if profile.string_table[sample.label[0].str] == 'while/Add':
add_samples.append(sample)
# Values for same nodes are aggregated.
self.assertEqual(1, len(add_samples))
# Value of "count" should be equal to number of iterations.
self.assertEqual(num_iters, add_samples[0].value[0])
if __name__ == '__main__':
test.main()
| tensorflow/tensorflow | tensorflow/python/profiler/pprof_profiler_test.py | Python | apache-2.0 | 5,145 | 0.005442 |
# ElasticQuery
# File: setup.py
# Desc: needed
from setuptools import setup
if __name__ == '__main__':
setup(
version='3.2',
name='ElasticQuery',
description='A simple query builder for Elasticsearch 2',
author='Nick Barrett',
author_email='pointlessrambler@gmail.com',
url='http://github.com/Fizzadar/ElasticQuery',
package_dir={
'ElasticQuery': 'elasticquery',
},
packages=[
'elasticquery',
],
install_requires=['six>=1.4.0'],
)
| Fizzadar/ElasticQuery | setup.py | Python | mit | 554 | 0 |
# Author: Adam Chodorowski
# Contact: chodorowski@users.sourceforge.net
# Revision: $Revision: 2224 $
# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Swedish language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
'author': u'F\u00f6rfattare',
'authors': u'F\u00f6rfattare',
'organization': u'Organisation',
'address': u'Adress',
'contact': u'Kontakt',
'version': u'Version',
'revision': u'Revision',
'status': u'Status',
'date': u'Datum',
'copyright': u'Copyright',
'dedication': u'Dedikation',
'abstract': u'Sammanfattning',
'attention': u'Observera!',
'caution': u'Varning!',
'danger': u'FARA!',
'error': u'Fel',
'hint': u'V\u00e4gledning',
'important': u'Viktigt',
'note': u'Notera',
'tip': u'Tips',
'warning': u'Varning',
'contents': u'Inneh\u00e5ll' }
"""Mapping of node class name to label text."""
bibliographic_fields = {
# 'Author' and 'Authors' identical in Swedish; assume the plural:
u'f\u00f6rfattare': 'authors',
u' n/a': 'author',
u'organisation': 'organization',
u'adress': 'address',
u'kontakt': 'contact',
u'version': 'version',
u'revision': 'revision',
u'status': 'status',
u'datum': 'date',
u'copyright': 'copyright',
u'dedikation': 'dedication',
u'sammanfattning': 'abstract' }
"""Swedish (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| jmchilton/galaxy-central | modules/docutils/languages/sv.py | Python | mit | 2,135 | 0.001405 |
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_svm
short_description: Manage NetApp ONTAP svm
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (ng-ansibleteam@netapp.com)
description:
- Create, modify or delete svm on NetApp ONTAP
options:
state:
description:
- Whether the specified SVM should exist or not.
choices: ['present', 'absent']
default: 'present'
name:
description:
- The name of the SVM to manage.
required: true
from_name:
description:
- Name of the SVM to be renamed
version_added: '2.7'
root_volume:
description:
- Root volume of the SVM.
- Cannot be modified after creation.
root_volume_aggregate:
description:
- The aggregate on which the root volume will be created.
- Cannot be modified after creation.
root_volume_security_style:
description:
- Security Style of the root volume.
- When specified as part of the vserver-create,
this field represents the security style for the Vserver root volume.
- When specified as part of vserver-get-iter call,
this will return the list of matching Vservers.
- The 'unified' security style, which applies only to Infinite Volumes,
cannot be applied to a Vserver's root volume.
- Cannot be modified after creation.
choices: ['unix', 'ntfs', 'mixed', 'unified']
allowed_protocols:
description:
- Allowed Protocols.
- When specified as part of a vserver-create,
this field represent the list of protocols allowed on the Vserver.
- When part of vserver-get-iter call,
this will return the list of Vservers
which have any of the protocols specified
as part of the allowed-protocols.
- When part of vserver-modify,
this field should include the existing list
along with new protocol list to be added to prevent data disruptions.
- Possible values
- nfs NFS protocol,
- cifs CIFS protocol,
- fcp FCP protocol,
- iscsi iSCSI protocol,
- ndmp NDMP protocol,
- http HTTP protocol,
- nvme NVMe protocol
aggr_list:
description:
- List of aggregates assigned for volume operations.
- These aggregates could be shared for use with other Vservers.
- When specified as part of a vserver-create,
this field represents the list of aggregates
that are assigned to the Vserver for volume operations.
- When part of vserver-get-iter call,
this will return the list of Vservers
which have any of the aggregates specified as part of the aggr-list.
ipspace:
description:
- IPSpace name
- Cannot be modified after creation.
version_added: '2.7'
snapshot_policy:
description:
- Default snapshot policy setting for all volumes of the Vserver.
This policy will be assigned to all volumes created in this
Vserver unless the volume create request explicitly provides a
snapshot policy or volume is modified later with a specific
snapshot policy. A volume-level snapshot policy always overrides
the default Vserver-wide snapshot policy.
version_added: '2.7'
language:
description:
- Language to use for the SVM
- Default to C.UTF-8
- Possible values Language
- c POSIX
- ar Arabic
- cs Czech
- da Danish
- de German
- en English
- en_us English (US)
- es Spanish
- fi Finnish
- fr French
- he Hebrew
- hr Croatian
- hu Hungarian
- it Italian
- ja Japanese euc-j
- ja_v1 Japanese euc-j
- ja_jp.pck Japanese PCK (sjis)
- ja_jp.932 Japanese cp932
- ja_jp.pck_v2 Japanese PCK (sjis)
- ko Korean
- no Norwegian
- nl Dutch
- pl Polish
- pt Portuguese
- ro Romanian
- ru Russian
- sk Slovak
- sl Slovenian
- sv Swedish
- tr Turkish
- zh Simplified Chinese
- zh.gbk Simplified Chinese (GBK)
- zh_tw Traditional Chinese euc-tw
- zh_tw.big5 Traditional Chinese Big 5
version_added: '2.7'
subtype:
description:
- The subtype for vserver to be created.
- Cannot be modified after creation.
choices: ['default', 'dp_destination', 'sync_source', 'sync_destination']
version_added: '2.7'
'''
EXAMPLES = """
- name: Create SVM
na_ontap_svm:
state: present
name: ansibleVServer
root_volume: vol1
root_volume_aggregate: aggr1
root_volume_security_style: mixed
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
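
    # The examples below are illustrative additions (not part of the original
    # module documentation); they reuse the connection variables shown above.
    - name: Rename SVM
      na_ontap_svm:
        state: present
        from_name: ansibleVServer
        name: ansibleVServerRenamed
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"

    - name: Delete SVM
      na_ontap_svm:
        state: absent
        name: ansibleVServerRenamed
        hostname: "{{ netapp_hostname }}"
        username: "{{ netapp_username }}"
        password: "{{ netapp_password }}"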
"""
RETURN = """
"""
import traceback
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapSVM(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=[
'present', 'absent'], default='present'),
name=dict(required=True, type='str'),
from_name=dict(required=False, type='str'),
root_volume=dict(type='str'),
root_volume_aggregate=dict(type='str'),
root_volume_security_style=dict(type='str', choices=['unix',
'ntfs',
'mixed',
'unified'
]),
allowed_protocols=dict(type='list'),
aggr_list=dict(type='list'),
ipspace=dict(type='str', required=False),
snapshot_policy=dict(type='str', required=False),
language=dict(type='str', required=False),
subtype=dict(choices=['default', 'dp_destination', 'sync_source', 'sync_destination'])
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.from_name = p['from_name']
self.root_volume = p['root_volume']
self.root_volume_aggregate = p['root_volume_aggregate']
self.root_volume_security_style = p['root_volume_security_style']
self.allowed_protocols = p['allowed_protocols']
self.aggr_list = p['aggr_list']
self.language = p['language']
self.ipspace = p['ipspace']
self.snapshot_policy = p['snapshot_policy']
self.subtype = p['subtype']
if HAS_NETAPP_LIB is False:
self.module.fail_json(
msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_vserver(self, vserver_name=None):
"""
Checks if vserver exists.
:return:
vserver object if vserver found
None if vserver is not found
:rtype: object/None
"""
if vserver_name is None:
vserver_name = self.name
vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-info', **{'vserver-name': vserver_name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
vserver_info.add_child_elem(query)
result = self.server.invoke_successfully(vserver_info,
enable_tunneling=False)
vserver_details = None
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
attributes_list = result.get_child_by_name('attributes-list')
vserver_info = attributes_list.get_child_by_name('vserver-info')
aggr_list = list()
''' vserver aggr-list can be empty by default'''
get_list = vserver_info.get_child_by_name('aggr-list')
if get_list is not None:
aggregates = get_list.get_children()
for aggr in aggregates:
aggr_list.append(aggr.get_content())
protocols = list()
'''allowed-protocols is not empty by default'''
get_protocols = vserver_info.get_child_by_name(
'allowed-protocols').get_children()
for protocol in get_protocols:
protocols.append(protocol.get_content())
vserver_details = {'name': vserver_info.get_child_content('vserver-name'),
'root_volume': vserver_info.get_child_content('root-volume'),
'root_volume_aggregate': vserver_info.get_child_content('root-volume-aggregate'),
'root_volume_security_style': vserver_info.get_child_content('root-volume-security-style'),
'subtype': vserver_info.get_child_content('vserver-subtype'),
'aggr_list': aggr_list,
'language': vserver_info.get_child_content('language'),
'snapshot_policy': vserver_info.get_child_content('snapshot-policy'),
                               'ipspace': vserver_info.get_child_content('ipspace'),  # assumed ZAPI field name; used by the ipspace check in apply()
                               'allowed_protocols': protocols}
return vserver_details
def create_vserver(self):
options = {'vserver-name': self.name, 'root-volume': self.root_volume}
if self.root_volume_aggregate is not None:
options['root-volume-aggregate'] = self.root_volume_aggregate
if self.root_volume_security_style is not None:
options['root-volume-security-style'] = self.root_volume_security_style
if self.language is not None:
options['language'] = self.language
if self.ipspace is not None:
options['ipspace'] = self.ipspace
if self.snapshot_policy is not None:
options['snapshot-policy'] = self.snapshot_policy
if self.subtype is not None:
options['vserver-subtype'] = self.subtype
vserver_create = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-create', **options)
try:
self.server.invoke_successfully(vserver_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error provisioning SVM %s \
with root volume %s on aggregate %s: %s'
% (self.name, self.root_volume,
self.root_volume_aggregate, to_native(e)),
exception=traceback.format_exc())
def delete_vserver(self):
vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-destroy', **{'vserver-name': self.name})
try:
self.server.invoke_successfully(vserver_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error deleting SVM %s \
with root volume %s on aggregate %s: %s'
% (self.name, self.root_volume,
self.root_volume_aggregate, to_native(e)),
exception=traceback.format_exc())
def rename_vserver(self):
vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-rename', **{'vserver-name': self.from_name,
'new-name': self.name})
try:
self.server.invoke_successfully(vserver_rename,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error renaming SVM %s: %s'
% (self.name, to_native(e)),
exception=traceback.format_exc())
def modify_vserver(self, allowed_protocols, aggr_list, language, snapshot_policy):
options = {'vserver-name': self.name}
if language:
options['language'] = self.language
if snapshot_policy:
options['snapshot-policy'] = self.snapshot_policy
vserver_modify = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-modify', **options)
if allowed_protocols:
allowed_protocols = netapp_utils.zapi.NaElement(
'allowed-protocols')
for protocol in self.allowed_protocols:
allowed_protocols.add_new_child('protocol', protocol)
vserver_modify.add_child_elem(allowed_protocols)
if aggr_list:
aggregates = netapp_utils.zapi.NaElement('aggr-list')
for aggr in self.aggr_list:
aggregates.add_new_child('aggr-name', aggr)
vserver_modify.add_child_elem(aggregates)
try:
self.server.invoke_successfully(vserver_modify,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error modifying SVM %s: %s'
% (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
vserver_details = self.get_vserver()
        # These are being commented out as part of bugfix 595.
# if vserver_details is not None:
# results = netapp_utils.get_cserver(self.server)
# cserver = netapp_utils.setup_ontap_zapi(
# module=self.module, vserver=results)
# netapp_utils.ems_log_event("na_ontap_svm", cserver)
rename_vserver = False
modify_protocols = False
modify_aggr_list = False
modify_snapshot_policy = False
modify_language = False
if vserver_details is not None:
if self.state == 'absent':
changed = True
elif self.state == 'present':
# SVM is present, is it a modify?
if self.allowed_protocols is not None:
self.allowed_protocols.sort()
vserver_details['allowed_protocols'].sort()
if self.allowed_protocols != vserver_details['allowed_protocols']:
modify_protocols = True
changed = True
if self.aggr_list is not None:
self.aggr_list.sort()
vserver_details['aggr_list'].sort()
if self.aggr_list != vserver_details['aggr_list']:
modify_aggr_list = True
changed = True
if self.snapshot_policy is not None:
if self.snapshot_policy != vserver_details['snapshot_policy']:
modify_snapshot_policy = True
changed = True
if self.language is not None:
if self.language != vserver_details['language']:
modify_language = True
changed = True
if self.root_volume is not None and self.root_volume != vserver_details['root_volume']:
self.module.fail_json(msg='Error modifying SVM %s: %s' % (self.name, 'cannot change root volume'))
if self.root_volume_aggregate is not None and self.root_volume_aggregate != vserver_details['root_volume_aggregate']:
self.module.fail_json(msg='Error modifying SVM %s: %s' % (self.name, 'cannot change root volume aggregate'))
if self.root_volume_security_style is not None and self.root_volume_security_style != vserver_details['root_volume_security_style']:
self.module.fail_json(msg='Error modifying SVM %s: %s' % (self.name, 'cannot change root volume security style'))
if self.subtype is not None and self.subtype != vserver_details['subtype']:
self.module.fail_json(msg='Error modifying SVM %s: %s' % (self.name, 'cannot change subtype'))
if self.ipspace is not None and self.ipspace != vserver_details['ipspace']:
self.module.fail_json(msg='Error modifying SVM %s: %s' % (self.name, 'cannot change ipspace'))
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if vserver_details is None:
# create or rename
if self.from_name is not None and self.get_vserver(self.from_name):
self.rename_vserver()
else:
self.create_vserver()
else:
                        if modify_protocols or modify_aggr_list or modify_language or modify_snapshot_policy:
                            self.modify_vserver(
                                modify_protocols, modify_aggr_list, modify_language, modify_snapshot_policy)
elif self.state == 'absent':
self.delete_vserver()
self.module.exit_json(changed=changed)
def main():
v = NetAppOntapSVM()
v.apply()
if __name__ == '__main__':
main()
| trondhindenes/ansible | lib/ansible/modules/storage/netapp/na_ontap_svm.py | Python | gpl-3.0 | 18,691 | 0.001498 |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem303.py
#
# Multiples with small digits
# ===========================
# Published on Saturday, 25th September 2010, 10:00 pm
#
# For a positive integer n, define f(n) as the least positive multiple of n
# that, written in base 10, uses only digits less than or equal to 2. Thus
# f(2)=2, f(3)=12, f(7)=21, f(42)=210, f(89)=1121222.
# Also, the sum of f(n)/n for 1 <= n <= 100 equals 11363107.
# Find the sum of f(n)/n for 1 <= n <= 10000.
import projecteuler as pe
def main():
pass
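# Hedged sketch (added for illustration; not part of the original stub): one way
# to compute f(n), the least positive multiple of n whose decimal digits are all
# <= 2, is a breadth-first search over remainders modulo n, building the number
# as a string so arbitrarily large results stay exact.
def f(n):
    """Return the least positive multiple of n written only with digits 0, 1, 2."""
    from collections import deque
    seen = set()
    queue = deque()
    for d in '12':  # the leading digit cannot be 0
        r = int(d) % n
        if r == 0:
            return int(d)
        if r not in seen:
            seen.add(r)
            queue.append((r, d))
    while queue:
        r, s = queue.popleft()  # FIFO order == increasing numeric order
        for d in '012':
            nr = (r * 10 + int(d)) % n
            if nr == 0:
                return int(s + d)
            if nr not in seen:
                seen.add(nr)
                queue.append((nr, s + d))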
if __name__ == "__main__":
main()
| olduvaihand/ProjectEuler | src/python/problem303.py | Python | mit | 475 | 0.004228 |
import logging, os, marshal, json, cPickle, time, copy, time, datetime, re, urllib, httplib
from base64 import b64encode, b64decode
from lib.crypt import encrypt, decrypt
from uuid import uuid4
from node import Node, InvalidIdentity
class FriendNode(Node):
def __init__(self, *args, **kwargs):
if 'identityData' in kwargs:
identityData = kwargs['identityData']
else:
identityData = args[0]
kwargs['identityData'] = identityData
try:
newIdentity = args[1]
except:
newIdentity = None
if type(kwargs['identityData']) == type(u'') or type(kwargs['identityData']) == type(''):
identityData = self.getManagedNode(kwargs['identityData'])
elif type(kwargs['identityData']) == type({}):
identityData = kwargs['identityData']
else:
raise InvalidIdentity("A valid server Identity was not given nor was a public_key specified.")
super(FriendNode, self).__init__(*args, **kwargs)
self.set('routed_public_key', kwargs['acceptor']['public_key'], True)
self.set('source_indexer_key', kwargs['requester']['public_key'], True)
if 'connector' in kwargs:
self.set('public_key', kwargs['connector']['public_key'])
self.set('private_key', kwargs['connector']['private_key'])
self.setModifiedToNow()
def validIdentity(self, data):
try:
if 'public_key' in data \
and 'private_key' in data \
and 'modified' in data \
and 'data' in data \
and 'friends' in data['data'] \
and 'identity' in data['data'] \
and 'name' in data['data']['identity'] \
and 'avatar' in data['data']['identity']:
return True
else:
raise InvalidIdentity("invalid identity dictionary for identity")
except InvalidIdentity:
raise
class RoutedFriendNode(FriendNode):
def __init__(self, *args, **kwargs):
if 'identityData' in kwargs:
identityData = kwargs['identityData']
else:
identityData = args[0]
kwargs['identityData'] = identityData
try:
newIdentity = args[1]
except:
newIdentity = None
if type(kwargs['identityData']) == type(u'') or type(kwargs['identityData']) == type(''):
identityData = self.getFriend(kwargs['identityData'])
elif type(kwargs['identityData']) == type({}):
identityData = kwargs['identityData']
else:
raise InvalidIdentity("A valid server Identity was not given nor was a public_key specified.")
super(RoutedFriendNode, self).__init__(*args, **kwargs)
def validIdentity(self, data):
try:
if 'public_key' in data \
and 'private_key' in data \
and 'source_indexer_key' in data \
and 'routed_public_key' in data \
and 'modified' in data \
and 'data' in data \
and 'friends' in data['data'] \
and 'identity' in data['data'] \
and 'name' in data['data']['identity']:
return True
else:
raise InvalidIdentity("invalid identity dictionary for identity")
except InvalidIdentity:
raise
| pdxwebdev/yadapy | yadapy/friendnode.py | Python | gpl-3.0 | 3,507 | 0.013117 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""music processing module
A module for running the MUltiple SIgnal Classification (MUSIC) algorithm for the detection of
MSTIDs and wave-like structures in SuperDARN data.
For usage examples, please see the iPython notebooks included in the docs folder of the DaViTPy distribution.
References
----------
See Samson et al. [1990] and Bristow et al. [1994] for details regarding the MUSIC algorithm and SuperDARN-observed MSTIDs.
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Samson, J. C., R. A. Greenwald, J. M. Ruohoniemi, A. Frey, and K. B. Baker (1990), Goose Bay radar observations of Earth-reflected,
atmospheric gravity waves in the high-latitude ionosphere, J. Geophys. Res., 95(A6), 7693-7709, doi:10.1029/JA095iA06p07693.
Module author:: Nathaniel A. Frissell, Fall 2013
Functions
--------------------------------------------------------------------------------------------------------------------------
getDataSet get music data object from music array object
stringify_signal convert dictionary to a string
stringify_signal_list convert list of dictionaries into strings
beamInterpolation interpolate music array object along beams
defineLimits set limits for chosen data set
checkDataQuality mark data as bad base on radar operations
applyLimits remove data outside of limits
determineRelativePosition find center of cell in music array object
timeInterpolation interpolate music array object along time
filterTimes calculate time range for data set
detrend linear detrend of music array/data object
nan_to_num convert undefined numbers to finite numbers
windowData apply window to music array object
calculateFFT calculate spectrum of an object
calculateDlm calculate the cross-spectral matrix of a musicArray/musicDataObj object.
calculateKarr calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
simulator insert a simulated MSTID into the processing chain.
scale_karr scale/normalize kArr for plotting and signal detection.
detectSignals detect local maxima of signals
add_signal add signal to detected signal list
del_signal remove signal from detected signal list
--------------------------------------------------------------------------------------------------------------------------
Classes
-----------------------------------------------------------
emptyObj create an empty object
SigDetect information about detected signals
musicDataObj basic container for holding MUSIC data.
musicArray container object for holding musicDataObj's
filter a filter object for VT sig/siStruct objects
-----------------------------------------------------------
"""
import numpy as np
import datetime
import time
import copy
import logging
Re = 6378 #Earth radius
def getDataSet(dataObj,dataSet='active'):
"""Returns a specified musicDataObj from a musicArray object. If the musicArray object has the exact attribute
specified in the dataSet keyword, then that attribute is returned. If not, all attributes of the musicArray object
    will be searched for attributes which contain the string specified in the dataSet keyword. If more than one is
found, the last attribute of a sorted list will be returned. If no attributes are found which contain the specified
string, the 'active' dataSet is returned.
Parameters
----------
dataObj : musicArray
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj object
Written by Nathaniel A. Frissell, Fall 2013
"""
lst = dir(dataObj)
if dataSet not in lst:
tmp = []
for item in lst:
if dataSet in item:
tmp.append(item)
if len(tmp) == 0:
dataSet = 'active'
else:
tmp.sort()
dataSet = tmp[-1]
currentData = getattr(dataObj,dataSet)
return currentData
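# Hedged usage sketch (illustrative; `dataObj` stands for any populated musicArray):
#
#   ds = getDataSet(dataObj, 'active')            # exact attribute name, returned directly
#   ds = getDataSet(dataObj, 'beamInterpolated')  # substring match, last entry of the sorted matches
#   ds = getDataSet(dataObj, 'noSuchName')        # no match, falls back to the 'active' data set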
class emptyObj(object):
"""Create an empty object.
"""
def __init__(self):
pass
def stringify_signal(sig):
"""Method to convert a signal information dictionary into a string.
Parameters
----------
sig : dict
Information about a detected signal.
Returns
-------
sigInfo : str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
sigInfo = {}
if sig.has_key('order'):
sigInfo['order'] = '%d' % sig['order'] #Order of signals by strength as detected by image detection algorithm
if sig.has_key('kx'):
sigInfo['kx'] = '%.5f' % sig['kx']
if sig.has_key('ky'):
sigInfo['ky'] = '%.5f' % sig['ky']
if sig.has_key('k'):
sigInfo['k'] = '%.3f' % sig['k']
if sig.has_key('lambda'):
if np.isinf(sig['lambda']):
sigInfo['lambda'] = 'inf'
else:
sigInfo['lambda'] = '%d' % np.round(sig['lambda']) # km
if sig.has_key('lambda_x'):
if np.isinf(sig['lambda_x']):
sigInfo['lambda_x'] = 'inf'
else:
sigInfo['lambda_x'] = '%d' % np.round(sig['lambda_x']) # km
if sig.has_key('lambda_y'):
if np.isinf(sig['lambda_y']):
sigInfo['lambda_y'] = 'inf'
else:
sigInfo['lambda_y'] = '%d' % np.round(sig['lambda_y']) # km
if sig.has_key('azm'):
sigInfo['azm'] = '%d' % np.round(sig['azm']) # degrees
if sig.has_key('freq'):
sigInfo['freq'] = '%.2f' % (sig['freq']*1000.) # mHz
if sig.has_key('period'):
sigInfo['period'] = '%d' % np.round(sig['period']/60.) # minutes
if sig.has_key('vel'):
if np.isinf(np.round(sig['vel'])):
sigInfo['vel'] = 'Inf'
else:
sigInfo['vel'] = '%d' % np.round(sig['vel']) # km/s
if sig.has_key('area'):
sigInfo['area'] = '%d' % sig['area'] # Pixels
if sig.has_key('max'):
sigInfo['max'] = '%.4f' % sig['max'] # Value from kArr in arbitrary units, probably with some normalization
if sig.has_key('maxpos'):
sigInfo['maxpos'] = str(sig['maxpos']) # Index position in kArr of maximum value.
if sig.has_key('labelInx'):
sigInfo['labelInx'] = '%d' % sig['labelInx'] # Label value from image processing
if sig.has_key('serialNr'):
sigInfo['serialNr'] = '%d' % sig['serialNr'] # Label value from image processing
return sigInfo
def stringify_signal_list(signal_list,sort_key='order'):
"""Method to convert a list of signal dictionaries into strings.
Parameters
----------
signal_list : list of dict
Information about a detected signal.
sort_key : Optional[string]
Dictionary key to sort on, or None for no sort. 'order' will sort the signal list
from strongest signal to weakest, as determined by the MUSIC algorithm.
Returns
-------
stringInfo : list of str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
string_info = []
if sort_key is not None:
orders = [x[sort_key] for x in signal_list]
orders.sort()
for order in orders:
for sig in signal_list:
if sig[sort_key] == order:
string_info.append(stringify_signal(sig))
signal_list.remove(sig)
else:
for sig in signal_list:
string_info.append(stringify_signal(sig))
return string_info
class SigDetect(object):
"""Class to hold information about detected signals.
Methods
-------
string
reorder
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self):
pass
def string(self):
"""Method to convert a list of signal dictionaries into strings.
Returns
-------
stringInfo : list of str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
return stringify_signal_list(self.info)
def reorder(self):
"""Method to sort items in .info by signal maximum value (from the scaled kArr) and update nrSignals.
Written by Nathaniel A. Frissell, Fall 2013
"""
#Do the sorting...
from operator import itemgetter
newlist = sorted(self.info,key=itemgetter('max'),reverse=True)
#Put in the order numbers...
order = 1
for item in newlist:
item['order'] = order
order = order + 1
#Save the list to the dataObj...
self.info = newlist
#Update the nrSigs
self.nrSigs = len(newlist)
class musicDataObj(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
time : list of datetime.datetime
list of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
comment : Optional[str]
String to be appended to the history of this object
parent : Optional[musicArray]
reference to parent musicArray object
**metadata
keywords sent to matplot lib, etc.
Attributes
----------
time : numpy.array of datetime.datetime
numpy array of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
metadata : dict
keywords sent to matplot lib, etc.
history : dict
Methods
---------
copy
setActive
nyquistFrequency
samplePeriod
applyLimits
setMetadata
printMetadata
appendHistory
printHistory
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self, time, data, fov=None, comment=None, parent=0, **metadata):
self.parent = parent
self.time = np.array(time)
self.data = np.array(data)
self.fov = fov
self.metadata = {}
for key in metadata: self.metadata[key] = metadata[key]
self.history = {datetime.datetime.now():comment}
def copy(self,newsig,comment):
"""Copy a musicDataObj object. This deep copies data and metadata, updates the serial
number, and logs a comment in the history. Methods such as plot are kept as a reference.
Parameters
----------
newsig : str
Name for the new musicDataObj object.
comment : str
Comment describing the new musicDataObj object.
Returns
-------
newsigobj : musicDataObj
Copy of the original musicDataObj with new name and history entry.
Written by Nathaniel A. Frissell, Fall 2013
"""
serial = self.metadata['serial'] + 1
newsig = '_'.join(['DS%03d' % serial,newsig])
setattr(self.parent,newsig,copy.copy(self))
newsigobj = getattr(self.parent,newsig)
newsigobj.time = copy.deepcopy(self.time)
newsigobj.data = copy.deepcopy(self.data)
newsigobj.fov = copy.deepcopy(self.fov)
newsigobj.metadata = copy.deepcopy(self.metadata)
newsigobj.history = copy.deepcopy(self.history)
newsigobj.metadata['dataSetName'] = newsig
newsigobj.metadata['serial'] = serial
newsigobj.history[datetime.datetime.now()] = '['+newsig+'] '+comment
return newsigobj
def setActive(self):
"""Sets this signal as the currently active signal.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.parent.active = self
def nyquistFrequency(self,timeVec=None):
"""Calculate the Nyquist frequency of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
nq : float
Nyquist frequency of the signal in Hz.
Written by Nathaniel A. Frissell, Fall 2013
"""
dt = self.samplePeriod(timeVec=timeVec)
nyq = float(1. / (2*dt))
return nyq
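    # Worked example (illustrative): for a regular 120 s scan cadence,
    # samplePeriod() returns 120.0, so nyquistFrequency() returns
    # 1.0 / (2 * 120.0) ~= 0.00417 Hz (about 4.2 mHz).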
def samplePeriod(self,timeVec=None):
"""Calculate the sample period of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
samplePeriod : float
samplePeriod: sample period of signal in seconds.
Written by Nathaniel A. Frissell, Fall 2013
"""
if timeVec == None: timeVec = self.time
diffs = np.diff(timeVec)
diffs_unq = np.unique(diffs)
self.diffs = diffs_unq
if len(diffs_unq) == 1:
samplePeriod = diffs[0].total_seconds()
else:
diffs_sec = np.array([x.total_seconds() for x in diffs])
maxDt = np.max(diffs_sec)
avg = np.mean(diffs_sec)
md = self.metadata
warn = 'WARNING'
if md.has_key('title'): warn = ' '.join([warn,'FOR','"'+md['title']+'"'])
logging.warning(warn + ':')
logging.warning(' Date time vector is not regularly sampled!')
logging.warning(' Maximum difference in sampling rates is ' + str(maxDt) + ' sec.')
logging.warning(' Using average sampling period of ' + str(avg) + ' sec.')
samplePeriod = avg
return samplePeriod
def applyLimits(self,rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment='Limits Applied'):
"""Removes data outside of the rangeLimits, gateLimits, and timeLimits boundaries.
Parameters
----------
        rangeLimits : Optional[iterable]
            Two-element array defining the maximum and minimum slant ranges to use. [km]
        gateLimits : Optional[iterable]
            Two-element array defining the maximum and minimum gates to use.
timeLimits : Optional[]
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Returns
-------
newMusicDataObj : musicDataObj
New musicDataObj. The musicDataObj is also stored in it's parent musicArray object.
Written by Nathaniel A. Frissell, Fall 2013
"""
return applyLimits(self.parent,self.metadata['dataSetName'],rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits,newDataSetName=newDataSetName,comment=comment)
def setMetadata(self,**metadata):
"""Adds information to the current musicDataObj's metadata dictionary.
Metadata affects various plotting parameters and signal processing routinges.
Parameters
----------
**metadata :
keywords sent to matplot lib, etc.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.metadata = dict(self.metadata.items() + metadata.items())
def printMetadata(self):
"""Nicely print all of the metadata associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.metadata.keys()
keys.sort()
for key in keys:
print key+':',self.metadata[key]
def appendHistory(self,comment):
"""Add an entry to the processing history dictionary of the current musicDataObj object.
Parameters
----------
comment : string
            Information to add to the history dictionary.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.history[datetime.datetime.now()] = '['+self.metadata['dataSetName']+'] '+comment
def printHistory(self):
"""Nicely print all of the processing history associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.history.keys()
keys.sort()
for key in keys:
print key,self.history[key]
class musicArray(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
myPtr : pydarn.sdio.radDataTypes.radDataPtr
contains the pipeline to the data we are after
sTime : Optional[datetime.datetime]
start time UT (if None myPtr.sTime is used)
eTime : Optional[datetime.datetime]
end time UT (if None myPtr.eTime is used)
param : Optional[str]
Radar FIT parameter to load and process. Any appropriate attribute of the
FIT data structure is allowed.
gscat : Optional[int]
Ground scatter flag.
0: all backscatter data
1: ground backscatter only
2: ionospheric backscatter only
3: all backscatter data with a ground backscatter flag.
fovElevation : Optional[float]
Passed directly to pydarn.radar.radFov.fov()
fovModel : Optional[str]
Scatter mapping model.
'GS': Ground Scatter Mapping Model. See Bristow et al. [1994]
'IS': Standard SuperDARN scatter mapping model.
fovCoords : Optional[str]
        Map coordinate system. WARNING: 'geo' is currently the only tested coordinate system.
    full_array : Optional[bool]
        If True, make the data array span the full beam and gate dimensions listed in the hdw.dat file.
        If False, truncate the array to the maximum dimensions for which there is actually data.
False will save space without throwing out any data, but sometimes it is easier to work
with the full-size array.
Attributes
----------
messages : list
prm :
Methods
-------
get_data_sets
Example
-------
#Set basic event parameters.
rad ='wal'
sTime = datetime.datetime(2011,5,9,8,0)
eTime = datetime.datetime(2011,5,9,19,0)
#Connect to a SuperDARN data source.
myPtr = pydarn.sdio.radDataOpen(sTime,rad,eTime=eTime)
#Create the musicArray Object.
dataObj = music.musicArray(myPtr,fovModel='GS')
References
----------
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self,myPtr,sTime=None,eTime=None,param='p_l',gscat=1,
fovElevation=None,fovModel='GS',fovCoords='geo',full_array=False):
from davitpy import pydarn
# Create a list that can be used to store top-level messages.
self.messages = []
no_data_message = 'No data for this time period.'
# If no data, report and return.
if myPtr is None:
self.messages.append(no_data_message)
return
if sTime == None: sTime = myPtr.sTime
if eTime == None: eTime = myPtr.eTime
scanTimeList = []
dataList = []
cpidList = []
#Subscripts of columns in the dataList/dataArray
scanInx = 0
dateInx = 1
beamInx = 2
gateInx = 3
dataInx = 4
beamTime = sTime
scanNr = np.uint64(0)
fov = None
# Create a place to store the prm data.
prm = emptyObj()
prm.time = []
prm.mplgs = []
prm.nave = []
prm.noisesearch = []
prm.scan = []
prm.smsep = []
prm.mplgexs = []
prm.xcf = []
prm.noisesky = []
prm.rsep = []
prm.mppul = []
prm.inttsc = []
prm.frang = []
prm.bmazm = []
prm.lagfr = []
prm.ifmode = []
prm.noisemean = []
prm.tfreq = []
prm.inttus = []
prm.rxrise = []
prm.mpinc = []
prm.nrang = []
while beamTime < eTime:
#Load one scan into memory.
# myScan = pydarn.sdio.radDataRead.radDataReadScan(myPtr)
myScan = myPtr.readScan()
if myScan == None: break
goodScan = False # This flag turns to True as soon as good data is found for the scan.
for myBeam in myScan:
#Calculate the field of view if it has not yet been calculated.
if fov == None:
radStruct = pydarn.radar.radStruct.radar(radId=myPtr.stid)
site = pydarn.radar.radStruct.site(radId=myPtr.stid,dt=sTime)
fov = pydarn.radar.radFov.fov(frang=myBeam.prm.frang, rsep=myBeam.prm.rsep, site=site,elevation=fovElevation,model=fovModel,coords=fovCoords)
#Get information from each beam in the scan.
beamTime = myBeam.time
bmnum = myBeam.bmnum
# Save all of the radar operational parameters.
prm.time.append(beamTime)
prm.mplgs.append(myBeam.prm.mplgs)
prm.nave.append(myBeam.prm.nave)
prm.noisesearch.append(myBeam.prm.noisesearch)
prm.scan.append(myBeam.prm.scan)
prm.smsep.append(myBeam.prm.smsep)
prm.mplgexs.append(myBeam.prm.mplgexs)
prm.xcf.append(myBeam.prm.xcf)
prm.noisesky.append(myBeam.prm.noisesky)
prm.rsep.append(myBeam.prm.rsep)
prm.mppul.append(myBeam.prm.mppul)
prm.inttsc.append(myBeam.prm.inttsc)
prm.frang.append(myBeam.prm.frang)
prm.bmazm.append(myBeam.prm.bmazm)
prm.lagfr.append(myBeam.prm.lagfr)
prm.ifmode.append(myBeam.prm.ifmode)
prm.noisemean.append(myBeam.prm.noisemean)
prm.tfreq.append(myBeam.prm.tfreq)
prm.inttus.append(myBeam.prm.inttus)
prm.rxrise.append(myBeam.prm.rxrise)
prm.mpinc.append(myBeam.prm.mpinc)
prm.nrang.append(myBeam.prm.nrang)
#Get the fitData.
fitDataList = getattr(myBeam.fit,param)
slist = getattr(myBeam.fit,'slist')
gflag = getattr(myBeam.fit,'gflg')
if len(slist) > 1:
for (gate,data,flag) in zip(slist,fitDataList,gflag):
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
elif len(slist) == 1:
gate,data,flag = (slist[0],fitDataList[0],gflag[0])
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
else:
continue
if goodScan:
#Determine the start time for each scan and save to list.
scanTimeList.append(min([x.time for x in myScan]))
#Advance to the next scan number.
scanNr = scanNr + 1
#Convert lists to numpy arrays.
timeArray = np.array(scanTimeList)
dataListArray = np.array(dataList)
# If no data, report and return.
if dataListArray.size == 0:
self.messages.append(no_data_message)
return
#Figure out what size arrays we need and initialize the arrays...
nrTimes = np.max(dataListArray[:,scanInx]) + 1
if full_array:
nrBeams = fov.beams.max() + 1
nrGates = fov.gates.max() + 1
else:
nrBeams = np.max(dataListArray[:,beamInx]) + 1
nrGates = np.max(dataListArray[:,gateInx]) + 1
#Make sure the FOV is the same size as the data array.
if len(fov.beams) != nrBeams:
fov.beams = fov.beams[0:nrBeams]
fov.latCenter = fov.latCenter[0:nrBeams,:]
fov.lonCenter = fov.lonCenter[0:nrBeams,:]
fov.slantRCenter = fov.slantRCenter[0:nrBeams,:]
fov.latFull = fov.latFull[0:nrBeams+1,:]
fov.lonFull = fov.lonFull[0:nrBeams+1,:]
fov.slantRFull = fov.slantRFull[0:nrBeams+1,:]
if len(fov.gates) != nrGates:
fov.gates = fov.gates[0:nrGates]
fov.latCenter = fov.latCenter[:,0:nrGates]
fov.lonCenter = fov.lonCenter[:,0:nrGates]
fov.slantRCenter = fov.slantRCenter[:,0:nrGates]
fov.latFull = fov.latFull[:,0:nrGates+1]
fov.lonFull = fov.lonFull[:,0:nrGates+1]
fov.slantRFull = fov.slantRFull[:,0:nrGates+1]
#Convert the dataListArray into a 3 dimensional array.
dataArray = np.ndarray([nrTimes,nrBeams,nrGates])
dataArray[:] = np.nan
for inx in range(len(dataListArray)):
dataArray[dataListArray[inx,scanInx],dataListArray[inx,beamInx],dataListArray[inx,gateInx]] = dataListArray[inx,dataInx]
#Make metadata block to hold information about the processing.
metadata = {}
metadata['dType'] = myPtr.dType
metadata['stid'] = myPtr.stid
metadata['name'] = radStruct.name
metadata['code'] = radStruct.code
metadata['fType'] = myPtr.fType
metadata['cp'] = myPtr.cp
metadata['channel'] = myPtr.channel
metadata['sTime'] = sTime
metadata['eTime'] = eTime
metadata['param'] = param
metadata['gscat'] = gscat
metadata['elevation'] = fovElevation
metadata['model'] = fovModel
metadata['coords'] = fovCoords
dataSet = 'DS000_originalFit'
metadata['dataSetName'] = dataSet
metadata['serial'] = 0
comment = '['+dataSet+'] '+ 'Original Fit Data'
#Save data to be returned as self.variables
setattr(self,dataSet,musicDataObj(timeArray,dataArray,fov=fov,parent=self,comment=comment))
newSigObj = getattr(self,dataSet)
setattr(newSigObj,'metadata',metadata)
#Set the new data active.
newSigObj.setActive()
#Make prm data part of the object.
self.prm = prm
def get_data_sets(self):
"""Return a sorted list of musicDataObj's contained in this musicArray.
Returns
-------
dataSets : list of str
Names of musicDataObj's contained in this musicArray.
Written by Nathaniel A. Frissell, Fall 2013
"""
attrs = dir(self)
dataSets = []
for item in attrs:
if item.startswith('DS'):
dataSets.append(item)
dataSets.sort()
return dataSets
def beamInterpolation(dataObj,dataSet='active',newDataSetName='beamInterpolated',comment='Beam Linear Interpolation'):
"""Interpolates the data in a musicArray object along the beams of the radar. This method will ensure that no
    range gates are missing data. Ranges outside of metadata['gateLimits'] will be set to 0.
The result is stored as a new musicDataObj in the given musicArray object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy.interpolate import interp1d
currentData = getDataSet(dataObj,dataSet)
nrTimes = len(currentData.time)
nrBeams = len(currentData.fov.beams)
nrGates = len(currentData.fov.gates)
interpArr = np.zeros([nrTimes,nrBeams,nrGates])
for tt in range(nrTimes):
for bb in range(nrBeams):
rangeVec = currentData.fov.slantRCenter[bb,:]
input_x = copy.copy(rangeVec)
input_y = currentData.data[tt,bb,:]
#If metadata['gateLimits'], select only those measurements...
if currentData.metadata.has_key('gateLimits'):
limits = currentData.metadata['gateLimits']
gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates <= limits[1]))[0]
if len(gateInx) < 2: continue
input_x = input_x[gateInx]
input_y = input_y[gateInx]
good = np.where(np.isfinite(input_y))[0]
if len(good) < 2: continue
input_x = input_x[good]
input_y = input_y[good]
intFn = interp1d(input_x,input_y,bounds_error=False,fill_value=0)
interpArr[tt,bb,:] = intFn(rangeVec)
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = interpArr
newDataSet.setActive()
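# Hedged usage sketch (illustrative): interpolate the active data set along each
# beam; the result is stored as a new data set and made active.
#
#   beamInterpolation(dataObj)              # operates on the 'active' data set by default
#   dataObj.active.metadata['dataSetName']  # e.g. 'DS001_beamInterpolated'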
def defineLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,beamLimits=None,timeLimits=None):
"""Sets the range, gate, beam, and time limits for the chosen data set. This method only changes metadata;
it does not create a new data set or alter the data in any way. If you specify rangeLimits, they will be changed to correspond
with the center value of the range cell. Gate limits always override range limits.
Use the applyLimits() method to remove data outside of the data limits.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
rangeLimits : Optional[iterable]
        Two-element array defining the maximum and minimum slant ranges to use. [km]
    gateLimits : Optional[iterable]
        Two-element array defining the maximum and minimum gates to use.
    beamLimits : Optional[iterable]
        Two-element array defining the maximum and minimum beams to use.
    timeLimits : Optional[iterable]
        Two-element array of datetime.datetime objects defining the maximum and minimum times to use.
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
try:
if (rangeLimits != None) or (gateLimits != None):
if (rangeLimits != None) and (gateLimits == None):
inx = np.where(np.logical_and(currentData.fov.slantRCenter >= rangeLimits[0],currentData.fov.slantRCenter <= rangeLimits[1]))
gateLimits = [np.min(inx[1][:]),np.max(inx[1][:])]
if gateLimits != None:
rangeMin = np.int(np.min(currentData.fov.slantRCenter[:,gateLimits[0]]))
rangeMax = np.int(np.max(currentData.fov.slantRCenter[:,gateLimits[1]]))
rangeLimits = [rangeMin,rangeMax]
currentData.metadata['gateLimits'] = gateLimits
currentData.metadata['rangeLimits'] = rangeLimits
if beamLimits != None:
currentData.metadata['beamLimits'] = beamLimits
if timeLimits != None:
currentData.metadata['timeLimits'] = timeLimits
except:
logging.warning("An error occured while defining limits. No limits set. Check your input values.")
def checkDataQuality(dataObj,dataSet='active',max_off_time=10,sTime=None,eTime=None):
"""Mark the data set as bad (metadata['good_period'] = False) if the radar was not operational within the chosen time period
for a specified length of time.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
max_off_time : Optional[int/float]
Maximum length in minutes radar may remain off.
sTime : Optional[datetime.datetime]
Starting time of checking period. If None, min(currentData.time) is used.
eTime : Optional[datetime.datetime]
End time of checking period. If None, max(currentData.time) is used.
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
if sTime is None:
sTime = np.min(currentData.time)
if eTime is None:
eTime = np.max(currentData.time)
time_vec = currentData.time[np.logical_and(currentData.time > sTime, currentData.time < eTime)]
time_vec = np.concatenate(([sTime],time_vec,[eTime]))
max_diff = np.max(np.diff(time_vec))
if max_diff > datetime.timedelta(minutes=max_off_time):
currentData.setMetadata(good_period=False)
else:
currentData.setMetadata(good_period=True)
return dataObj
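# Hedged usage sketch (illustrative): flag the active data set as bad if the radar
# was off for more than 10 minutes anywhere in the period of interest.
#
#   checkDataQuality(dataObj, max_off_time=10)
#   dataObj.active.metadata['good_period']   # True or False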
def applyLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment=None):
"""Removes data outside of the rangeLimits and gateLimits boundaries.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
rangeLimits : Optional[iterable]
        Two-element array defining the maximum and minimum slant ranges to use. [km]
    gateLimits : Optional[iterable]
        Two-element array defining the maximum and minimum gates to use.
    beamLimits : Optional[iterable]
        Two-element array defining the maximum and minimum beams to use.
    timeLimits : Optional[iterable]
        Two-element array of datetime.datetime objects defining the maximum and minimum times to use.
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Returns
-------
newData : musicDataObj
Processed version of input musicDataObj (if succeeded), or the original musicDataObj (if failed).
Written by Nathaniel A. Frissell, Fall 2013
"""
if (rangeLimits != None) or (gateLimits != None) or (timeLimits != None):
defineLimits(dataObj,dataSet='active',rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits)
currentData = getDataSet(dataObj,dataSet)
try:
#Make a copy of the current data set.
commentList = []
if (currentData.metadata.has_key('timeLimits') == False and
currentData.metadata.has_key('beamLimits') == False and
currentData.metadata.has_key('gateLimits') == False):
return currentData
newData = currentData.copy(newDataSetName,comment)
#Apply the gateLimits
if currentData.metadata.has_key('gateLimits'):
limits = currentData.metadata['gateLimits']
gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates<= limits[1]))[0]
newData.data = newData.data[:,:,gateInx]
newData.fov.gates = newData.fov.gates[gateInx]
newData.fov.latCenter = newData.fov.latCenter[:,gateInx]
newData.fov.lonCenter = newData.fov.lonCenter[:,gateInx]
newData.fov.slantRCenter = newData.fov.slantRCenter[:,gateInx]
#Update the full FOV.
#This works as long as we look at only consecutive gates. If we ever do something where we are not looking at consecutive gates
#(typically for computational speed reasons), we will have to do something else.
gateInxFull = np.append(gateInx,gateInx[-1]+1) #We need that extra gate since this is the full FOV.
newData.fov.latFull = newData.fov.latFull[:,gateInxFull]
newData.fov.lonFull = newData.fov.lonFull[:,gateInxFull]
newData.fov.slantRFull = newData.fov.slantRFull[:,gateInxFull]
commentList.append('gate: %i,%i' % tuple(limits))
rangeLim = (np.min(newData.fov.slantRCenter), np.max(newData.fov.slantRCenter))
commentList.append('range [km]: %i,%i' % rangeLim)
#Remove limiting item from metadata.
newData.metadata.pop('gateLimits')
if newData.metadata.has_key('rangeLimits'): newData.metadata.pop('rangeLimits')
#Apply the beamLimits.
if currentData.metadata.has_key('beamLimits'):
limits = currentData.metadata['beamLimits']
beamInx = np.where(np.logical_and(currentData.fov.beams >= limits[0],currentData.fov.beams <= limits[1]))[0]
newData.data = newData.data[:,beamInx,:]
newData.fov.beams = newData.fov.beams[beamInx]
newData.fov.latCenter = newData.fov.latCenter[beamInx,:]
newData.fov.lonCenter = newData.fov.lonCenter[beamInx,:]
newData.fov.slantRCenter = newData.fov.slantRCenter[beamInx,:]
#Update the full FOV.
#This works as long as we look at only consecutive gates. If we ever do something where we are not looking at consecutive gates
#(typically for computational speed reasons), we will have to do something else.
beamInxFull = np.append(beamInx,beamInx[-1]+1) #We need that extra beam since this is the full FOV.
newData.fov.latFull = newData.fov.latFull[beamInxFull,:]
newData.fov.lonFull = newData.fov.lonFull[beamInxFull,:]
newData.fov.slantRFull = newData.fov.slantRFull[beamInxFull,:]
commentList.append('beam: %i,%i' % tuple(limits))
#Remove limiting item from metadata.
newData.metadata.pop('beamLimits')
#Apply the time limits.
if currentData.metadata.has_key('timeLimits'):
limits = currentData.metadata['timeLimits']
timeInx = np.where(np.logical_and(currentData.time >= limits[0],currentData.time <= limits[1]))[0]
newData.data = newData.data[timeInx,:,:]
newData.time = newData.time[timeInx]
commentList.append('time: '+limits[0].strftime('%Y-%m-%d/%H:%M,')+limits[1].strftime('%Y-%m-%d/%H:%M'))
#Remove limiting item from metadata.
newData.metadata.pop('timeLimits')
#Update the history with what limits were applied.
comment = 'Limits Applied'
commentStr = '['+newData.metadata['dataSetName']+'] '+comment+': '+'; '.join(commentList)
key = max(newData.history.keys())
newData.history[key] = commentStr
logging.debug(commentStr)
newData.setActive()
return newData
except:
if hasattr(dataObj,newDataSetName): delattr(dataObj,newDataSetName)
        logging.warning('Limits not applied.')
return currentData
def determineRelativePosition(dataObj,dataSet='active',altitude=250.):
"""Finds the center cell of the field-of-view of a musicArray data object.
The range, azimuth, x-range, and y-range from the center to each cell in the FOV
is calculated and saved to the FOV object. The following objects are added to
dataObj.dataSet:
fov.relative_centerInx: [beam, gate] index of the center cell
fov.relative_azm: Azimuth relative to center cell [deg]
fov.relative_range: Range relative to center cell [km]
fov.relative_x: X-range relative to center cell [km]
fov.relative_y: Y-range relative to center cell [km]
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
altitude : Optional[float]
altitude added to Re = 6378.1 km [km]
Returns
-------
None
Written by Nathaniel A. Frissell, Fall 2013
"""
from davitpy import utils
#Get the chosen dataset.
currentData = getDataSet(dataObj,dataSet)
#Determine center beam.
ctrBeamInx = len(currentData.fov.beams)/2
ctrGateInx = len(currentData.fov.gates)/2
currentData.fov.relative_centerInx = [ctrBeamInx, ctrGateInx]
#Set arrays of lat1/lon1 to the center cell value. Use this to calculate all other positions
#with numpy array math.
lat1 = np.zeros_like(currentData.fov.latCenter)
lon1 = np.zeros_like(currentData.fov.latCenter)
lat1[:] = currentData.fov.latCenter[ctrBeamInx,ctrGateInx]
lon1[:] = currentData.fov.lonCenter[ctrBeamInx,ctrGateInx]
#Make lat2/lon2 the center position array of the dataset.
lat2 = currentData.fov.latCenter
lon2 = currentData.fov.lonCenter
#Calculate the azimuth and distance from the centerpoint to the endpoint.
azm = utils.greatCircleAzm(lat1,lon1,lat2,lon2)
dist = (Re + altitude)*utils.greatCircleDist(lat1,lon1,lat2,lon2)
#Save calculated values to the current data object, as well as calculate the
    #X and Y relative positions of each cell.
currentData.fov.relative_azm = azm
currentData.fov.relative_range = dist
currentData.fov.relative_x = dist * np.sin(np.radians(azm))
currentData.fov.relative_y = dist * np.cos(np.radians(azm))
return None
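# Illustrative usage sketch (comment only, not executed; assumes `dataObj` is a
# musicArray whose active data set has a populated fov attribute):
#
#     determineRelativePosition(dataObj, dataSet='active', altitude=250.)
#     ctr_beam, ctr_gate = dataObj.active.fov.relative_centerInx
#     x_km = dataObj.active.fov.relative_x   # E-W offset from the center cell [km]
#     y_km = dataObj.active.fov.relative_y   # N-S offset from the center cell [km]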
def timeInterpolation(dataObj,dataSet='active',newDataSetName='timeInterpolated',comment='Time Linear Interpolation',timeRes=10,newTimeVec=None):
"""Interpolates the data in a musicArray object to a regular time grid.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
timeRes : Optional[float]
time resolution of new time vector [seconds]
newTimeVec : Optional[list of datetime.datetime]
        Sequence of datetime.datetime objects that data will be interpolated to. This overrides timeRes.
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy.interpolate import interp1d
from davitpy import utils
currentData = getDataSet(dataObj,dataSet)
sTime = currentData.time[0]
sTime = datetime.datetime(sTime.year,sTime.month,sTime.day,sTime.hour,sTime.minute) #Make start time a round time.
fTime = currentData.time[-1]
#Create new time vector.
if newTimeVec == None:
newTimeVec = [sTime]
while newTimeVec[-1] < fTime:
newTimeVec.append(newTimeVec[-1] + datetime.timedelta(seconds=timeRes))
#Ensure that the new time vector is within the bounds of the actual data set.
newTimeVec = np.array(newTimeVec)
good = np.where(np.logical_and(newTimeVec > min(currentData.time),newTimeVec < max(currentData.time)))
newTimeVec = newTimeVec[good]
newEpochVec = utils.datetimeToEpoch(newTimeVec)
#Initialize interpolated data.
nrTimes = len(newTimeVec)
nrBeams = len(currentData.fov.beams)
nrGates = len(currentData.fov.gates)
interpArr = np.zeros([nrTimes,nrBeams,nrGates])
for rg in range(nrGates):
for bb in range(nrBeams):
input_x = currentData.time[:]
input_y = currentData.data[:,bb,rg]
good = np.where(np.isfinite(input_y))[0]
if len(good) < 2: continue
input_x = input_x[good]
input_y = input_y[good]
input_x = utils.datetimeToEpoch(input_x)
intFn = interp1d(input_x,input_y,bounds_error=False)#,fill_value=0)
interpArr[:,bb,rg] = intFn(newEpochVec)
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.time = newTimeVec
newDataSet.data = interpArr
newDataSet.setActive()
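# Illustrative usage sketch (comment only; assumes `dataObj` holds irregularly
# sampled data on its active data set):
#
#     timeInterpolation(dataObj, timeRes=10)   # resample onto a regular 10 s grid
#     newData = dataObj.active                 # the interpolated set was made active above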
def filterTimes(sTime,eTime,timeRes,numTaps):
"""The linear filter is going to cause a delay in the signal and also won't get to the end of the signal.
    This function will calculate the full time period of data that needs to be loaded in order to provide filtered data
for the event requested.
Parameters
----------
sTime : datetime.datetime
Start time of event.
eTime : datetime.datetime
End time of event.
timeRes : float
Time resolution in seconds of data to be sent to filter.
    numTaps : int
Length of the filter
Returns
-------
newSTime, newETime : datetime.datetime, datetime.datetime
Start and end times of data that needs to be fed into the filter.
Written by Nathaniel A. Frissell, Fall 2013
"""
td = datetime.timedelta(seconds=(numTaps*timeRes/2.))
newSTime = sTime - td
newETime = eTime + td
return (newSTime, newETime)
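# Illustrative usage sketch (comment only; times and filter length are hypothetical):
#
#     sTime = datetime.datetime(2010, 11, 19, 12, 0)
#     eTime = datetime.datetime(2010, 11, 19, 16, 0)
#     loadSTime, loadETime = filterTimes(sTime, eTime, timeRes=10, numTaps=101)
#     # The load window is padded by numTaps*timeRes/2 = 505 s (~8.4 min) on each side.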
class filter(object):
"""Filter a VT sig/sigStruct object and define a FIR filter object.
If only cutoff_low is defined, this is a high pass filter.
If only cutoff_high is defined, this is a low pass filter.
    If both cutoff_low and cutoff_high are defined, this is a band pass filter.
Uses scipy.signal.firwin()
High pass and band pass filters inspired by Matti Pastell's page:
http://mpastell.com/2010/01/18/fir-with-scipy/
Metadata keys:
'filter_cutoff_low' --> cutoff_low
'filter_cutoff_high' --> cutoff_high
        'filter_numtaps' --> numtaps
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
numtaps : Optional[int]
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
        If dataObj.dataSet.metadata['filter_numtaps'] is set and this keyword is None,
the metadata value will be used.
cutoff_low : Optional[float, 1D array_like or None]
High pass cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
        `nyq` must not be included in `cutoff`. If None, a high-pass filter will not
be applied.
If dataObj.dataSet.metadata['filter_cutoff_low'] is set and this keyword is None,
the metadata value will be used.
cutoff_high : Optional[float, 1D array_like, or None]
Like cutoff_low, but this is the low pass cutoff frequency of the filter.
If dataObj.dataSet.metadata['filter_cutoff_high'] is set and this keyword is None,
the metadata value will be used.
width : Optional[float]
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : Optional[string or tuple of string and parameter values]
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : Optional[bool]
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : Optional[bool]
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
        That frequency is either:
            0 (DC) if the first passband starts at 0 (i.e. pass_zero is True);
            `nyq` (the Nyquist rate) if the first passband ends at
            `nyq` (i.e. the filter is a single band highpass filter);
            center of first passband otherwise.
Attributes
----------
comment : str
cutoff_low : float, 1D array_like or None
High pass cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges).
cutoff_high : float, 1D array_like, or None
Like cutoff_low, but this is the low pass cutoff frequency of the filter.
nyq : float
the Nyquist rate
    ir : 1D numpy.array
        Impulse response (FIR filter coefficients) actually applied to the data.
Methods
-------
plotTransferFunction
plotImpulseResponse
filter
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self, dataObj, dataSet='active', numtaps=None, cutoff_low=None, cutoff_high=None, width=None, window='blackman', pass_zero=True, scale=True,newDataSetName='filtered'):
import scipy as sp
sigObj = getattr(dataObj,dataSet)
nyq = sigObj.nyquistFrequency()
#Get metadata for cutoffs and numtaps.
md = sigObj.metadata
if cutoff_high == None:
if md.has_key('filter_cutoff_high'):
cutoff_high = md['filter_cutoff_high']
if cutoff_low == None:
if md.has_key('filter_cutoff_low'):
cutoff_low = md['filter_cutoff_low']
if numtaps == None:
if md.has_key('filter_numtaps'):
numtaps = md['filter_numtaps']
else:
logging.warning('You must provide numtaps.')
return
if cutoff_high != None: #Low pass
lp = sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_high, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
d = lp
if cutoff_low != None: #High pass
hp = -sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_low, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
hp[numtaps/2] = hp[numtaps/2] + 1
d = hp
if cutoff_high != None and cutoff_low != None:
d = -(lp+hp)
d[numtaps/2] = d[numtaps/2] + 1
d = -1.*d #Needed to correct 180 deg phase shift.
if cutoff_high == None and cutoff_low == None:
logging.warning("You must define cutoff frequencies!")
return
        self.comment = ' '.join(['Filter:',window+',','Nyquist:',str(nyq),'Hz,','Cutoff:','['+str(cutoff_low)+', '+str(cutoff_high)+']','Hz,','Numtaps:',str(numtaps)])
self.cutoff_low = cutoff_low
self.cutoff_high = cutoff_high
self.nyq = nyq
self.ir = d
self.filter(dataObj,dataSet=dataSet,newDataSetName=newDataSetName)
def __str__(self):
return self.comment
def plotTransferFunction(self,xmin=0,xmax=None,ymin_mag=-150,ymax_mag=5,ymin_phase=None,ymax_phase=None,worN=None,fig=None):
        """Plot the frequency and phase response of the filter object.
Parameters
----------
xmin : Optional[float]
Minimum value for x-axis.
xmax : Optional[float]
Maximum value for x-axis.
ymin_mag : Optional[float]
Minimum value for y-axis for the frequency response plot.
ymax_mag : Optional[float]
Maximum value for y-axis for the frequency response plot.
ymin_phase : Optional[float]
Minimum value for y-axis for the phase response plot.
ymax_phase : Optional[float]
Maximum value for y-axis for the phase response plot.
worN : Optional[int]
passed to scipy.signal.freqz()
            If None, then compute at 512 frequencies around the unit circle.
            If len(self.ir) > 512, then compute at len(self.ir) frequencies around the unit circle.
            If a single integer, then compute at that many frequencies.
            Otherwise, compute the response at the frequencies given in worN.
fig : Optional[matplotlib.Figure]
Figure object on which to plot. If None, a figure will be created.
Returns
-------
fig : matplotlib.Figure
Figure object containing the plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
        import scipy as sp
        if fig == None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(20,10))
if worN == None:
if len(self.ir) > 512: worN = len(self.ir)
else: worN = None
else: pass
w,h = sp.signal.freqz(self.ir,1,worN=worN)
h_dB = 20 * np.log10(abs(h))
axis = fig.add_subplot(211)
#Compute frequency vector.
w = w/max(w) * self.nyq
axis.plot(w,h_dB,'.-')
#mp.axvline(x=self.fMax,color='r',ls='--',lw=2)
if xmin is not None: axis.set_xlim(xmin=xmin)
if xmax is not None: axis.set_xlim(xmax=xmax)
if ymin_mag is not None: axis.set_ylim(ymin=ymin_mag)
if ymax_mag is not None: axis.set_ylim(ymax=ymax_mag)
axis.set_xlabel(r'Frequency (Hz)')
        axis.set_ylabel('Magnitude (dB)')
axis.set_title(r'Frequency response')
axis = fig.add_subplot(212)
h_Phase = np.unwrap(np.arctan2(np.imag(h),np.real(h)))
axis.plot(w,h_Phase,'.-')
if xmin is not None: axis.set_xlim(xmin=xmin)
if xmax is not None: axis.set_xlim(xmax=xmax)
if ymin_phase is not None: axis.set_ylim(ymin=ymin_phase)
if ymax_phase is not None: axis.set_ylim(ymax=ymax_phase)
axis.set_ylabel('Phase (radians)')
axis.set_xlabel(r'Frequency (Hz)')
axis.set_title(r'Phase response')
fig.suptitle(self.comment)
fig.subplots_adjust(hspace=0.5)
return fig
def plotImpulseResponse(self,xmin=None,xmax=None,ymin_imp=None,ymax_imp=None,ymin_step=None,ymax_step=None,fig=None):
        """Plot the impulse and step response of the filter object.
Parameters
----------
xmin : Optional[float]
Minimum value for x-axis.
xmax : Optional[float]
Maximum value for x-axis.
ymin_imp : Optional[float]
Minimum value for y-axis for the impulse response plot.
ymax_imp : Optional[float]
Maximum value for y-axis for the impulse response plot.
ymin_step : Optional[float]
Minimum value for y-axis for the step response plot.
ymax_step : Optional[float]
Maximum value for y-axis for the step response plot.
fig : Optional[matplotlib.Figure]
Figure object on which to plot. If None, a figure will be created.
Returns
-------
fig : matplotlib.Figure
Figure object containing the plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
        import scipy as sp
        if fig == None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(20,10))
l = len(self.ir)
impulse = np.repeat(0.,l); impulse[0] =1.
x = np.arange(0,l)
response = sp.signal.lfilter(self.ir,1,impulse)
axis = fig.add_subplot(211)
axis.stem(x, response)
axis.set_ylabel('Amplitude')
axis.set_xlabel(r'n (samples)')
axis.set_title(r'Impulse response')
axis = fig.add_subplot(212)
step = np.cumsum(response)
axis.stem(x, step)
axis.set_ylabel('Amplitude')
axis.set_xlabel(r'n (samples)')
axis.set_title(r'Step response')
fig.suptitle(self.comment)
fig.subplots_adjust(hspace=0.5)
return fig
def filter(self,dataObj,dataSet='active',newDataSetName='filtered'):
"""Apply the filter to a vtsig object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
sigobj = getattr(dataObj,dataSet)
vtsig = sigobj.parent
nrTimes,nrBeams,nrGates = np.shape(sigobj.data)
#Filter causes a delay in the signal and also doesn't get the tail end of the signal... Shift signal around, provide info about where the signal is valid.
shift = np.int32(-np.floor(len(self.ir)/2.))
start_line = np.zeros(nrTimes)
start_line[0] = 1
start_line = np.roll(start_line,shift)
tinx0 = abs(shift)
tinx1 = np.where(start_line == 1)[0][0]
val_tm0 = sigobj.time[tinx0]
val_tm1 = sigobj.time[tinx1]
filteredData = np.zeros_like(sigobj.data)
#Apply filter
for bm in range(nrBeams):
for rg in range(nrGates):
tmp = sp.signal.lfilter(self.ir,[1.0],sigobj.data[:,bm,rg])
tmp = np.roll(tmp,shift)
filteredData[:,bm,rg] = tmp[:]
#Create new signal object.
newsigobj = sigobj.copy(newDataSetName,self.comment)
#Put in the filtered data.
newsigobj.data = copy.copy(filteredData)
newsigobj.time = copy.copy(sigobj.time)
#Clear out ymin and ymax from metadata; make sure meta data block exists.
#If not, create it.
if hasattr(newsigobj,'metadata'):
delMeta = ['ymin','ymax','ylim']
for key in delMeta:
if newsigobj.metadata.has_key(key):
del newsigobj.metadata[key]
else:
newsigobj.metadata = {}
newsigobj.metadata['timeLimits'] = (val_tm0,val_tm1)
key = 'title'
if newsigobj.metadata.has_key(key):
newsigobj.metadata[key] = ' '.join(['Filtered',newsigobj.metadata[key]])
else:
newsigobj.metadata[key] = 'Filtered'
newsigobj.metadata['fir_filter'] = (self.cutoff_low,self.cutoff_high)
newsigobj.setActive()
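# Illustrative usage sketch (comment only; cutoffs in Hz are hypothetical and assume
# the active data set is already regularly sampled, e.g. via timeInterpolation):
#
#     filt = filter(dataObj, numtaps=101, cutoff_low=0.0003, cutoff_high=0.0012)
#     filt.plotImpulseResponse()
#     filt.plotTransferFunction(xmax=0.004)
#     # __init__ applies the filter and makes the new 'filtered' data set active.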
def detrend(dataObj,dataSet='active',newDataSetName='detrended',comment=None,type='linear'):
    """Linearly detrend data in a musicArray/musicDataObj object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
type : Optional[str]
The type of detrending. If type == 'linear' (default), the result of a linear least-squares fit to data
is subtracted from data. If type == 'constant', only the mean of data is subtracted.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
newDataArr= np.zeros_like(currentData.data)
for bm in range(nrBeams):
for rg in range(nrGates):
try:
newDataArr[:,bm,rg] = sp.signal.detrend(currentData.data[:,bm,rg],type=type)
except:
newDataArr[:,bm,rg] = np.nan
if comment == None:
comment = type.capitalize() + ' detrend (scipy.signal.detrend)'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = newDataArr
newDataSet.setActive()
def nan_to_num(dataObj,dataSet='active',newDataSetName='nan_to_num',comment=None):
"""Convert all NANs and INFs to finite numbers using numpy.nan_to_num().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
if comment == None:
comment = 'numpy.nan_to_num'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = np.nan_to_num(currentData.data)
newDataSet.setActive()
def windowData(dataObj,dataSet='active',newDataSetName='windowed',comment=None,window='hann'):
"""Apply a window to a musicArray object. The window is calculated using scipy.signal.get_window().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
window : Optional[str]
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall,
barthann, kaiser (needs beta), gaussian (needs std), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
win = sp.signal.get_window(window,nrTimes,fftbins=False)
newDataArr= np.zeros_like(currentData.data)
for bm in range(nrBeams):
for rg in range(nrGates):
newDataArr[:,bm,rg] = currentData.data[:,bm,rg] * win
if comment == None:
comment = window.capitalize() + ' window applied (scipy.signal.get_window)'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = newDataArr
newDataSet.setActive()
def calculateFFT(dataObj,dataSet='active',comment=None):
"""Calculate the spectrum of an object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
#Determine frequency axis.
nyq = currentData.nyquistFrequency()
freq_ax = np.arange(nrTimes,dtype='f8')
freq_ax = (freq_ax / max(freq_ax)) - 0.5
freq_ax = freq_ax * 2. * nyq
#Use complex64, not complex128! If you use complex128, too much numerical noise will accumulate and the final plot will be bad!
newDataArr= np.zeros((nrTimes,nrBeams,nrGates),dtype=np.complex64)
for bm in range(nrBeams):
for rg in range(nrGates):
newDataArr[:,bm,rg] = sp.fftpack.fftshift(sp.fftpack.fft(currentData.data[:,bm,rg])) / np.size(currentData.data[:,bm,rg])
currentData.freqVec = freq_ax
currentData.spectrum = newDataArr
# Calculate the dominant frequency #############################################
posFreqInx = np.where(currentData.freqVec >= 0)[0]
posFreqVec = currentData.freqVec[posFreqInx]
npf = len(posFreqVec) #Number of positive frequencies
data = np.abs(currentData.spectrum[posFreqInx,:,:]) #Use the magnitude of the positive frequency data.
#Average Power Spectral Density
avg_psd = np.zeros(npf)
for x in range(npf): avg_psd[x] = np.mean(data[x,:,:])
currentData.dominantFreq = posFreqVec[np.argmax(avg_psd)]
currentData.appendHistory('Calculated FFT')
def calculateDlm(dataObj,dataSet='active',comment=None):
    """Calculate the cross-spectral matrix of a musicArray object. FFT must already have been calculated.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
nCells = nrBeams * nrGates
currentData.llLookupTable = np.zeros([5,nCells])
currentData.Dlm = np.zeros([nCells,nCells],dtype=np.complex128)
#Only use positive frequencies...
posInx = np.where(currentData.freqVec > 0)[0]
#Explicitly write out gate/range indices...
llList = []
for gg in xrange(nrGates):
for bb in xrange(nrBeams):
llList.append((bb,gg))
for ll in range(nCells):
llAI = llList[ll]
ew_dist = currentData.fov.relative_x[llAI]
ns_dist = currentData.fov.relative_y[llAI]
currentData.llLookupTable[:,ll] = [ll, currentData.fov.beams[llAI[0]], currentData.fov.gates[llAI[1]],ns_dist,ew_dist]
spectL = currentData.spectrum[posInx,llAI[0],llAI[1]]
for mm in range(nCells):
mmAI = llList[mm]
spectM = currentData.spectrum[posInx,mmAI[0],mmAI[1]]
currentData.Dlm[ll,mm] = np.sum(spectL * np.conj(spectM))
currentData.appendHistory('Calculated Cross-Spectral Matrix Dlm')
def calculateKarr(dataObj,dataSet='active',kxMax=0.05,kyMax=0.05,dkx=0.001,dky=0.001,threshold=0.15):
"""Calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
Cross-spectrum array Dlm must already have been calculated.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
kxMax : Optional[float]
Maximum kx (East-West) wavenumber to calculate [rad/km]
kyMax : Optional[float]
Maximum ky (North-South) wavenumber to calculate [rad/km]
dkx : Optional[float]
kx resolution [rad/km]
dky : Optional[float]
ky resolution [rad/km]
threshold : Optional[float]
threshold of signals to detect as a fraction of the maximum eigenvalue
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
#Calculate eigenvalues, eigenvectors
    eVals,eVecs = np.linalg.eig(np.transpose(currentData.Dlm))
nkx = np.ceil(2*kxMax/dkx)
if (nkx % 2) == 0: nkx = nkx+1
kxVec = kxMax * (2*np.arange(nkx)/(nkx-1) - 1)
nky = np.ceil(2*kyMax/dky)
if (nky % 2) == 0: nky = nky+1
kyVec = kyMax * (2*np.arange(nky)/(nky-1) - 1)
nkx = int(nkx)
nky = int(nky)
xm = currentData.llLookupTable[4,:] #x is in the E-W direction.
ym = currentData.llLookupTable[3,:] #y is in the N-S direction.
maxEval = np.max(np.abs(eVals))
minEvalsInx = np.where(eVals <= threshold*maxEval)[0]
cnt = np.size(minEvalsInx)
maxEvalsInx = np.where(eVals > threshold*maxEval)[0]
nSigs = np.size(maxEvalsInx)
if cnt < 3:
logging.warning('Not enough small eigenvalues!')
logging.info('K-Array: ' + str(nkx) + ' x ' + str(nky))
logging.info('Kx Max: ' + str(kxMax))
logging.info('Kx Res: ' + str(dkx))
logging.info('Ky Max: ' + str(kyMax))
logging.info('Ky Res: ' + str(dky))
logging.info('')
logging.info('Signal Threshold: ' + str(threshold))
logging.info('Number of Det Signals: ' + str(nSigs))
logging.info('Number of Noise Evals: ' + str(cnt))
logging.info('Starting kArr Calculation...')
t0 = datetime.datetime.now()
def vCalc(um,v):
return np.dot( np.conj(um), v) * np.dot( np.conj(v), um)
vList = [eVecs[:,minEvalsInx[ee]] for ee in xrange(cnt)]
kArr = np.zeros((nkx,nky),dtype=np.complex64)
for kk_kx in xrange(nkx):
kx = kxVec[kk_kx]
for kk_ky in xrange(nky):
ky = kyVec[kk_ky]
um = np.exp(1j*(kx*xm + ky*ym))
kArr[kk_kx,kk_ky]= 1. / np.sum(map(lambda v: vCalc(um,v), vList))
t1 = datetime.datetime.now()
logging.info('Finished kArr Calculation. Total time: ' + str(t1-t0))
currentData.karr = kArr
currentData.kxVec = kxVec
currentData.kyVec = kyVec
currentData.appendHistory('Calculated kArr')
def simulator(dataObj, dataSet='active',newDataSetName='simulated',comment=None,keepLocalRange=True,sigs=None,noiseFactor=0):
"""Replace SuperDARN Data with simulated MSTID(s). This is useful for understanding how the signal processing
routines of this module affect ideal data.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
keepLocalRange : Optional[bool]
If true, the locations calculated for the actual radar field of view will be used. If false,
        a linearly-spaced grid will replace the true grid.
sigs : Optional[list of tuples]
A list of tuples defining the characteristics of the simulated signal. Sample list is as follows.
If this keyword is None, the values in this sample list are used as the default values.::
sigs = []
# (amp, kx, ky, f, phi, dcOffset)
sigs.append(( 5, 0.01, -0.010, 0.0004, 0, 5.))
sigs.append(( 5, 0.022, -0.023, 0.0004, 0, 5.))
Each signal is evaluated as a cosine and then summed together. The cosine evaluated is::
sig = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
noiseFactor : Optional[float]
Add white gaussian noise to the simulated signal. noiseFactor is a scalar such that:
noise = noiseFactor*np.random.standard_normal(nSteps)
Written by Nathaniel A. Frissell, Fall 2013
"""
from davitpy import utils
currentData = getDataSet(dataObj,dataSet)
#Typical TID Parameters:
    # Frequency: 0.0003 Hz
# Period: 55.5 min
# H. Wavelength: 314 km
# k: 0.02 /km
if keepLocalRange == True:
nx, ny = np.shape(currentData.fov.relative_x)
xRange = np.max(currentData.fov.relative_x) - np.min(currentData.fov.relative_x)
yRange = np.max(currentData.fov.relative_y) - np.min(currentData.fov.relative_y)
xgrid = currentData.fov.relative_x
ygrid = currentData.fov.relative_y
else:
nx = 16
xRange = 800.
ny = 25
yRange = 600.
xvec = np.linspace(-xRange/2.,xRange/2.,nx)
yvec = np.linspace(-yRange/2.,yRange/2.,ny)
dx = np.diff(xvec)[0]
dy = np.diff(yvec)[0]
xaxis = np.append(xvec,xvec[-1]+dx)
        yaxis = np.append(yvec,yvec[-1]+dy)
xgrid = np.zeros((nx,ny))
ygrid = np.zeros((nx,ny))
for kk in xrange(nx): ygrid[kk,:] = yvec[:]
        for kk in xrange(ny): xgrid[:,kk] = xvec[:]
if sigs == None:
#Set some default signals.
sigs = []
# (amp, kx, ky, f, phi, dcOffset)
sigs.append(( 5, 0.01, -0.010, 0.0004, 0, 5.))
sigs.append(( 5, 0.022, -0.023, 0.0004, 0, 5.))
secVec = np.array(utils.datetimeToEpoch(currentData.time))
secVec = secVec - secVec[0]
nSteps = len(secVec)
dt = currentData.samplePeriod()
dataArr = np.zeros((nSteps,nx,ny))
for step in xrange(nSteps):
t = secVec[step]
for kk in xrange(len(sigs)):
amp = sigs[kk][0]
kx = sigs[kk][1]
ky = sigs[kk][2]
f = sigs[kk][3]
phi = sigs[kk][4]
dc = sigs[kk][5]
if 1./dt <= 2.*f:
logging.warning('Nyquist Violation in f.')
logging.warning('Signal #: %i' % kk)
# if 1./dx <= 2.*kx/(2.*np.pi):
# print 'WARNING: Nyquist Violation in kx.'
# print 'Signal #: %i' % kk
#
# if 1./dy <= 2.*ky/(2.*np.pi):
# print 'WARNING: Nyquist Violation in ky.'
# print 'Signal #: %i' % kk
temp = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
dataArr[step,:,:] = dataArr[step,:,:] + temp
#Signal RMS
sig_rms = np.zeros((nx,ny))
for xx in xrange(nx):
for yy in xrange(ny):
sig_rms[xx,yy] = np.sqrt(np.mean((dataArr[:,xx,yy])**2.))
noise_rms = np.zeros((nx,ny))
if noiseFactor > 0:
nf = noiseFactor
#Temporal White Noise
for xx in xrange(nx):
for yy in xrange(ny):
noise = nf*np.random.standard_normal(nSteps)
noise_rms[xx,yy] = np.sqrt(np.mean(noise**2))
dataArr[:,xx,yy] = dataArr[:,xx,yy] + noise
xx = np.arange(ny)
mu = (ny-1.)/2.
sigma2 = 10.0
sigma = np.sqrt(sigma2)
rgDist = 1./(sigma*np.sqrt(2.*np.pi)) * np.exp(-0.5 * ((xx-mu)/sigma)**2)
rgDist = rgDist / np.max(rgDist)
mask = np.zeros((nx,ny))
for nn in xrange(nx): mask[nn,:] = rgDist[:]
mask3d = np.zeros((nSteps,nx,ny))
for nn in xrange(nSteps): mask3d[nn,:,:] = mask[:]
#Apply Range Gate Dependence
dataArr = dataArr * mask3d
snr = (sig_rms/noise_rms)**2
snr_db = 10.*np.log10(snr)
if comment == None:
comment = 'Simulated data injected.'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = dataArr
newDataSet.setActive()
def scale_karr(kArr):
    """Scale/normalize kArr for plotting and signal detection.
Parameters
----------
kArr : 2D numpy.array
Two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
Returns
-------
data : 2D numpy.array
Scaled and normalized version of kArr.
Written by Nathaniel A. Frissell, Fall 2013
"""
    from scipy import stats
    data = np.abs(kArr) - np.min(np.abs(kArr))
#Determine scale for colorbar.
scale = [0.,1.]
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 6.5*sd
data = data / scMax
return data
def detectSignals(dataObj,dataSet='active',threshold=0.35,neighborhood=(10,10)):
"""Automatically detects local maxima/signals in a calculated kArr. This routine uses the watershed
algorithm from the skimage image processing library. Results are automatically stored in
dataObj.dataSet.sigDetect.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
threshold : Optional[float]
Scaled input data must be above this value to be detected. A higher number
will reduce the number of signals detected.
neighborhood : Optional[tuple]
Local region in which to search for peaks at every point in the image/array.
(10,10) will search a 10x10 pixel area.
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
################################################################################
#Feature detection...
#Now lets do a little image processing...
from scipy import ndimage
from skimage.morphology import watershed
from skimage.feature import peak_local_max
#sudo pip install cython
#sudo pip install scikit-image
data = scale_karr(currentData.karr)
mask = data > threshold
labels, nb = ndimage.label(mask)
distance = ndimage.distance_transform_edt(mask)
local_maxi = peak_local_max(distance,footprint=np.ones(neighborhood),indices=False)
markers,nb = ndimage.label(local_maxi)
labels = watershed(-distance,markers,mask=mask)
areas = ndimage.sum(mask,labels,xrange(1,labels.max()+1))
maxima = ndimage.maximum(data,labels,xrange(1, labels.max()+1))
order = np.argsort(maxima)[::-1] + 1
maxpos = ndimage.maximum_position(data,labels,xrange(1, labels.max()+1))
sigDetect = SigDetect()
sigDetect.mask = mask
sigDetect.labels = labels
sigDetect.nrSigs = nb
sigDetect.info = []
for x in xrange(labels.max()):
info = {}
info['labelInx'] = x+1
info['order'] = order[x]
info['area'] = areas[x]
info['max'] = maxima[x]
info['maxpos'] = maxpos[x]
info['kx'] = currentData.kxVec[info['maxpos'][0]]
info['ky'] = currentData.kyVec[info['maxpos'][1]]
info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
info['lambda_x'] = 2*np.pi / info['kx']
info['lambda_y'] = 2*np.pi / info['ky']
info['lambda'] = 2*np.pi / info['k']
info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
info['freq'] = currentData.dominantFreq
info['period'] = 1./currentData.dominantFreq
info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
sigDetect.info.append(info)
currentData.appendHistory('Detected KArr Signals')
currentData.sigDetect = sigDetect
return currentData
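# Illustrative end-to-end sketch of the spectral processing chain (comment only;
# assumes the active data set has already been limited, interpolated, and filtered):
#
#     detrend(dataObj)
#     windowData(dataObj)
#     calculateFFT(dataObj)
#     calculateDlm(dataObj)
#     calculateKarr(dataObj, kxMax=0.05, kyMax=0.05, dkx=0.001, dky=0.001)
#     detectSignals(dataObj, threshold=0.35)
#     for sig in dataObj.active.sigDetect.info:
#         print sig['order'], sig['k'], sig['azm'], sig['vel']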
def add_signal(kx,ky,dataObj,dataSet='active',frequency=None):
"""Manually add a signal to the detected signal list. All signals will be re-ordered according to value in the
scaled kArr. Added signals can be distinguished from autodetected signals because
'labelInx' and 'area' will both be set to -1.
Parameters
----------
kx : float
Value of kx of new signal.
ky : float
Value of ky of new signal.
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
frequency : Optional[float]
Frequency to use to calculate period, phase velocity, etc. If None,
the calculated dominant frequency will be used.
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
data = scale_karr(currentData.karr)
def find_nearest_inx(array,value):
return (np.abs(array-value)).argmin()
kx_inx = find_nearest_inx(currentData.kxVec,kx)
ky_inx = find_nearest_inx(currentData.kyVec,ky)
maxpos = (kx_inx,ky_inx)
value = data[kx_inx,ky_inx]
true_value = currentData.karr[kx_inx,ky_inx] #Get the unscaled kArr value.
if frequency == None:
freq = currentData.dominantFreq
else:
freq = frequency
info = {}
info['labelInx'] = -1
info['area'] = -1
info['order'] = -1
info['max'] = value
info['true_max'] = true_value #Unscaled kArr value
info['maxpos'] = maxpos
info['kx'] = currentData.kxVec[info['maxpos'][0]]
info['ky'] = currentData.kyVec[info['maxpos'][1]]
info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
info['lambda_x'] = 2*np.pi / info['kx']
info['lambda_y'] = 2*np.pi / info['ky']
info['lambda'] = 2*np.pi / info['k']
info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
info['freq'] = freq
info['period'] = 1./freq
info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
currentData.sigDetect.info.append(info)
currentData.sigDetect.reorder()
currentData.appendHistory('Appended Signal to sigDetect List')
return currentData
def del_signal(order,dataObj,dataSet='active'):
    """Remove a signal from the detected signal list.
Parameters
----------
    order : int or list of int
        Single value or list of signal orders (IDs) to be removed from the list.
dataObj : musicArray
object
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
data = scale_karr(currentData.karr)
orderArr = np.array(order)
for item in list(currentData.sigDetect.info):
if item['order'] in orderArr:
currentData.sigDetect.info.remove(item)
currentData.sigDetect.reorder()
currentData.appendHistory('Deleted Signals from sigDetect List')
return currentData
| MuhammadVT/davitpy | davitpy/pydarn/proc/music/music.py | Python | gpl-3.0 | 84,879 | 0.014338 |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import subprocess
import numpy as np
def find_in_path(name, path):
"Find a file in a search path"
    # Adapted from
# http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
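# Example of the mapping returned by locate_cuda() (paths are illustrative only):
#
#     {'home':    '/usr/local/cuda',
#      'nvcc':    '/usr/local/cuda/bin/nvcc',
#      'include': '/usr/local/cuda/include',
#      'lib64':   '/usr/local/cuda/lib64'}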
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
    subclassing going on."""
    # tell the compiler it can process .cu
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"utils.cython_bbox",
["utils/bbox.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension(
"nms.cpu_nms",
["nms/cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('nms.gpu_nms',
['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with
# gcc the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
),
Extension(
'pycocotools._mask',
sources=['pycocotools/maskApi.c', 'pycocotools/_mask.pyx'],
include_dirs = [numpy_include, 'pycocotools'],
extra_compile_args={
'gcc': ['-Wno-cpp', '-Wno-unused-function', '-std=c99']},
),
]
setup(
name='fast_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
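# Typical (hypothetical) invocation for building these extensions in place:
#
#     python setup.py build_ext --inplace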
| mileistone/test | utils/test/setup.py | Python | mit | 5,817 | 0.003094 |
"""Support for Telegram bot using polling."""
import logging
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from . import (
CONF_ALLOWED_CHAT_IDS, PLATFORM_SCHEMA as TELEGRAM_PLATFORM_SCHEMA,
BaseTelegramBotEntity, initialize_bot)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = TELEGRAM_PLATFORM_SCHEMA
async def async_setup_platform(hass, config):
"""Set up the Telegram polling platform."""
bot = initialize_bot(config)
pol = TelegramPoll(bot, hass, config[CONF_ALLOWED_CHAT_IDS])
@callback
def _start_bot(_event):
"""Start the bot."""
pol.start_polling()
@callback
def _stop_bot(_event):
"""Stop the bot."""
pol.stop_polling()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _start_bot)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_bot)
return True
def process_error(bot, update, error):
"""Telegram bot error handler."""
from telegram.error import (
TelegramError, TimedOut, NetworkError, RetryAfter)
try:
raise error
except (TimedOut, NetworkError, RetryAfter):
# Long polling timeout or connection problem. Nothing serious.
pass
except TelegramError:
_LOGGER.error('Update "%s" caused error "%s"', update, error)
def message_handler(handler):
"""Create messages handler."""
from telegram import Update
from telegram.ext import Handler
class MessageHandler(Handler):
"""Telegram bot message handler."""
def __init__(self):
"""Initialize the messages handler instance."""
super().__init__(handler)
def check_update(self, update): # pylint: disable=no-self-use
"""Check is update valid."""
return isinstance(update, Update)
def handle_update(self, update, dispatcher):
"""Handle update."""
optional_args = self.collect_optional_args(dispatcher, update)
return self.callback(dispatcher.bot, update, **optional_args)
return MessageHandler()
class TelegramPoll(BaseTelegramBotEntity):
"""Asyncio telegram incoming message handler."""
def __init__(self, bot, hass, allowed_chat_ids):
"""Initialize the polling instance."""
from telegram.ext import Updater
BaseTelegramBotEntity.__init__(self, hass, allowed_chat_ids)
self.updater = Updater(bot=bot, workers=4)
self.dispatcher = self.updater.dispatcher
self.dispatcher.add_handler(message_handler(self.process_update))
self.dispatcher.add_error_handler(process_error)
def start_polling(self):
"""Start the polling task."""
self.updater.start_polling()
def stop_polling(self):
"""Stop the polling task."""
self.updater.stop()
def process_update(self, bot, update):
"""Process incoming message."""
self.process_message(update.to_dict())
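# Illustrative configuration sketch (comment only; YAML keys beyond `platform` and
# `allowed_chat_ids` follow the shared telegram_bot schema and may differ):
#
#     telegram_bot:
#       - platform: polling
#         api_key: !secret telegram_api_key
#         allowed_chat_ids:
#           - 123456789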
| jamespcole/home-assistant | homeassistant/components/telegram_bot/polling.py | Python | apache-2.0 | 3,026 | 0 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import subprocess
import sys
_enable_style_format = None
_clang_format_command_path = None
_gn_command_path = None
def init(root_src_dir, enable_style_format=True):
assert isinstance(root_src_dir, str)
assert isinstance(enable_style_format, bool)
global _enable_style_format
global _clang_format_command_path
global _gn_command_path
assert _enable_style_format is None
assert _clang_format_command_path is None
assert _gn_command_path is None
_enable_style_format = enable_style_format
root_src_dir = os.path.abspath(root_src_dir)
# Determine //buildtools/<platform>/ directory
if sys.platform.startswith("linux"):
platform = "linux64"
exe_suffix = ""
elif sys.platform.startswith("darwin"):
platform = "mac"
exe_suffix = ""
elif sys.platform.startswith(("cygwin", "win")):
platform = "win"
exe_suffix = ".exe"
else:
assert False, "Unknown platform: {}".format(sys.platform)
buildtools_platform_dir = os.path.join(root_src_dir, "buildtools",
platform)
# //buildtools/<platform>/clang-format
_clang_format_command_path = os.path.join(
buildtools_platform_dir, "clang-format{}".format(exe_suffix))
# //buildtools/<platform>/gn
_gn_command_path = os.path.join(buildtools_platform_dir,
"gn{}".format(exe_suffix))
def auto_format(contents, filename):
assert isinstance(filename, str)
_, ext = os.path.splitext(filename)
if ext in (".gn", ".gni"):
return gn_format(contents, filename)
return clang_format(contents, filename)
def clang_format(contents, filename=None):
command_line = [_clang_format_command_path]
if filename is not None:
command_line.append('-assume-filename={}'.format(filename))
return _invoke_format_command(command_line, filename, contents)
def gn_format(contents, filename=None):
command_line = [_gn_command_path, "format", "--stdin"]
if filename is not None:
command_line.append('-assume-filename={}'.format(filename))
return _invoke_format_command(command_line, filename, contents)
def _invoke_format_command(command_line, filename, contents):
if not _enable_style_format:
return StyleFormatResult(stdout_output=contents,
stderr_output="",
exit_code=0,
filename=filename)
kwargs = {}
if sys.version_info.major != 2:
kwargs['encoding'] = 'utf-8'
proc = subprocess.Popen(command_line,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
**kwargs)
stdout_output, stderr_output = proc.communicate(input=contents)
exit_code = proc.wait()
return StyleFormatResult(
stdout_output=stdout_output,
stderr_output=stderr_output,
exit_code=exit_code,
filename=filename)
class StyleFormatResult(object):
def __init__(self, stdout_output, stderr_output, exit_code, filename):
self._stdout_output = stdout_output
self._stderr_output = stderr_output
self._exit_code = exit_code
self._filename = filename
@property
def did_succeed(self):
return self._exit_code == 0
@property
def contents(self):
assert self.did_succeed
return self._stdout_output
@property
def error_message(self):
return self._stderr_output
@property
def filename(self):
return self._filename
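# Illustrative usage sketch (comment only; the source path is hypothetical):
#
#     init("/path/to/chromium/src", enable_style_format=True)
#     result = auto_format("int  main(){return 0;}\n", filename="example.cc")
#     if result.did_succeed:
#         formatted = result.contents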
| scheib/chromium | third_party/blink/renderer/bindings/scripts/bind_gen/style_format.py | Python | bsd-3-clause | 3,817 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# mavlog documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import mavlog
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MAVLog'
copyright = u'2014, Gareth R'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = mavlog.__version__
# The full version, including alpha/beta/rc tags.
release = mavlog.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mavlogdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'mavlog.tex',
u'MAVLog Documentation',
u'Gareth R', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mavlog',
u'MAVLog Documentation',
[u'Gareth R'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mavlog',
u'MAVLog Documentation',
u'Gareth R',
'mavlog',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
 | blutack/mavlog | docs/conf.py | Python | bsd-3-clause | 8,361 | 0.005502 |
"""Add theme to config
Revision ID: 58ee75910929
Revises: 1c22ceb384a7
Create Date: 2015-08-28 15:15:47.971807
"""
# revision identifiers, used by Alembic.
revision = '58ee75910929'
down_revision = '1c22ceb384a7'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("INSERT INTO config (category, key, value, description) VALUES ('general', 'theme', '\"zkpylons\"', 'The enabled theme to use. Should match the theme folder name (requires a server restart to take effect)')")
def downgrade():
op.execute("DELETE FROM config WHERE category='general' AND key='theme'")
| iseppi/zookeepr | alembic/versions/20_58ee75910929_add_theme_to_config_.py | Python | gpl-2.0 | 603 | 0.006633 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-02 12:18
from __future__ import unicode_literals
from django.contrib.auth.models import Permission
from django.db import migrations
def delete_old_comment_permission(apps, schema_editor):
"""
Deletes the old 'can_see_and_manage_comments' permission which is
    split up into two separate permissions.
"""
perm = Permission.objects.filter(codename="can_see_and_manage_comments")
if len(perm):
perm = perm.get()
# Save content_type for manual creation of new permissions.
content_type = perm.content_type
# Save groups. list() is necessary to evaluate the database query right now.
groups = list(perm.group_set.all())
# Delete permission
perm.delete()
# Create new permission
perm_see = Permission.objects.create(
codename="can_see_comments",
name="Can see comments",
content_type=content_type,
)
perm_manage = Permission.objects.create(
codename="can_manage_comments",
name="Can manage comments",
content_type=content_type,
)
for group in groups:
group.permissions.add(perm_see)
group.permissions.add(perm_manage)
group.save()
class Migration(migrations.Migration):
dependencies = [("motions", "0004_motionchangerecommendation_other_description")]
operations = [
migrations.AlterModelOptions(
name="motion",
options={
"default_permissions": (),
"ordering": ("identifier",),
"permissions": (
("can_see", "Can see motions"),
("can_create", "Can create motions"),
("can_support", "Can support motions"),
("can_see_comments", "Can see comments"),
("can_manage_comments", "Can manage comments"),
("can_manage", "Can manage motions"),
),
"verbose_name": "Motion",
},
),
migrations.RunPython(delete_old_comment_permission),
]
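# Note: the RunPython operation above is registered without a reverse_code callable, so
# Django cannot automatically reverse this data migration. After it runs, callers are
# expected to check the two new permissions instead of the old combined one, e.g.
# user.has_perm('motions.can_see_comments') / user.has_perm('motions.can_manage_comments')
# (illustrative calls; the actual call sites live in the application code).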
| CatoTH/OpenSlides | server/openslides/motions/migrations/0005_auto_20180202_1318.py | Python | mit | 2,199 | 0.00091 |
# Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import sys
import jinja2
from django.conf import settings
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.utils import six
from django.utils.module_loading import import_string
from .base import BaseEngine
from .utils import csrf_input_lazy, csrf_token_lazy
class Jinja2(BaseEngine):
app_dirname = 'jinja2'
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
super(Jinja2, self).__init__(params)
environment = options.pop('environment', 'jinja2.Environment')
environment_cls = import_string(environment)
options.setdefault('autoescape', True)
options.setdefault('loader', jinja2.FileSystemLoader(self.template_dirs))
options.setdefault('auto_reload', settings.DEBUG)
options.setdefault('undefined',
jinja2.DebugUndefined if settings.DEBUG else jinja2.Undefined)
self.env = environment_cls(**options)
def from_string(self, template_code):
return Template(self.env.from_string(template_code))
def get_template(self, template_name):
try:
return Template(self.env.get_template(template_name))
except jinja2.TemplateNotFound as exc:
six.reraise(
TemplateDoesNotExist,
TemplateDoesNotExist(exc.name, backend=self),
sys.exc_info()[2],
)
except jinja2.TemplateSyntaxError as exc:
new = TemplateSyntaxError(exc.args)
new.template_debug = get_exception_info(exc)
six.reraise(TemplateSyntaxError, new, sys.exc_info()[2])
class Template(object):
def __init__(self, template):
self.template = template
self.origin = Origin(
name=template.filename, template_name=template.name,
)
def render(self, context=None, request=None):
if context is None:
context = {}
if request is not None:
context['request'] = request
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
return self.template.render(context)
class Origin(object):
"""
A container to hold debug information as described in the template API
documentation.
"""
def __init__(self, name, template_name):
self.name = name
self.template_name = template_name
def get_exception_info(exception):
"""
Formats exception information for display on the debug page using the
structure described in the template API documentation.
"""
context_lines = 10
lineno = exception.lineno
lines = list(enumerate(exception.source.strip().split("\n"), start=1))
during = lines[lineno - 1][1]
total = len(lines)
top = max(0, lineno - context_lines - 1)
bottom = min(total, lineno + context_lines)
return {
'name': exception.filename,
'message': exception.message,
'source_lines': lines[top:bottom],
'line': lineno,
'before': '',
'during': during,
'after': '',
'total': total,
'top': top,
'bottom': bottom,
}
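# Illustrative Django settings snippet for selecting this backend (the module path
# "myproject.jinja2.environment" is a hypothetical example; the "environment" option and
# its "jinja2.Environment" default are handled in Jinja2.__init__ above):
#
# TEMPLATES = [
#     {
#         "BACKEND": "django.template.backends.jinja2.Jinja2",
#         "DIRS": [],
#         "APP_DIRS": True,  # searches each app's "jinja2" subdirectory (app_dirname above)
#         "OPTIONS": {
#             "environment": "myproject.jinja2.environment",
#         },
#     },
# ]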
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/template/backends/jinja2.py | Python | artistic-2.0 | 3,342 | 0.000598 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.test import attr
from tempest.test import HTTP_SUCCESS
QUOTA_BYTES = 10
QUOTA_COUNT = 3
SKIP_MSG = "Container quotas middleware not available."
class ContainerQuotasTest(base.BaseObjectTest):
"""Attemps to test the perfect behavior of quotas in a container."""
container_quotas_available = \
config.TempestConfig().object_storage_feature_enabled.container_quotas
def setUp(self):
"""Creates and sets a container with quotas.
Quotas are set by adding meta values to the container,
and are validated when set:
- X-Container-Meta-Quota-Bytes:
Maximum size of the container, in bytes.
- X-Container-Meta-Quota-Count:
Maximum object count of the container.
"""
super(ContainerQuotasTest, self).setUp()
self.container_name = data_utils.rand_name(name="TestContainer")
self.container_client.create_container(self.container_name)
metadata = {"quota-bytes": str(QUOTA_BYTES),
"quota-count": str(QUOTA_COUNT), }
self.container_client.update_container_metadata(
self.container_name, metadata)
def tearDown(self):
"""Cleans the container of any object after each test."""
self.delete_containers([self.container_name])
super(ContainerQuotasTest, self).tearDown()
@testtools.skipIf(not container_quotas_available, SKIP_MSG)
@attr(type="smoke")
def test_upload_valid_object(self):
"""Attempts to uploads an object smaller than the bytes quota."""
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string(QUOTA_BYTES)
nbefore = self._get_bytes_used()
resp, _ = self.object_client.create_object(
self.container_name, object_name, data)
self.assertIn(int(resp['status']), HTTP_SUCCESS)
nafter = self._get_bytes_used()
self.assertEqual(nbefore + len(data), nafter)
@testtools.skipIf(not container_quotas_available, SKIP_MSG)
@attr(type="smoke")
def test_upload_large_object(self):
"""Attempts to upload an object lagger than the bytes quota."""
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string(QUOTA_BYTES + 1)
nbefore = self._get_bytes_used()
self.assertRaises(exceptions.OverLimit,
self.object_client.create_object,
self.container_name, object_name, data)
nafter = self._get_bytes_used()
self.assertEqual(nbefore, nafter)
@testtools.skipIf(not container_quotas_available, SKIP_MSG)
@attr(type="smoke")
def test_upload_too_many_objects(self):
"""Attempts to upload many objects that exceeds the count limit."""
for _ in range(QUOTA_COUNT):
name = data_utils.rand_name(name="TestObject")
self.object_client.create_object(self.container_name, name, "")
nbefore = self._get_object_count()
self.assertEqual(nbefore, QUOTA_COUNT)
self.assertRaises(exceptions.OverLimit,
self.object_client.create_object,
self.container_name, "OverQuotaObject", "")
nafter = self._get_object_count()
self.assertEqual(nbefore, nafter)
def _get_container_metadata(self):
resp, _ = self.container_client.list_container_metadata(
self.container_name)
return resp
def _get_object_count(self):
resp = self._get_container_metadata()
return int(resp["x-container-object-count"])
def _get_bytes_used(self):
resp = self._get_container_metadata()
return int(resp["x-container-bytes-used"])
| eltonkevani/tempest_el_env | tempest/api/object_storage/test_container_quotas.py | Python | apache-2.0 | 4,616 | 0 |
# -*- coding: utf-8 -*-
#
# pysysinfo documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 6 16:05:30 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysysinfo'
copyright = u'2015, FrostyX'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sysinfodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pysysinfo.tex', u'pysysinfo Documentation',
u'FrostyX', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysysinfo', u'pysysinfo Documentation',
[u'FrostyX'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pysysinfo', u'pysysinfo Documentation',
u'FrostyX', 'pysysinfo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| FrostyX/pysysinfo | doc/conf.py | Python | gpl-2.0 | 8,133 | 0.006271 |
#!/usr/bin/env python
import json
import argparse
from webapollo import WAAuth, WebApolloInstance, AssertUser, accessible_organisms
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="List all organisms available in an Apollo instance"
)
WAAuth(parser)
parser.add_argument("email", help="User Email")
args = parser.parse_args()
wa = WebApolloInstance(args.apollo, args.username, args.password)
gx_user = AssertUser(wa.users.loadUsers(email=args.email))
all_orgs = wa.organisms.findAllOrganisms()
orgs = accessible_organisms(gx_user, all_orgs)
cleanedOrgs = []
for organism in all_orgs:
org = {
"name": organism["commonName"],
"id": organism["id"],
"annotations": organism["annotationCount"],
"sequences": organism["sequences"],
}
cleanedOrgs.append(org)
print(json.dumps(cleanedOrgs, indent=2))
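# Illustrative invocation (hypothetical values; WAAuth is assumed to add the Apollo URL,
# username and password arguments that WebApolloInstance is constructed from above):
# python list_organism_data.py <apollo_url> <admin_username> <admin_password> user@example.org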
| TAMU-CPT/galaxy-tools | tools/webapollo/list_organism_data.py | Python | gpl-3.0 | 951 | 0.001052 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.models.building_location_choice_model import BuildingLocationChoiceModel as UrbansimBuildingLocationChoiceModel
from numpy import where, arange, zeros
from numpy import logical_or, logical_not
from opus_core.variables.variable_name import VariableName
from opus_core.resources import Resources
from opus_core.datasets.dataset import Dataset
class BuildingLocationChoiceModel(UrbansimBuildingLocationChoiceModel):
# def get_weights_for_sampling_locations(self, agent_set, agents_index, data_objects=None):
# where_developable = where(self.apply_filter(self.filter, None, agent_set, agents_index, data_objects=data_objects))[0]
# weight_array = ones((where_developable.size), dtype=int8) #.astype(bool8)
# return (weight_array, where_developable)
def get_weights_for_sampling_locations_for_estimation(self, agent_set, agents_index):
if self.run_config.get("agent_units_string", None): # needs to be corrected
agent_set.compute_variables(self.run_config["agent_units_string"], dataset_pool=self.dataset_pool)
return self.get_weights_for_sampling_locations(agent_set, agents_index)
def prepare_for_estimate(self, add_member_prefix=True,
specification_dict=None,
specification_storage=None,
specification_table=None,
building_set=None,
buildings_for_estimation_storage=None,
buildings_for_estimation_table=None,
constants=None, base_year=0,
building_categories=None,
location_id_variable=None,
join_datasets=False,
data_objects=None, **kwargs):
# buildings = None
if (building_set is not None):
if location_id_variable is not None:
building_set.compute_variables(location_id_variable, resources=Resources(data_objects))
# create agents for estimation
if buildings_for_estimation_storage is not None:
estimation_set = Dataset(in_storage=buildings_for_estimation_storage,
in_table_name=buildings_for_estimation_table,
id_name=building_set.get_id_name(),
dataset_name=building_set.get_dataset_name())
if location_id_variable:
estimation_set.compute_variables(location_id_variable,
resources=Resources(data_objects))
# needs to be a primary attribute because of the join method below
estimation_set.add_primary_attribute(estimation_set.get_attribute(location_id_variable),
VariableName(location_id_variable).alias())
years = estimation_set.get_attribute("scheduled_year")
recent_years = constants['recent_years']
indicator = zeros(estimation_set.size(), dtype="int32")
for year in range(base_year-recent_years, base_year+1):
indicator = logical_or(indicator, years==year)
idx = where(logical_not(indicator))[0]
estimation_set.remove_elements(idx)
#if filter:
#estimation_set.compute_variables(filter, resources=Resources(data_objects))
#index = where(estimation_set.get_attribute(filter) > 0)[0]
#estimation_set.subset_by_index(index, flush_attributes_if_not_loaded=False)
if join_datasets:
building_set.join_by_rows(estimation_set,
require_all_attributes=False,
change_ids_if_not_unique=True)
index = arange(building_set.size()-estimation_set.size(), building_set.size())
else:
index = building_set.get_id_index(estimation_set.get_id_attribute())
else:
if building_set is not None:
index = arange(building_set.size())
else:
index = None
if add_member_prefix:
specification_table = self.group_member.add_member_prefix_to_table_names([specification_table])
from opus_core.model import get_specification_for_estimation
#from urbansim.functions import compute_supply_and_add_to_location_set
specification = get_specification_for_estimation(specification_dict,
specification_storage,
specification_table)
#specification, dummy = AgentLocationChoiceModelMember.prepare_for_estimate(self, add_member_prefix,
#specification_dict, specification_storage,
#specification_table,
#location_id_variable=location_id_variable,
#data_objects=data_objects, **kwargs)
        return (specification, index)
 | christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim_parcel/models/building_location_choice_model.py | Python | gpl-2.0 | 5,528 | 0.009949 |
from unittest2.events import Plugin, addOption
from unittest2.util import getSource
import os
import sys
try:
import coverage
except ImportError, e:
coverage = None
coverageImportError = e
help_text1 = 'Enable coverage reporting'
class CoveragePlugin(Plugin):
configSection = 'coverage'
commandLineSwitch = ('C', 'coverage', help_text1)
def __init__(self):
self.configFile = self.config.get('config', '').strip() or True
self.branch = self.config.as_bool('branch', default=None)
self.timid = self.config.as_bool('timid', default=False)
self.cover_pylib = self.config.as_bool('cover-pylib', default=False)
self.excludeLines = self.config.as_list('exclude-lines', default=[])
self.ignoreErrors = self.config.as_bool('ignore-errors', default=False)
def register(self):
if coverage is None:
raise coverageImportError
Plugin.register(self)
def pluginsLoaded(self, event):
args = dict(
config_file=self.configFile,
cover_pylib=self.cover_pylib,
branch=self.branch,
timid=self.timid,
)
self.cov = coverage.coverage(**args)
self.cov.erase()
self.cov.exclude('#pragma:? *[nN][oO] [cC][oO][vV][eE][rR]')
for line in self.excludeLines:
self.cov.exclude(line)
self.cov.start()
def stopTestRun(self, event):
self.cov.stop()
self.cov.save()
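# Illustrative ini-style configuration for the [coverage] section read in
# CoveragePlugin.__init__ above (values are hypothetical; exclude-lines takes a list in
# whatever form the unittest2 config's as_list() accepts):
#
# [coverage]
# config = .coveragerc
# branch = True
# timid = False
# cover-pylib = False
# exclude-lines = raise NotImplementedError
# ignore-errors = False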
| dugan/coverage-reporter | coverage_reporter/extras/unittest2_plugin.py | Python | mit | 1,511 | 0.003971 |
# Copyright 2020 Nokia.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import include
from django.conf.urls import url
from nuage_horizon.dashboards.project.gateways.ports import urls as port_urls
from nuage_horizon.dashboards.project.gateways import views as gw_views
GATEWAY = r'^(?P<gateway_id>[^/]+)/%s'
urlpatterns = [
url(r'^$', gw_views.IndexView.as_view(), name='index'),
url(GATEWAY % '$', gw_views.DetailView.as_view(), name='detail'),
url(r'^ports/', include((port_urls, 'ports'), namespace='ports')),
]
| nuagenetworks/nuage-openstack-horizon | nuage_horizon/dashboards/project/gateways/urls.py | Python | apache-2.0 | 1,078 | 0 |
from __future__ import absolute_import
import os
import tempfile
from six.moves import configparser as ConfigParser
from six import iteritems
from linchpin.exceptions import LinchpinError
"""
Provide valid context data to test against.
"""
class ContextData(object):
def __init__(self, parser=ConfigParser.ConfigParser):
self.lib_path = '{0}'.format(os.path.dirname(
os.path.realpath(__file__))).rstrip('/')
current_path = os.path.dirname(os.path.realpath(__file__))
constants_path = '{0}/../../'.format(current_path)
self.constants_path = '{0}'.format(os.path.dirname(
constants_path)).rstrip('/')
self.logfile = tempfile.mktemp(suffix='.log', prefix='linchpin')
self.parser = parser()
self.cfg_data = {}
self._load_constants()
def _load_constants(self):
"""
        Create self.cfg_data with defaults from the linchpin constants file.
"""
constants_file = '{0}/linchpin.constants'.format(self.constants_path)
constants_file = os.path.realpath(os.path.expanduser(constants_file))
self._parse_config(constants_file)
def load_config_data(self, provider='dummy'):
"""
Load a test-based linchpin.conf into both a configs and evars
dictionary to represent a configuration file
"""
expanded_path = None
config_found = False
# simply modify this variable to adjust where linchpin.conf can be found
CONFIG_PATH = [
'{0}/{1}/conf/linchpin.conf'.format(self.lib_path, provider)
]
for path in CONFIG_PATH:
expanded_path = (
"{0}".format(os.path.realpath(os.path.expanduser(path))))
if os.path.exists(expanded_path):
self._parse_config(expanded_path)
# override logger file
self.cfg_data['logger'] = dict()
self.cfg_data['logger']['file'] = self.logfile
self.evars = self.cfg_data.get('evars', {})
def _parse_config(self, path):
"""
Parse configs into the self.cfg_data dict from provided path.
:param path: A path to a config to parse
"""
try:
config = ConfigParser.ConfigParser()
f = open(path)
config.readfp(f)
f.close()
for section in config.sections():
if not self.cfg_data.get(section):
self.cfg_data[section] = {}
for k in config.options(section):
if section == 'evars':
try:
self.cfg_data[section][k] = (
config.getboolean(section, k)
)
except ValueError:
self.cfg_data[section][k] = config.get(section, k)
else:
try:
self.cfg_data[section][k] = config.get(section, k)
except ConfigParser.InterpolationMissingOptionError:
value = config.get(section, k, raw=True)
self.cfg_data[section][k] = value.replace('%%', '%')
except ConfigParser.InterpolationSyntaxError as e:
raise LinchpinError('Unable to parse configuration file properly:'
' {0}'.format(e))
def get_temp_filename(self):
tmpfile = tempfile.NamedTemporaryFile(delete=False).name
return tmpfile
def write_config_file(self, path):
try:
with open(path, 'a+') as f:
self.parser.write(f)
except Exception as e:
raise LinchpinError('Unable to write configuration file:'
' {0}'.format(e))
def create_config(self, config_data=None):
"""
Creates a config object using ConfigParser from the config_data object
"""
if not config_data:
config_data = self.cfg_data
# we know that data is a dict, containing dicts
try:
for k, v in iteritems(config_data):
self.parser.add_section(k)
for kv, vv in iteritems(v):
if type(vv) is not str:
vv = str(vv)
self.parser.set(k, kv, vv)
except ValueError:
pass
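# Minimal usage sketch (assumes the provider fixture layout expected by load_config_data,
# i.e. a <tests>/dummy/conf/linchpin.conf next to this module):
#
# cd = ContextData()
# cd.load_config_data(provider='dummy')        # merge linchpin.constants + provider config
# cd.create_config()                           # mirror cfg_data into the ConfigParser
# cd.write_config_file(cd.get_temp_filename()) # persist it for a test run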
| samvarankashyap/linch-pin | linchpin/tests/mockdata/contextdata.py | Python | gpl-3.0 | 4,450 | 0.002472 |
from enum import Enum
import re
class OutputTypes:
"""Class representing visible output types"""
class Types(Enum):
"""Types"""
Stdout = 1
Stderr = 2
Result = 3
Image = 4
Pdf = 5
def __init__(self, types_str):
"""Initialization from string"""
self.__types = self.__parse_types(types_str)
def is_enabled(self, type_str):
"""Checks if given type is visible"""
type_str = type_str.lower()
if type_str == 'stdout':
return OutputTypes.Types.Stdout in self.__types
if type_str == 'stderr':
return OutputTypes.Types.Stderr in self.__types
if 'text' in type_str:
return OutputTypes.Types.Result in self.__types
if 'image' in type_str:
return OutputTypes.Types.Image in self.__types
if 'application/pdf' in type_str:
return OutputTypes.Types.Pdf in self.__types
@staticmethod
def __parse_types(types_str):
"""Parses types"""
if types_str is None:
types_str = 'All'
types = set()
types_tokens = [token.lower() for token in re.findall(r'\w+', types_str)]
if 'stdout' in types_tokens:
types.add(OutputTypes.Types.Stdout)
if 'stderr' in types_tokens:
types.add(OutputTypes.Types.Stderr)
if 'result' in types_tokens:
types.add(OutputTypes.Types.Result)
if 'image' in types_tokens:
types.add(OutputTypes.Types.Image)
if 'pdf' in types_tokens:
types.add(OutputTypes.Types.Pdf)
if 'all' in types_tokens:
types.add(OutputTypes.Types.Stdout)
types.add(OutputTypes.Types.Stderr)
types.add(OutputTypes.Types.Result)
types.add(OutputTypes.Types.Image)
types.add(OutputTypes.Types.Pdf)
return types
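if __name__ == '__main__':
    # Small self-check sketch of the parsing/visibility logic above (the output-types
    # string and the type names passed to is_enabled are arbitrary examples).
    types = OutputTypes('stdout, image')
    assert types.is_enabled('stdout') is True
    assert types.is_enabled('image/png') is True
    assert types.is_enabled('text/plain') is False  # 'result' output was not requested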
| jablonskim/jupyweave | jupyweave/settings/output_types.py | Python | mit | 1,920 | 0.000521 |
# Test for one implementation of the interface
from lexicon.providers.nsone import Provider
from integration_tests import IntegrationTests
from unittest import TestCase
import pytest
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from define_tests.TheTests
class Ns1ProviderTests(TestCase, IntegrationTests):
Provider = Provider
provider_name = 'nsone'
domain = 'lexicon-example.com'
def _filter_headers(self):
return ['X-NSONE-Key', 'Authorization']
@pytest.mark.skip(reason="can not set ttl when creating/updating records")
def test_Provider_when_calling_list_records_after_setting_ttl(self):
return
# TODO: this should be enabled
@pytest.mark.skip(reason="regenerating auth keys required")
def test_Provider_when_calling_update_record_should_modify_record_name_specified(self):
return
| tnwhitwell/lexicon | tests/providers/test_nsone.py | Python | mit | 972 | 0.003086 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
import time
from devil.android import device_errors
from pylib import flag_changer
from pylib.base import base_test_result
from pylib.local.device import local_device_test_run
TIMEOUT_ANNOTATIONS = [
('Manual', 10 * 60 * 60),
('IntegrationTest', 30 * 60),
('External', 10 * 60),
('EnormousTest', 10 * 60),
('LargeTest', 5 * 60),
('MediumTest', 3 * 60),
('SmallTest', 1 * 60),
]
# TODO(jbudorick): Make this private once the instrumentation test_runner is
# deprecated.
def DidPackageCrashOnDevice(package_name, device):
# Dismiss any error dialogs. Limit the number in case we have an error
# loop or we are failing to dismiss.
try:
for _ in xrange(10):
package = device.DismissCrashDialogIfNeeded()
if not package:
return False
# Assume test package convention of ".test" suffix
if package in package_name:
return True
except device_errors.CommandFailedError:
logging.exception('Error while attempting to dismiss crash dialog.')
return False
_CURRENT_FOCUS_CRASH_RE = re.compile(
r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
class LocalDeviceInstrumentationTestRun(
local_device_test_run.LocalDeviceTestRun):
def __init__(self, env, test_instance):
super(LocalDeviceInstrumentationTestRun, self).__init__(env, test_instance)
self._flag_changers = {}
def TestPackage(self):
return None
def SetUp(self):
def substitute_external_storage(d, external_storage):
if not d:
return external_storage
elif isinstance(d, list):
return '/'.join(p if p else external_storage for p in d)
else:
return d
def individual_device_set_up(dev, host_device_tuples):
dev.Install(self._test_instance.apk_under_test,
permissions=self._test_instance.apk_under_test_permissions)
dev.Install(self._test_instance.test_apk,
permissions=self._test_instance.test_permissions)
for apk in self._test_instance.additional_apks:
dev.Install(apk)
external_storage = dev.GetExternalStoragePath()
host_device_tuples = [
(h, substitute_external_storage(d, external_storage))
for h, d in host_device_tuples]
logging.info('instrumentation data deps:')
for h, d in host_device_tuples:
logging.info('%r -> %r', h, d)
dev.PushChangedFiles(host_device_tuples)
if self._test_instance.flags:
if not self._test_instance.package_info:
logging.error("Couldn't set flags: no package info")
elif not self._test_instance.package_info.cmdline_file:
logging.error("Couldn't set flags: no cmdline_file")
else:
self._flag_changers[str(dev)] = flag_changer.FlagChanger(
dev, self._test_instance.package_info.cmdline_file)
logging.debug('Attempting to set flags: %r',
self._test_instance.flags)
self._flag_changers[str(dev)].AddFlags(self._test_instance.flags)
self._env.parallel_devices.pMap(
individual_device_set_up,
self._test_instance.GetDataDependencies())
def TearDown(self):
def individual_device_tear_down(dev):
if str(dev) in self._flag_changers:
self._flag_changers[str(dev)].Restore()
self._env.parallel_devices.pMap(individual_device_tear_down)
#override
def _CreateShards(self, tests):
return tests
#override
def _GetTests(self):
return self._test_instance.GetTests()
#override
def _GetTestName(self, test):
return '%s#%s' % (test['class'], test['method'])
#override
def _RunTest(self, device, test):
extras = self._test_instance.GetHttpServerEnvironmentVars()
if isinstance(test, list):
if not self._test_instance.driver_apk:
raise Exception('driver_apk does not exist. '
'Please build it and try again.')
def name_and_timeout(t):
n = self._GetTestName(t)
i = self._GetTimeoutFromAnnotations(t['annotations'], n)
return (n, i)
test_names, timeouts = zip(*(name_and_timeout(t) for t in test))
test_name = ','.join(test_names)
target = '%s/%s' % (
self._test_instance.driver_package,
self._test_instance.driver_name)
extras.update(
self._test_instance.GetDriverEnvironmentVars(
test_list=test_names))
timeout = sum(timeouts)
else:
test_name = self._GetTestName(test)
target = '%s/%s' % (
self._test_instance.test_package, self._test_instance.test_runner)
extras['class'] = test_name
timeout = self._GetTimeoutFromAnnotations(test['annotations'], test_name)
logging.info('preparing to run %s: %s', test_name, test)
time_ms = lambda: int(time.time() * 1e3)
start_ms = time_ms()
output = device.StartInstrumentation(
target, raw=True, extras=extras, timeout=timeout, retries=0)
duration_ms = time_ms() - start_ms
# TODO(jbudorick): Make instrumentation tests output a JSON so this
# doesn't have to parse the output.
logging.debug('output from %s:', test_name)
for l in output:
logging.debug(' %s', l)
result_code, result_bundle, statuses = (
self._test_instance.ParseAmInstrumentRawOutput(output))
results = self._test_instance.GenerateTestResults(
result_code, result_bundle, statuses, start_ms, duration_ms)
if DidPackageCrashOnDevice(self._test_instance.test_package, device):
for r in results:
if r.GetType() == base_test_result.ResultType.UNKNOWN:
r.SetType(base_test_result.ResultType.CRASH)
return results
#override
def _ShouldShard(self):
return True
@staticmethod
def _GetTimeoutFromAnnotations(annotations, test_name):
for k, v in TIMEOUT_ANNOTATIONS:
if k in annotations:
timeout = v
break
else:
logging.warning('Using default 1 minute timeout for %s', test_name)
timeout = 60
try:
scale = int(annotations.get('TimeoutScale', 1))
except ValueError as e:
logging.warning("Non-integer value of TimeoutScale ignored. (%s)", str(e))
scale = 1
timeout *= scale
return timeout
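# Worked example of the timeout resolution above (annotation names/values are illustrative):
# annotations = {'LargeTest': None, 'TimeoutScale': '2'} matches ('LargeTest', 5 * 60) in
# TIMEOUT_ANNOTATIONS and is scaled by 2, giving a 600 second timeout; with no matching
# annotation the default of 60 seconds is used.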
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/build/android/pylib/local/device/local_device_instrumentation_test_run.py | Python | mit | 6,414 | 0.011381 |
# -*- coding: utf-8 -*-
from module.plugins.internal.MultiHook import MultiHook
class SmoozedComHook(MultiHook):
__name__ = "SmoozedComHook"
__type__ = "hook"
__version__ = "0.04"
__status__ = "testing"
__config__ = [("pluginmode" , "all;listed;unlisted", "Use for plugins" , "all"),
("pluginlist" , "str" , "Plugin list (comma separated)", "" ),
("reload" , "bool" , "Reload plugin list" , True ),
("reloadinterval", "int" , "Reload interval in hours" , 12 )]
__description__ = """Smoozed.com hook plugin"""
__license__ = "GPLv3"
__authors__ = [("", "")]
def get_hosters(self):
user, info = self.account.select()
return self.account.get_data(user)['hosters']
| fayf/pyload | module/plugins/hooks/SmoozedComHook.py | Python | gpl-3.0 | 876 | 0.025114 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import numpy
from snisi_core.models.Projects import Cluster
# from snisi_core.models.Reporting import ExpectedReporting
# from snisi_tools.caching import descendants_slugs
from snisi_core.indicators import Indicator, gen_report_indicator
from snisi_vacc.models import VaccCovR, AggVaccCovR
from snisi_tools.misc import import_path
logger = logging.getLogger(__name__)
cluster = Cluster.get_or_none("major_vaccine_monthly")
excludes = ['VaccinationIndicator', 'Indicator']
class VaccinationIndicator(Indicator):
INDIVIDUAL_CLS = VaccCovR
AGGREGATED_CLS = AggVaccCovR
def is_hc(self):
''' whether at HealthCenter/Source level or not (above) '''
return self.entity.type.slug == 'health_center'
def should_yesno(self):
return self.is_hc()
def sum_on_hc(self, field):
return sum(self.all_hc_values(field))
def all_hc_values(self, field):
return [getattr(r, field, None)
for r in self.report.indiv_sources.all()]
def sources_average(self, field):
return float(numpy.mean(self.all_hc_values(field)))
def inverse(self, value):
if value < 0:
return float(1 + numpy.abs(value))
else:
return float(1 - value)
gen_shortcut = lambda field, label=None: gen_report_indicator(
field, name=label, report_cls=VaccCovR,
base_indicator_cls=VaccinationIndicator)
gen_shortcut_agg = lambda field, label=None: gen_report_indicator(
field, name=label, report_cls=AggVaccCovR,
base_indicator_cls=VaccinationIndicator)
class BCGCoverage(VaccinationIndicator):
name = ("Taux de couverture BCG")
is_ratio = True
is_geo_friendly = True
geo_section = "Couverture"
is_yesno = False
def _compute(self):
if self.is_hc():
return self.report.bcg_coverage
else:
return self.sources_average('bcg_coverage')
class Polio3Coverage(VaccinationIndicator):
name = ("Taux de couverture Penta-3")
is_ratio = True
is_geo_friendly = True
geo_section = "Couverture"
is_yesno = False
def _compute(self):
if self.is_hc():
return self.report.polio3_coverage
else:
return self.sources_average('polio3_coverage')
class MeaslesCoverage(VaccinationIndicator):
name = ("Taux de couverture VAR-1")
is_ratio = True
is_geo_friendly = True
geo_section = "Couverture"
is_yesno = False
def _compute(self):
if self.is_hc():
return self.report.measles_coverage
else:
return self.sources_average('measles_coverage')
class NonAbandonmentRate(VaccinationIndicator):
name = ("Taux de poursuite (non-abandon) Penta-1 / Penta-3")
is_ratio = True
is_geo_friendly = True
geo_section = "Abandons"
is_yesno = False
def _compute(self):
if self.is_hc():
return self.inverse(self.report.polio3_abandonment_rate)
else:
return self.inverse(
self.sources_average('polio3_abandonment_rate'))
def is_indicator(module, member, only_geo=False):
ind = get_indicator(module, member)
if not getattr(ind, 'SNISI_INDICATOR', None) or member in excludes:
return False
if only_geo and not getattr(ind, 'is_geo_friendly', None):
return False
return True
def get_indicator(module, member):
if module is None:
return member
return getattr(module, member)
def get_geo_indicators():
indicators = {}
# section = get_section("map")
section = import_path('snisi_vacc.indicators')
for indicator_name in dir(section):
if not is_indicator(section, indicator_name, True):
continue
indicator = import_path('snisi_vacc.indicators.{}'
.format(indicator_name))
geo_section = getattr(indicator, 'geo_section', None)
if geo_section not in indicators.keys():
indicators.update({geo_section: []})
spec = indicator.spec()
spec.update({'slug': indicator.__name__})
indicators[geo_section].append(spec)
return indicators
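# Worked example for NonAbandonmentRate above: a health centre reporting a
# polio3_abandonment_rate of 0.25 yields inverse(0.25) = 1 - 0.25 = 0.75, i.e. a 75%
# continuation (non-abandonment) rate, while negative rates are folded back as
# 1 + abs(value).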
| yeleman/snisi | snisi_vacc/indicators.py | Python | mit | 4,346 | 0.00069 |
if __name__ == '__main__':
print("Loading Modules...")
from setuptools.command import easy_install
def install_with_easyinstall(package):
easy_install.main(["-U", package])
imported = False
tries = 0
while not imported:
try:
import socket, importlib
globals()['PIL'] = importlib.import_module('PIL')
imported = True
except Exception as ex:
print("An error occured when importing PIL: " + str(ex))
tries += 1
if tries == 6:
print("Install Failed.")
while True:
pass
print("Installing PIL... [Try " + str(tries) + "/5]")
try:
install_with_easyinstall('Pillow')
import site, imp
imp.reload(site)
print("PIL installed.")
except Exception as ex:
print("An error occured when installing PIL: " + str(ex))
import time, math, os, queue #, threading
from multiprocessing import Process, Queue, Value, Manager, Array
globals()["exitFlag"] = False
from tkinter import *
import PIL.Image
from PIL import ImageTk
if __name__ == '__main__':
print("All Modules Successfully Loaded!")
print("")
threadnumber = 2
time.sleep(0.5)
def process_data(threadName, q1, q2, im, qlock, ima, rfunc, rerrors, gfunc, gerrors, bfunc, berrors, percent, op):
import math
def funct_if(test,var_true,var_false):
if (test):
return var_true
else:
return var_false
def scale(var_old_min, var_old_max, var_new_min, var_new_max, var_value):
OldSRange = (var_old_max - var_old_min)
NewSRange = (var_new_max - var_new_min)
return (((var_value - var_old_min) * NewSRange) / OldSRange) + var_new_min
def is_even(value_to_test):
return value_to_test % 2 == 0
def draw_funct(dfunction, dxmin, dxmax, dymin, dymax, resolution):
dx = scale(0,canvas_width,dxmin,dxmax,x)
cdy = eval(dfunction)
dx = scale(0,canvas_width,dxmin,dxmax,x-resolution)
pdy = eval(dfunction)
dx = scale(0,canvas_width,dxmin,dxmax,x+resolution)
ndy = eval(dfunction)
cdsy = canvas_height - scale(dymin,dymax,0,canvas_height,cdy)
pdsy = canvas_height - scale(dymin,dymax,0,canvas_height,pdy)
ndsy = canvas_height - scale(dymin,dymax,0,canvas_height,ndy)
dyval = scale(0,canvas_height,dymin,dymax,y)
py = scale(dymin,dymax,0,canvas_height,dyval-resolution)
ny = scale(dymin,dymax,0,canvas_height,dyval+resolution)
#if y - cdsy > py - pdsy and y - cdsy < ny - ndsy:
#if (cdsy - y < pdsy - y and cdsy - y > ndsy - y) or (cdsy - y > pdsy - y and cdsy - y < ndsy - y):
if (0 < pdsy - y and 0 > ndsy - y) or (0 > pdsy - y and 0 < ndsy - y) or round(cdsy - y) == 0:
# print("dx: " + str(dx) + " , dy: " + str(dy))
# if y - dsy < resolution + 1 and y - dsy > 0-(resolution + 1): #round(dsy) == y:
return 255
else:
return 0
red = 0
green = 0
blue = 0
canvas_height = im.height
canvas_width = im.width
OldXRange = (canvas_width - 0)
OldYRange = (canvas_height - 0)
NewRange = (255 - 0)
def pix2index(xpix,ypix):
return ((((canvas_height - ypix - 1)*canvas_width) + (xpix)) * 3) - 3
def getpix(xval,yval):
pixindex = pix2index(xval,yval)
try:
rpix = ima[pixindex]
gpix = ima[pixindex + 1]
bpix = ima[pixindex + 2]
except:
print("ERROR WITH INDEX: " + str(pixindex))
while True:
pass
return (rpix,gpix,bpix)
def setpix(xval,yval,val):
pixindex = pix2index(xval,yval)
ima[pixindex] = val[0]
ima[pixindex + 1] = val[1]
ima[pixindex + 2] = val[2]
print("[" + str(threadName) + "] Started.")
# rfunccomp = eval('lambda: ' + globals()["rfunc"], locals())
# gfunccomp = eval('lambda: ' + globals()["gfunc"], locals())
# bfunccomp = eval('lambda: ' + globals()["bfunc"], locals())
while not im.exitFlag:
gotqdata = False
#queueLock.acquire()
if not q1.empty() and im.currq == 1:
try:
qlock.acquire()
datax = q1.get()
qlock.release()
gotqdata = True
except Exception as ex:
print("Q1Error: " + str(ex))
elif not q2.empty() and im.currq == 2:
try:
qlock.acquire()
datax = q2.get()
qlock.release()
gotqdata = True
except Exception as ex:
print("Q2Error: " + str(ex))
else:
time.sleep(0.1)
if gotqdata:
#queueLock.release()
#print ("%s processing %s" % (threadName, data))
x = datax
#print("[" + str(threadName) + "] Processing " + str(x))
y = canvas_height
while y > 0:
y = y - 1
qlock.acquire()
im.tmppix = im.tmppix + 1
qlock.release()
#print("Solving: " + str(x) + "," + str(y))
value = getpix(x,y)
XValue = round((x * NewRange) / OldXRange)
YValue = round((y * NewRange) / OldYRange)
progress = 255 * (percent.value / 100)
if op == 1:
level = round((value[0]+value[1]+value[2]) / 3)
pixval = (level,level,level)
elif op == 2:
red = value[0]
green = value[1]
blue = value[2]
try:
# r = rfunccomp()
r = eval(rfunc, locals())
except Exception as ex:
print("An Error occured at pixel (" + str(x) + "," + str(y) + "), Colour: " + str(value) + " with the red function: " + rfunc)
print("Error: " + str(ex))
r = 0
rerrors.value = rerrors.value + 1
try:
# g = gfunccomp()
g = eval(gfunc, locals())
except Exception as ex:
print("An Error occured at pixel (" + str(x) + "," + str(y) + "), Colour: " + str(value) + " with the green function: " + gfunc)
print("Error: " + str(ex))
g = 0
gerrors.value = gerrors.value + 1
try:
# b = bfunccomp()
b = eval(bfunc, locals())
except Exception as ex:
print("An Error occured at pixel (" + str(x) + "," + str(y) + "), Colour: " + str(value) + " with the blue function: " + bfunc)
print("Error: " + str(ex))
b = 0
berrors.value = berrors.value + 1
if r < 0:
r = 0
if r > 255:
r = 255
if g < 0:
g = 0
if g > 255:
g = 255
if b < 0:
b = 0
if b > 255:
b = 255
#print(str(red) + "," + str(green) + "," + str(blue) + ";" + str(r) + "," + str(g) + "," + str(b))
pixval = (round(r),round(g),round(b))
else:
pixval = value
# print("Changing pixel (" + str(x) + "," + str(y) + ") from " + str(value) + " to " + str(pixval))
#print("Before: " + str(x) + "," + str(y) + ":" + str(getpix(x,y)))
setpix(x,y,pixval)
#print("After: " + str(x) + "," + str(y) + ":" + str(getpix(x,y)))
else:
#queueLock.release()
pass
#time.sleep(1)
print("[" + str(threadName) + "] Exiting.")
if __name__ == '__main__':
print("""Modes:
0: Generate New Image
1: Load Image from File
""")
source = 0
gotsource = False
while not gotsource:
try:
source = int(input("Mode: "))
if source == 0 or source == 1:
gotsource = True
else:
print("Please enter either 0 or 1")
except:
print("Please enter either 0 or 1")
print("")
if source == 0:
genapproved = ""
while not genapproved.lower() == "y":
print("")
gotdimensions = False
while not gotdimensions:
try:
genheight = int(input("Image Height in Pixels: "))
genwidth = int(input("Image Width in Pixels: "))
if genheight > 0 and genwidth > 0:
gotdimensions = True
else:
print("Please enter a valid integer")
except:
print("Please enter a valid integer")
filename = input("Image name: ")
genapproved = input("Are these settings correct? [Y/N]: ")
print("")
print("Generating Canvas...")
try:
im = PIL.Image.new("RGB",(genwidth,genheight))
except Exception as ex:
print("An error occured when generating a canvas")
print("Error: " + str(ex))
while True:
pass
time.sleep(1)
print("Canvas Generated Successfully")
elif source == 1:
imported = False
while not imported:
try:
filename = input("Image Filename: ")
im = PIL.Image.open(filename)
imported = True
except Exception as ex:
print("An error occured when importing the image: " + str(ex))
else:
print("An Error Occured With Setting The Mode")
while True:
pass
print("""Operations:
0: Nothing
1: Greyscale
2: Custom
""")
opsuccess = False
while not opsuccess:
try:
op = int(input("Operation: "))
if 0 <= op and op <= 2:
opsuccess = True
else:
print("Invalid Op Code")
except:
print("Invalid Op Code")
canvas_height = im.height
canvas_width = im.width
progress = 0
percent = 0
XValue = 0
YValue = 0
x = 0
y = 0
print("")
print("Image Dimensions")
print("Height: " + str(canvas_height))
print("Width: " + str(canvas_width))
print("")
if op == 0:
rfunc = "red"
gfunc = "green"
bfunc = "blue"
elif op == 1:
rfunc = "round((red+green+blue) / 3)"
gfunc = "round((red+green+blue) / 3)"
bfunc = "round((red+green+blue) / 3)"
elif op == 2:
cusapproved = ""
while cusapproved.lower() != "y" :
print("""
Available Variables:
canvas_height
canvas_width
x
y
progress
percent
XValue
YValue
red
green
blue
Available Functions:
Anything from the math module
funct_if(thing to test,value if true, value if false)
scale(value minimum, value maximum, new minimum, new maximum, value)
is_even(value)
draw_funct(function(use dx instead of x and put in quotation marks), x value minimum, x value maximum, y value minimum, y value maximum, resolution in px)
""")
globals()["rfunc"] = str(input("Red function: "))
globals()["gfunc"] = str(input("Green function: "))
globals()["bfunc"] = str(input("Blue function: "))
cusapproved = input("Are these functions correct? [Y/N]: ")
x = 0
y = 0
pix = 0
tmpx = 0
OldXRange = (im.width - 0)
OldYRange = (im.height - 0)
NewRange = (255 - 0)
print("Starting Conversion...")
starttime = time.time()
manager = Manager()
#threadList = ["Thread-1", "Thread-2", "Thread-3"]
#queueLock = threading.Lock()
# workQueue = queue.Queue(50000)
workQueue1 = manager.Queue(2500)
workQueue2 = manager.Queue(2500)
threads = []
threadID = 1
threadnum = threadnumber
imlist = list(im.tobytes())
#ima = list(im.getdata())
mcim = manager.Namespace()
mcim.exitFlag = False
mcim.tmppix = 0
mcim.currq = 200
mcim.height = im.height
mcim.width = im.width
mcqlock = manager.Lock()
mcima = Array("i",imlist)
rerrors = Value('d', 0)
gerrors = Value('d', 0)
berrors = Value('d', 0)
percent = Value('d', 0)
# Create new threads
print("Starting Processes...")
for tNum in range(threadnum):
#thread = myThread(threadID, "Thread-" + str(threadID), workQueue)
# process_data(threadName, q, im, exitFlag, tmppix, rfunc, rerrors, gfunc, gerrors, bfunc, berrors, percent)
thread = Process(target=process_data, args=("Process-" + str(threadID), workQueue1 , workQueue2, mcim, mcqlock, mcima, rfunc, rerrors, gfunc, gerrors, bfunc, berrors, percent, op,))
thread.start()
threads.append(thread)
threadID += 1
status = Tk()
status.title(string = "Status")
percentchange = 0
totalpix = im.width * im.height
time.sleep(1)
pixtmp = 0
print("Allocating Pixels...")
mcim.currq = 2
while tmpx < im.width:
while (workQueue1.full() and workQueue2.full()) and not (workQueue1.empty() and workQueue2.empty()):
print("FULL: " + str(workQueue1.full() and workQueue2.full()))
print("EMPTY: " + str(not (workQueue1.empty() and workQueue2.empty())))
if workQueue1.full() and workQueue2.empty():
mcim.currq = 1
print("Q1")
elif workQueue2.full() and workQueue1.empty():
mcim.currq = 2
print("Q2")
elif (mcim.currq == 1 and workQueue2.full()) or (mcim.currq == 2 and workQueue1.full()):
time.sleep(0.5)
else:
pass
try:
if mcim.currq == 1:
workQueue2.put(tmpx)
elif mcim.currq == 2:
workQueue1.put(tmpx)
else:
print("invalid currq")
pixtmp += 1
except:
print("put error")
print(str(pixtmp) + "/" + str(im.width))
oldpercent = percent.value
percentl = (mcim.tmppix - 1) / (totalpix / 100)
percent.value = round(percentl,1)
if oldpercent != percent.value:
Label(status,text = (str(percent.value) + "%"), anchor="w").grid(row = 1, column = 1)
status.update()
tmpx = tmpx + 1
print("Finished allocating pixels")
while mcim.tmppix != totalpix:
if workQueue1.empty() and not workQueue2.empty():
mcim.currq = 2
elif not workQueue1.empty() and workQueue2.empty():
mcim.currq = 1
oldpercent = percent.value
percentl = (mcim.tmppix - 1) / (totalpix / 100)
percent.value = round(percentl,1)
if oldpercent != percent.value:
Label(status,text = (str(percent.value) + "%"), anchor="w").grid(row = 1, column = 1)
status.update()
time.sleep(0.1)
print("Queue Size: " + str(workQueue1.qsize() + workQueue2.qsize()) + " , ExitFlag: " + str(mcim.exitFlag) + " , " + str(mcim.tmppix) + "/" + str(totalpix) + " , QSIZE+TMPPIX: " + str((workQueue1.qsize() + workQueue2.qsize())*im.height + mcim.tmppix))
mcim.exitFlag = True
print("Stopping Processes...")
for t in threads:
t.join()
Label(status,text = (str(100.0) + "%"), anchor="w").grid(row = 1, column = 1)
status.update()
#imoutput = mcim.im
imoutput = PIL.Image.new(im.mode,im.size)
imoutput.frombytes(bytes(mcima))
endtime = time.time()
processtime = endtime - starttime
s2m = divmod(processtime, 60)
m2h = divmod(s2m[0], 60)
timeseconds = round(s2m[1],3)
timeminutes = round(m2h[1])
timehours = round(m2h[0])
print("Conversion Completed Successfully in " + str(timehours) + " hours, " + str(timeminutes) + " minutes and " + str(timeseconds) + " seconds.")
time.sleep(0.5)
print()
print("Conversion Summary:")
time.sleep(0.5)
print("Your Red Function: Red = " + str(rfunc) + " had " + str(rerrors.value) + " error(s).")
time.sleep(0.5)
print("Your Green Function: Green = " + str(gfunc) + " had " + str(gerrors.value) + " error(s).")
time.sleep(0.5)
print("Your Blue Function: Blue = " + str(bfunc) + " had " + str(berrors.value) + " error(s).")
print("")
time.sleep(1)
print("Saving...")
savid = 0
saved = False
while not saved:
if not os.path.isfile(filename + "-" + str(savid) + "sav.png"):
imoutput.save(filename + "-" + str(savid) + "sav.png", "PNG")
saved = True
else:
savid = savid + 1
print("Saved as: " + filename + "-" + str(savid) + "sav.png")
status.destroy()
root = Tk()
photo = ImageTk.PhotoImage(imoutput)
canvas = Canvas(width=canvas_width, height=canvas_height, bg='white')
canvas.pack()
canvas.create_image(canvas_width/2, canvas_height/2, image=photo)
root.mainloop()
while True:
pass
| TNT-Samuel/Coding-Projects | Image Test/_ImageEdit3MultiProcess.py | Python | gpl-3.0 | 17,506 | 0.00914 |
# deviceaction.py
# Device modification action classes for anaconda's storage configuration
# module.
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com>
#
from udev import *
import math
from devices import StorageDevice
from devices import PartitionDevice
from devices import LVMLogicalVolumeDevice
from formats import getFormat
from errors import *
from parted import partitionFlag, PARTITION_LBA
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import logging
log = logging.getLogger("storage")
# The values are just hints as to the ordering.
# Eg: fsmod and devmod ordering depends on the mod (shrink -v- grow)
ACTION_TYPE_NONE = 0
ACTION_TYPE_DESTROY = 1000
ACTION_TYPE_RESIZE = 500
ACTION_TYPE_MIGRATE = 250
ACTION_TYPE_CREATE = 100
action_strings = {ACTION_TYPE_NONE: "None",
ACTION_TYPE_DESTROY: "Destroy",
ACTION_TYPE_RESIZE: "Resize",
ACTION_TYPE_MIGRATE: "Migrate",
ACTION_TYPE_CREATE: "Create"}
ACTION_OBJECT_NONE = 0
ACTION_OBJECT_FORMAT = 1
ACTION_OBJECT_DEVICE = 2
object_strings = {ACTION_OBJECT_NONE: "None",
ACTION_OBJECT_FORMAT: "Format",
ACTION_OBJECT_DEVICE: "Device"}
RESIZE_SHRINK = 88
RESIZE_GROW = 89
resize_strings = {RESIZE_SHRINK: "Shrink",
RESIZE_GROW: "Grow"}
def action_type_from_string(type_string):
if type_string is None:
return None
for (k,v) in action_strings.items():
if v.lower() == type_string.lower():
return k
return resize_type_from_string(type_string)
def action_object_from_string(type_string):
if type_string is None:
return None
for (k,v) in object_strings.items():
if v.lower() == type_string.lower():
return k
def resize_type_from_string(type_string):
if type_string is None:
return None
for (k,v) in resize_strings.items():
if v.lower() == type_string.lower():
return k
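# Illustrative examples (not part of the original module): action_type_from_string()
# matches case-insensitively and falls back to the resize strings, e.g.
#   action_type_from_string("destroy") -> ACTION_TYPE_DESTROY
#   action_type_from_string("grow")    -> RESIZE_GROW  (via resize_type_from_string)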
class DeviceAction(object):
""" An action that will be carried out in the future on a Device.
These classes represent actions to be performed on devices or
filesystems.
The operand Device instance will be modified according to the
action, but no changes will be made to the underlying device or
filesystem until the DeviceAction instance's execute method is
called. The DeviceAction instance's cancel method should reverse
any modifications made to the Device instance's attributes.
If the Device instance represents a pre-existing device, the
constructor should call any methods or set any attributes that the
action will eventually change. Device/DeviceFormat classes should verify
that the requested modifications are reasonable and raise an
exception if not.
Only one action of any given type/object pair can exist for any
given device at any given time. This is enforced by the
DeviceTree.
Basic usage:
a = DeviceAction(dev)
a.execute()
OR
a = DeviceAction(dev)
a.cancel()
XXX should we back up the device with a deep copy for forcibly
cancelling actions?
The downside is that we lose any checking or verification that
would get done when resetting the Device instance's attributes to
their original values.
The upside is that we would be guaranteed to achieve a total
reversal. No chance of, eg: resizes ending up altering Device
size due to rounding or other miscalculation.
"""
type = ACTION_TYPE_NONE
obj = ACTION_OBJECT_NONE
_id = 0
def __init__(self, device):
if not isinstance(device, StorageDevice):
raise ValueError("arg 1 must be a StorageDevice instance")
self.device = device
# Establish a unique id for each action instance. Making shallow or
        # deep copies of DeviceAction instances will require __copy__ and
# __deepcopy__ methods to handle incrementing the id in the copy
self.id = DeviceAction._id
DeviceAction._id += 1
def execute(self, intf=None):
""" perform the action """
pass
def cancel(self):
""" cancel the action """
pass
@property
def isDestroy(self):
return self.type == ACTION_TYPE_DESTROY
@property
def isCreate(self):
return self.type == ACTION_TYPE_CREATE
@property
def isMigrate(self):
return self.type == ACTION_TYPE_MIGRATE
@property
def isResize(self):
return self.type == ACTION_TYPE_RESIZE
@property
def isShrink(self):
return (self.type == ACTION_TYPE_RESIZE and self.dir == RESIZE_SHRINK)
@property
def isGrow(self):
return (self.type == ACTION_TYPE_RESIZE and self.dir == RESIZE_GROW)
@property
def isDevice(self):
return self.obj == ACTION_OBJECT_DEVICE
@property
def isFormat(self):
return self.obj == ACTION_OBJECT_FORMAT
@property
def format(self):
return self.device.format
def __str__(self):
s = "[%d] %s %s" % (self.id, action_strings[self.type],
object_strings[self.obj])
if self.isResize:
s += " (%s)" % resize_strings[self.dir]
if self.isFormat:
s += " %s" % self.format.desc
if self.isMigrate:
s += " to %s" % self.format.migrationTarget
s += " on"
s += " %s %s (id %d)" % (self.device.type, self.device.name,
self.device.id)
return s
def requires(self, action):
""" Return True if self requires action. """
return False
def obsoletes(self, action):
""" Return True is self obsoletes action.
DeviceAction instances obsolete other DeviceAction instances with
lower id and same device.
"""
return (self.device.id == action.device.id and
self.type == action.type and
self.obj == action.obj and
self.id > action.id)
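# Illustrative sketch (an assumption about the caller, not code from this module):
# a scheduler such as the DeviceTree can use these hooks to prune and order actions,
# roughly like
#   live = [a for a in actions if not any(b.obsoletes(a) for b in actions if b is not a)]
#   # and an action a is only executed once every b with a.requires(b) has run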
class ActionCreateDevice(DeviceAction):
""" Action representing the creation of a new device. """
type = ACTION_TYPE_CREATE
obj = ACTION_OBJECT_DEVICE
def __init__(self, device):
if device.exists:
raise ValueError("device already exists")
# FIXME: assert device.fs is None
DeviceAction.__init__(self, device)
def execute(self, intf=None):
self.device.create(intf=intf)
def requires(self, action):
""" Return True if self requires action.
Device create actions require other actions when either of the
following is true:
- this action's device depends on the other action's device
- both actions are partition create actions on the same disk
and this partition has a higher number
"""
rc = False
if self.device.dependsOn(action.device):
rc = True
elif (action.isCreate and action.isDevice and
isinstance(self.device, PartitionDevice) and
isinstance(action.device, PartitionDevice) and
self.device.disk == action.device.disk):
# create partitions in ascending numerical order
selfNum = self.device.partedPartition.number
otherNum = action.device.partedPartition.number
if selfNum > otherNum:
rc = True
elif (action.isCreate and action.isDevice and
isinstance(self.device, LVMLogicalVolumeDevice) and
isinstance(action.device, LVMLogicalVolumeDevice) and
self.device.vg == action.device.vg and
action.device.singlePV and not self.device.singlePV):
rc = True
return rc
class ActionDestroyDevice(DeviceAction):
""" An action representing the deletion of an existing device. """
type = ACTION_TYPE_DESTROY
obj = ACTION_OBJECT_DEVICE
def __init__(self, device):
# XXX should we insist that device.fs be None?
DeviceAction.__init__(self, device)
if device.exists:
device.teardown()
def execute(self, intf=None):
self.device.destroy()
# Make sure libparted does not keep cached info for this device
# and returns it when we create a new device with the same name
if self.device.partedDevice:
self.device.partedDevice.removeFromCache()
def requires(self, action):
""" Return True if self requires action.
Device destroy actions require other actions when either of the
following is true:
- the other action's device depends on this action's device
                - both actions are partition destroy actions on the same disk
and this partition has a lower number
"""
rc = False
if action.device.dependsOn(self.device) and action.isDestroy:
rc = True
elif (action.isDestroy and action.isDevice and
isinstance(self.device, PartitionDevice) and
isinstance(action.device, PartitionDevice) and
self.device.disk == action.device.disk):
# remove partitions in descending numerical order
selfNum = self.device.partedPartition.number
otherNum = action.device.partedPartition.number
if selfNum < otherNum:
rc = True
elif (action.isDestroy and action.isFormat and
action.device.id == self.device.id):
# device destruction comes after destruction of device's format
rc = True
return rc
def obsoletes(self, action):
""" Return True if self obsoletes action.
- obsoletes all actions w/ lower id that act on the same device,
including self, if device does not exist
- obsoletes all but ActionDestroyFormat actions w/ lower id on the
same device if device exists
"""
rc = False
if action.device.id == self.device.id:
if self.id >= action.id and not self.device.exists:
rc = True
elif self.id > action.id and \
self.device.exists and \
not (action.isDestroy and action.isFormat):
rc = True
return rc
class ActionResizeDevice(DeviceAction):
""" An action representing the resizing of an existing device. """
type = ACTION_TYPE_RESIZE
obj = ACTION_OBJECT_DEVICE
def __init__(self, device, newsize):
if not device.resizable:
raise ValueError("device is not resizable")
if long(math.floor(device.currentSize)) == newsize:
raise ValueError("new size same as old size")
DeviceAction.__init__(self, device)
if newsize > long(math.floor(device.currentSize)):
self.dir = RESIZE_GROW
else:
self.dir = RESIZE_SHRINK
if device.targetSize > 0:
self.origsize = device.targetSize
else:
self.origsize = device.size
self.device.targetSize = newsize
def execute(self, intf=None):
self.device.resize(intf=intf)
def cancel(self):
self.device.targetSize = self.origsize
def requires(self, action):
""" Return True if self requires action.
A device resize action requires another action if:
- the other action is a format resize on the same device and
both are shrink operations
- the other action grows a device (or format it contains) that
this action's device depends on
- the other action shrinks a device (or format it contains)
that depends on this action's device
"""
retval = False
if action.isResize:
if self.device.id == action.device.id and \
self.dir == action.dir and \
action.isFormat and self.isShrink:
retval = True
elif action.isGrow and self.device.dependsOn(action.device):
retval = True
elif action.isShrink and action.device.dependsOn(self.device):
retval = True
return retval
class ActionCreateFormat(DeviceAction):
""" An action representing creation of a new filesystem. """
type = ACTION_TYPE_CREATE
obj = ACTION_OBJECT_FORMAT
def __init__(self, device, format=None):
DeviceAction.__init__(self, device)
if format:
self.origFormat = device.format
if self.device.format.exists:
self.device.format.teardown()
self.device.format = format
else:
self.origFormat = getFormat(None)
def execute(self, intf=None):
self.device.setup()
if isinstance(self.device, PartitionDevice):
for flag in partitionFlag.keys():
# Keep the LBA flag on pre-existing partitions
if flag in [ PARTITION_LBA, self.format.partedFlag ]:
continue
self.device.unsetFlag(flag)
if self.format.partedFlag is not None:
self.device.setFlag(self.format.partedFlag)
if self.format.partedSystem is not None:
self.device.partedPartition.system = self.format.partedSystem
self.device.disk.format.commitToDisk()
self.device.format.create(intf=intf,
device=self.device.path,
options=self.device.formatArgs)
# Get the UUID now that the format is created
udev_settle()
self.device.updateSysfsPath()
info = udev_get_block_device(self.device.sysfsPath)
self.device.format.uuid = udev_device_get_uuid(info)
def cancel(self):
self.device.format = self.origFormat
def requires(self, action):
""" Return True if self requires action.
Format create action can require another action if:
- this action's device depends on the other action's device
and the other action is not a device destroy action
- the other action is a create or resize of this action's
device
"""
return ((self.device.dependsOn(action.device) and
not (action.isDestroy and action.isDevice)) or
(action.isDevice and (action.isCreate or action.isResize) and
self.device.id == action.device.id))
def obsoletes(self, action):
""" Return True if this action obsoletes action.
Format create actions obsolete the following actions:
- format actions w/ lower id on this action's device, other
than those that destroy existing formats
"""
return (self.device.id == action.device.id and
self.obj == action.obj and
not (action.isDestroy and action.format.exists) and
self.id > action.id)
class ActionDestroyFormat(DeviceAction):
""" An action representing the removal of an existing filesystem. """
type = ACTION_TYPE_DESTROY
obj = ACTION_OBJECT_FORMAT
def __init__(self, device):
DeviceAction.__init__(self, device)
self.origFormat = self.device.format
if device.format.exists:
device.format.teardown()
self.device.format = None
def execute(self, intf=None):
""" wipe the filesystem signature from the device """
self.device.setup(orig=True)
self.format.destroy()
udev_settle()
self.device.teardown()
def cancel(self):
self.device.format = self.origFormat
@property
def format(self):
return self.origFormat
def requires(self, action):
""" Return True if self requires action.
Format destroy actions require other actions when:
- the other action's device depends on this action's device
and the other action is a destroy action
"""
return action.device.dependsOn(self.device) and action.isDestroy
def obsoletes(self, action):
""" Return True if this action obsoletes action.
Format destroy actions obsolete the following actions:
- format actions w/ lower id on same device, including self if
format does not exist
- format destroy action on a non-existent format shouldn't
obsolete a format destroy action on an existing one
"""
return (self.device.id == action.device.id and
                self.obj == action.obj and
(self.id > action.id or
(self.id == action.id and not self.format.exists)) and
not (action.format.exists and not self.format.exists))
class ActionResizeFormat(DeviceAction):
""" An action representing the resizing of an existing filesystem.
XXX Do we even want to support resizing of a filesystem without
also resizing the device it resides on?
"""
type = ACTION_TYPE_RESIZE
obj = ACTION_OBJECT_FORMAT
def __init__(self, device, newsize):
if not device.format.resizable:
raise ValueError("format is not resizable")
if long(math.floor(device.format.currentSize)) == newsize:
raise ValueError("new size same as old size")
DeviceAction.__init__(self, device)
if newsize > long(math.floor(device.format.currentSize)):
self.dir = RESIZE_GROW
else:
self.dir = RESIZE_SHRINK
self.origSize = self.device.format.targetSize
self.device.format.targetSize = newsize
def execute(self, intf=None):
self.device.setup(orig=True)
self.device.format.doResize(intf=intf)
def cancel(self):
self.device.format.targetSize = self.origSize
def requires(self, action):
""" Return True if self requires action.
A format resize action requires another action if:
- the other action is a device resize on the same device and
both are grow operations
- the other action shrinks a device (or format it contains)
that depends on this action's device
- the other action grows a device (or format) that this
action's device depends on
"""
retval = False
if action.isResize:
if self.device.id == action.device.id and \
self.dir == action.dir and \
action.isDevice and self.isGrow:
retval = True
elif action.isShrink and action.device.dependsOn(self.device):
retval = True
elif action.isGrow and self.device.dependsOn(action.device):
retval = True
return retval
class ActionMigrateFormat(DeviceAction):
""" An action representing the migration of an existing filesystem. """
type = ACTION_TYPE_MIGRATE
obj = ACTION_OBJECT_FORMAT
def __init__(self, device):
if not device.format.migratable or not device.format.exists:
raise ValueError("device format is not migratable")
DeviceAction.__init__(self, device)
self.device.format.migrate = True
def execute(self, intf=None):
self.device.setup(orig=True)
self.device.format.doMigrate(intf=intf)
def cancel(self):
self.device.format.migrate = False
| kalev/anaconda | pyanaconda/storage/deviceaction.py | Python | gpl-2.0 | 20,775 | 0.000722 |
from django.db import models
# Create your models here.
class Autor(models.Model):
nombre = models.CharField(max_length=50)
edad = models.IntegerField(null=True, blank=True)
email = models.EmailField()
def __unicode__(self):
return self.nombre
class Meta:
verbose_name_plural = "Autores"
class Articulo(models.Model):
autor = models.ForeignKey('Autor', null=True)
titulo = models.CharField(max_length=100)
texto = models.TextField(blank=True, null=True)
created = models.DateTimeField('Agregado',auto_now_add=True, null=True, blank=True)
def __unicode__(self):
return self.titulo
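# Illustrative usage from a Django shell (names are examples only):
#   autor = Autor.objects.create(nombre='Ana', edad=30, email='ana@example.com')
#   Articulo.objects.create(autor=autor, titulo='Primer articulo', texto='...')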
| melizeche/PyBlog | blog/models.py | Python | gpl-2.0 | 607 | 0.039539 |
from django.db import models
from django import forms
class installed(models.Model):
name = models.CharField(max_length=300)
active = models.CharField(max_length=300)
class vectors(models.Model):
name = models.CharField(max_length=300)
active = models.CharField(max_length=300)
class iptrack(models.Model):
address = models.CharField(max_length=300)
mac = models.CharField(max_length=300)
os = models.CharField(max_length=300, default = 'unknown')
osdetails = models.CharField(max_length=300)
injected = models.CharField(max_length=300)
expand = models.CharField(max_length=300, default = '0')
class scan(models.Model):
address = models.CharField(max_length=300)
ports = models.CharField(max_length=300)
osdetails = models.CharField(max_length=300)
hostname = models.CharField(max_length=300)
scanning = models.CharField(max_length=300, default = '0')
class apgen(models.Model):
essid = models.CharField(max_length=300)
channel = models.CharField(max_length=300)
atknic = models.CharField(max_length=300)
netnic = models.CharField(max_length=300)
class arppoison(models.Model):
target = models.CharField(max_length=300, default = 'none')
method = models.CharField(max_length=300, default = 'none')
class sessions(models.Model):
source = models.CharField(max_length=300)
session = models.CharField(max_length=300)
date = models.CharField(max_length=300)
| 0sm0s1z/subterfuge | modules/models.py | Python | gpl-3.0 | 1,614 | 0.02912 |
"""
Course rerun page in Studio
"""
from .course_page import CoursePage
from .utils import set_input_value
class CourseRerunPage(CoursePage):
"""
Course rerun page in Studio
"""
url_path = "course_rerun"
COURSE_RUN_INPUT = '.rerun-course-run'
def is_browser_on_page(self):
"""
Returns True iff the browser has loaded the course rerun page.
"""
return self.q(css='body.view-course-create-rerun').present
@property
def course_run(self):
"""
Returns the value of the course run field.
"""
return self.q(css=self.COURSE_RUN_INPUT).text[0]
@course_run.setter
def course_run(self, value):
"""
Sets the value of the course run field.
"""
set_input_value(self, self.COURSE_RUN_INPUT, value)
def create_rerun(self):
"""
Clicks the create rerun button.
"""
self.q(css='.rerun-course-save')[0].click()
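# Illustrative usage from a bok-choy acceptance test (the course key parts are
# placeholders, and CoursePage is assumed to take browser/org/number/run):
#   rerun_page = CourseRerunPage(browser, 'org', 'number', 'run')
#   rerun_page.visit()
#   rerun_page.course_run = 'new_run'
#   rerun_page.create_rerun()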
| ahmadiga/min_edx | common/test/acceptance/pages/studio/course_rerun.py | Python | agpl-3.0 | 971 | 0 |
'''
Copyright 2010-2013 DIMA Research Group, TU Berlin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Dec 15, 2011
@author: Alexander Alexandrov <alexander.alexandrov@tu-berlin.de>
'''
from myriad.compiler.visitor import AbstractVisitor
class PrintVisitor(AbstractVisitor):
'''
classdocs
'''
__indent = 0
__indentPrefix = " "
def __init__(self, *args, **kwargs):
super(PrintVisitor, self).__init__(*args, **kwargs)
def traverse(self, node):
print "~" * 160
node.accept(self)
print "~" * 160
def _preVisitAbstractNode(self, node):
if (len(node.allAttributes()) == 0):
            # print node without attributes
print "%s+ %s" % (self.__indentPrefix * self.__indent, node.__class__.__name__)
else:
            # print node with attributes
print "%s+ %s {" % (self.__indentPrefix * self.__indent, node.__class__.__name__)
for (k, v) in node.allAttributes().iteritems():
print "%s'%s': '%s'," % (self.__indentPrefix * (self.__indent + 3), k, v)
print "%s}" % (self.__indentPrefix * (self.__indent + 2))
self._increaseIndent()
def _postVisitAbstractNode(self, node):
self._decreaseIndent()
# def _preVisitSetItemNode(self, node):
# pass
#
# def _postVisitSetItemNode(self, node):
# pass
def _increaseIndent(self):
self.__indent = self.__indent + 1
def _decreaseIndent(self):
self.__indent = self.__indent - 1
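# Illustrative usage (assuming `root_node` is the root of a parsed node tree):
#   PrintVisitor().traverse(root_node)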
| codeaudit/myriad-toolkit | tools/python/myriad/compiler/debug.py | Python | apache-2.0 | 2,056 | 0.006323 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
### ebrpc
## query    = ebencode({'y':'q', 'q':'<method>', 'a':[<params>]})
## response = ebencode({'y':'r', 'r':<return value>})
## fault    = ebencode({'y':'e', 'c':'<fault code>', 's':'<fault string>'})
from xmlrpclib import Error, Fault
from types import TupleType
from BTL.ebencode import ebencode, ebdecode
def dump_fault(code, msg):
return ebencode({'y':'e', 'c':code, 's':msg})
def dumps(params, methodname=None, methodresponse=None, encoding=None, allow_none=False):
if methodresponse and isinstance(params, TupleType):
assert len(params) == 1, "response tuple must be a singleton"
if methodname:
out = ebencode({'y':'q', 'q':methodname, 'a':params})
elif isinstance(params, Fault):
out = ebencode({'y':'e', 'c':params.faultCode, 's':params.faultString})
elif methodresponse:
out = ebencode({'y':'r', 'r':params[0]})
else:
raise Error("")
return out
def loads(data):
d = ebdecode(data)
if d['y'] == 'e':
raise Fault(d['c'], d['s']) # the server raised a fault
elif d['y'] == 'r':
# why is this return value so weird?
# because it's the way that loads works in xmlrpclib
return (d['r'],), None
elif d['y'] == 'q':
return d['a'], d['q']
raise ValueError
class DFault(Exception):
"""Indicates an Datagram EBRPC fault package."""
# If you return a DFault with tid=None from within a function called via
# twispread's TEBRPC.callRemote then TEBRPC will insert the tid for the call.
def __init__(self, faultCode, faultString, tid=None):
self.faultCode = faultCode
self.faultString = faultString
self.tid = tid
self.args = (faultCode, faultString)
def __repr__(self):
return (
"<Fault %s: %s>" %
(self.faultCode, repr(self.faultString))
)
### datagram interface
### has transaction ID as third return value
### slightly different API, returns a tid as third argument in query/response
def dumpd(params, methodname=None, methodresponse=None, encoding=None, allow_none=False, tid=None):
assert tid is not None, "need a transaction identifier"
if methodname:
out = ebencode({'y':'q', 't':tid, 'q':methodname, 'a':params})
elif isinstance(params, DFault):
out = ebencode({'y':'e', 't':tid, 'c':params.faultCode, 's':params.faultString})
elif methodresponse:
out = ebencode({'y':'r', 't':tid, 'r':params})
else:
raise Error("")
return out
def loadd(data):
d = ebdecode(data)
if d['y'] == 'e':
raise DFault(d['c'], d['s'], d['t'])
elif d['y'] == 'r':
return d['r'], None, d['t']
elif d['y'] == 'q':
return d['a'], d['q'], d['t']
raise ValueError
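# Illustrative round trips (assuming the payloads are ebencode-able):
#   blob = dumps(('pong',), methodresponse=True)  # response packet
#   (value,), method = loads(blob)                # value == 'pong', method is None
#   packet = dumpd({'k': 1}, methodname='ping', tid='t1')
#   args, method, tid = loadd(packet)             # ({'k': 1}, 'ping', 't1')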
| kenorb-contrib/BitTorrent | python_bt_codebase_library/BTL/ebrpc.py | Python | gpl-3.0 | 3,425 | 0.012555 |
from openerp import api, models, fields, SUPERUSER_ID
class reminder(models.AbstractModel):
_name = 'reminder'
_reminder_date_field = 'date'
_reminder_description_field = 'description'
# res.users or res.partner fields
_reminder_attendees_fields = ['user_id']
reminder_event_id = fields.Many2one('calendar.event',
string='Reminder Calendar Event')
reminder_alarm_ids = fields.Many2many('calendar.alarm', string='Reminders',
related='reminder_event_id.alarm_ids')
@api.one
def _get_reminder_event_name(self):
return '%s: %s' % (self._description, self.display_name)
@api.model
def _create_reminder_event(self):
vals = {
'reminder_res_model': self._name,
# dummy values
'name': 'TMP NAME',
'allday': True,
'start_date': fields.Date.today(),
'stop_date': fields.Date.today(),
}
event = self.env['calendar.event'].with_context({}).create(vals)
return event
@api.model
def _init_reminder(self):
domain = [(self._reminder_date_field, '!=', False)]
self.search(domain)._do_update_reminder()
@api.one
def _update_reminder(self, vals):
if self._context.get('do_not_update_reminder'):
# ignore own calling of write function
return
if not vals:
return
if not self.reminder_event_id and self._reminder_date_field not in vals:
# don't allow to create reminder if date is not set
return
fields = ['reminder_alarm_ids',
self._reminder_date_field,
self._reminder_description_field]
if not any([k in vals for k in fields if k]):
return
self._do_update_reminder(update_date=self._reminder_date_field in vals)
@api.one
def _do_update_reminder(self, update_date=True):
vals = {'name': self._get_reminder_event_name()[0]}
event = self.reminder_event_id
if not event:
event = self._create_reminder_event()
self.with_context(do_not_update_reminder=True).write({'reminder_event_id': event.id})
if not event.reminder_res_id:
vals['reminder_res_id'] = self.id
if update_date:
fdate = self._fields[self._reminder_date_field]
fdate_value = getattr(self, self._reminder_date_field)
if not fdate_value:
event.unlink()
return
if fdate.type == 'date':
vals.update({
'allday': True,
'start_date': fdate_value,
'stop_date': fdate_value,
})
elif fdate.type == 'datetime':
vals.update({
'allday': False,
'start_datetime': fdate_value,
'stop_datetime': fdate_value,
})
if self._reminder_description_field:
vals['description'] = getattr(self, self._reminder_description_field)
if self._reminder_attendees_fields:
partner_ids = []
for field_name in self._reminder_attendees_fields:
field = self._columns[field_name]
partner = getattr(self, field_name)
model = None
try:
model = field.comodel_name
except AttributeError:
model = field._obj # v7
if model == 'res.users':
partner = partner.partner_id
if partner.id not in partner_ids:
partner_ids.append(partner.id)
vals['partner_ids'] = [(6, 0, partner_ids)]
event.write(vals)
@api.model
def _check_and_create_reminder_event(self, vals):
fields = [self._reminder_date_field]
if any([k in vals for k in fields]):
event = self._create_reminder_event()
vals['reminder_event_id'] = event.id
return vals
@api.model
def create(self, vals):
vals = self._check_and_create_reminder_event(vals)
res = super(reminder, self).create(vals)
res._update_reminder(vals)
return res
@api.one
def write(self, vals):
if not self.reminder_event_id:
vals = self._check_and_create_reminder_event(vals)
res = super(reminder, self).write(vals)
self._update_reminder(vals)
return res
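# Illustrative sketch (not part of this module): a concrete model opts in by
# inheriting the mixin and providing the fields named by the _reminder_* hooks, e.g.
#   class my_task(models.Model):
#       _name = 'my.task'
#       _inherit = 'reminder'
#       date = fields.Date()
#       description = fields.Text()
#       user_id = fields.Many2one('res.users')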
class calendar_event(models.Model):
_inherit = 'calendar.event'
reminder_res_model = fields.Char('Related Document Model for reminding')
reminder_res_id = fields.Integer('Related Document ID for reminding')
@api.multi
def open_reminder_object(self):
r = self[0]
target = self._context.get('target', 'current')
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': r.reminder_res_model,
'res_id': r.reminder_res_id,
'views': [(False, 'form')],
'target': target,
}
class reminder_admin_wizard(models.TransientModel):
_name = 'reminder.admin'
model = fields.Selection(string='Model', selection='_get_model_list', required=True)
events_count = fields.Integer(string='Count of calendar records', compute='_get_events_count')
action = fields.Selection(string='Action', selection=[('create', 'Create Calendar Records'), ('delete', 'Delete Calendar Records')],
required=True, default='create',)
def _get_model_list(self):
res = []
for r in self.env['ir.model.fields'].search([('name', '=', 'reminder_event_id')]):
if r.model_id.model == 'reminder':
# ignore abstract class
continue
res.append( (r.model_id.model, r.model_id.name) )
return res
@api.onchange('model')
@api.one
def _get_events_count(self):
count = 0
if self.model:
count = self.env['calendar.event'].search_count([('reminder_res_model', '=', self.model)])
self.events_count = count
@api.one
def action_execute(self):
if self.action == 'delete':
self.env['calendar.event'].search([('reminder_res_model', '=', self.model)]).unlink()
elif self.action == 'create':
self.env[self.model]._init_reminder()
| Trust-Code/addons-yelizariev | reminder_base/reminder_base_models.py | Python | lgpl-3.0 | 6,555 | 0.001831 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['quantity_input']
import inspect
from astropy.utils.decorators import wraps
from astropy.utils.misc import isiterable
from .core import Unit, UnitBase, UnitsError, add_enabled_equivalencies
from .physical import _unit_physical_mapping
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try: # unit passed in as a string
target_unit = Unit(target)
except ValueError:
try: # See if the function writer specified a physical type
physical_type_id = _unit_physical_mapping[target]
except KeyError: # Function argument target is invalid
raise ValueError("Invalid unit or physical type '{}'."
.format(target))
# get unit directly from physical type id
target_unit = Unit._from_physical_type_id(physical_type_id)
allowed_units.append(target_unit)
return allowed_units
def _validate_arg_value(param_name, func_name, arg, targets, equivalencies):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
if len(targets) == 0:
return
allowed_units = _get_allowed_units(targets)
for allowed_unit in allowed_units:
try:
is_equivalent = arg.unit.is_equivalent(allowed_unit,
equivalencies=equivalencies)
if is_equivalent:
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = "a 'unit' attribute without an 'is_equivalent' method"
else:
error_msg = "no 'unit' attribute"
raise TypeError("Argument '{}' to function '{}' has {}. "
"You may want to pass in an astropy Quantity instead."
.format(param_name, func_name, error_msg))
else:
if len(targets) > 1:
raise UnitsError("Argument '{}' to function '{}' must be in units"
" convertible to one of: {}."
.format(param_name, func_name,
[str(targ) for targ in targets]))
else:
raise UnitsError("Argument '{}' to function '{}' must be in units"
" convertible to '{}'."
.format(param_name, func_name,
str(targets[0])))
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the decorator,
or by using function annotation syntax. Arguments to the decorator
take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator
or in the annotation.
If the argument has no unit attribute, i.e. it is not a Quantity object, a
        `TypeError` will be raised unless the argument is an annotation. This is to
        allow non-Quantity annotations to pass through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*arg or \**kwargs).
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Also you can specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
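        Physical type names appear to be accepted as well (target strings are
        resolved through the internal physical-type mapping), for example::
            import astropy.units as u
            @u.quantity_input(mylength='length')
            def myfunction(mylength):
                return mylength**2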
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, **kwargs):
self.equivalencies = kwargs.pop('equivalencies', [])
self.decorator_kwargs = kwargs
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL):
continue
# Catch the (never triggered) case where bind relied on a default value.
if param.name not in bound_args.arguments and param.default is not param.empty:
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# If the targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if isinstance(targets, str) or not isiterable(targets):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all the targets that
# are not strings or subclasses of Unit. This is to allow
# non unit related annotations to pass through
if is_annotation:
valid_targets = [t for t in valid_targets if isinstance(t, (str, UnitBase))]
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(param.name, wrapped_function.__name__,
arg, valid_targets, self.equivalencies)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
if wrapped_signature.return_annotation not in (inspect.Signature.empty, None):
return return_.to(wrapped_signature.return_annotation)
else:
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
| stargaser/astropy | astropy/units/decorators.py | Python | bsd-3-clause | 9,242 | 0.001839 |
username = "x"
password = "x"
subreddit = "x"
client_id = "x"
| ciaranlangton/reddit-cxlive-bot | config.py | Python | mit | 62 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2012 Pexego Sistemas Informáticos All Rights Reserved
# $Pedro Gómez$ <pegomez@elnogal.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sales_mrp_forecast
import sales_stock_forecast
| Comunitea/CMNT_00040_2016_ELN_addons | sales_mrp_stock_forecast_link/wizard/__init__.py | Python | agpl-3.0 | 1,048 | 0 |
from django.contrib import admin
from .models import Type_Of_Crime
admin.site.register(Type_Of_Crime)
# Register your models here.
| shudwi/CrimeMap | Type_Of_Crime/admin.py | Python | gpl-3.0 | 131 | 0 |
import numpy as np
from sklearn import cluster, datasets, preprocessing
import pickle
import gensim
import time
import re
import tokenize
from scipy import spatial
def save_obj(obj, name ):
with open( name + '.pkl', 'wb') as f:
pickle.dump(obj, f, protocol=2)
def load_obj(name ):
with open( name + '.pkl', 'rb') as f:
return pickle.load(f)
def combine(v1,v2):
    # add the two vectors and scale the sum by its squared L2 norm
    A = np.add(v1,v2)
    lent = np.sum(np.multiply(A,A))
    return np.divide(A,lent)
# 3M word google dataset of pretrained 300D vectors
model = gensim.models.Word2Vec.load_word2vec_format('vectors.bin', binary=True)
model.init_sims(replace=True)
#### getting all vecs from w2v using the inbuilt syn0 list see code
# X_Scaled_Feature_Vecs = []
# for w in model.vocab:
# X_Scaled_Feature_Vecs.append(model.syn0[model.vocab[w].index])
# model.syn0 = X_Scaled_Feature_Vecs
# X_Scaled_Feature_Vecs = None
# X_Scaled_Feature_Vecs = model.syn0
# ### scaling feature vecs
# min_max_scaler = preprocessing.MinMaxScaler()
# X_Scaled_Feature_Vecs = min_max_scaler.fit_transform(X)
# X_Scaled_Feature_Vecs = X
# W2V = dict(zip(model.vocab, X_Scaled_Feature_Vecs))
#Cosine Distance
# from scipy import spatial
# dataSetI = model["travel"]
# dataSetII = model["travelling"]
# result = 1 - spatial.distance.cosine(dataSetI, dataSetII)
# print(result)
X_Scaled_Feature_Vecs=[]
for word in model.vocab:
X_Scaled_Feature_Vecs.append(model[word])
# ######## Interested Categories
cat = ["advertising","beauty","business","celebrity","diy craft","entertainment","family","fashion","food","general","health","lifestyle","music","news","pop","culture","social","media","sports","technology","travel","video games"]
nums = range(0,22)
num2cat = dict(zip(nums, cat))
# new Categories Seeds (787 seeds) DICT [seed: cat]
Word2CatMap = load_obj("baseWord2CatMap")
baseWords = Word2CatMap.keys()
catVec=[]
newBaseWords =[]
# load from C file output
for bw in baseWords:
try:
catVec.append(np.array(model[bw]))
newBaseWords.append(bw)
except:
words = bw.split()
try:
vec = np.array(model[words[0]])
for word in words[1:]:
try:
vec = combine(vec,np.array(model[word]))
except:
#print(word + " Skipped!")
continue
catVec.append(vec)
newBaseWords.append(bw)
except:
#print(words)
continue
# print(len(catVec))
# print(len(newBaseWords))
#cluster Size
# newBaseWords has the list of new base words that are in word2vec vocab
k = len(catVec)
# form a num(k) to cat(22) mapping
numK2CatMap = dict()
for w in newBaseWords:
numK2CatMap[newBaseWords.index(w)] = Word2CatMap[w]
# kmeans
##### better code
t0 = time.time()
# Assign max_iter to 1 (ONE) if you just want to fit vectors around the seeds
kmeans = cluster.KMeans(n_clusters=k, init=np.array(catVec), max_iter=1).fit(X_Scaled_Feature_Vecs)
#kmeans = cluster.KMeans(n_clusters=22, init=np.array(catVec), max_iter=900).fit(X_Scaled_Feature_Vecs)
print(str(time.time()-t0))
print(kmeans.inertia_)
###### After Fiting the Cluster Centers are recomputed : update catVec (Order Preserved)
catVec = kmeans.cluster_centers_
# #test
# for c in catVec:
# print(num2cat[kmeans.predict(c)[0]])
##### save best for future use
save_obj(kmeans,"clusterLarge")
KM = kmeans
# Cluster_lookUP = dict(zip(model.vocab, KM.labels_))
Cluster_lookUP = dict()
Cluster_KlookUP = dict()
for word in model.vocab:
kmap = KM.predict(model[word])[0]
Cluster_lookUP[word] = numK2CatMap[kmap]
Cluster_KlookUP[word] = kmap
## Precomputing the cosine similarities
Cosine_Similarity = dict()
for k in Cluster_lookUP.keys():
# if len(Cluster_lookUP[k]) == 1:
Cosine_Similarity[k] = 1 - spatial.distance.cosine(model[k], catVec[Cluster_KlookUP[k]])
# else:
# Cosine_Similarity[k] = [1 - spatial.distance.cosine(model[k], catVec[wk]) for wk in Cluster_KlookUP[k]]
#check
print(num2cat[Cluster_lookUP["flight"][0]] + " "+str(Cosine_Similarity["flight"]))
print(num2cat[Cluster_lookUP["gamecube"][0]] +" "+str(Cosine_Similarity["gamecube"]))
#Saving Models
# for 22 topics
save_obj(Cluster_lookUP,"Cluster_lookUP")
save_obj(Cosine_Similarity,"Cosine_Similarity")
save_obj(num2cat,"num2cat")
save_obj(catVec,"catVec")
save_obj(numK2CatMap,"numK2CatMap")
| tpsatish95/Topic-Modeling-Social-Network-Text-Data | Kseeds/modifiedCluster.py | Python | apache-2.0 | 4,434 | 0.016013 |
#!/usr/bin/env python
import json
import os
import subprocess
def normalize_target(target):
if ':' in target: return target
return target + ':' + os.path.basename(target)
def gn_desc(root_out_dir, target, *what_to_show):
# gn desc may fail transiently for an unknown reason; retry loop
for i in xrange(2):
desc = subprocess.check_output([
os.path.join(os.environ['FUCHSIA_DIR'], 'buildtools', 'gn'), 'desc',
root_out_dir, '--format=json', target
] + list(what_to_show))
try:
output = json.loads(desc)
break
except ValueError:
if i >= 1:
print 'Failed to describe target ', target, '; output: ', desc
raise
if target not in output:
target = normalize_target(target)
return output[target]
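# Illustrative call (the build directory and target are made-up examples):
#   sources = gn_desc('out/x64', '//scripts/example', 'sources')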
| ianloic/fuchsia-sdk | scripts/common.py | Python | apache-2.0 | 848 | 0.004717 |
import sys, pickle, copy
import numpy as np
import matplotlib.pyplot as pl
import astropy.io.fits as pyfits
import magellanic.regionsed as rsed
import magellanic.mcutils as utils
from magellanic.lfutils import *
try:
import fsps
from sedpy import observate
except ImportError:
    # you won't be able to predict the integrated spectrum or magnitudes
# filterlist must be set to None in calls to total_cloud_data
sps = None
wlengths = {'2': '{4.5\mu m}',
'4': '{8\mu m}'}
dmod = {'smc':18.9,
'lmc':18.5}
cloud_info = {}
cloud_info['smc'] = [utils.smc_regions(), 20, 23, [7, 13, 16], [3,5,6]]
cloud_info['lmc'] = [utils.lmc_regions(), 48, 38, [7, 11, 13, 16], [3,4,5,6]]
def total_cloud_data(cloud, filternames = None, basti=False,
lfstring=None, agb_dust=1.0,
one_metal=None):
#########
# SPS
#########
#
if filternames is not None:
sps = fsps.StellarPopulation(add_agb_dust_model=True)
sps.params['sfh'] = 0
sps.params['agb_dust'] = agb_dust
dust = ['nodust', 'agbdust']
sps.params['imf_type'] = 0.0 #salpeter
filterlist = observate.load_filters(filternames)
else:
filterlist = None
##########
# SFHs
##########
regions, nx, ny, zlist, zlist_basti = cloud_info[cloud.lower()]
if basti:
        zlist = zlist_basti
if 'header' in regions.keys():
rheader = regions.pop('header') #dump the header info from the reg. dict
total_sfhs = None
for n, dat in regions.iteritems():
total_sfhs = sum_sfhs(total_sfhs, dat['sfhs'])
total_zmet = dat['zmet']
#collapse SFHs to one metallicity
if one_metal is not None:
ts = None
for sfh in total_sfhs:
ts = sum_sfhs(ts, sfh)
total_sfh = ts
zlist = [zlist[one_metal]]
total_zmet = [total_zmet[one_metal]]
#############
# LFs
############
bins = rsed.lfbins
if lfstring is not None:
# these are stored as a list of different metallicities
lffiles = [lfstring.format(z) for z in zlist]
lf_base = [read_villaume_lfs(f) for f in lffiles]
#get LFs broken out by age and metallicity as well as the total
lfs_zt, lf, logages = rsed.one_region_lfs(copy.deepcopy(total_sfhs), lf_base)
else:
lfs_zt, lf, logages = None, None, None
###########
# SED
############
if filterlist is not None:
spec, wave, mass = rsed.one_region_sed(copy.deepcopy(total_sfhs), total_zmet, sps)
mags = observate.getSED(wave, spec*rsed.to_cgs, filterlist=filterlist)
maggies = 10**(-0.4 * np.atleast_1d(mags))
else:
maggies, mass = None, None
#############
# Write output
############
total_values = {}
total_values['agb_clf'] = lf
total_values['agb_clfs_zt'] = lfs_zt
total_values['clf_mags'] = bins
total_values['logages'] = logages
total_values['sed_ab_maggies'] = maggies
total_values['sed_filters'] = filternames
total_values['lffile'] = lfstring
total_values['mstar'] = mass
total_values['zlist'] = zlist
return total_values, total_sfhs
def sum_sfhs(sfhs1, sfhs2):
"""
Accumulate individual sets of SFHs into a total set of SFHs. This
assumes that the individual SFH sets all have the same number and
order of metallicities, and the same time binning.
"""
if sfhs1 is None:
return copy.deepcopy(sfhs2)
elif sfhs2 is None:
return copy.deepcopy(sfhs1)
else:
out = copy.deepcopy(sfhs1)
for s1, s2 in zip(out, sfhs2):
s1['sfr'] += s2['sfr']
return out
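# Illustrative accumulation over all regions of a cloud (mirrors the usage in
# total_cloud_data above):
#   total = None
#   for name, dat in regions.iteritems():
#       total = sum_sfhs(total, dat['sfhs'])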
if __name__ == '__main__':
filters = ['galex_NUV', 'spitzer_irac_ch2',
'spitzer_irac_ch4', 'spitzer_mips_24']
#filters = None
ldir, cdir = 'lf_data/', 'composite_lfs/'
outst = '{0}_n2teffcut.p'
# total_cloud_data will loop over the appropriate (for the
# isochrone) metallicities for a given lfst filename template
lfst = '{0}z{{0:02.0f}}_tau{1:2.1f}_vega_irac{2}_n2_teffcut_lf.txt'
basti = False
agb_dust=1.0
agebins = np.arange(9)*0.3 + 7.4
#loop over clouds (and bands and agb_dust) to produce clfs
for cloud in ['smc']:
rdir = '{0}cclf_{1}_'.format(cdir, cloud)
for band in ['2','4']:
lfstring = lfst.format(ldir, agb_dust, band)
dat, sfhs = total_cloud_data(cloud, filternames=filters, agb_dust=agb_dust,
lfstring=lfstring, basti=basti)
agebins = sfhs[0]['t1'][3:-1]
outfile = lfstring.replace(ldir, rdir).replace('z{0:02.0f}_','').replace('.txt','.dat')
write_clf_many([dat['clf_mags'], dat['agb_clf']], outfile, lfstring)
#fig, ax = plot_weighted_lfs(dat, agebins = agebins, dm=dmod[cloud])
#fig.suptitle('{0} @ IRAC{1}'.format(cloud.upper(), band))
#fig.savefig('byage_clfs/{0}_clfs_by_age_and_Z_irac{1}'.format(cloud, band))
#pl.close(fig)
colheads = (len(agebins)-1) * ' N<m(t={})'
colheads = colheads.format(*(agebins[:-1]+agebins[1:])/2.)
tbin_lfs = np.array([rebin_lfs(lf, ages, agebins) for lf, ages
in zip(dat['agb_clfs_zt'], dat['logages'])])
write_clf_many([dat['clf_mags'], tbin_lfs.sum(axis=0)],
outfile.replace(cdir,'byage_clfs/'), lfstring,
colheads=colheads)
pl.figure()
for s, z in zip(sfhs, dat['zlist']):
pl.step(s['t1'], s['sfr'], where='post', label='zind={0}'.format(z), linewidth=3)
pl.legend(loc=0)
pl.title(cloud.upper())
print(cloud, dat['mstar'])
| bd-j/magellanic | magellanic/sfhs/prediction_scripts/predicted_total.py | Python | gpl-2.0 | 5,894 | 0.009841 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2020 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""HOLE Analysis --- :mod:`MDAnalysis.analysis.hole2.hole`
=====================================================================================
:Author: Lily Wang
:Year: 2020
:Copyright: GNU Public License v3
.. versionadded:: 1.0.0
This module contains the tools to interface with HOLE_ [Smart1993]_
[Smart1996]_ to analyse an ion channel pore or transporter pathway [Stelzl2014]_ .
Using HOLE on a PDB file
------------------------
Use the :func:`hole` function to run `HOLE`_ on a single PDB file. For example,
the code below runs the `HOLE`_ program installed at '~/hole2/exe/hole' ::
from MDAnalysis.tests.datafiles import PDB_HOLE
from MDAnalysis.analysis import hole2
profiles = hole2.hole(PDB_HOLE, executable='~/hole2/exe/hole')
# to create a VMD surface of the pore
hole2.create_vmd_surface(filename='hole.vmd')
``profiles`` is a dictionary of HOLE profiles, indexed by the frame number. If only
a PDB file is passed to the function, there will only be one profile at frame 0.
You can visualise the pore by loading your PDB file into VMD, and in
Extensions > Tk Console, type::
source hole.vmd
You can also pass a DCD trajectory with the same atoms in the same order as
your PDB file with the ``dcd`` keyword argument. In that case, ``profiles`` will
contain multiple HOLE profiles, indexed by frame.
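For example, assuming HOLE accepts your DCD file (see the footnote below on the
limits of HOLE's DCD support; ``my_trajectory.dcd`` is a placeholder path)::
    profiles = hole2.hole(PDB_HOLE, dcd='my_trajectory.dcd',
                          executable='~/hole2/exe/hole')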
The HOLE program will create some output files:
* an output file (default name: hole.out)
* an sphpdb file (default name: hole.sph)
* a file of van der Waals' radii
(if not specified with ``vdwradii_file``. Default name: simple2.rad)
* a symlink of your PDB or DCD files (if the original name is too long)
* the input text (if you specify ``infile``)
By default (`keep_files=True`), these files are kept. If you would like to
delete the files after the function has run, set `keep_files=False`. Keep in
mind that if you delete the sphpdb file, you cannot then create a VMD surface.
Using HOLE on a trajectory
--------------------------
You can also run HOLE on a trajectory through the :class:`HoleAnalysis` class.
This behaves similarly to the ``hole`` function, although arguments such as ``cpoint``
and ``cvect`` become runtime arguments for the :meth:`~HoleAnalysis.run` function.
The class can be set-up and run like a normal MDAnalysis analysis class::
import MDAnalysis as mda
from MDAnalysis.tests.datafiles import MULTIPDB_HOLE
from MDAnalysis.analysis import hole2
u = mda.Universe(MULTIPDB_HOLE)
ha = hole2.HoleAnalysis(u, executable='~/hole2/exe/hole')
ha.run()
ha.create_vmd_surface(filename='hole.vmd')
The VMD surface created by the class updates the pore for each frame of the trajectory.
Use it as normal by loading your trajectory in VMD and sourcing the file in the Tk Console.
You can access the actual profiles generated in the ``results`` attribute::
print(ha.results.profiles)
Again, HOLE writes out files for each frame. If you would like to delete these files
after the analysis, you can call :meth:`~HoleAnalysis.delete_temporary_files`::
ha.delete_temporary_files()
Alternatively, you can use HoleAnalysis as a context manager that deletes temporary
files when you are finished with the context manager::
with hole2.HoleAnalysis(u, executable='~/hole2/exe/hole') as h2:
h2.run()
h2.create_vmd_surface()
Using HOLE with VMD
-------------------
The :program:`sos_triangle` program that is part of HOLE_ can write an input
file for VMD_ to display a triangulated surface of the pore found by
:program:`hole`. This functionality is available with the
:meth:`HoleAnalysis.create_vmd_surface` method
[#create_vmd_surface_function]_. For an input trajectory MDAnalysis writes a
*trajectory* of pore surfaces that can be animated in VMD together with the
frames from the trajectory.
Analyzing a full trajectory
~~~~~~~~~~~~~~~~~~~~~~~~~~~
To analyze a full trajectory and write pore surfaces for all frames to file
:file:`hole_surface.vmd`, use ::
import MDAnalysis as mda
from MDAnalysis.analysis import hole2
# load example trajectory MULTIPDB_HOLE
from MDAnalysis.tests.datafiles import MULTIPDB_HOLE
u = mda.Universe(MULTIPDB_HOLE)
with hole2.HoleAnalysis(u, executable='~/hole2/exe/hole') as h2:
h2.run()
h2.create_vmd_surface(filename="hole_surface.vmd")
In VMD, load your trajectory and then in the tcl console
(e.g.. :menuselection:`Extensions --> Tk Console`) load the surface
trajectory:
.. code-block:: tcl
source hole_surface.vmd
If you only want to *subsample the trajectory* and only show the surface at
specific frames then you can either load the trajectory with the same
subsampling into VMD or create a subsampled trajectory.
Creating subsampled HOLE surface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For example, if we want to start displaying at frame 1 (i.e., skip frame 0), stop at frame 7, and
only show every other frame (step 2) then the HOLE analysis will be ::
with hole2.HoleAnalysis(u, executable='~/hole2/exe/hole') as h2:
h2.run(start=1, stop=9, step=2)
h2.create_vmd_surface(filename="hole_surface_subsampled.vmd")
The commands produce the file ``hole_surface_subsampled.vmd`` that can be loaded into VMD.
.. Note::
Python (and MDAnalysis) stop indices are *exclusive* so the parameters
``start=1``, ``stop=9``, and ``step=2`` will analyze frames 1, 3, 5, 7.
.. _Loading-a-trajectory-into-VMD-with-subsampling:
Loading a trajectory into VMD with subsampling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Load your system into VMD. This can mean to load the topology file with
:menuselection:`File --> New Molecule` and adding the trajectory with
:menuselection:`File --> Load Data into Molecule` or just :menuselection:`File
--> New Molecule`.
When loading the trajectory, subsample the frames by setting parametes in in
the :guilabel:`Frames` section. Select *First: 1*, *Last: 7*, *Stride: 2*. Then
:guilabel:`Load` everything.
.. Note::
VMD considers the stop/last frame to be *inclusive* so you need to typically
choose one less than the ``stop`` value that you selected in MDAnalysis.
Then load the surface trajectory:
.. code-block:: tcl
source hole_surface_subsampled.vmd
You should see a different surface for each frame in the trajectory. [#vmd_extra_frame]_
Creating a subsampled trajectory
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Instead of having VMD subsample the trajectory as described in
:ref:`Loading-a-trajectory-into-VMD-with-subsampling` we can write a subsampled
trajectory to a file. Although it requires more disk space, it can be
convenient if we want to visualize the system repeatedly.
The example trajectory comes as a multi-PDB file so we need a suitable topology
file. If you already have a topology file such as a PSF, TPR, or PRMTOP file
then skip this step. We write frame 0 as a PDB :file:`frame0.pdb` (which we
will use as the topology in VMD)::
u.atoms.write("frame0.pdb")
Then write the actual trajectory in a convenient format such as TRR (or
DCD). Note that we apply the same slicing (``start=1``, ``stop=9``, ``step=2``)
to the trajectory itself and then use it as the value for the ``frames``
parameter of :meth:`AtomGroup.write<MDAnalysis.core.groups.AtomGroup.write>`
method::
u.atoms.write("subsampled.trr", frames=u.trajectory[1:9:2])
This command creates the subsampled trajectory file :file:`subsampled.trr` in
TRR format.
In VMD we load the topology and the trajectory and then load the surface. In
our example we have a PDB file (:file:`frame0.pdb`) as topology so we need to
remove the first frame [#vmd_extra_frame]_ (skip the "trim" step below if you
are using a true topology file such as PSF, TPR, or PRMTOP). To keep this
example compact, we are using the tcl command line interface in VMD_
(:menuselection:`Extensions --> Tk Console`) for loading and trimming the
trajectory; you can use the menu commands if you prefer.
.. code-block:: tcl
# load topology and subsampled trajectory
mol load pdb frame0.pdb trr subsampled.trr
# trim first frame (frame0) -- SKIP if using PSF, TPR, PRMTOP
animate delete beg 0 end 0
# load the HOLE surface trajectory
source hole_surface_subsampled.vmd
You can now animate your molecule together with the surface and render it.
.. _HOLE: http://www.holeprogram.org
.. _VMD: https://www.ks.uiuc.edu/Research/vmd/
Functions and classes
---------------------
.. autofunction:: hole
.. autoclass:: HoleAnalysis
:members:
References
----------
.. [Smart1993] O.S. Smart, J.M. Goodfellow and B.A. Wallace.
The Pore Dimensions of Gramicidin A. Biophysical Journal 65:2455-2460, 1993.
DOI: 10.1016/S0006-3495(93)81293-1
.. [Smart1996] O.S. Smart, J.G. Neduvelil, X. Wang, B.A. Wallace, and M.S.P. Sansom.
HOLE: A program for the analysis of the pore dimensions of ion channel
structural models. J.Mol.Graph., 14:354–360, 1996.
DOI: 10.1016/S0263-7855(97)00009-X
URL http://www.holeprogram.org/
.. [Stelzl2014] L. S. Stelzl, P. W. Fowler, M. S. P. Sansom, and O. Beckstein.
Flexible gates generate occluded intermediates in the transport cycle
of LacY. J Mol Biol, 426:735–751, 2014.
DOI: 10.1016/j.jmb.2013.10.024
.. Footnotes
.. [#create_vmd_surface_function] If you use the :func:`hole` function to run
       :program:`hole` on a single PDB file then you can use the
       :func:`MDAnalysis.analysis.hole2.utils.create_vmd_surface`
       function to manually run :program:`sph_process` and
       :program:`sos_triangle` on the output files and create a surface
file.
.. [#vmd_extra_frame] If you loaded your system in VMD_ from separate topology
and trajectory files and the topology file contained coordinates
(such as a PDB or GRO) file then your trajectory will have an
extra initial frame containing the coordinates from your topology
file. Delete the initial frame with :menuselection:`Molecule -->
Delete Frames` by setting *First* to 0 and *Last* to 0 and
selecting :guilabel:`Delete`.
.. [#HOLEDCD] PDB files are not the only files that :program:`hole` can
read. In principle, it is also able to read CHARMM DCD
trajectories and generate a hole profile for each frame. However,
native support for DCD in :program:`hole` is patchy and not every
DCD is recognized. In particular, at the moment DCDs generated
with MDAnalysis are not accepted by HOLE. To overcome this
PDB / DCD limitation, use :class:`HoleAnalysis` which creates
temporary PDB files for each frame of a
:class:`~MDAnalysis.core.universe.Universe` or
:class:`~MDAnalysis.core.universe.AtomGroup` and runs
:func:`hole` on each of them.
"""
import os
import errno
import tempfile
import textwrap
import logging
import itertools
import warnings
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import OrderedDict
from ...exceptions import ApplicationError
from ..base import AnalysisBase
from ...lib import util
from .utils import (check_and_fix_long_filename, write_simplerad2,
set_up_hole_input, run_hole, collect_hole,
create_vmd_surface)
from .templates import (hole_input, hole_lines, vmd_script_array,
vmd_script_function, exe_err,
IGNORE_RESIDUES)
logger = logging.getLogger(__name__)
def hole(pdbfile,
infile_text=None,
infile=None,
outfile='hole.out',
sphpdb_file='hole.sph',
vdwradii_file=None,
executable='hole',
tmpdir=os.path.curdir,
sample=0.2,
end_radius=22.0,
cpoint=None,
cvect=None,
random_seed=None,
ignore_residues=IGNORE_RESIDUES,
output_level=0,
dcd=None,
dcd_iniskip=0,
dcd_step=1,
keep_files=True):
r"""Run :program:`hole` on a single frame or a DCD trajectory.
:program:`hole` is part of the HOLE_ suite of programs. It is used to
analyze channels and cavities in proteins, especially ion channels.
Only a subset of all `HOLE control parameters <http://www.holeprogram.org/doc/old/hole_d03.html>`_
is supported and can be set with keyword arguments.
Parameters
----------
pdbfile : str
The `filename` is used as input for HOLE in the "COORD" card of the
input file. It specifies the name of a PDB coordinate file to be
used. This must be in Brookhaven protein databank format or
something closely approximating this. Both ATOM and HETATM records
are read.
infile_text: str, optional
HOLE input text or template. If set to ``None``, the function will
create the input text from the other parameters.
infile: str, optional
File to write the HOLE input text for later inspection. If set to
``None``, the input text is not written out.
outfile : str, optional
file name of the file collecting HOLE's output (which can be
parsed using :func:`collect_hole`).
sphpdb_file : str, optional
path to the HOLE sph file, a PDB-like file containing the
coordinates of the pore centers.
The coordinates are set to the sphere centres and the occupancies
are the sphere radii. All centres are assigned the atom name QSS and
residue name SPH and the residue number is set to the storage
number of the centre. In VMD, sph
objects are best displayed as "Points". Displaying .sph objects
rather than rendered or dot surfaces can be useful to analyze the
distance of particular atoms from the sphere-centre line.
.sph files can be used to produce molecular graphical
output from a hole run, by using the
:program:`sph_process` program to read the .sph file.
vdwradii_file: str, optional
path to the file specifying van der Waals radii for each atom. If
set to ``None``, then a set of default radii,
:data:`SIMPLE2_RAD`, is used (an extension of ``simple.rad`` from
the HOLE distribution).
executable: str, optional
Path to the :program:`hole` executable.
(e.g. ``~/hole2/exe/hole``). If
:program:`hole` is found on the :envvar:`PATH`, then the bare
executable name is sufficient.
tmpdir: str, optional
The temporary directory that files can be symlinked to, to shorten
the path name. HOLE can only read filenames up to a certain length.
sample : float, optional
distance of sample points in Å.
Specifies the distance between the planes used in the HOLE
procedure. The default value should be reasonable for most
purposes. However, if you wish to visualize a very tight
constriction then specify a smaller value.
This value determines how many points in the pore profile are
calculated.
end_radius : float, optional
Radius in Å, which is considered to be the end of the pore. This
keyword can be used to specify the radius above which the
program regards a result as indicating that the end of the pore
has been reached. This may need to be increased for large channels,
or reduced for small channels.
cpoint : array_like, 'center_of_geometry' or None, optional
coordinates of a point inside the pore, e.g. ``[12.3, 0.7,
18.55]``. If set to ``None`` (the default) then HOLE's own search
algorithm is used.
``cpoint`` specifies a point which lies within the channel. For
simple channels (e.g. gramicidin), results do not show great
sensitivity to the exact point taken. An easy way to produce an
initial point is to use molecular graphics to find two atoms which
lie either side of the pore and to average their coordinates. Or
if the channel structure contains water molecules or counter ions
then take the coordinates of one of these (and use the
``ignore_residues`` keyword to ignore them in the pore radius
calculation).
If this card is not specified, then HOLE (from version 2.2)
attempts to guess where the channel will be. The procedure
assumes the channel is reasonably symmetric. The initial guess on
cpoint will be the centroid of all alpha carbon atoms (name 'CA'
in pdb file). This is then refined by a crude grid search up to 5
Å from the original position. This procedure works most of the
time but is far from infallible — results should be
carefully checked (with molecular graphics) if it is used.
cvect : array_like, optional
Search direction, should be parallel to the pore axis,
e.g. ``[0,0,1]`` for the z-axis.
If this keyword is ``None`` (the default), then HOLE attempts to guess
where the channel will be. The procedure assumes that the channel is
reasonably symmetric. The guess will be either along the X axis
(1,0,0), Y axis (0,1,0) or Z axis (0,0,1). If the structure is not
aligned on one of these axes, the results will clearly be
approximate. If a guess is used then results should be carefully
checked.
random_seed : int, optional
integer number to start the random number generator.
By default,
:program:`hole` will use the time of the day.
For reproducible runs (e.g., for testing) set ``random_seed``
to an integer.
ignore_residues : array_like, optional
sequence of three-letter residues that are not taken into
account during the calculation; wildcards are *not*
supported. Note that all residues must have 3 letters. Pad
with space on the right-hand side if necessary.
output_level : int, optional
Determines the amount of output written to ``outfile``.
For automated processing, this must be < 3.
0: Full text output,
1: All text output given except "run in progress" (i.e.,
detailed contemporary description of what HOLE is doing).
2: Ditto plus no graph type output - only leaving minimum
radius and conductance calculations.
3: All text output other than input card mirroring and error messages
turned off.
dcd : str, optional
File name of CHARMM-style DCD trajectory (must be supplied together with a
matching PDB file `filename`) and then HOLE runs its analysis on
each frame. HOLE can *not* read DCD trajectories written by MDAnalysis,
which are NAMD-style (see Notes). Note that structural parameters
determined for each individual structure are written in a tagged
format so that it is possible to extract the information from the text
output file using a :program:`grep` command. The reading of the file
can be controlled by the ``dcd_step`` keyword and/or setting
``dcd_iniskip`` to the number of frames to be skipped
initially.
dcd_step : int, optional
step size for going through the trajectory (skips ``dcd_step-1``
frames).
keep_files : bool, optional
Whether to keep the HOLE output files and possible temporary
symlinks after running the function.
Returns
-------
dict
A dictionary of :class:`numpy.recarray`\ s, indexed by frame.
Notes
-----
- HOLE is very picky and does not read all DCD-like formats [#HOLEDCD]_.
If in doubt, look into the `outfile` for error diagnostics.
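
A minimal call could look like the following sketch (``protein.pdb`` is a
hypothetical input file; we assume the :program:`hole` executable is found
on the :envvar:`PATH`)::

    profiles = hole("protein.pdb", outfile="hole.out", sphpdb_file="hole.sph")
    # one recarray per frame; take the tightest constriction found
    r_min = min(p.radius.min() for p in profiles.values())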
.. versionadded:: 1.0
"""
if output_level > 3:
msg = 'output_level ({}) needs to be < 3 in order to extract a HOLE profile!'
warnings.warn(msg.format(output_level))
# get executable
exe = util.which(executable)
if exe is None:
raise OSError(errno.ENOENT, exe_err.format(name=executable,
kw='executable'))
# get temp files
tmp_files = [outfile, sphpdb_file]
short_filename = check_and_fix_long_filename(pdbfile, tmpdir=tmpdir)
if os.path.islink(short_filename):
tmp_files.append(short_filename)
if dcd is not None:
dcd = check_and_fix_long_filename(dcd, tmpdir=tmpdir)
if os.path.islink(dcd):
tmp_files.append(dcd)
if vdwradii_file is not None:
vdwradii_file = check_and_fix_long_filename(vdwradii_file,
tmpdir=tmpdir)
else:
vdwradii_file = write_simplerad2()
tmp_files.append(vdwradii_file)
infile_text = set_up_hole_input(short_filename,
infile_text=infile_text,
infile=infile,
sphpdb_file=sphpdb_file,
vdwradii_file=vdwradii_file,
tmpdir=tmpdir, sample=sample,
end_radius=end_radius,
cpoint=cpoint, cvect=cvect,
random_seed=random_seed,
ignore_residues=ignore_residues,
output_level=output_level,
dcd=dcd,
dcd_iniskip=dcd_iniskip,
dcd_step=dcd_step-1)
run_hole(outfile=outfile, infile_text=infile_text, executable=exe)
recarrays = collect_hole(outfile=outfile)
if not keep_files:
for file in tmp_files:
try:
os.unlink(file)
except OSError:
pass
return recarrays
class HoleAnalysis(AnalysisBase):
r"""
Run :program:`hole` on a trajectory.
:program:`hole` is part of the HOLE_ suite of programs. It is used to
analyze channels and cavities in proteins, especially ion channels.
Only a subset of all `HOLE control parameters <http://www.holeprogram.org/doc/old/hole_d03.html>`_
is supported and can be set with keyword arguments.
This class creates temporary PDB files for each frame and runs HOLE on
the frame. It can be used normally, or as a context manager. If used as a
context manager, the class will try to delete any temporary files created
by HOLE, e.g. sphpdb files and logfiles. ::
with hole2.HoleAnalysis(u, executable='~/hole2/exe/hole') as h2:
h2.run()
h2.create_vmd_surface()
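
Alternatively (a sketch, with the same assumption about the location of the
:program:`hole` executable), the analysis can be used without the context
manager; temporary files then remain on disk until
:meth:`delete_temporary_files` is called explicitly::

    ha = hole2.HoleAnalysis(u, executable='~/hole2/exe/hole')
    ha.run()
    ha.plot()                     # one R(zeta) profile per frame
    ha.delete_temporary_files()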
Parameters
----------
universe : Universe or AtomGroup
The Universe or AtomGroup to apply the analysis to.
select : string, optional
The selection string to create an atom selection that the HOLE
analysis is applied to.
vdwradii_file : str, optional
path to the file specifying van der Waals radii for each atom. If
set to ``None``, then a set of default radii,
:data:`SIMPLE2_RAD`, is used (an extension of ``simple.rad`` from
the HOLE distribution).
executable : str, optional
Path to the :program:`hole` executable.
(e.g. ``~/hole2/exe/hole``). If
:program:`hole` is found on the :envvar:`PATH`, then the bare
executable name is sufficient.
tmpdir : str, optional
The temporary directory that files can be symlinked to, to shorten
the path name. HOLE can only read filenames up to a certain length.
cpoint : array_like, 'center_of_geometry' or None, optional
coordinates of a point inside the pore, e.g. ``[12.3, 0.7,
18.55]``. If set to ``None`` (the default) then HOLE's own search
algorithm is used.
``cpoint`` specifies a point which lies within the channel. For
simple channels (e.g. gramicidin), results do not show great
sensitivity to the exact point taken. An easy way to produce an
initial point is to use molecular graphics to find two atoms which
lie either side of the pore and to average their coordinates. Or
if the channel structure contains water molecules or counter ions
then take the coordinates of one of these (and use the
``ignore_residues`` keyword to ignore them in the pore radius
calculation).
If this card is not specified, then HOLE (from version 2.2)
attempts to guess where the channel will be. The procedure
assumes the channel is reasonably symmetric. The initial guess on
cpoint will be the centroid of all alpha carbon atoms (name 'CA'
in pdb file). This is then refined by a crude grid search up to 5
Å from the original position. This procedure works most of the
time but is far from infallible — results should be
carefully checked (with molecular graphics) if it is used.
cvect : array_like, optional
Search direction, should be parallel to the pore axis,
e.g. ``[0,0,1]`` for the z-axis.
If this keyword is ``None`` (the default), then HOLE attempts to guess
where the channel will be. The procedure assumes that the channel is
reasonably symmetric. The guess will be either along the X axis
(1,0,0), Y axis (0,1,0) or Z axis (0,0,1). If the structure is not
aligned on one of these axes, the results will clearly be
approximate. If a guess is used then results should be carefully
checked.
sample : float, optional
distance of sample points in Å.
Specifies the distance between the planes used in the HOLE
procedure. The default value should be reasonable for most
purposes. However, if you wish to visualize a very tight
constriction then specify a smaller value.
This value determines how many points in the pore profile are
calculated.
end_radius : float, optional
Radius in Å, which is considered to be the end of the pore. This
keyword can be used to specify the radius above which the
program regards a result as indicating that the end of the pore
has been reached. This may need to be increased for large channels,
or reduced for small channels.
output_level : int, optional
Determines the amount of output written to ``outfile``.
For automated processing, this must be < 3.
0: Full text output,
1: All text output given except "run in progress" (i.e.,
detailed contemporary description of what HOLE is doing).
2: Ditto plus no graph type output - only leaving minimum
radius and conductance calculations.
3: All text output other than input card mirroring and error messages
turned off.
ignore_residues : array_like, optional
sequence of three-letter residues that are not taken into
account during the calculation; wildcards are *not*
supported. Note that all residues must have 3 letters. Pad
with space on the right-hand side if necessary.
prefix : str, optional
Prefix for HOLE output files.
write_input_files : bool, optional
Whether to write out the input HOLE text as files.
Files are called `hole.inp`.
Attributes
----------
results.sphpdbs: numpy.ndarray
Array of sphpdb filenames
.. versionadded:: 2.0.0
results.outfiles: numpy.ndarray
Array of output filenames
.. versionadded:: 2.0.0
results.profiles: dict
Profiles generated by HOLE2.
A dictionary of :class:`numpy.recarray`\ s, indexed by frame.
.. versionadded:: 2.0.0
sphpdbs: numpy.ndarray
Alias of :attr:`results.sphpdbs`
.. deprecated:: 2.0.0
This will be removed in MDAnalysis 3.0.0. Please use
:attr:`results.sphpdbs` instead.
outfiles: numpy.ndarray
Alias of :attr:`results.outfiles`
.. deprecated:: 2.0.0
This will be removed in MDAnalysis 3.0.0. Please use
:attr:`results.outfiles` instead.
profiles: dict
Alias of :attr:`results.profiles`
.. deprecated:: 2.0.0
This will be removed in MDAnalysis 3.0.0. Please use
:attr:`results.profiles` instead.
.. versionadded:: 1.0
.. versionchanged:: 2.0.0
:attr:`sphpdbs`, :attr:`outfiles` and :attr:`profiles`
are now stored in a :class:`MDAnalysis.analysis.base.Results`
instance.
"""
input_file = '{prefix}hole{i:03d}.inp'
output_file = '{prefix}hole{i:03d}.out'
sphpdb_file = '{prefix}hole{i:03d}.sph'
hole_header = textwrap.dedent("""
! Input file for Oliver Smart's HOLE program
! written by MDAnalysis.analysis.hole2.HoleAnalysis
! for a Universe
! u = mda.Universe({}
! )
! Frame {{i}}
""")
hole_body = textwrap.dedent("""
COORD {{coordinates}}
RADIUS {radius}
SPHPDB {{sphpdb}}
SAMPLE {sample:f}
ENDRAD {end_radius:f}
IGNORE {ignore}
SHORTO {output_level:d}
""")
_guess_cpoint = False
def __init__(self, universe,
select='protein',
verbose=False,
ignore_residues=IGNORE_RESIDUES,
vdwradii_file=None,
executable='hole',
sos_triangle='sos_triangle',
sph_process='sph_process',
tmpdir=os.path.curdir,
cpoint=None,
cvect=None,
sample=0.2,
end_radius=22,
output_level=0,
prefix=None,
write_input_files=False):
super(HoleAnalysis, self).__init__(universe.universe.trajectory,
verbose=verbose)
if output_level > 3:
msg = 'output_level ({}) needs to be < 3 in order to extract a HOLE profile!'
warnings.warn(msg.format(output_level))
if prefix is None:
prefix = ''
if isinstance(cpoint, str):
if 'geometry' in cpoint.lower():
self._guess_cpoint = True
self.cpoint = '{cpoint[0]:.10f} {cpoint[1]:.10f} {cpoint[2]:.10f}'
else:
self._guess_cpoint = False
self.cpoint = cpoint
self.prefix = prefix
self.cvect = cvect
self.sample = sample
self.end_radius = end_radius
self.output_level = output_level
self.write_input_files = write_input_files
self.select = select
self.ag = universe.select_atoms(select, updating=True)
self.universe = universe
self.tmpdir = tmpdir
self.ignore_residues = ignore_residues
# --- finding executables ----
hole = util.which(executable)
if hole is None:
raise OSError(errno.ENOENT, exe_err.format(name=executable,
kw='executable'))
self.base_path = os.path.dirname(hole)
sos_triangle_path = util.which(sos_triangle)
if sos_triangle_path is None:
path = os.path.join(self.base_path, sos_triangle)
sos_triangle_path = util.which(path)
if sos_triangle_path is None:
raise OSError(errno.ENOENT, exe_err.format(name=sos_triangle,
kw='sos_triangle'))
sph_process_path = util.which(sph_process)
if sph_process_path is None:
path = os.path.join(self.base_path, sph_process)
sph_process_path = util.which(path)
if sph_process_path is None:
raise OSError(errno.ENOENT, exe_err.format(name=sph_process,
kw='sph_process'))
self.exe = {
'hole': hole,
'sos_triangle': sos_triangle_path,
'sph_process': sph_process_path
}
# --- setting up temp files ----
self.tmp_files = []
if vdwradii_file is not None:
self.vdwradii_file = check_and_fix_long_filename(vdwradii_file,
tmpdir=self.tmpdir)
if os.path.islink(self.vdwradii_file):
self.tmp_files.append(self.vdwradii_file)
else:
self.vdwradii_file = write_simplerad2()
self.tmp_files.append(self.vdwradii_file)
# --- setting up input header ----
filenames = [universe.filename]
try:
filenames.extend(universe.trajectory.filenames)
except AttributeError:
filenames.append(universe.trajectory.filename)
hole_filenames = '\n! '.join(filenames)
self._input_header = self.hole_header.format(hole_filenames)
def run(self, start=None, stop=None, step=None, verbose=None,
random_seed=None):
"""
Perform the calculation
Parameters
----------
start : int, optional
start frame of analysis
stop : int, optional
stop frame of analysis
step : int, optional
number of frames to skip between each analysed frame
verbose : bool, optional
Turn on verbosity
random_seed : int, optional
integer number to start the random number generator.
By default,
:program:`hole` will use the time of the day.
For reproducible runs (e.g., for testing) set ``random_seed``
to an integer.
"""
self.random_seed = random_seed
return super(HoleAnalysis, self).run(start=start, stop=stop,
step=step, verbose=verbose)
@property
def sphpdbs(self):
wmsg = ("The `sphpdbs` attribute was deprecated in "
"MDAnalysis 2.0.0 and will be removed in MDAnalysis 3.0.0. "
"Please use `results.sphpdbs` instead.")
warnings.warn(wmsg, DeprecationWarning)
return self.results.sphpdbs
@property
def outfiles(self):
wmsg = ("The `outfiles` attribute was deprecated in "
"MDAnalysis 2.0.0 and will be removed in MDAnalysis 3.0.0. "
"Please use `results.outfiles` instead.")
warnings.warn(wmsg, DeprecationWarning)
return self.results.outfiles
@property
def profiles(self):
wmsg = ("The `profiles` attribute was deprecated in "
"MDAnalysis 2.0.0 and will be removed in MDAnalysis 3.0.0. "
"Please use `results.profiles` instead.")
warnings.warn(wmsg, DeprecationWarning)
return self.results.profiles
def _prepare(self):
"""Set up containers and generate input file text"""
# set up containers
self.results.sphpdbs = np.zeros(self.n_frames, dtype=object)
self.results.outfiles = np.zeros(self.n_frames, dtype=object)
self.results.profiles = {}
# generate input file
body = set_up_hole_input('',
infile_text=self.hole_body,
infile=None,
vdwradii_file=self.vdwradii_file,
tmpdir=self.tmpdir,
sample=self.sample,
end_radius=self.end_radius,
cpoint=self.cpoint,
cvect=self.cvect,
random_seed=self.random_seed,
ignore_residues=self.ignore_residues,
output_level=self.output_level,
dcd=None)
self.infile_text = self._input_header + body
def guess_cpoint(self):
"""Guess a point inside the pore.
This method simply uses the center of geometry of the selection as a
guess.
Returns
-------
numpy.ndarray:
center of geometry of the selected AtomGroup
"""
return self.ag.center_of_geometry()
def _single_frame(self):
"""Run HOLE analysis and collect profiles"""
# set up files
frame = self._ts.frame
i = self._frame_index
outfile = self.output_file.format(prefix=self.prefix, i=frame)
sphpdb = self.sphpdb_file.format(prefix=self.prefix, i=frame)
self.results.sphpdbs[i] = sphpdb
self.results.outfiles[i] = outfile
if outfile not in self.tmp_files:
self.tmp_files.append(outfile)
if sphpdb not in self.tmp_files:
self.tmp_files.append(sphpdb)
else:
self.tmp_files.append(sphpdb + '.old')
# temp pdb
logger.info('HOLE analysis frame {}'.format(frame))
fd, pdbfile = tempfile.mkstemp(suffix='.pdb')
os.close(fd) # close immediately (Issue 129)
# get infile text
fmt_kwargs = {'i': frame, 'coordinates': pdbfile, 'sphpdb': sphpdb}
if self._guess_cpoint:
fmt_kwargs['cpoint'] = self.guess_cpoint()
infile_text = self.infile_text.format(**fmt_kwargs)
if self.write_input_files:
infile = self.input_file.format(prefix=self.prefix, i=frame)
with open(infile, 'w') as f:
f.write(infile_text)
try:
self.ag.write(pdbfile)
run_hole(outfile=outfile, infile_text=infile_text,
executable=self.exe['hole'])
finally:
try:
os.unlink(pdbfile)
except OSError:
pass
recarrays = collect_hole(outfile=outfile)
try:
self.results.profiles[frame] = recarrays[0]
except KeyError:
msg = 'No profile found in HOLE output. Output level: {}'
logger.info(msg.format(self.output_level))
def create_vmd_surface(self, filename='hole.vmd', dot_density=15,
no_water_color='red', one_water_color='green',
double_water_color='blue'):
"""Process HOLE output to create a smooth pore surface suitable for VMD.
Takes the ``sphpdb`` file for each frame and feeds it to `sph_process
<http://www.holeprogram.org/doc/old/hole_d04.html#sph_process>`_ and
`sos_triangle
<http://www.holeprogram.org/doc/old/hole_d04.html#sos_triangle>`_ as
described under `Visualization of HOLE results
<http://www.holeprogram.org/doc/index.html>`_.
Load the output file *filename* into VMD in :menuselection:`Extensions
--> Tk Console` ::
source hole.vmd
The level of detail is determined by ``dot_density``.
The surface will be colored by ``no_water_color``, ``one_water_color``, and
``double_water_color``. You can change these in the
Tk Console::
set no_water_color blue
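
For example, a denser surface written to a custom file name could be
requested with (a sketch; ``h2`` is a :class:`HoleAnalysis` instance on
which :meth:`run` has already been called)::

    h2.create_vmd_surface(filename='hole_surface.vmd', dot_density=20)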
Parameters
----------
filename: str, optional
file to write the pore surfaces to.
dot_density: int, optional
density of facets for generating a 3D pore representation.
The number controls the density of dots that will be used.
A sphere of dots is placed on each centre determined in the
Monte Carlo procedure. The actual number of dots written is
controlled by ``dot_density`` and the ``sample`` level of the
original analysis. ``dot_density`` should be set between 5
(few dots per sphere) and 35 (many dots per sphere).
no_water_color: str, optional
Color of the surface where the pore radius is too tight for a
water molecule.
one_water_color: str, optional
Color of the surface where the pore can fit one water molecule.
double_water_color: str, optional
Color of the surface where the radius is at least double the
minimum radius for one water molecule.
Returns
-------
str
``filename`` with the pore surfaces.
"""
if not np.any(self.results.get("sphpdbs", [])):
raise ValueError('No sphpdb files to read. Try calling run()')
frames = []
for i, frame in enumerate(self.frames):
sphpdb = self.results.sphpdbs[i]
tmp_tri = create_vmd_surface(sphpdb=sphpdb,
sph_process=self.exe['sph_process'],
sos_triangle=self.exe['sos_triangle'],
dot_density=dot_density)
shapes = [[], [], []]
with open(tmp_tri) as f:
for line in f:
if line.startswith('draw color'):
color = line.split()[-1].lower()
if color == 'red':
dest = shapes[0]
elif color == 'green':
dest = shapes[1]
elif color == 'blue':
dest = shapes[2]
else:
msg = 'Encountered unknown color {}'
raise ValueError(msg.format(color))
if line.startswith('draw trinorm'):
line = line.strip('draw trinorm').strip()
dest.append('{{ {} }}'.format(line))
try:
os.unlink(tmp_tri)
except OSError:
pass
tri = '{ { ' + ' } { '.join(list(map(' '.join, shapes))) + ' } }'
frames.append(f'set triangles({i}) ' + tri)
trinorms = '\n'.join(frames)
vmd_1 = vmd_script_array.format(no_water_color=no_water_color,
one_water_color=one_water_color,
double_water_color=double_water_color)
vmd_text = vmd_1 + trinorms + vmd_script_function
with open(filename, 'w') as f:
f.write(vmd_text)
return filename
def min_radius(self):
"""Return the minimum radius over all profiles as a function of q"""
profiles = self.results.get("profiles")
if not profiles:
raise ValueError('No profiles available. Try calling run()')
return np.array([[q, p.radius.min()] for q, p in profiles.items()])
def delete_temporary_files(self):
"""Delete temporary files"""
for f in self.tmp_files:
try:
os.unlink(f)
except OSError:
pass
self.tmp_files = []
self.results.outfiles = []
self.results.sphpdbs = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Delete temporary files on exit"""
self.delete_temporary_files()
def _process_plot_kwargs(self, frames=None,
color=None, cmap='viridis',
linestyle='-'):
"""Process the colors and linestyles for plotting
Parameters
----------
frames : array-like, optional
Frames to plot. If ``None``, plots all of them.
color : str or array-like, optional
Color or colors for the plot. If ``None``, colors are
drawn from ``cmap``.
cmap : str, optional
color map to make colors for the plot if ``color`` is
not given. Names should be from the ``matplotlib.pyplot.cm``
module.
linestyle : str or array-like, optional
Line style for the plot.
Returns
-------
(array-like, array-like, array-like)
frames, colors, linestyles
"""
if frames is None:
frames = self.frames
else:
frames = util.asiterable(frames)
if color is None:
colormap = plt.cm.get_cmap(cmap)
norm = matplotlib.colors.Normalize(vmin=min(frames),
vmax=max(frames))
colors = colormap(norm(frames))
else:
colors = itertools.cycle(util.asiterable(color))
linestyles = itertools.cycle(util.asiterable(linestyle))
return frames, colors, linestyles
def plot(self, frames=None,
color=None, cmap='viridis',
linestyle='-', y_shift=0.0,
label=True, ax=None,
legend_loc='best', **kwargs):
r"""Plot HOLE profiles :math:`R(\zeta)` in a 1D graph.
Lines are colored according to the specified ``color`` or
drawn from the color map ``cmap``. One line is
plotted for each trajectory frame.
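
For example, the profiles can be stacked for easier comparison by shifting
each successive frame upwards (a sketch; ``ha`` is a :class:`HoleAnalysis`
instance on which :meth:`run` has been called)::

    ha.plot(y_shift=1.0, cmap='plasma')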
Parameters
----------
frames: array-like, optional
Frames to plot. If ``None``, plots all of them.
color: str or array-like, optional
Color or colors for the plot. If ``None``, colors are
drawn from ``cmap``.
cmap: str, optional
color map to make colors for the plot if ``color`` is
not given. Names should be from the ``matplotlib.pyplot.cm``
module.
linestyle: str or array-like, optional
Line style for the plot.
y_shift : float, optional
displace each :math:`R(\zeta)` profile by ``y_shift`` in the
:math:`y`-direction for clearer visualization.
label : bool or string, optional
If ``False`` then no legend is
displayed.
ax : :class:`matplotlib.axes.Axes`
If no `ax` is supplied or set to ``None`` then the plot will
be added to the current active axes.
legend_loc : str, optional
Location of the legend.
kwargs : `**kwargs`
All other `kwargs` are passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
ax : :class:`~matplotlib.axes.Axes`
Axes with the plot, either `ax` or the current axes.
"""
if not self.results.get("profiles"):
raise ValueError('No profiles available. Try calling run()')
if ax is None:
fig, ax = plt.subplots()
fcl = self._process_plot_kwargs(frames=frames, color=color,
cmap=cmap, linestyle=linestyle)
for i, (frame, c, ls) in enumerate(zip(*fcl)):
profile = self.results.profiles[frame]
dy = i*y_shift
ax.plot(profile.rxn_coord, profile.radius+dy, color=c,
linestyle=ls, zorder=-frame, label=str(frame),
**kwargs)
ax.set_xlabel(r"Pore coordinate $\zeta$ ($\AA$)")
ax.set_ylabel(r"HOLE radius $R$ ($\AA$)")
if label == True:
ax.legend(loc=legend_loc)
return ax
def plot3D(self, frames=None,
color=None, cmap='viridis',
linestyle='-', ax=None, r_max=None,
ylabel='Frames', **kwargs):
r"""Stacked 3D graph of profiles :math:`R(\zeta)`.
Lines are colored according to the specified ``color`` or
drawn from the color map ``cmap``. One line is
plotted for each trajectory frame.
Parameters
----------
frames : array-like, optional
Frames to plot. If ``None``, plots all of them.
color : str or array-like, optional
Color or colors for the plot. If ``None``, colors are
drawn from ``cmap``.
cmap : str, optional
color map to make colors for the plot if ``color`` is
not given. Names should be from the ``matplotlib.pyplot.cm``
module.
linestyle : str or array-like, optional
Line style for the plot.
r_max : float, optional
only display radii up to ``r_max``. If ``None``, all radii are
plotted.
ax : :class:`matplotlib.axes.Axes`
If no `ax` is supplied or set to ``None`` then the plot will
be added to the current active axes.
ylabel : str, optional
Y-axis label.
**kwargs :
All other `kwargs` are passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
ax : :class:`~mpl_toolkits.mplot3d.Axes3D`
Axes with the plot, either `ax` or the current axes.
"""
if not self.results.get("profiles"):
raise ValueError('No profiles available. Try calling run()')
from mpl_toolkits.mplot3d import Axes3D
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
fcl = self._process_plot_kwargs(frames=frames,
color=color, cmap=cmap,
linestyle=linestyle)
for frame, c, ls in zip(*fcl):
profile = self.results.profiles[frame]
if r_max is None:
radius = profile.radius
rxn_coord = profile.rxn_coord
else:
# does not seem to work with masked arrays but with nan hack!
# http://stackoverflow.com/questions/4913306/python-matplotlib-mplot3d-how-do-i-set-a-maximum-value-for-the-z-axis
rxn_coord = profile.rxn_coord
radius = profile.radius.copy()
radius[radius > r_max] = np.nan
ax.plot(rxn_coord, frame*np.ones_like(rxn_coord), radius,
color=c, linestyle=ls, zorder=-frame, label=str(frame),
**kwargs)
ax.set_xlabel(r"Pore coordinate $\zeta$ ($\AA$)")
ax.set_ylabel(ylabel)
ax.set_zlabel(r"HOLE radius $R$ ($\AA$)")
plt.tight_layout()
return ax
def over_order_parameters(self, order_parameters, frames=None):
"""Get HOLE profiles sorted over order parameters ``order_parameters``.
Parameters
----------
order_parameters : array-like or string
Sequence or text file containing order parameters (float
numbers) corresponding to the frames in the trajectory. Must
be same length as trajectory.
frames : array-like, optional
Selected frames to return. If ``None``, returns all of them.
Returns
-------
collections.OrderedDict
sorted dictionary of {order_parameter:profile}
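
For example (a sketch; ``op.dat`` is a hypothetical text file with one float
per trajectory frame)::

    profiles = ha.over_order_parameters('op.dat')
    for op, profile in profiles.items():
        print(op, profile.radius.min())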
"""
if not self.results.get("profiles"):
raise ValueError('No profiles available. Try calling run()')
if isinstance(order_parameters, str):
try:
order_parameters = np.loadtxt(order_parameters)
except IOError:
raise ValueError('Data file not found: {}'.format(order_parameters))
except (ValueError, TypeError):
msg = ('Could not parse given file: {}. '
'`order_parameters` must be array-like '
'or a filename with array data '
'that can be read by np.loadtxt')
raise ValueError(msg.format(order_parameters))
order_parameters = np.asarray(order_parameters)
if len(order_parameters) != len(self._trajectory):
msg = ('The number of order parameters ({}) must match the '
'length of the trajectory ({} frames)')
raise ValueError(msg.format(len(order_parameters),
len(self._trajectory)))
if frames is None:
frames = self.frames
else:
frames = np.asarray(util.asiterable(frames))
idx = np.argsort(order_parameters[frames])
sorted_frames = frames[idx]
profiles = OrderedDict()
for frame in sorted_frames:
profiles[order_parameters[frame]] = self.results.profiles[frame]
return profiles
def plot_order_parameters(self, order_parameters,
aggregator=min,
frames=None,
color='blue',
linestyle='-', ax=None,
ylabel=r'Minimum HOLE pore radius $r$ ($\AA$)',
xlabel='Order parameter',
**kwargs):
r"""Plot HOLE radii over order parameters. This function needs
an ``aggregator`` function to reduce the ``radius`` array to a
single value, e.g. ``min``, ``max``, or ``np.mean``.
Parameters
----------
order_parameters : array-like or string
Sequence or text file containing order parameters (float
numbers) corresponding to the frames in the trajectory. Must
be same length as trajectory.
aggregator : callable, optional
Function applied to the radius array of each profile to
reduce it to one representative value.
frames : array-like, optional
Frames to plot. If ``None``, plots all of them.
color : str or array-like, optional
Color for the plot.
linestyle : str or array-like, optional
Line style for the plot.
ax : :class:`matplotlib.axes.Axes`
If no `ax` is supplied or set to ``None`` then the plot will
be added to the current active axes.
xlabel : str, optional
X-axis label.
ylabel : str, optional
Y-axis label.
**kwargs :
All other `kwargs` are passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
ax : :class:`~matplotlib.axes.Axes`
Axes with the plot, either `ax` or the current axes.
"""
op_profiles = self.over_order_parameters(order_parameters,
frames=frames)
if ax is None:
fig, ax = plt.subplots()
data = [[x, aggregator(p.radius)] for x, p in op_profiles.items()]
arr = np.array(data)
ax.plot(arr[:, 0], arr[:, 1], color=color, linestyle=linestyle)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def gather(self, frames=None, flat=False):
"""Gather the fields of each profile recarray together.
Parameters
----------
frames : int or iterable of ints, optional
Profiles to include by frame. If ``None``, includes
all frames.
flat : bool, optional
Whether to flatten the list of field arrays into a
single array.
Returns
-------
dict
dictionary of fields
"""
if frames is None:
frames = self.frames
frames = util.asiterable(frames)
profiles = [self.results.profiles[k] for k in frames]
rxncoords = [p.rxn_coord for p in profiles]
radii = [p.radius for p in profiles]
cen_line_Ds = [p.cen_line_D for p in profiles]
if flat:
rxncoords = np.concatenate(rxncoords)
radii = np.concatenate(radii)
cen_line_Ds = np.concatenate(cen_line_Ds)
dct = {'rxn_coord': rxncoords,
'radius': radii,
'cen_line_D': cen_line_Ds}
return dct
def bin_radii(self, frames=None, bins=100, range=None):
"""Collects the pore radii into bins by reaction coordinate.
Parameters
----------
frames : int or iterable of ints, optional
Profiles to include by frame. If ``None``, includes
all frames.
bins : int or iterable of edges, optional
If bins is an int, it defines the number of equal-width bins in the given
range. If bins is a sequence, it defines a monotonically increasing array of
bin edges, including the rightmost edge, allowing for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins.
If not provided, ``range`` is simply ``(a.min(), a.max())``,
where ``a`` is the array of reaction coordinates.
Values outside the range are ignored. The first element of the range must be
less than or equal to the second.
Returns
-------
list of arrays of floats
List of radii present in each bin
array of (float, float)
Edges of each bin
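
For example, the mean radius in each bin could be computed as (a sketch)::

    binned_radii, edges = ha.bin_radii(bins=100)
    means = [r.mean() for r in binned_radii if len(r)]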
"""
agg = self.gather(frames=frames, flat=True)
coords = agg['rxn_coord']
if not util.iterable(bins):
if range is None:
range = (coords.min(), coords.max())
xmin, xmax = range
if xmin == xmax:
xmin -= 0.5
xmax += 0.5
bins = np.linspace(xmin, xmax, bins+1, endpoint=True)
else:
bins = np.asarray(bins)
bins = bins[np.argsort(bins)]
idx = np.argsort(coords)
coords = coords[idx]
radii = agg['radius'][idx]
# left: inserts at i where coords[:i] < edge
# right: inserts at i where coords[:i] <= edge
# r_ concatenates
bin_idx = np.r_[coords.searchsorted(bins, side='right')]
binned = [radii[i:j] for i, j in zip(bin_idx[:-1], bin_idx[1:])]
return binned, bins
def histogram_radii(self, aggregator=np.mean, frames=None,
bins=100, range=None):
"""Histograms the pore radii into bins by reaction coordinate,
aggregate the radii with an `aggregator` function, and returns the
aggregated radii and bin edges.
Parameters
----------
aggregator : callable, optional
this function must take an iterable of floats and return a
single value.
frames : int or iterable of ints, optional
Profiles to include by frame. If ``None``, includes
all frames.
bins : int or iterable of edges, optional
If bins is an int, it defines the number of equal-width bins in the given
range. If bins is a sequence, it defines a monotonically increasing array of
bin edges, including the rightmost edge, allowing for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins.
If not provided, ``range`` is simply ``(a.min(), a.max())``,
where ``a`` is the array of reaction coordinates.
Values outside the range are ignored. The first element of the range must be
less than or equal to the second.
Returns
-------
array of floats
histogrammed, aggregate value of radii
array of (float, float)
Edges of each bin
"""
binned, bins = self.bin_radii(frames=frames, bins=bins, range=range)
return np.array(list(map(aggregator, binned))), bins
def plot_mean_profile(self, bins=100, range=None,
frames=None, color='blue',
linestyle='-', ax=None,
xlabel='Frame', fill_alpha=0.3,
n_std=1, legend=True,
legend_loc='best',
**kwargs):
"""Collects the pore radii into bins by reaction coordinate.
Parameters
----------
frames : int or iterable of ints, optional
Profiles to include by frame. If ``None``, includes
all frames.
bins : int or iterable of edges, optional
If bins is an int, it defines the number of equal-width bins in the given
range. If bins is a sequence, it defines a monotonically increasing array of
bin edges, including the rightmost edge, allowing for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins.
If not provided, ``range`` is simply ``(a.min(), a.max())``,
where ``a`` is the array of reaction coordinates.
Values outside the range are ignored. The first element of the range must be
less than or equal to the second.
color : str or array-like, optional
Color for the plot.
linestyle : str or array-like, optional
Line style for the plot.
ax : :class:`matplotlib.axes.Axes`
If no `ax` is supplied or set to ``None`` then the plot will
be added to the current active axes.
xlabel : str, optional
X-axis label.
fill_alpha : float, optional
Opacity of filled standard deviation area
n_std : int, optional
Number of standard deviations from the mean to fill between.
legend : bool, optional
Whether to plot a legend.
legend_loc : str, optional
Location of legend.
**kwargs :
All other `kwargs` are passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
ax : :class:`~matplotlib.axes.Axes`
Axes with the plot, either `ax` or the current axes.
"""
binned, bins = self.bin_radii(frames=frames, bins=bins, range=range)
mean = np.array(list(map(np.mean, binned)))
midpoints = 0.5 * (bins[1:] + bins[:-1])
if ax is None:
    fig, ax = plt.subplots()
if n_std:
std = np.array(list(map(np.std, binned)))
ax.fill_between(midpoints, mean-(n_std*std), mean+(n_std*std),
color=color, alpha=fill_alpha,
label='{} std'.format(n_std))
ax.plot(midpoints, mean, color=color,
linestyle=linestyle, label='mean', **kwargs)
ax.set_xlabel(r"Pore coordinate $\zeta$ ($\AA$)")
ax.set_ylabel(r"HOLE radius $R$ ($\AA$)")
if legend:
ax.legend(loc=legend_loc)
return ax
def plot3D_order_parameters(self, order_parameters,
frames=None,
color=None,
cmap='viridis',
linestyle='-', ax=None,
r_max=None,
ylabel=r'Order parameter',
**kwargs):
r"""Plot HOLE radii over order parameters as a 3D graph.
Lines are colored according to the specified ``color`` or
drawn from the color map ``cmap``. One line is
plotted for each trajectory frame.
Parameters
----------
order_parameters : array-like or string
Sequence or text file containing order parameters (float
numbers) corresponding to the frames in the trajectory. Must
be same length as trajectory.
frames : array-like, optional
Frames to plot. If ``None``, plots all of them.
color : str or array-like, optional
Color or colors for the plot. If ``None``, colors are
drawn from ``cmap``.
cmap : str, optional
color map to make colors for the plot if ``color`` is
not given. Names should be from the ``matplotlib.pyplot.cm``
module.
linestyle : str or array-like, optional
Line style for the plot.
ax : :class:`matplotlib.axes.Axes`
If no `ax` is supplied or set to ``None`` then the plot will
be added to the current active axes.
r_max : float, optional
only display radii up to ``r_max``. If ``None``, all radii are
plotted.
ylabel : str, optional
Y-axis label.
**kwargs :
All other `kwargs` are passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
ax : :class:`~mpl_toolkits.mplot3d.Axes3D`
Axes with the plot, either `ax` or the current axes.
"""
op_profiles = self.over_order_parameters(order_parameters,
frames=frames)
from mpl_toolkits.mplot3d import Axes3D
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ocl = self._process_plot_kwargs(frames=list(op_profiles.keys()),
color=color, cmap=cmap,
linestyle=linestyle)
for op, c, ls in zip(*ocl):
profile = op_profiles[op]
if r_max is None:
radius = profile.radius
rxn_coord = profile.rxn_coord
else:
# does not seem to work with masked arrays but with nan hack!
# http://stackoverflow.com/questions/4913306/python-matplotlib-mplot3d-how-do-i-set-a-maximum-value-for-the-z-axis
rxn_coord = profile.rxn_coord
radius = profile.radius.copy()
radius[radius > r_max] = np.nan
ax.plot(rxn_coord, op*np.ones_like(rxn_coord), radius,
color=c, linestyle=ls, zorder=int(-op), label=str(op),
**kwargs)
ax.set_xlabel(r"Pore coordinate $\zeta$ ($\AA$)")
ax.set_ylabel(ylabel)
ax.set_zlabel(r"HOLE radius $R$ ($\AA$)")
plt.tight_layout()
return ax
| MDAnalysis/mdanalysis | package/MDAnalysis/analysis/hole2/hole.py | Python | gpl-2.0 | 67,157 | 0.000626 |
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.pardir)
from testing_harness import TestHarness, PyAPITestHarness
import openmc
class FilterMaterialTestHarness(PyAPITestHarness):
def _build_inputs(self):
filt = openmc.Filter(type='material', bins=(1, 2, 3, 4))
tally = openmc.Tally(tally_id=1)
tally.add_filter(filt)
tally.add_score('total')
self._input_set.tallies = openmc.TalliesFile()
self._input_set.tallies.add_tally(tally)
super(FilterMaterialTestHarness, self)._build_inputs()
def _cleanup(self):
super(FilterMaterialTestHarness, self)._cleanup()
f = os.path.join(os.getcwd(), 'tallies.xml')
if os.path.exists(f): os.remove(f)
if __name__ == '__main__':
harness = FilterMaterialTestHarness('statepoint.10.*', True)
harness.main()
| mjlong/openmc | tests/test_filter_material/test_filter_material.py | Python | mit | 858 | 0.003497 |
from __future__ import unicode_literals
from django import forms
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class Accounts(User):
class Meta:
proxy = True
class LoginForm(forms.Form):
# This creates two variables called username and password that are assigned form character fields
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput())
class RegistrationForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ('username', 'first_name', 'last_name',
'email', 'password1', 'password2')
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.email = self.cleaned_data['email']
if commit:
user.save()
return user
| unlessbamboo/django | accounts/models.py | Python | gpl-3.0 | 1,076 | 0.005576 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
MODEL_PARAMS = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {'days': 0,
'fields': [('consumption', 'sum')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Include the encoders we use
'encoders': {
u'consumption': {
'fieldname': u'consumption',
'resolution': 0.88,
'seed': 1,
'name': u'consumption',
'type': 'RandomDistributedScalarEncoder',
},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
'timestamp_weekend': { 'fieldname': u'timestamp',
'name': u'timestamp_weekend',
'type': 'DateEncoder',
'weekend': 21}
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
# Spatial Pooler implementation selector.
# Options: 'py', 'cpp' (speed optimized, new)
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the column's receptive field is available
# for potential synapses.
'potentialPct': 0.85,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10.
'synPermConnected': 0.1,
'synPermActiveInc': 0.04,
'synPermInactiveDec': 0.005,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.0001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1,5',
'implementation': 'cpp',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': 2184},
'trainSPNetOnlyIfRequested': False,
},
}
| david-ragazzi/nupic | examples/opf/simple_server/model_params.py | Python | gpl-3.0 | 9,288 | 0.001507 |
# python 3
# tensorflow 2.0
from __future__ import print_function, division, absolute_import
import os
import argparse
import random
import numpy as np
import datetime
# from numpy import linalg
import os.path as osp
import sys
cur_dir = osp.dirname(osp.abspath(__file__))
sys.path.insert(1, osp.join(cur_dir, '.'))
from sklearn.datasets import load_svmlight_file
from scipy.sparse import csr_matrix
# from scipy.sparse import linalg
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import tensorflow as tf
from tf_utils import pinv_naive, pinv
path_train = osp.join(cur_dir, "../a9a/a9a")
path_test = osp.join(cur_dir, "../a9a/a9a.t")
MAX_ITER = 100
np_dtype = np.float32
tf_dtype = tf.float32
# manual seed
manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
np.random.seed(manualSeed)
# load all data
X_train, y_train = load_svmlight_file(path_train, n_features=123, dtype=np_dtype)
X_test, y_test = load_svmlight_file(path_test, n_features=123, dtype=np_dtype)
# X: scipy.sparse.csr.csr_matrix
# X_train: (32561, 123), y_train: (32561,)
# X_test: (16281, 123), y_test:(16281,)
# stack a dimension of ones to X to simplify computation
N_train = X_train.shape[0]
N_test = X_test.shape[0]
X_train = np.hstack((np.ones((N_train, 1)), X_train.toarray())).astype(np_dtype)
X_test = np.hstack((np.ones((N_test, 1)), X_test.toarray())).astype(np_dtype)
# print(X_train.shape, X_test.shape)
y_train = y_train.reshape((N_train, 1))
y_test = y_test.reshape((N_test, 1))
# label: -1, +1 ==> 0, 1
y_train = np.where(y_train == -1, 0, 1)
y_test = np.where(y_test == -1, 0, 1)
# NB: here X's shape is (N,d), which differs to the derivation
def neg_log_likelihood(w, X, y, L2_param=None):
"""
w: dx1
X: Nxd
y: Nx1
L2_param: \lambda>0, will introduce -\lambda/2 ||w||_2^2
"""
# print(type(X), X.dtype)
res = tf.matmul(tf.matmul(tf.transpose(w), tf.transpose(X)), y.astype(np_dtype)) - \
tf.reduce_sum(tf.math.log(1 + tf.exp(tf.matmul(X, w))))
if L2_param != None and L2_param > 0:
res += -0.5 * L2_param * tf.matmul(tf.transpose(w), w)
return -res[0][0]
def prob(X, w):
"""
X: Nxd
w: dx1
---
prob: N x num_classes(2)"""
y = tf.constant(np.array([0.0, 1.0]), dtype=tf.float32)
prob = tf.exp(tf.matmul(X, w) * y) / (1 + tf.exp(tf.matmul(X, w)))
return prob
def compute_acc(X, y, w):
p = prob(X, w)
y_pred = tf.cast(tf.argmax(p, axis=1), tf.float32)
y = tf.cast(tf.squeeze(y), tf.float32)
acc = tf.reduce_mean(tf.cast(tf.equal(y, y_pred), tf.float32))
return acc
def update(w_old, X, y, L2_param=0):
"""
w_new = w_old - w_update
w_update = (X'RX+lambda*I)^(-1) (X'(mu-y) + lambda*w_old)
lambda is L2_param
w_old: dx1
X: Nxd
y: Nx1
---
w_update: dx1
"""
d = X.shape[1]
mu = tf.sigmoid(tf.matmul(X, w_old)) # Nx1
R_flat = mu * (1 - mu) # element-wise, Nx1
L2_reg_term = L2_param * tf.eye(d)
XRX = tf.matmul(tf.transpose(X), R_flat * X) + L2_reg_term # dxd
# np.save('XRX_tf.npy', XRX.numpy())
# calculate pseudo inverse via SVD
# method 1
# slightly better than tfp.math.pinv when L2_param=0
XRX_pinv = pinv_naive(XRX)
# method 2
# XRX_pinv = pinv(XRX)
# w = w - (X^T R X)^(-1) X^T (mu-y)
# w_new = tf.assign(w_old, w_old - tf.matmul(tf.matmul(XRX_pinv, tf.transpose(X)), mu - y))
y = tf.cast(y, tf_dtype)
w_update = tf.matmul(XRX_pinv, tf.matmul(tf.transpose(X), mu - y) + L2_param * w_old)
return w_update
def optimize(w_old, w_update):
"""custom update op, instead of using SGD variants"""
return w_old.assign(w_old - w_update)
def train_IRLS(X_train, y_train, X_test=None, y_test=None, L2_param=0, max_iter=MAX_ITER):
"""train Logistic Regression via IRLS algorithm
X: Nxd
y: Nx1
---
"""
N, d = X_train.shape
w = tf.Variable(0.01 * tf.ones((d, 1), dtype=tf.float32), name="w")
current_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
summary_writer = tf.summary.create_file_writer(f"./logs/{current_time}")
print("start training...")
print("L2 param(lambda): {}".format(L2_param))
i = 0
# iteration
while i <= max_iter:
print("iter: {}".format(i))
# print('\t neg log likelihood: {}'.format(sess.run(neg_L, feed_dict=train_feed_dict)))
neg_L = neg_log_likelihood(w, X_train, y_train, L2_param)
print("\t neg log likelihood: {}".format(neg_L))
train_acc = compute_acc(X_train, y_train, w)
with summary_writer.as_default():
tf.summary.scalar("train_acc", train_acc, step=i)
tf.summary.scalar("train_neg_L", neg_L, step=i)
test_acc = compute_acc(X_test, y_test, w)
with summary_writer.as_default():
tf.summary.scalar("test_acc", test_acc, step=i)
print("\t train acc: {}, test acc: {}".format(train_acc, test_acc))
L2_norm_w = np.linalg.norm(w.numpy())
print("\t L2 norm of w: {}".format(L2_norm_w))
if i > 0:
diff_w = np.linalg.norm(w_update.numpy())
print("\t diff of w_old and w: {}".format(diff_w))
if diff_w < 1e-2:
break
w_update = update(w, X_train, y_train, L2_param)
w = optimize(w, w_update)
i += 1
print("training done.")
if __name__ == "__main__":
# test_acc should be about 0.85
lambda_ = 20 # 0
train_IRLS(X_train, y_train, X_test, y_test, L2_param=lambda_, max_iter=100)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, y_train.reshape(N_train,))
y_pred_train = classifier.predict(X_train)
train_acc = np.sum(y_train.reshape(N_train,) == y_pred_train)/N_train
print('train_acc: {}'.format(train_acc))
y_pred_test = classifier.predict(X_test)
test_acc = np.sum(y_test.reshape(N_test,) == y_pred_test)/N_test
print('test acc: {}'.format(test_acc))
| wangg12/IRLS_tf_pytorch | src/IRLS_tf_v2.py | Python | apache-2.0 | 6,061 | 0.003135 |
# Copyright (c) 2011 - Rui Batista <ruiandrebatista@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import locale
import logging
from accessible_output import speech
logger = logging.getLogger(__name__)
_speaker = None
def init():
global _speaker
if _speaker:
return
_speaker = speech.Speaker()
def speak(message, cancel=True):
global _speaker
assert _speaker, "Speech module not initialized"
if cancel:
_speaker.silence()
_speaker.output(message)
def cancel():
assert _speaker, "Speech module not initialized"
_speaker.silence()
def quit():
pass
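# Minimal usage sketch (illustrative; assumes an accessible_output speech backend is installed):
#   init()
#   speak("Welcome to sudoaudio")
#   cancel()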
| ragb/sudoaudio | sudoaudio/speech/__init__.py | Python | gpl-3.0 | 1,242 | 0.003221 |
import unittest
import calc
class CalcTestCase(unittest.TestCase):
"""Test calc.py"""
def setUp(self):
self.num1 = 10
self.num2 = 5
def tearDown(self):
pass
    def test_add(self):
        self.assertEqual(calc.add(self.num1, self.num2), self.num1 + self.num2)
    def test_subtract(self):
        self.assertEqual(calc.subtract(self.num1, self.num2),
                         self.num1 - self.num2)
    def test_multiply(self):
        self.assertEqual(calc.multiply(self.num1, self.num2),
                         self.num1 * self.num2)
    def test_divide(self):
        self.assertEqual(calc.divide(self.num1, self.num2),
                         self.num1 / self.num2)
if __name__ == '__main__':
unittest.main()
| dariosena/LearningPython | general/dry/test_calc.py | Python | gpl-3.0 | 794 | 0 |
# -*- coding: utf-8 -*-
#
# API configuration
#####################
DEBUG = False
# Top-level URL for deployment. Numerous other URLs depend on this.
CYCLADES_BASE_URL = "https://compute.example.synnefo.org/compute/"
# The API will return HTTP Bad Request if the ?changes-since
# parameter refers to a point in time more than POLL_LIMIT seconds ago.
POLL_LIMIT = 3600
# Astakos groups that have access to '/admin' views.
ADMIN_STATS_PERMITTED_GROUPS = ["admin-stats"]
# Enable/Disable the snapshots feature altogether at the API level.
# If set to False, Cyclades will not expose the '/snapshots' API URL
# of the 'volume' app.
CYCLADES_SNAPSHOTS_ENABLED = True
# Enable/Disable the feature of sharing a resource with the members of the
# project to which it belongs, at the API level.
CYCLADES_SHARED_RESOURCES_ENABLED = False
# Enable/Disable the feature of rescuing a Virtual Machine at the API
# level
RESCUE_ENABLED = False
#
# Network Configuration
#
# CYCLADES_DEFAULT_SERVER_NETWORKS setting contains a list of networks to
# connect a newly created server to, *if the user has not* specified them
# explicitly in the POST /server API call.
# Each member of the list may be a network UUID, a tuple of network UUIDs,
# "SNF:ANY_PUBLIC_IPV4" [any public network with an IPv4 subnet defined],
# "SNF:ANY_PUBLIC_IPV6 [any public network with only an IPV6 subnet defined],
# or "SNF:ANY_PUBLIC" [any public network].
#
# Access control and quota policy are enforced, just as if the user had
# specified the value of CYCLADES_DEFAULT_SERVER_NETWORKS in the content
# of the POST /server call, after processing of "SNF:*" directives.
CYCLADES_DEFAULT_SERVER_NETWORKS = []
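# Example value (illustrative only; the UUID below is a placeholder):
# CYCLADES_DEFAULT_SERVER_NETWORKS = ["SNF:ANY_PUBLIC_IPV4",
#                                     "12345678-aaaa-bbbb-cccc-123456789abc"]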
# This setting contains a list of networks which every new server
# will be forced to connect to, regardless of the contents of the POST
# /servers call, or the value of CYCLADES_DEFAULT_SERVER_NETWORKS.
# Its format is identical to that of CYCLADES_DEFAULT_SERVER_NETWORKS.
# WARNING: No access control or quota policy are enforced.
# The server will get all IPv4/IPv6 addresses needed to connect to the
# networks specified in CYCLADES_FORCED_SERVER_NETWORKS, regardless
# of the state of the floating IP pool of the user, and without
# allocating any floating IPs.
CYCLADES_FORCED_SERVER_NETWORKS = []
# Maximum allowed network size for private networks.
MAX_CIDR_BLOCK = 22
# Default settings used by network flavors
DEFAULT_MAC_PREFIX = 'aa:00:0'
DEFAULT_BRIDGE = 'br0'
# Network flavors that users are allowed to create through API requests
# Available flavors are IP_LESS_ROUTED, MAC_FILTERED, PHYSICAL_VLAN
API_ENABLED_NETWORK_FLAVORS = ['MAC_FILTERED']
# Settings for MAC_FILTERED network:
# ------------------------------------------
# All networks of this type are bridged to the same bridge. Isolation between
# networks is achieved by assigning a unique MAC-prefix to each network and
# filtering packets via ebtables.
DEFAULT_MAC_FILTERED_BRIDGE = 'prv0'
# Firewalling. Firewall tags should contain '%d' to be filled with the NIC
# ID.
GANETI_FIREWALL_ENABLED_TAG = 'synnefo:network:%s:protected'
GANETI_FIREWALL_DISABLED_TAG = 'synnefo:network:%s:unprotected'
GANETI_FIREWALL_PROTECTED_TAG = 'synnefo:network:%s:limited'
# The default firewall profile that will be in effect if no tags are defined
DEFAULT_FIREWALL_PROFILE = 'DISABLED'
# Fixed mapping of user VMs to a specific backend.
# e.g. BACKEND_PER_USER = {'example@synnefo.org': 2}
BACKEND_PER_USER = {}
# Encryption key for the instance hostname in the stat graphs URLs. Set it to
# a random string and update the STATS_SECRET_KEY setting in the snf-stats-app
# host (20-snf-stats-app-settings.conf) accordingly.
CYCLADES_STATS_SECRET_KEY = "secret_key"
# URL templates for the stat graphs.
# The API implementation replaces '%s' with the encrypted backend id.
CPU_BAR_GRAPH_URL = 'http://stats.example.synnefo.org/stats/v1.0/cpu-bar/%s'
CPU_TIMESERIES_GRAPH_URL = \
'http://stats.example.synnefo.org/stats/v1.0/cpu-ts/%s'
NET_BAR_GRAPH_URL = 'http://stats.example.synnefo.org/stats/v1.0/net-bar/%s'
NET_TIMESERIES_GRAPH_URL = \
'http://stats.example.synnefo.org/stats/v1.0/net-ts/%s'
# Recommended refresh period for server stats
STATS_REFRESH_PERIOD = 60
# The maximum number of file path/content pairs that can be supplied on server
# build
MAX_PERSONALITY = 5
# The maximum size, in bytes, for each personality file
MAX_PERSONALITY_SIZE = 10240
# Authentication URL of the astakos instance to be used for user management
ASTAKOS_AUTH_URL = 'https://accounts.example.synnefo.org/identity/v2.0'
# Tune the size of the Astakos http client connection pool
# This limits the number of concurrent requests to Astakos.
CYCLADES_ASTAKOSCLIENT_POOLSIZE = 50
# Key for password encryption-decryption. After changing this setting, synnefo
# will be unable to decrypt all existing Backend passwords. You will need to
# store again the new password by using 'snf-manage backend-modify'.
# SECRET_ENCRYPTION_KEY may be up to 32 bytes. Keys bigger than 32 bytes are not
# supported.
SECRET_ENCRYPTION_KEY = "Password Encryption Key"
# Astakos service token
# The token used for astakos service api calls (e.g. api to retrieve user email
# using a user uuid)
CYCLADES_SERVICE_TOKEN = ''
# Template to use to build the FQDN of VMs. The setting will be formatted with
# the id of the VM.
CYCLADES_SERVERS_FQDN = 'snf-%(id)s.vm.example.synnefo.org'
# Description of applied port forwarding rules (DNAT) for Cyclades VMs. This
# setting contains a mapping from the port of each VM to a tuple containing the
# destination IP/hostname and the new port: (host, port). Instead of a tuple a
# python callable object may be used which must return such a tuple. The caller
# will pass to the callable the following positional arguments, in the
# following order:
# * server_id: The ID of the VM in the DB
# * ip_address: The IPv4 address of the public VM NIC
# * fqdn: The FQDN of the VM
# * user: The UUID of the owner of the VM
#
# Here is an example describing the mapping of the SSH port of all VMs to
# the external address 'gate.example.synnefo.org' and port 61000+server_id.
# e.g. iptables -t nat -A prerouting -d gate.example.synnefo.org \
# --dport (61000 + $(VM_ID)) -j DNAT --to-destination $(VM_IP):22
#CYCLADES_PORT_FORWARDING = {
# 22: lambda ip_address, server_id, fqdn, user:
# ("gate.example.synnefo.org", 61000 + server_id),
#}
CYCLADES_PORT_FORWARDING = {}
# Extra configuration options required for snf-vncauthproxy (>=1.5). Each dict
# of the list, describes one vncauthproxy instance.
CYCLADES_VNCAUTHPROXY_OPTS = [
{
# These values are required for VNC console support. They should match
# a user / password configured in the snf-vncauthproxy authentication /
# users file (/var/lib/vncauthproxy/users).
'auth_user': 'synnefo',
'auth_password': 'secret_password',
# server_address and server_port should reflect the --listen-address and
# --listen-port options passed to the vncauthproxy daemon
'server_address': '127.0.0.1',
'server_port': 24999,
# Set to True to enable SSL support on the control socket.
'enable_ssl': False,
# If you enabled SSL support for snf-vncauthproxy you can optionally
        # provide a path to a CA file and enable strict checking for the server
        # certificate.
'ca_cert': None,
'strict': False,
},
]
# The maximum allowed size(GB) for a Cyclades Volume
CYCLADES_VOLUME_MAX_SIZE = 200
# The maximum allowed metadata items for a Cyclades Volume
CYCLADES_VOLUME_MAX_METADATA = 10
# The volume types that Cyclades allow to be detached
CYCLADES_DETACHABLE_DISK_TEMPLATES = ("ext_archipelago", "ext_vlmc")
# The maximum number of tags allowed for a Cyclades Virtual Machine
CYCLADES_VM_MAX_TAGS = 50
# The maximum allowed metadata items for a Cyclades Virtual Machine
CYCLADES_VM_MAX_METADATA = 10
# Define cache for public stats
PUBLIC_STATS_CACHE = {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
"KEY_PREFIX": "publicstats",
"TIMEOUT": 300,
}
# Permit users of specific groups to override the flavor allow_create policy
CYCLADES_FLAVOR_OVERRIDE_ALLOW_CREATE = {}
# Define cache for VM password
VM_PASSWORD_CACHE = {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
"KEY_PREFIX": "vmpassword",
"TIMEOUT": None,
}
| grnet/synnefo | snf-cyclades-app/synnefo/app_settings/default/api.py | Python | gpl-3.0 | 8,466 | 0.000472 |
#!/usr/bin/env python
from os import path
import sys
import sqlite3
import random
import argparse
import re
import gzip
import mvmv.mvmv as mvmv
import mvmv.mvmvd as mvmvd
import mvmv.parse as parse
class DownloadDB(argparse.Action):
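    """argparse action: download the IMDb movies list and rebuild the local movie database."""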
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(DownloadDB, self).__init__(option_strings, dest, nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
movie_list_name = "movies.list"
list_url = "ftp://ftp.fu-berlin.de/pub/misc/movies/database/movies.list.gz"
sys.stdout.write("Downloading ... ")
sys.stdout.flush()
if sys.version_info >= (3, 0):
import urllib.request
urllib.request.urlretrieve(list_url, movie_list_name + ".gz")
else:
import urllib
urllib.urlretrieve(list_url, movie_list_name + ".gz")
sys.stdout.write("Done\n")
sys.stdout.write("Adding to table ... ")
sys.stdout.flush()
with open(movie_list_name, 'wb') as movie_list:
with gzip.open(movie_list_name + ".gz", 'rb') as decompressed:
movie_list.write(decompressed.read())
parse.create_table(movie_list_name, "movie.db")
sys.stdout.write("Done.\n")
def get_parser():
usage_str = "%(prog)s [OPTIONS] [-r] [-w] [-s] DIRECTORY [DIRECTORY ...] -t DESTDIR"
parser = argparse.ArgumentParser(usage=usage_str)
parser.add_argument("-f", "--file", dest="files", metavar="FILE",
type=str, nargs='*', default=[],
help="Rename this FILE")
parser.add_argument("-s", "--srcdir", dest="srcdirs", metavar="SRCDIR",
type=str, nargs='*', default=[],
help="Rename all files in this DIRECTORY")
parser.add_argument("-t", "--destdir", dest="destdir", metavar="DESTDIR",
type=str, nargs=1, action='store', required=True,
help="Move all the files to this directory.")
parser.add_argument("-e", "--excludes", dest="excludes", metavar="REGEX",
type=str, nargs='*', default=[],
help="Rename all files in this DIRECTORY")
parser.add_argument("-r", "-R", "--recursive", action="store_true",
dest="recursive", default=False,
help="Recursively scan the directories for files." +
"(Unsupported)",)
parser.add_argument("-m", "--max-depth", dest="depth", metavar="DEPTH",
default=None, type=int, nargs='?',
help="Recursively scan the directories for files." +
"(Unsupported)",)
parser.add_argument("-g", "--gui", action="store_true", dest="start_gui",
default=False,
help="Start the program as a GUI." + "(Unsupported)")
parser.add_argument("-w", "--watch", action="store_true", dest="watch",
default=False,
help="Watch the given directories for new files")
parser.add_argument("--stop", action="store_true", dest="stop_daemon",
default=False,
help="Stop the daemon.")
parser.add_argument("--pidfile", dest="pidfile", nargs=1,
metavar="FILE", type=str, default="./mvmvd.pid",
help="The file where the pid is stored for the daemon")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
default=False,
help="Be more verbose." + "(Unsupported)")
parser.add_argument("-q", "--quiet", dest="quiet", action="store_true",
default=False,
help="Only output errors." + "(Unsupported)")
parser.add_argument("-y", "--always-yes", dest="always_yes",
action="store_true", default=False,
help="Assume yes for every prompt." + "(Unsupported)")
parser.add_argument("-u", "--updatedb", dest="remotedb", default=None,
metavar="PATH", type=str, nargs='?',
action=DownloadDB,
help="Update the movies list from the given DBPATH." +
"(Unsupported custom DBPATH)")
# TODO(pbhandari): default db path should be sane.
parser.add_argument("-p", "--dbpath", dest="dbpath", nargs='?',
metavar="PATH", type=str, default="movie.db",
help="Alternate path for the database of movies.")
parser.add_argument('args', nargs=argparse.REMAINDER)
return parser
def error(message, end='\n'):
sys.stderr.write(sys.argv[0] + ": error: " + message + end)
sys.stderr.flush()
def main():
args = get_parser().parse_args()
args.files = [path.abspath(fname) for fname in args.files
if mvmv.is_valid_file(fname, args.excludes)]
args.srcdirs = [path.abspath(sdir) for sdir in args.srcdirs
if path.isdir(sdir)]
args.destdir = path.abspath(args.destdir[0])
for arg in args.args:
if path.isdir(arg):
args.srcdirs.append(path.abspath(arg))
elif mvmv.is_valid_file(arg):
args.files.append(arg)
    if not path.isdir(args.destdir):
        error("'%s' is not a directory." % args.destdir)
sys.exit(1)
if not args.srcdirs and not args.files:
error("You must specify a directory or filename in the commandline.")
sys.exit(1)
conn = sqlite3.connect(args.dbpath)
cursor = conn.cursor()
args.excludes = [re.compile(a) for a in args.excludes]
if args.stop_daemon:
mvmvd.mvmvd(args.pidfile).stop()
if args.watch:
mvmvd.mvmvd(args.pidfile,
dirs=args.srcdirs,
dest=args.destdir,
recursive=args.recursive).start()
for query in args.files:
mvmv.movemovie(path.split(path.abspath(query)), args.destdir, cursor)
for dirname in args.srcdirs:
mvmv.movemovies(dirname, args.destdir, cursor, args.excludes)
conn.close()
# TODO(pbhandari): Code is ugly and stupid.
if __name__ == '__main__':
main()
| wmak/mvmv | mvmv/cli.py | Python | mit | 6,353 | 0.001259 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'logging sinks list' command."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class List(base.ListCommand):
"""Lists the defined sinks."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
base.URI_FLAG.RemoveFromParser(parser)
parser.add_argument(
'--only-v2-sinks', required=False, action='store_true',
help='Display only v2 sinks.')
util.AddNonProjectArgs(parser, 'List sinks')
def Collection(self):
return 'logging.sinks'
def ListLogSinks(self, project, log_name):
"""List log sinks from the specified log."""
result = util.GetClientV1().projects_logs_sinks.List(
util.GetMessagesV1().LoggingProjectsLogsSinksListRequest(
projectsId=project, logsId=log_name))
for sink in result.sinks:
yield util.TypedLogSink(sink, log_name=log_name)
def ListLogServiceSinks(self, project, service_name):
"""List log service sinks from the specified service."""
result = util.GetClientV1().projects_logServices_sinks.List(
util.GetMessagesV1().LoggingProjectsLogServicesSinksListRequest(
projectsId=project, logServicesId=service_name))
for sink in result.sinks:
yield util.TypedLogSink(sink, service_name=service_name)
def ListSinks(self, parent):
"""List sinks."""
# Use V2 logging API.
result = util.GetClient().projects_sinks.List(
util.GetMessages().LoggingProjectsSinksListRequest(
parent=parent))
for sink in result.sinks:
yield util.TypedLogSink(sink)
def YieldAllSinks(self, project):
"""Yield all log and log service sinks from the specified project."""
client = util.GetClientV1()
messages = util.GetMessagesV1()
# First get all the log sinks.
response = list_pager.YieldFromList(
client.projects_logs,
messages.LoggingProjectsLogsListRequest(projectsId=project),
field='logs', batch_size=None, batch_size_attribute='pageSize')
for log in response:
# We need only the base log name, not the full resource uri.
log_id = util.ExtractLogId(log.name)
for typed_sink in self.ListLogSinks(project, log_id):
yield typed_sink
# Now get all the log service sinks.
response = list_pager.YieldFromList(
client.projects_logServices,
messages.LoggingProjectsLogServicesListRequest(projectsId=project),
field='logServices', batch_size=None, batch_size_attribute='pageSize')
for service in response:
# In contrast, service.name correctly contains only the name.
for typed_sink in self.ListLogServiceSinks(project, service.name):
yield typed_sink
# Lastly, get all v2 sinks.
for typed_sink in self.ListSinks(util.GetCurrentProjectParent()):
yield typed_sink
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
The list of sinks.
"""
util.CheckLegacySinksCommandArguments(args)
project = properties.VALUES.core.project.Get(required=True)
if args.log:
return self.ListLogSinks(project, args.log)
elif args.service:
return self.ListLogServiceSinks(project, args.service)
elif (args.organization or args.folder or args.billing_account or
args.only_v2_sinks):
return self.ListSinks(util.GetParentFromArgs(args))
else:
return self.YieldAllSinks(project)
List.detailed_help = {
'DESCRIPTION': """\
{index}
If either the *--log* or *--log-service* flags are included, then
the only sinks listed are for that log or that service.
If *--only-v2-sinks* flag is included, then only v2 sinks
are listed.
If none of the flags are included, then all sinks in use are listed.
""",
}
| Sorsly/subtle | google-cloud-sdk/lib/surface/logging/sinks/list.py | Python | mit | 4,667 | 0.004928 |
"""Configuration for imitation.scripts.train_adversarial."""
import sacred
from imitation.rewards import reward_nets
from imitation.scripts.common import common, demonstrations, reward, rl, train
train_adversarial_ex = sacred.Experiment(
"train_adversarial",
ingredients=[
common.common_ingredient,
demonstrations.demonstrations_ingredient,
reward.reward_ingredient,
rl.rl_ingredient,
train.train_ingredient,
],
)
@train_adversarial_ex.config
def defaults():
show_config = False
total_timesteps = int(1e6) # Num of environment transitions to sample
algorithm_kwargs = dict(
demo_batch_size=1024, # Number of expert samples per discriminator update
n_disc_updates_per_round=4, # Num discriminator updates per generator round
)
algorithm_specific = {} # algorithm_specific[algorithm] is merged with config
checkpoint_interval = 0 # Num epochs between checkpoints (<0 disables)
@train_adversarial_ex.config
def aliases_default_gen_batch_size(algorithm_kwargs, rl):
# Setting generator buffer capacity and discriminator batch size to
# the same number is equivalent to not using a replay buffer at all.
# "Disabling" the replay buffer seems to improve convergence speed, but may
# come at a cost of stability.
algorithm_kwargs["gen_replay_buffer_capacity"] = rl["batch_size"]
# Shared settings
MUJOCO_SHARED_LOCALS = dict(rl=dict(rl_kwargs=dict(ent_coef=0.1)))
ANT_SHARED_LOCALS = dict(
total_timesteps=int(3e7),
algorithm_kwargs=dict(shared=dict(demo_batch_size=8192)),
rl=dict(batch_size=16384),
)
# Classic RL Gym environment named configs
@train_adversarial_ex.named_config
def acrobot():
    common = dict(env_name="Acrobot-v1")
algorithm_kwargs = {"allow_variable_horizon": True}
@train_adversarial_ex.named_config
def cartpole():
common = dict(env_name="CartPole-v1")
algorithm_kwargs = {"allow_variable_horizon": True}
@train_adversarial_ex.named_config
def seals_cartpole():
common = dict(env_name="seals/CartPole-v0")
total_timesteps = int(1.4e6)
@train_adversarial_ex.named_config
def mountain_car():
common = dict(env_name="MountainCar-v0")
algorithm_kwargs = {"allow_variable_horizon": True}
@train_adversarial_ex.named_config
def seals_mountain_car():
common = dict(env_name="seals/MountainCar-v0")
@train_adversarial_ex.named_config
def pendulum():
common = dict(env_name="Pendulum-v1")
# Standard MuJoCo Gym environment named configs
@train_adversarial_ex.named_config
def seals_ant():
locals().update(**MUJOCO_SHARED_LOCALS)
locals().update(**ANT_SHARED_LOCALS)
common = dict(env_name="seals/Ant-v0")
@train_adversarial_ex.named_config
def half_cheetah():
locals().update(**MUJOCO_SHARED_LOCALS)
common = dict(env_name="HalfCheetah-v2")
rl = dict(batch_size=16384, rl_kwargs=dict(batch_size=1024))
algorithm_specific = dict(
airl=dict(total_timesteps=int(5e6)),
gail=dict(total_timesteps=int(8e6)),
)
reward = dict(
algorithm_specific=dict(
airl=dict(
net_cls=reward_nets.BasicShapedRewardNet,
net_kwargs=dict(
reward_hid_sizes=(32,),
potential_hid_sizes=(32,),
),
),
),
)
algorithm_kwargs = dict(
# Number of discriminator updates after each round of generator updates
n_disc_updates_per_round=16,
# Equivalent to no replay buffer if batch size is the same
gen_replay_buffer_capacity=16384,
demo_batch_size=8192,
)
@train_adversarial_ex.named_config
def seals_hopper():
locals().update(**MUJOCO_SHARED_LOCALS)
common = dict(env_name="seals/Hopper-v0")
@train_adversarial_ex.named_config
def seals_humanoid():
locals().update(**MUJOCO_SHARED_LOCALS)
common = dict(env_name="seals/Humanoid-v0")
total_timesteps = int(4e6)
@train_adversarial_ex.named_config
def reacher():
common = dict(env_name="Reacher-v2")
algorithm_kwargs = {"allow_variable_horizon": True}
@train_adversarial_ex.named_config
def seals_swimmer():
locals().update(**MUJOCO_SHARED_LOCALS)
common = dict(env_name="seals/Swimmer-v0")
total_timesteps = int(2e6)
@train_adversarial_ex.named_config
def seals_walker():
locals().update(**MUJOCO_SHARED_LOCALS)
common = dict(env_name="seals/Walker2d-v0")
# Debug configs
@train_adversarial_ex.named_config
def fast():
# Minimize the amount of computation. Useful for test cases.
# Need a minimum of 10 total_timesteps for adversarial training code to pass
# "any update happened" assertion inside training loop.
total_timesteps = 10
algorithm_kwargs = dict(
demo_batch_size=1,
n_disc_updates_per_round=4,
)
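# The named configs above are selected on the command line via Sacred, e.g.
# (illustrative invocation; assumes the `gail` command defined in
# imitation.scripts.train_adversarial):
#   python -m imitation.scripts.train_adversarial gail with seals_cartpole fast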
| HumanCompatibleAI/imitation | src/imitation/scripts/config/train_adversarial.py | Python | mit | 4,850 | 0.000825 |
# Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Kyle Mestery, Cisco Systems, Inc.
import mock
import requests
from neutron.plugins.common import constants
from neutron.plugins.ml2 import config as config
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mechanism_odl
from neutron.plugins.ml2 import plugin
from neutron.tests import base
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import testlib_api
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
class OpenDaylightTestCase(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
# driver apis.
config.cfg.CONF.set_override('mechanism_drivers',
['logger', 'opendaylight'],
'ml2')
# Set URL/user/pass so init doesn't throw a cfg required error.
# They are not used in these tests since sendjson is overwritten.
config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
super(OpenDaylightTestCase, self).setUp(PLUGIN_NAME)
self.port_create_status = 'DOWN'
self.segment = {'api.NETWORK_TYPE': ""}
self.mech = mechanism_odl.OpenDaylightMechanismDriver()
mechanism_odl.OpenDaylightMechanismDriver.sendjson = (
self.check_sendjson)
def check_sendjson(self, method, urlpath, obj, ignorecodes=[]):
self.assertFalse(urlpath.startswith("http://"))
def test_check_segment(self):
"""Validate the check_segment call."""
self.segment[api.NETWORK_TYPE] = constants.TYPE_LOCAL
self.assertTrue(self.mech.check_segment(self.segment))
self.segment[api.NETWORK_TYPE] = constants.TYPE_FLAT
self.assertFalse(self.mech.check_segment(self.segment))
self.segment[api.NETWORK_TYPE] = constants.TYPE_VLAN
self.assertTrue(self.mech.check_segment(self.segment))
self.segment[api.NETWORK_TYPE] = constants.TYPE_GRE
self.assertTrue(self.mech.check_segment(self.segment))
self.segment[api.NETWORK_TYPE] = constants.TYPE_VXLAN
self.assertTrue(self.mech.check_segment(self.segment))
# Validate a network type not currently supported
self.segment[api.NETWORK_TYPE] = 'mpls'
self.assertFalse(self.mech.check_segment(self.segment))
class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
password='somepass'):
config.cfg.CONF.set_override('mechanism_drivers',
['logger', 'opendaylight'],
'ml2')
config.cfg.CONF.set_override('url', url, 'ml2_odl')
config.cfg.CONF.set_override('username', username, 'ml2_odl')
config.cfg.CONF.set_override('password', password, 'ml2_odl')
def _test_missing_config(self, **kwargs):
self._set_config(**kwargs)
self.assertRaises(config.cfg.RequiredOptError,
plugin.Ml2Plugin)
def test_valid_config(self):
self._set_config()
plugin.Ml2Plugin()
def test_missing_url_raises_exception(self):
self._test_missing_config(url=None)
def test_missing_username_raises_exception(self):
self._test_missing_config(username=None)
def test_missing_password_raises_exception(self):
self._test_missing_config(password=None)
class OpenDaylightMechanismTestBasicGet(test_plugin.TestBasicGet,
OpenDaylightTestCase):
pass
class OpenDaylightMechanismTestNetworksV2(test_plugin.TestNetworksV2,
OpenDaylightTestCase):
pass
class OpenDaylightMechanismTestSubnetsV2(test_plugin.TestSubnetsV2,
OpenDaylightTestCase):
pass
class OpenDaylightMechanismTestPortsV2(test_plugin.TestPortsV2,
OpenDaylightTestCase):
pass
class AuthMatcher(object):
def __eq__(self, obj):
return (obj.username == config.cfg.CONF.ml2_odl.username and
obj.password == config.cfg.CONF.ml2_odl.password)
class OpenDaylightMechanismDriverTestCase(base.BaseTestCase):
def setUp(self):
super(OpenDaylightMechanismDriverTestCase, self).setUp()
config.cfg.CONF.set_override('mechanism_drivers',
['logger', 'opendaylight'], 'ml2')
config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
self.mech = mechanism_odl.OpenDaylightMechanismDriver()
self.mech.initialize()
@staticmethod
def _get_mock_delete_resource_context():
current = {'id': '00000000-1111-2222-3333-444444444444'}
context = mock.Mock(current=current)
return context
_status_code_msgs = {
204: '',
401: '401 Client Error: Unauthorized',
403: '403 Client Error: Forbidden',
404: '404 Client Error: Not Found',
409: '409 Client Error: Conflict',
501: '501 Server Error: Not Implemented'
}
@classmethod
def _get_mock_request_response(cls, status_code):
response = mock.Mock(status_code=status_code)
response.raise_for_status = mock.Mock() if status_code < 400 else (
mock.Mock(side_effect=requests.exceptions.HTTPError(
cls._status_code_msgs[status_code])))
return response
def _test_delete_resource_postcommit(self, object_type, status_code,
exc_class=None):
self.mech.out_of_sync = False
method = getattr(self.mech, 'delete_%s_postcommit' % object_type)
context = self._get_mock_delete_resource_context()
request_response = self._get_mock_request_response(status_code)
with mock.patch('requests.request',
return_value=request_response) as mock_method:
if exc_class is not None:
self.assertRaises(exc_class, method, context)
else:
method(context)
url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, object_type,
context.current['id'])
mock_method.assert_called_once_with(
'delete', url=url, headers={'Content-Type': 'application/json'},
data=None, auth=AuthMatcher(),
timeout=config.cfg.CONF.ml2_odl.timeout)
def test_delete_network_postcommit(self):
self._test_delete_resource_postcommit('network',
requests.codes.no_content)
for status_code in (requests.codes.unauthorized,
requests.codes.not_found,
requests.codes.conflict):
self._test_delete_resource_postcommit(
'network', status_code, requests.exceptions.HTTPError)
def test_delete_subnet_postcommit(self):
self._test_delete_resource_postcommit('subnet',
requests.codes.no_content)
for status_code in (requests.codes.unauthorized,
requests.codes.not_found,
requests.codes.conflict,
requests.codes.not_implemented):
self._test_delete_resource_postcommit(
'subnet', status_code, requests.exceptions.HTTPError)
def test_delete_port_postcommit(self):
self._test_delete_resource_postcommit('port',
requests.codes.no_content)
for status_code in (requests.codes.unauthorized,
requests.codes.forbidden,
requests.codes.not_found,
requests.codes.not_implemented):
self._test_delete_resource_postcommit(
'port', status_code, requests.exceptions.HTTPError)
| shakamunyi/neutron-vrrp | neutron/tests/unit/ml2/test_mechanism_odl.py | Python | apache-2.0 | 9,006 | 0 |
"""
Optional integration with django-any-Imagefield
"""
from __future__ import absolute_import
from django.db import models
from fluent_utils.django_compat import is_installed
if is_installed('any_imagefield'):
from any_imagefield.models import AnyFileField as BaseFileField, AnyImageField as BaseImageField
else:
BaseFileField = models.FileField
BaseImageField = models.ImageField
# subclassing here so South or Django migrations detect a single class.
class AnyFileField(BaseFileField):
"""
A FileField that can refer to an uploaded file.
If *django-any-imagefield* is not installed, the filebrowser link will not be displayed.
"""
def deconstruct(self):
# For Django migrations, masquerade as normal FileField too
name, path, args, kwargs = super(AnyFileField, self).deconstruct()
# FileField behavior
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = getattr(self, 'upload_to', None) or getattr(self, 'directory', None) or ''
return name, "django.db.models.FileField", args, kwargs
# subclassing here so South or Django migrations detect a single class.
class AnyImageField(BaseImageField):
"""
An ImageField that can refer to an uploaded image file.
If *django-any-imagefield* is not installed, the filebrowser link will not be displayed.
"""
def deconstruct(self):
# For Django migrations, masquerade as normal ImageField too
name, path, args, kwargs = super(AnyImageField, self).deconstruct()
# FileField behavior
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = getattr(self, 'upload_to', None) or getattr(self, 'directory', None) or ''
return name, "django.db.models.ImageField", args, kwargs
| edoburu/django-fluent-utils | fluent_utils/softdeps/any_imagefield.py | Python | apache-2.0 | 1,856 | 0.002694 |
from django.core.management import call_command
from django.test import TestCase
from mock import call
from mock import patch
from kolibri.core.content import models as content
class DeleteChannelTestCase(TestCase):
"""
Testcase for delete channel management command
"""
fixtures = ["content_test.json"]
the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"
def delete_channel(self):
call_command("deletechannel", self.the_channel_id)
def test_channelmetadata_delete_remove_metadata_object(self):
self.delete_channel()
self.assertEquals(0, content.ChannelMetadata.objects.count())
def test_channelmetadata_delete_remove_contentnodes(self):
self.delete_channel()
self.assertEquals(0, content.ContentNode.objects.count())
def test_channelmetadata_delete_leave_unrelated_contentnodes(self):
c2c1 = content.ContentNode.objects.get(title="c2c1")
new_id = c2c1.id[:-1] + "1"
content.ContentNode.objects.create(
id=new_id,
content_id=c2c1.content_id,
kind=c2c1.kind,
channel_id=c2c1.channel_id,
available=True,
title=c2c1.title,
)
self.delete_channel()
self.assertEquals(1, content.ContentNode.objects.count())
def test_channelmetadata_delete_remove_file_objects(self):
self.delete_channel()
self.assertEquals(0, content.File.objects.count())
@patch("kolibri.core.content.models.paths.get_content_storage_file_path")
@patch("kolibri.core.content.models.os.remove")
def test_channelmetadata_delete_files(self, os_remove_mock, content_file_path):
path = "testing"
content_file_path.return_value = path
num_files = content.LocalFile.objects.filter(available=True).count()
self.delete_channel()
os_remove_mock.assert_has_calls([call(path)] * num_files)
| indirectlylit/kolibri | kolibri/core/content/test/test_deletechannel.py | Python | mit | 1,918 | 0.000521 |
import visual as vpy
import numpy as np
import anatomical_constants
from math import sin, cos, acos, atan, radians, sqrt
from conic_section import Ellipse
cam_mat_n7 = np.array([[1062.348, 0.0 , 344.629],
[0.0 , 1065.308, 626.738],
[0.0 , 0.0 , 1.0]])
# [Looking at a camera facing the user]
# z points towards user's face
# x points to the left (same as px coord direction)
# y points downwards (same as px coord direction)
class Limbus:
def __init__(self, centre_mm_param, normal_param, ransac_ellipse_param):
self.center_mm = centre_mm_param
self.normal = normal_param
self.ransac_ellipse = ransac_ellipse_param
def ellipse_to_limbuses_persp_geom(ellipse, device):
limbus_r_mm = anatomical_constants.limbus_r_mm
focal_len_x_px, focal_len_y_px, prin_point_x, prin_point_y = device.get_intrisic_cam_params()
focal_len_z_px = (focal_len_x_px + focal_len_y_px) / 2
(x0_px, y0_px), (_, maj_axis_px), _ = ellipse.rotated_rect
# Using iris_r_px / focal_len_px = iris_r_mm / distance_to_iris_mm
iris_z_mm = (limbus_r_mm * 2 * focal_len_z_px) / maj_axis_px
# Using (x_screen_px - prin_point) / focal_len_px = x_world / z_world
iris_x_mm = -iris_z_mm * (x0_px - prin_point_x) / focal_len_x_px
iris_y_mm = iris_z_mm * (y0_px - prin_point_y) / focal_len_y_px
limbus_center = (iris_x_mm, iris_y_mm, iris_z_mm)
(ell_x0, ell_y0), (ell_w, ell_h), angle = ellipse.rotated_rect
new_rotated_rect = (ell_x0 - prin_point_x, ell_y0 - prin_point_y), (ell_w, ell_h), angle
ell = Ellipse(new_rotated_rect)
    f = focal_len_z_px
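    # Conic (quadratic-form) matrix of the principal-point-centred limbus ellipse in
    # normalised image coordinates; its eigen-decomposition recovers the 3D circle pose.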
Z = np.array([[ell.A, ell.B / 2.0, ell.D / (2.0 * f)],
[ell.B / 2.0, ell.C, ell.E / (2.0 * f)],
[ell.D / (2.0 * f), ell.E / (2.0 * f), ell.F / (f * f)]])
eig_vals, eig_vecs = np.linalg.eig(Z)
idx = eig_vals.argsort()
eig_vals = eig_vals[idx]
eig_vecs = eig_vecs[:, idx]
L1, L2, L3 = eig_vals[2], eig_vals[1], eig_vals[0]
R = np.vstack([eig_vecs[:, 2], eig_vecs[:, 1], eig_vecs[:, 0]])
g = sqrt((L2 - L3) / (L1 - L3))
h = sqrt((L1 - L2) / (L1 - L3))
poss_normals = [R.dot([h, 0, -g]), R.dot([h, 0, g]), R.dot([-h, 0, -g]), R.dot([-h, 0, g])]
# Constraints
nx, ny, nz = poss_normals[0 if iris_x_mm > 0 else 1]
if nz > 0:
nx, ny, nz = -nx, -ny, -nz
if ny * nz < 0:
ny *= -1
if iris_x_mm > 0:
if nx > 0: nx *= -1
elif nx < 0: nx *= -1
return Limbus(limbus_center, [nx, ny, nz], ellipse)
def ellipse_to_limbuses_approx(ellipse, device):
""" Returns 2 ambiguous limbuses
"""
limbus_r_mm = anatomical_constants.limbus_r_mm
focal_len_x_px, focal_len_y_px, prin_point_x, prin_point_y = device.get_intrisic_cam_params()
focal_len_z_px = (focal_len_x_px + focal_len_y_px) / 2
(x0_px, y0_px), (min_axis_px, maj_axis_px), angle = ellipse.rotated_rect
# Using iris_r_px / focal_len_px = iris_r_mm / distance_to_iris_mm
iris_z_mm = (limbus_r_mm * 2 * focal_len_z_px) / maj_axis_px
# Using (x_screen_px - prin_point) / focal_len_px = x_world / z_world
iris_x_mm = -iris_z_mm * (x0_px - prin_point_x) / focal_len_x_px
iris_y_mm = iris_z_mm * (y0_px - prin_point_y) / focal_len_y_px
limbus_center = (iris_x_mm, iris_y_mm, iris_z_mm)
psi = radians(angle) # z-axis rotation (radians)
tht_1 = acos(min_axis_px / maj_axis_px) # y-axis rotation (radians)
tht_2 = -tht_1 # as acos has 2 ambiguous solutions
# Find 2 possible normals for the limbus (weak perspective)
normal_1 = vpy.vector(sin(tht_1) * cos(psi), -sin(tht_1) * sin(psi), -cos(tht_1))
normal_2 = vpy.vector(sin(tht_2) * cos(psi), -sin(tht_2) * sin(psi), -cos(tht_2))
# Now correct for weak perspective by modifying angle by offset between camera axis and limbus
x_correction = -atan(iris_y_mm / iris_z_mm)
y_correction = -atan(iris_x_mm / iris_z_mm)
x_axis, y_axis = vpy.vector(1, 0, 0), vpy.vector(0, -1, 0) # VPython uses different y axis
normal_1 = vpy.rotate(normal_1, y_correction, y_axis)
normal_1 = vpy.rotate(normal_1, x_correction, x_axis).astuple()
normal_2 = vpy.rotate(normal_2, y_correction, y_axis)
normal_2 = vpy.rotate(normal_2, x_correction, x_axis).astuple()
return Limbus(limbus_center, normal_1, ellipse)
def get_gaze_point_px(limbus, device):
    """ Convenience method for getting gaze point on screen in px
    """
    gaze_point_mm = get_gaze_point_mm(limbus)
    return convert_gaze_pt_mm_to_px(gaze_point_mm, device)
def get_gaze_point_mm(limbus):
""" Returns intersection with z-plane of optical axis vector (mm)
"""
# Ray-plane intersection
x0, y0, z0 = limbus.center_mm
dx, dy, dz = limbus.normal
t = -z0 / dz
x_screen_mm, y_screen_mm = x0 + dx * t, y0 + dy * t
return x_screen_mm, y_screen_mm
def convert_gaze_pt_mm_to_px((x_screen_mm, y_screen_mm), device):
""" Returns intersection with screen in coordinates (px)
"""
screen_w_mm, screen_h_mm = device.screen_size_mm
screen_w_px, screen_h_px = device.screen_size_px
screen_y_offset_px = device.screen_y_offset_px # height of notification bar
x_offset, y_offset = device.offset_mm # screen offset from camera position
x_screen_px = (x_screen_mm + x_offset) / screen_w_mm * screen_w_px
y_screen_px = (y_screen_mm - y_offset) / screen_h_mm * screen_h_px - screen_y_offset_px
return x_screen_px, y_screen_px
| errollw/EyeTab | EyeTab_Python/gaze_geometry.py | Python | mit | 5,818 | 0.010141 |
'''
Copyright (C) 2015 Constantin Tschuertz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from enum import Enum
import hashlib
from models.parametertype import ParameterType
__author__ = 'constantin'
class UrlStructure():
    def __init__(self, path, parameters=None, url_hash=None):
        self.path = path
        # dict of dicts: parametername -> {parameter_type, origin, generating};
        # a change of a "generating" parameter creates a new page
        self.parameters = parameters if parameters is not None else {}
        self.url_hash = url_hash
def get_parameter_type(self, parameter_name):
if parameter_name not in self.parameters:
raise KeyError("{} not found".format(parameter_name))
return ParameterType(self.parameters[parameter_name]['parameter_type'])
def get_parameter_origin(self, parameter_name):
if parameter_name not in self.parameters:
raise KeyError("{} not found".format(parameter_name))
        return ParameterOrigin(self.parameters[parameter_name]['origin'])
def toString(self):
msg = "[Url: {} \n".format(self.path)
for param in self.parameters:
msg += "{} - {} - {} - {} \n".format(param, ParameterType(self.parameters[param]['parameter_type']), ParameterOrigin(self.parameters[param]['origin']), self.parameters[param]['generating'])
msg += "Hash: {}]".format(self.url_hash)
return msg
class ParameterOrigin(Enum):
ServerGenerated = 0
ClientGenerated = 1
| ConstantinT/jAEk | crawler/models/urlstructure.py | Python | gpl-3.0 | 1,991 | 0.00452 |