repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
---|---|---|---|---|---|---|---|---|
stvstnfrd/edx-platform | openedx/core/djangoapps/video_pipeline/migrations/0001_initial.py | Python | agpl-3.0 | 1,352 | 0.005178 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='VideoPipelineIntegration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('api_url', models.URLField(help_text='edx-video-pipeline API URL.', verbose_name='Internal API URL')),
('service_username', models.CharField(default=u'video_pipeline_service_user', help_text='Username created for Video Pipeline Integration, e.g. video_pipeline_service_user.', max_length=100)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Chang
|
ed by')),
|
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
|
zheguang/voltdb | lib/python/voltcli/cli.py | Python | agpl-3.0 | 28,839 | 0.008808 |
# This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'scooper'
import sys
import os
import optparse
import shlex
import copy
from voltcli import utility
# Volt CLI command processor
# Individual option variables are added by the option parser. They are available
# externally as module attributes.
#===============================================================================
class BaseOption(object):
#===============================================================================
"""
General CLI option specification (uses optparse keywords for now).
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
self.short_opt = short_opt
self.long_opt = long_opt
self.kwargs = kwargs
self.kwargs['dest'] = dest
# A help message of None makes it a hidden option.
if help_msg is not None:
self.kwargs['help'] = help_msg
if 'default' in self.kwargs:
if utility.is_string(kwargs['default']):
self.kwargs['help'] += ' (default="%s")' % self.kwargs['default']
else:
self.kwargs['help'] += ' (default=%s)' % self.kwargs['default']
else:
self.kwargs['help'] = optparse.SUPPRESS_HELP
def get_option_names(self):
return [a for a in
|
(self.short_opt, self.long_opt) if a is not None]
def get_dest(self):
if 'dest' not in self.kwargs:
utility.abort('%s must specify a "dest" property.' % self.__class__.__name__)
return self.kwargs['dest']
def get_default(self):
return self.kwargs.get('default', None)
def post
|
process_value(self, value):
# Hook for massaging the option instance value. Default to NOP.
return value
def __str__(self):
return '%s(%s/%s %s)' % (self.__class__.__name__,
self.short_opt, self.long_opt, self.kwargs)
def __cmp__(self, other):
# Sort options by lowercase letter or word, depending on which is available.
if self.short_opt:
if other.short_opt:
return cmp(self.short_opt.lower(), other.short_opt.lower())
return 1
if other.short_opt:
return -1
if self.long_opt:
if other.long_opt:
return cmp(self.long_opt.lower(), other.long_opt.lower())
return 1
if other.long_opt:
return -1
return 0
def has_value(self):
return (not 'action' in self.kwargs or self.kwargs['action'] == 'store')
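# Hypothetical usage sketch (not from the original file): a spec such as
#   BaseOption('-n', '--count', 'count', 'number of retries', default=3)
# stores dest='count' in kwargs, appends ' (default=3)' to the help text, and
# the parsed value later surfaces as the 'count' attribute on the optparse
# options object.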
#===============================================================================
class BooleanOption(BaseOption):
#===============================================================================
"""
Boolean CLI option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
BaseOption.__init__(self, short_opt, long_opt, dest, help_msg,
action = 'store_true', **kwargs)
#===============================================================================
class StringOption(BaseOption):
#===============================================================================
"""
CLI string value option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
BaseOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
#===============================================================================
class IntegerOption(BaseOption):
#===============================================================================
"""
Integer CLI option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
BaseOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
try:
converted = int(value.strip())
except ValueError:
utility.abort('Bad "%s" integer value: %s' % (self.get_dest().upper(), value))
return converted
#===============================================================================
class StringListOption(StringOption):
#===============================================================================
"""
CLI comma-separated string list option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
return [v.strip() for v in value.split(',')]
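# For example, a value of "host1, host2,host3" passed to the option above is
# post-processed into ['host1', 'host2', 'host3'].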
#===============================================================================
class IntegerListOption(StringOption):
#===============================================================================
"""
CLI comma-separated integer list option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
bad = []
converted = []
for v in value.split(','):
try:
converted.append(int(v.strip()))
except ValueError:
bad.append(v.strip())
if bad:
utility.abort('Bad "%s" integer list value(s):' % self.get_dest().upper(), bad)
return converted
#===============================================================================
class EnumOption(StringOption):
#===============================================================================
"""
Enumeration option for selecting from a list of possible symbols.
"""
def __init__(self, short_opt, long_opt, dest, help_pfx, *values, **kwargs):
if not values or len(values) <= 1:
utility.abort('EnumOption "%s" must specify multiple valid values.' % dest)
self.values = values
help_msg = '%s [%s]' % (help_pfx, '|'.join(self.values))
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
if value not in self.values:
utility.abort('EnumOption "%s" value "%s" is not one of the following:'
% (self.get_dest(), value), self.values)
return value
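# Hypothetical example: EnumOption('-f', '--format', 'format', 'output format',
# 'csv', 'json') builds the help text 'output format [csv|json]' and aborts on
# any value other than 'csv' or 'json'.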
#===============================================================================
class HostOption(StringOption):
#===============================================================================
"""
Comma-separated HOST[:PORT] list option.
"""
def __init__(self, short_opt, long_opt, dest, name, **kwargs):
self.min_count = utility.kwargs_get_integer(kwargs, 'min_count', default = 1)
self.max_count = utility.kwargs_get_integer(kwargs, 'max_count', default = 1)
self.default_port = utility.kwargs_get_integer(kwargs, 'default_port', default = 21212)
if self.max_count == 1:
help_msg = 'the
|
Makeystreet/makeystreet | woot/apps/catalog/migrations/0036_auto__add_likecfiitem__add_cfistoreitem.py | Python | apache-2.0 | 27,149 | 0.007219 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'LikeCfiItem'
db.create_table(u'catalog_likecfiitem', (
(u'abstractlike_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['catalog.AbstractLike'], unique=True, primary_key=True)),
('cfi_item', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.CfiStoreItem'])),
))
db.send_create_signal('catalog', ['LikeCfiItem'])
# Adding model 'CfiStoreItem'
db.create_table(u'catalog_cfistoreitem', (
(u'basemodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['catalog.BaseModel'], unique=True, primary_key=True)),
('item', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Product'])),
))
db.send_create_signal('catalog', ['CfiStoreItem'])
def backwards(self, orm):
# Deleting model 'LikeCfiItem'
db.delete_table(u'catalog_likecfiitem')
# Deleting model 'CfiStoreItem'
db.delete_table(u'catalog_cfistoreitem')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'catalog.abstractlike': {
'Meta': {'object_name': 'AbstractLike', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'liked_time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.abstracttop': {
'Meta': {'object_name': 'AbstractTop', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'recorded_time': ('django.db.models.fields.DateTimeField', [], {})
},
'catalog.basemodel': {
'Meta': {'object_name': 'BaseModel'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'liker': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'through': "orm['catalog.LikeCfiItem']", 'symmetrical': 'False'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null':
|
'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCol
|
lect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'})
},
'catalog.likecfiitem': {
'Meta': {'object_name': 'LikeCfiItem', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'cfi_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"})
},
'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"})
},
'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Prod
|
kazuyaujihara/osra_vs | GraphicsMagick/scripts/format_c_api_doc.py | Python | gpl-2.0 | 21,967 | 0.005281 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:sw=4:expandtab:
# Copyright 2008 Mark Mitchell
# License: see __license__ below.
__doc__ = """
Reads a GraphicsMagick source file and parses the specially formatted
comment blocks which precede each function and writes the information
obtained from the comment block into a reStructuredText file.
Usage:
format_c_api_docs.py [options] SRCFILE OUTFILE
SRCFILE is the path to a Graphicsmagick API .c file.
For example: ./magick/animate.c
OUTFILE is the path where the reStructuredText file is written.
Options:
-h --help -- Print this help message
-w --whatis-file -- The path to a file containing "whatis" information for
the source files. The format of this file is:
* one line per source file
* source filename (without directory paths) and whatis text
are separated by whitespace
* blank lines are ignored
* lines starting with '#' are ignored
-i --include-rst -- Comma-separated list of file paths to be objects of reST
..include:: directives inserted in OUTFILE.
The default is the single file 'api_hyperlinks.rst'
Example of whatis file format:
animate.c Interactively animate an image sequence
annotate.c Annotate an image with text
"""
__copyright__ = "2008, Mark Mitchell"
__license__ = """
Copyright 2008, Mark Mitchell
Permission is hereby granted, free of charge, to any person obtaining
a copy of this Software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
The Software is provided "as is", without warranty of any kind,
express or implied, including but not limited to the warranties of
merchantability, fitness for a particular purpose and noninfringement.
In no event shall the authors or copyright holders be liable for any
claim, damages or other liability, whether in an action of contract,
tort or otherwise, arising from, out of or in connection with Software
or the use or other dealings in the Software.
"""
import sys
import getopt
import os, os.path
import re
import textwrap
# Key words to replace with HTML links
keywords = {
'AffineMatrix' : '`AffineMatrix`_',
'BlobInfo' : '`BlobInfo`_',
'Cache' : '`Cache`_',
'ChannelType' : '`ChannelType`_',
'ChromaticityInfo' : '`ChromaticityInfo`_',
'ClassType' : '`ClassType`_',
'ClipPathUnits' : '`ClipPathUnits`_',
'ColorPacket' : '`ColorPacket`_',
'ColorspaceType' : '`ColorspaceType`_',
'ComplianceType' : '`ComplianceType`_',
'CompositeOperator' : '`CompositeOperator`_',
'CompressionType' : '`CompressionType`_',
'DecorationType' : '`DecorationType`_',
'DrawContext' : '`DrawContext`_',
'DrawInfo' : '`DrawInfo`_',
'ErrorHandler' : '`ErrorHandler`_',
'ExceptionInfo' : '`ExceptionInfo`_',
'ExceptionType' : '`ExceptionType`_',
'FillRule' : '`FillRule`_',
'FilterTypes' : '`FilterTypes`_',
'FrameInfo' : '`FrameInfo`_',
'GravityType' : '`GravityType`_',
'Image' : '`Image`_',
'ImageInfo' : '`ImageInfo`_',
'ImageType' : '`ImageType`_',
'InterlaceType' : '`InterlaceType`_',
'LayerType' : '`LayerType`_',
'MagickInfo' : '`MagickInfo`_',
'MonitorHandler' : '`MonitorHandler`_',
'MontageInfo' : '`MontageInfo`_',
'NoiseType' : '`NoiseType`_',
'PaintMethod' : '`PaintMethod`_',
'PixelPacket' : '`PixelPacket`_',
'PointInfo' : '`PointInfo`_',
'ProfileInfo' : '`ProfileInfo`_',
'QuantizeInfo' : '`QuantizeInfo`_',
'Quantum' : '`Quantum`_',
'QuantumType' : '`QuantumType`_',
'RectangleInfo' : '`RectangleInfo`_',
'RegistryType' : '`RegistryType`_',
'RenderingIntent' : '`RenderingIntent`_',
'ResolutionType' : '`ResolutionType`_',
'ResourceType' : '`ResourceType`_',
'SegmentInfo' : '`SegmentInfo`_',
'SignatureInfo' : '`SignatureInfo`_',
'StorageType' : '`StorageType`_',
'StreamHandler' : '`StreamHandler`_',
'StretchType' : '`StretchType`_',
'StyleType' : '`StyleType`_',
'TypeMetric' : '`TypeMetric`_',
'ViewInfo' : '`ViewInfo`_',
'VirtualPixelMethod' : '`VirtualPixelMethod`_',
'MagickXResourceInfo' : '`MagickXResourceInfo`_',
}
state_init = 0
state_found_fcncomment = 1
state_found_fcntitle = 2
state_found_fcndoc = 3
state_more_prototype = 4
state_found_prototype = 5
state_found_private = 6
state_parmdescr = 7
def warn(msg):
print >> sys.stderr, msg
def debugtrace(msg):
print >> sys.stdout, msg
def nodebugtrace(msg):
pass
dtrace = nodebugtrace
#dtrace = debugtrace
# extract and save function title. example:
# + X M a g i c k C o m m a n d
|
%
# % X A n i m a t e B a c k g r o u n d I m a g e %
# Lines starting with '+' are pri
|
vate APIs which should not appear in
# in the output.
re_func_title = re.compile(r'^[+|%]\s+((\w )+)\s*%')
def proto_pretty(line):
"""fixes up inconsistent spaces in C function prototypes"""
line = re.sub(r',', ' , ', line)
line = re.sub(r'\(', ' ( ', line)
line = re.sub(r'\)', ' ) ', line)
line = re.sub(r'\*', ' * ', line)
line = re.sub(r'\s+', ' ', line)
line = re.sub(r'\(\s+\*', '(*', line)
line = re.sub(r' ,', ',', line)
line = re.sub(r' \(', '(', line)
line = re.sub(r'\) ', ')', line)
line = re.sub(r' \* ', ' *', line)
line = re.sub('^\s*', '', line)
return line
class Paragraph:
"Paragraphs consist of one or more lines of text."
def __init__(self):
self.lines = []
def __str__(self):
#return '\n'.join(self.lines)
return '\n'.join([line.strip() for line in self.lines])
class Prototype:
def __init__(self):
self.lines = []
def __str__(self):
proto = ' '.join(self.lines)
proto = proto_pretty(proto)
# escape all the '*' chars
proto = re.sub(r'\*', '\\*', proto)
# escape all the '_' chars
proto = re.sub(r'_', '\\_', proto)
# now replace keywords with hyperlinks
for k,v in keywords.iteritems():
proto = re.sub(r'^%s ' % k, '%s ' % v, proto)
proto = re.sub(r' %s ' % k, ' %s ' % v, proto)
# make some attempt to wrap the text nicely
openparen_index = proto.find('(')
if openparen_index > 0:
fcn = proto[:openparen_index+1]
indent_len = len(fcn) + 3
toomuch = (2 * fcn.count('\\')) + (3 * fcn.count('`_'))
if toomuch > 0: # account for the space following the opening paren
toomuch -= 1
indent_len -= toomuch
params = proto[openparen_index+1:].split(',')
params = [p.strip() for p in params]
max_param_len = 0
for x in params:
if len(x) > max_param_len:
max_param_len = len(x)
wrap_width = max(96, max_param_len + indent_len)
proto_lines = []
line = fcn + ' '
while params:
x = params.pop(0)
if len(line) + len(x) > wrap_width:
proto_lines.append(line)
line = ' ' * indent_len
line += x
|
icomms/rapidsms | lib/rapidsms/message.py | Python | lgpl-3.0 | 4,140 | 0.008454 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import copy
from rapidsms.connection import Connection
from rapidsms.person import Person
from datetime import datetime
from rapidsms import utils
class StatusCodes:
'''Enum for representing status types of a message or response.'''
NONE = "None" # we don't know. the default
OK = "Ok" # is great success!
APP_ERROR = "Application Error" # application specific errors - e.g. bad data
GENERIC_ERROR = "Generic error" # generic errors - e.g. a catch all responder
class Message(object):
def __init__(self, connection=None, text=None, person=None, date=None):
if connection == None and person == None:
raise Exception("Message __init__() must take one of: connection, person")
self._connection = connection
self.text = text
self.date = ( datetime.utcnow() if date is None
else utils.to_naive_utc_dt(date) )
self.person = person
self.responses = []
self.status = StatusCodes.NONE
# a message is considered "unprocessed" until
# rapidsms has dispatched it to all apps, and
# flushed the responses out
self.processed = False
def __unicode__(self):
return self.text
@property
def connection(self):
# connection is read-only, since it's an
# immutable property of this object
if self._connection is not None:
return self._connection
else:
return self.person.connection
@property
def peer (self):
# return the identity (e.g. phone number) of
# the other end of this message's connection
return self.connection.identity
def send(self):
"""Send this message via self.connection.backend, returning
True if the message was sent successfully."""
return self.connection.backend.router.outgoing(self)
def flush_responses (self):
"""Sends all responses added to this message (via the
Message.respond method) in the order which they were
added, and clears self.responses"""
# keep on iterating until all of
# the messages have been sent
while self.responses:
self.responses.pop(0).send()
def error(self, text, level):
"""Apps send error messages here rather than through respond
so users only receive one - the one with the highest level of specificity"""
#TODO implement this
pass
def respond(self, text, status = StatusCodes.NONE):
"""Send the given text back to the original caller of this
message on the same route that it came in on"""
if self.connection:
response = self.get_response(text, status)
self.responses.append(response)
return True
else:
return False
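# get_response (below) builds the reply as a shallow copy of this message, so
# the reply keeps the same connection and goes back out over the route the
# original came in on; only the text and status are replaced.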
def get_response(self, text, status):
response = copy.copy(self)
response.text = text
response.status = status
return response
def forward (self, identity, text=None):
if self.connection:
|
target = self.connection.fork(identity)
if text is None: text = self.text
message = type(self)(connection=target, text=text)
self.responses.append(message)
return True
else:
return False
class EmailMessage(Message):
"""Email version of a message object, with some extra stuff that ca
|
n
be consumed by email backends/apps."""
def __init__(self, connection=None, text=None, person=None, date=None,
subject=None, mime_type="text/plain"):
super(EmailMessage, self).__init__(connection=connection, text=text,
person=person, date=date)
self.subject = subject
self.mime_type = mime_type
def get_response(self, text, status):
response = Message.get_response(self, text, status)
response.subject = "re: %s" % self.subject
return response
|
aspuru-guzik-group/mission_control | mc/houston/utils.py | Python | apache-2.0 | 6,799 | 0.000147 |
from pathlib import Path
from jobman.jobman import JobMan
from mc.clients.job_record_client import JobRecordClient
from mc.clients.flow_record_client import FlowRecordClient
from mc.flows.flow_engine import FlowEngine
from mc.db.db import Db
from mc.runners.flow_runner import FlowRunner
from mc.runners.jobman_job_runner.job_runner import JobRunner
class HoustonUtils(ob
|
ject):
JOBS_SUBDIRS = ['pending', 'queued', 'executed', 'archive']
def __init__(self, houston=None):
self.houston = houston
@property
def cfg(self): return self.houston.cfg
@property
def db(self):
if not hasattr(self, '_db'):
self._db = self.generate_db(db_ur
|
i=self.cfg['MC_DB_URI'])
return self._db
def generate_db(self, db_uri=None, schema=None):
return Db(db_uri=db_uri, schema=schema)
@db.setter
def db(self, value): self._subcommands = value
def ensure_queues(self):
self.ensure_queue(queue_cfg=self.cfg['FLOW_QUEUE'])
self.ensure_queue(queue_cfg=self.cfg['JOB_QUEUE'])
def ensure_queue(self, queue_cfg=None):
try:
self.db.get_item_by_key(item_type='queue', key=queue_cfg['key'])
except self.db.ItemNotFoundError:
self.db.create_item(
item_type='queue',
item_kwargs={
'key': queue_cfg['key'],
**queue_cfg.get('queue_kwargs', {})
}
)
@property
def flow_runner(self):
if not hasattr(self, '_flow_runner'):
self._flow_runner = FlowRunner(
flow_engine=self.flow_engine,
flow_record_client=self.flow_record_client,
task_ctx={
'mc.flow_record_client': self.flow_record_client,
'mc.job_record_client': self.job_record_client,
}
)
return self._flow_runner
@flow_runner.setter
def flow_runner(self, new_value): self._flow_runner = new_value
@property
def flow_engine(self):
if not hasattr(self, '_flow_engine'):
self._flow_engine = FlowEngine()
return self._flow_engine
@flow_engine.setter
def flow_engine(self, new_value): self._flow_engine = new_value
@property
def flow_record_client(self):
if not hasattr(self, '_flow_record_client'):
self._flow_record_client = self._get_mc_client(record_type='flow')
return self._flow_record_client
@flow_record_client.setter
def flow_record_client(self, new_value):
self._flow_record_client = new_value
@property
def job_record_client(self):
if not hasattr(self, '_job_record_client'):
self._job_record_client = self._get_mc_client(record_type='job')
return self._job_record_client
def _get_mc_client(self, record_type=None):
client_cls = None
if record_type == 'flow':
client_cls = FlowRecordClient
elif record_type == 'job':
client_cls = JobRecordClient
assert client_cls is not None
queue_cfg = self.cfg[record_type.upper() + '_QUEUE']
return client_cls(mc_db=self.db,
use_locks=self.cfg.get('USE_LOCKS', True),
queue_key=queue_cfg['key'])
@job_record_client.setter
def job_record_client(self, new_value): self._job_record_client = new_value
@property
def job_runner(self, mc_clients=None):
if not hasattr(self, '_job_runner'):
self._job_runner = JobRunner(
artifact_handler=self.cfg['ARTIFACT_HANDLER'],
job_record_client=self.job_record_client,
jobman=self.jobman,
jobdirs_dir=self.cfg.get('JOBDIRS_DIR', None),
build_jobdir_fn=self.build_jobdir,
)
return self._job_runner
@job_runner.setter
def job_runner(self, new_value): self._job_runner = new_value
@property
def jobman(self):
if not hasattr(self, '_jobman'):
self._jobman = JobMan.from_cfg(cfg=self.cfg['JOBMAN_CFG'])
return self._jobman
@jobman.setter
def jobman(self, new_value): self._jobman = new_value
def build_jobdir(self, *args, **kwargs):
try:
build_jobdir_fn = self.cfg['BUILD_JOBDIR_FN']
except:
def build_jobdir_fn(*args, **kwargs):
return self.houston.run_command('build_job_dir')
return build_jobdir_fn(*args, **kwargs)
def has_unfinished_mc_records(self):
unfinished_records = self.get_unfinished_mc_records()
for record_type, records in unfinished_records.items():
if len(records) > 0:
return True
return False
def get_unfinished_mc_records(self):
return {
record_type: self._get_unfinished_mc_items(item_type=record_type)
for record_type in ['flow', 'job']
}
def _get_unfinished_mc_items(self, item_type=None):
return self.db.query_items(item_type=item_type, query={
'filters': [
{'field': 'status', 'op': '! IN',
'arg': ['FAILED', 'COMPLETED']}
]
})
def ensure_job_dirs(self):
for dir in self.job_dirs.values():
Path(dir).mkdir(parents=True, exist_ok=True)
@property
def job_dirs(self):
if not hasattr(self, '_job_dirs'):
self._job_dirs = {'root': self.cfg.get('JOB_DIRS_ROOT', None)}
for jobs_subdir in self.JOBS_SUBDIRS:
self._job_dirs[jobs_subdir] = str(Path(self._job_dirs['root'],
jobs_subdir))
return self._job_dirs
@job_dirs.setter
def job_dirs(self, value): self._job_dirs = value
@property
def archiver(self):
if not hasattr(self, '_archiver'):
self._archiver = self._generate_archiver()
return self._archiver
def _generate_archiver(self):
from mc.utils.archivers.dir_archiver import DirArchiver
return DirArchiver(root_dir=self.job_dirs['archive'])
@property
def entity_selector(self):
if not hasattr(self, '_entity_selector'):
from mc.utils.selectors.basic_entity_selector import (
BasicEntitySelector)
self._entity_selector = BasicEntitySelector(db=self.db)
return self._entity_selector
@property
def request_selector(self):
if not hasattr(self, '_request_selector'):
from mc.utils.selectors.basic_request_selector import (
BasicRequestSelector)
self._request_selector = BasicRequestSelector(db=self.db)
return self._request_selector
|
cvandeplas/plaso | plaso/parsers/winprefetch_test.py | Python | apache-2.0 | 17,693 | 0.000396 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Windows prefetch parser."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import winprefetch as winprefetch_formatter
from plaso.lib import eventdata
from plaso.lib import timelib_test
from plaso.parsers import test_lib
from plaso.parsers import winprefetch
class WinPrefetchParserTest(test_lib.ParserTestCase):
"""T
|
ests for the Windows prefetch parser."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._parser = winprefetch.WinPrefetchParser()
def testParse17(self):
"""Tests the Parse function on a version 17 Prefetch file."""
test_file = self._GetTestFilePath(['CMD.EXE-087B4001
|
.pf'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(len(event_objects), 2)
# The prefetch last run event.
event_object = event_objects[1]
self.assertEquals(event_object.version, 17)
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2013-03-10 10:11:49.281250')
self.assertEquals(event_object.timestamp, expected_timestamp)
self.assertEquals(
event_object.timestamp_desc, eventdata.EventTimestamp.LAST_RUNTIME)
self.assertEquals(event_object.executable, u'CMD.EXE')
self.assertEquals(event_object.prefetch_hash, 0x087b4001)
self.assertEquals(event_object.volume_serial_numbers[0], 0x24cb074b)
expected_mapped_files = [
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\NTDLL.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\KERNEL32.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\UNICODE.NLS',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\LOCALE.NLS',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\SORTTBLS.NLS',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\MSVCRT.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\CMD.EXE',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\USER32.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\GDI32.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\SHIMENG.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\APPPATCH\\SYSMAIN.SDB',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\APPPATCH\\ACGENRAL.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\ADVAPI32.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\RPCRT4.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\WINMM.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\OLE32.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\OLEAUT32.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\MSACM32.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\VERSION.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\SHELL32.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\SHLWAPI.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\USERENV.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\UXTHEME.DLL',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\CTYPE.NLS',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\SORTKEY.NLS',
(u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\WINSXS\\X86_MICROSOFT.WINDOWS.'
u'COMMON-CONTROLS_6595B64144CCF1DF_6.0.2600.2180_X-WW_A84F1FF9\\'
u'COMCTL32.DLL'),
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\WINDOWSSHELL.MANIFEST',
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\COMCTL32.DLL',
(u'\\DEVICE\\HARDDISKVOLUME1\\D50FF1E628137B1A251B47AB9466\\UPDATE\\'
u'UPDATE.EXE.MANIFEST'),
u'\\DEVICE\\HARDDISKVOLUME1\\$MFT',
(u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\IE7\\SPUNINST\\SPUNINST.EXE.'
u'MANIFEST'),
(u'\\DEVICE\\HARDDISKVOLUME1\\D50FF1E628137B1A251B47AB9466\\UPDATE\\'
u'IERESETICONS.EXE'),
u'\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\IE7\\SPUNINST\\IERESETICONS.EXE']
self.assertEquals(event_object.mapped_files, expected_mapped_files)
# The volume creation event.
event_object = event_objects[0]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2013-03-10 10:19:46.234375')
self.assertEquals(event_object.timestamp, expected_timestamp)
self.assertEquals(
event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)
expected_msg = (
u'\\DEVICE\\HARDDISKVOLUME1 '
u'Serial number: 0x24CB074B '
u'Origin: CMD.EXE-087B4001.pf')
expected_msg_short = (
u'\\DEVICE\\HARDDISKVOLUME1 '
u'Origin: CMD.EXE-087B4001.pf')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
def testParse23(self):
"""Tests the Parse function on a version 23 Prefetch file."""
test_file = self._GetTestFilePath(['PING.EXE-B29F6629.pf'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(len(event_objects), 2)
# The prefetch last run event.
event_object = event_objects[1]
self.assertEquals(event_object.version, 23)
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2012-04-06 19:00:55.932955')
self.assertEquals(event_object.timestamp, expected_timestamp)
self.assertEquals(
event_object.timestamp_desc, eventdata.EventTimestamp.LAST_RUNTIME)
self.assertEquals(event_object.executable, u'PING.EXE')
self.assertEquals(event_object.prefetch_hash, 0xb29f6629)
self.assertEquals(
event_object.path, u'\\WINDOWS\\SYSTEM32\\PING.EXE')
self.assertEquals(event_object.run_count, 14)
self.assertEquals(
event_object.volume_device_paths[0], u'\\DEVICE\\HARDDISKVOLUME1')
self.assertEquals(event_object.volume_serial_numbers[0], 0xac036525)
expected_msg = (
u'Prefetch [PING.EXE] was executed - run count 14 path: '
u'\\WINDOWS\\SYSTEM32\\PING.EXE '
u'hash: 0xB29F6629 '
u'volume: 1 [serial number: 0xAC036525, '
u'device path: \\DEVICE\\HARDDISKVOLUME1]')
expected_msg_short = u'PING.EXE was run 14 time(s)'
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
# The volume creation event.
event_object = event_objects[0]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2010-11-10 17:37:26.484375')
self.assertEquals(event_object.timestamp, expected_timestamp)
self.assertEquals(
event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)
def testParse23MultiVolume(self):
"""Tests the Parse function on a mulit volume version 23 Prefetch file."""
test_file = self._GetTestFilePath(['WUAUCLT.EXE-830BCC14.pf'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(len(event_objects), 6)
# The prefetch last run event.
event_object = event_objects[5]
self.assertEquals(event_object.version, 23)
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2012-03-15 21:17:39.807996')
self.assertEquals(event_object.timestamp, expected_timestamp)
self.assertEquals(
event_object.timestamp_desc, eventdata.EventTimestamp.LAST_RUNTIME)
self.assertEquals(event_object.executable, u'WUAUCLT.EXE')
self.assertEquals(event_object.prefetch_
|
edrex/minichess | minichess/eval.py | Python | gpl-2.0 | 605 | 0.028099 |
from data import *
# white
pvals = {
PAWN: 100,\
BISH
|
OP: 300,\
KNIGHT: 300,\
ROOK: 500,\
QUEEN: 900,\
-PAWN: -100,\
-BISHOP: -300,\
-KNIGHT: -300,\
-ROOK: -500,\
-QUEEN: -900,\
KING: 10000,\
-KING: -10000,\
EMPTY: 0,\
}
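# Simple material evaluation (below): sum the piece values over every board
# coordinate and flip the sign with state.som (side to move), so a positive
# score always favours the player about to move. (Reading 'som' as side to
# move is an assumption, not stated in the original file.)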
def value(state):
return state.som * sum(pvals[state.board[cord]] for cord in fcords)
def game_lost(state):
try:
state.
|
board.index(KING*state.som)
return False
except ValueError:
return True
def game_drawn(state):
if state.turn >= 80:
return True
else:
return False
|
intel-analytics/analytics-zoo | pyzoo/zoo/pipeline/api/keras/models.py | Python | apache-2.0 | 3,962 | 0.000757 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from zoo.pipeline.api.utils import remove_batch
from .engine.topology import KerasNet
from bigdl.util.common import to_list
from zoo.common.utils import callZooFunc
if sys.version >= '3':
long = int
unicode = str
class Sequential(KerasNet):
"""
Container for a sequential model.
# Arguments
name: String to specify the name of the sequential model. Default is None.
>>> sequential = Sequential(name="seq1")
creating: createZooKerasSequential
"""
def __init__(self, jvalue=None, **kwargs):
super(Sequential, self).__init__(jvalue, **kwargs)
# TODO: expose is_built from scala side
def is_built(self):
try:
self.get_output_shape()
return True
except:
return False
def add(self, model):
from zoo.pipeline.api.autograd import Lambda
if (isinstance(model, Lambda)):
if not self.is_built():
if not model.input_shape:
raise Exception("You should specify inputShape for the first layer")
input_shapes = model.
|
input_shape
else:
input_shapes = self.get_output_shape()
model = model.create(remove_batch(input_shapes))
self
|
.value.add(model.value)
return self
@staticmethod
def from_jvalue(jvalue, bigdl_type="float"):
"""
Create a Python Model base on the given java value
:param jvalue: Java object create by Py4j
:return: A Python Model
"""
model = Sequential(jvalue=jvalue)
model.value = jvalue
return model
class Model(KerasNet):
"""
Container for a graph model.
# Arguments
input: An input node or a list of input nodes.
output: An output node or a list of output nodes.
name: String to specify the name of the graph model. Default is None.
"""
def __init__(self, input, output, jvalue=None, **kwargs):
super(Model, self).__init__(jvalue,
to_list(input),
to_list(output),
**kwargs)
def save_graph_topology(self, log_path, backward=False):
"""
Save the current model graph to a folder, which can be displayed in TensorBoard
by running the command:
tensorboard --logdir log_path
# Arguments
log_path: The path to save the model graph.
backward: Whether to also save the backward (gradient) graph. Default is False.
"""
callZooFunc(self.bigdl_type, "zooSaveGraphTopology",
self.value,
log_path,
backward)
def new_graph(self, outputs):
value = callZooFunc(self.bigdl_type, "newGraph", self.value, outputs)
return self.from_jvalue(value)
def freeze_up_to(self, names):
callZooFunc(self.bigdl_type, "freezeUpTo", self.value, names)
def unfreeze(self, names):
callZooFunc(self.bigdl_type, "unFreeze", self.value, names)
@staticmethod
def from_jvalue(jvalue, bigdl_type="float"):
"""
Create a Python Model base on the given java value
:param jvalue: Java object create by Py4j
:return: A Python Model
"""
model = Model([], [], jvalue=jvalue)
model.value = jvalue
return model
|
brain-research/acai | lib/layers.py | Python | apache-2.0 | 3,324 | 0 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# y
|
ou may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom neural network
|
layers.
Low-level primitives such as custom convolution with custom initialization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
def downscale2d(x, n):
"""Box downscaling.
Args:
x: 4D tensor in NHWC format.
n: integer scale.
Returns:
4D tensor down scaled by a factor n.
"""
if n <= 1:
return x
if n % 2 == 0:
x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
return downscale2d(x, n // 2)
return tf.nn.avg_pool(x, [1, n, n, 1], [1, n, n, 1], 'VALID')
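# For instance, downscale2d(x, 4) applies two successive 2x2 average pools,
# which is equivalent to averaging over 4x4 boxes.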
def upscale2d(x, n):
"""Box upscaling (also called nearest neighbors).
Args:
x: 4D tensor in NHWC format.
n: integer scale (must be a power of 2).
Returns:
4D tensor up scaled by a factor n.
"""
if n == 1:
return x
return tf.batch_to_space(tf.tile(x, [n**2, 1, 1, 1]), [[0, 0], [0, 0]], n)
class HeModifiedNormalInitializer(tf.initializers.random_normal):
def __init__(self, slope):
self.slope = slope
def get_config(self):
return dict(slope=self.slope)
def __call__(self, shape, dtype=None, partition_info=None):
del partition_info
dtype = dtype or tf.float32
std = tf.rsqrt((1. + self.slope**2) *
tf.cast(tf.reduce_prod(shape[:-1]), tf.float32))
return tf.random_normal(shape, stddev=std, dtype=dtype)
def encoder(x, scales, depth, latent, scope):
activation = tf.nn.leaky_relu
conv_op = functools.partial(
tf.layers.conv2d, padding='same',
kernel_initializer=HeModifiedNormalInitializer(0.2))
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
y = conv_op(x, depth, 1)
for scale in range(scales):
y = conv_op(y, depth << scale, 3, activation=activation)
y = conv_op(y, depth << scale, 3, activation=activation)
y = downscale2d(y, 2)
y = conv_op(y, depth << scales, 3, activation=activation)
y = conv_op(y, latent, 3)
return y
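# Shape sketch (input size assumed for illustration): with scales=3, a
# 32x32xC image passes through three downscale-by-2 stages in the encoder
# above, giving a 4x4 output map with 'latent' channels.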
def decoder(x, scales, depth, colors, scope):
activation = tf.nn.leaky_relu
conv_op = functools.partial(
tf.layers.conv2d, padding='same',
kernel_initializer=HeModifiedNormalInitializer(0.2))
y = x
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
for scale in range(scales - 1, -1, -1):
y = conv_op(y, depth << scale, 3, activation=activation)
y = conv_op(y, depth << scale, 3, activation=activation)
y = upscale2d(y, 2)
y = conv_op(y, depth, 3, activation=activation)
y = conv_op(y, colors, 3)
return y
|
yahya-idriss/Python-Personal-assistant | start.py | Python | mit | 4,231 | 0.038762 |
#! /usr/bin/env python
import os
from function import function
import speech_recognition as sr
from server import EmailServer
from pygame import mixer
from subprocess import call
from send import SendEmail
from detect import face_rec
from face import face_detect
from trainer import face_train
mixer.init()
r = sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
while True:
def get_speech():
with sr.Microphone() as source:
audio = r.listen(source)
try:
recon=r.recognize_google(audio)
print recon
return recon
except:
recon=r.recognize_sphinx(audio)
call(["espeak","-s","160","i can't understand what you said, please say again"])
return get_speech()
def import_con():
call(["espeak","-s","160","Do you want to import your contact?"])
speech
|
= get_speech()
if "yes" in speech.lower():
conn.import_contact
|
()
rec = face_rec()
if rec.rec() != "0":
computername = rec.rec()
else:
call(["espeak","-s","160","This is Your First Time using me"])
call(["espeak","-s","160","Do you want to create a new account?"])
speech = get_speech()
if "yes" in speech.lower() or "yeah" in speech.lower():
det = face_detect()
det.new()
server_ad = function()
server_ad.add_user()
train = face_train()
train.train()
rec = face_rec()
computername = rec.rec()
else:
break
call(["espeak","-s","160","Hello "+computername+" can i help you?"])
speech = get_speech()
if "email" in speech.lower():
try:
server = function()
if server.get_last_id() == "0":
id=1
else:
id= server.get_last_id()
email,passwd = server.get_login_passwd(id)
email_server = email.split("@")[1].split(".")[0]
adress,port = server.GetServer(email_server,'imap')
print adress
print port
call(["espeak","-s","160","ok i will check it for you"])
conn = EmailServer()
conn.login_server(email.rstrip(),passwd,adress,port)
conn.inbox()
import_con()
listid = conn.returnid()
nb = server.get_email_nb(id)
up_nb = conn.emailnumber()
server.update_email_nb(id,up_nb)
conn.access_server(listid,nb)
except sr.UnknownValueError:
call(["espeak","there is errer"])
elif "send" in speech.lower() or "reply" in speech.lower() or "response" in speech.lower():
try:
call(["espeak","-s","160","you want to send email?"])
speech = get_speech()
if "yes" in speech.lower() or "yeah" in speech.lower():
call(["espeak","-s","160","ok i will send email for you"])
server_ad = function()
adress,port = server_ad.GetServer('gmail','smtp')
name,email,passwd = server_ad.get_login_passwd(2)
call(["espeak","-s","160","what's the subject of this email?"])
sub = get_speech()
call(["espeak","-s","160","what you want to say to him?"])
body = get_speech()
call(["espeak","-s","160","to who you want to send it?"])
to_txt = get_speech()
to = server_ad.get_to(2,to_txt)
send = SendEmail()
send.Send(email.rstrip(),passwd,sub,body,to,adress,port)
except sr.UnknownValueError:
call(["espeak","-s","160","there is errer"])
elif "add" in speech.lower() and "server" in speech.lower():
try:
call(["espeak","-s","160","are you sure you want to add new server?"])
speech = get_speech()
if "yes" in speech.lower():
server_ad = function()
server_ad.AddServer()
except sr.UnknownValueError:
call(["espeak","-s","160","there is errer"])
elif "no" in speech.lower() or "quit" in speech.lower() or "close" in speech.lower():
call(["espeak","-s","160","ok Good By."])
call(["espeak","-s","160","if you need me please run me any time"])
break
|
andrewnsk/dorokhin.moscow | imageboard/admin.py | Python | mit | 990 | 0.004082 |
from django.contrib import admin
from image_cropping i
|
mport ImageCroppingMixin
from imageboard.models import Image
class ImageAdmin(ImageCroppingMixin, admin.ModelAdmin):
list_display = ['__str__', 'tag_list', 'owner', 'created', 'updated', 'visible', 'get_image_url']
list_filter = ['owner', 'visible', 'created', 'updated']
list_editable = ['visible']
def save_model(self, request, obj, form, change):
obj.owner = request.user.profile
obj.save()
def get_queryset(self, request):
return super(Imag
|
eAdmin, self).get_queryset(request).prefetch_related('tags')
def tag_list(self, obj):
return u", ".join(o.name for o in obj.tags.all())
def get_image_url(self, obj):
return '<a href="{0}"><img src="{0}" width="100px"></a>'.format(obj.img.url)
get_image_url.allow_tags = True
get_image_url.short_description = 'Превью'
tag_list.short_description = 'Теги'
admin.site.register(Image, ImageAdmin)
|
dpcrook/timetemp | install/Archive/logging_sparkfun.py | Python | mit | 5,870 | 0.005281 |
#!/usr/bin/python
# Google Spreadsheet BMP Sensor Data-logging Example
# Depends on the 'gspread' package being insta
|
lled. If you have pip installed
# execute:
# sudo pip install gspread
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons
|
to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import datetime
import json
import ssl
import Adafruit_BMP.BMP085 as BMP085
from Adafruit_LED_Backpack.SevenSegment import SevenSegment
from phant import Phant
LOGGING = True
COUNT = 0
# How long to wait (in seconds) between measurements.
FREQUENCY_SECONDS = 300
# How long to wait (in seconds) to display F or C.
ALTERNATE_TEMP_SCALE_SECONDS = 5
# Approximately how often measurements are made (in seconds)
MEASUREMENT_INTERVAL = 2 * ALTERNATE_TEMP_SCALE_SECONDS
# How seldom to upload the sensor data, if LOGGING is on
COUNT_INTERVAL = FREQUENCY_SECONDS / MEASUREMENT_INTERVAL
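# With the defaults above: 300 / (2 * 5) = 30, i.e. data is uploaded roughly
# once every 30 measurement loops (about every 5 minutes).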
# Create sensor instance with default I2C bus (On Raspberry Pi either 0 or
# 1 based on the revision, on Beaglebone Black default to 1).
bmp = BMP085.BMP085(mode=BMP085.BMP085_HIGHRES, address=0x77)
# Initialize a LED display
#segment = SevenSegment(address=0x70)
segment = SevenSegment(address=0x71)
print segment
# Read in Phant config
json_keys_file2 = 'data.crookster.org.json'
p2=Phant(jsonPath=json_keys_file2)
print 'Logging sensor measurements taken every {2} seconds to "{0}" every {1} seconds.'.format(p2.title, FREQUENCY_SECONDS, MEASUREMENT_INTERVAL)
print p2
print 'Press Ctrl-C to quit.'
while True:
error_tables = {}
try:
# Attempt to get sensor readings.
temp = bmp.read_temperature()
pressure = bmp.read_pressure()
altitude = bmp.read_altitude()
temp_in_F = (temp * 9.0 / 5.0) + 32.0
print "Temperature: %.2f C" % temp
print "Temperature: %.2f F" % temp_in_F
print "Pressure: %.2f hPa" % (pressure / 100.0)
print "Altitude: %.2f m" % altitude
print "Press CTRL+C to exit"
print ""
for display_tmp_in_F in [False, True]:
if display_tmp_in_F:
if round(temp_in_F * 10.0) < 1000.0:
segment.set_digit(0, int(round(temp_in_F) / 10)) # Tens
segment.set_digit(1, int(round(temp_in_F) % 10)) # Ones
segment.set_digit(2, int(int(round(temp_in_F * 10.0)) % 10)) # Tenth
segment.set_digit(3, 'F')
segment.set_colon(True)
else:
segment.set_digit(0, int(round(temp_in_F) / 100)) # Hundreds
segment.set_digit(1, int(round(temp_in_F - 100.0) / 10)) # Tens
segment.set_digit(2, int(round(temp_in_F) % 10)) # Ones
segment.set_digit(3, 'F')
segment.set_colon(False)
else:
# write degrees
segment.set_digit(0, int(round(temp) / 10)) # Tens
segment.set_digit(1, int(round(temp) % 10)) # Ones
segment.set_digit(2, int(int(round(temp * 10.0)) % 10)) # Tenth
segment.set_digit(3, 'C')
segment.set_colon(True)
segment.write_display()
time.sleep(ALTERNATE_TEMP_SCALE_SECONDS)
if LOGGING:
ambient_temp_C = temp
ambient_temp_F = temp_in_F
ambient_pressure = pressure / 100.0
fields = (ambient_pressure, ambient_temp_C, ambient_temp_F, altitude)
print fields
if (COUNT % COUNT_INTERVAL) == 0:
p2.log(altitude, ambient_pressure, ambient_temp_C, ambient_temp_F)
print 'Wrote a row to {0}'.format(p2.title)
print(p2.remaining_bytes, p2.cap)
else:
print 'at {0} seconds out of {1}'.format((COUNT * MEASUREMENT_INTERVAL) % FREQUENCY_SECONDS, FREQUENCY_SECONDS )
COUNT = COUNT + 1
except KeyboardInterrupt:
segment.clear()
segment.write_display()
if 'KI' not in error_tables:
error_tables['KI'] = 1
else:
error_tables['KI'] += 1
print error_tables
sys.exit(0)
except ssl.SSLError:
# we had a network issue, try again later
if 'ssl.SSLError' not in error_tables:
error_tables['ssl.SSLError'] = 1
else:
error_tables['ssl.SSLError'] += 1
segment.clear()
segment.write_display()
print error_tables
# except:
# print "unhandled exception, skipping"
# if 'Unhandled' not in error_tables:
# error_tables['Unhandled'] = 1
# else:
# error_tables['Unhandled'] += 1
# print error_tables
finally:
segment.clear()
segment.write_display()
print error_tables
|
ssmdevelopers/jklhoods | streamtweet.py | Python | gpl-2.0 | 1,889 | 0.022763 |
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import sqlite3 as sql3
import time
import json
from dateti
|
me import datetime
import os
#import sys
#consumer key, consumer secret, access token, access secre
|
t.
# Read the Twitter credentials from the environment; the original 'in' test
# would only yield a boolean, not the credential value itself.
ckey = os.environ.get('TWITTER_CKEY')
csecret = os.environ.get('TWITTER_CSECRET')
atoken = os.environ.get('TWITTER_TOKEN')
asecret = os.environ.get('TWITTER_SECRET')
new = 0
con = sql3.connect("tweets.db")
cur = con.cursor()
def newTweets():
if new == 0:
return
class Listener(StreamListener):
def on_data(self, data):
all_data = json.loads(data)
id = all_data["id_str"]
timestamp = time.strftime('%Y.%m.%d %H:%M', time.strptime(all_data["created_at"],'%a %b %d %H:%M:%S +0000 %Y'))
name = all_data["user"]["name"]
screen_name = all_data["user"]["screen_name"]
tagit = all_data["entities"]["hashtags"]
cur.execute("INSERT INTO twitter_tweets (tweetID, time, username, screen_name) VALUES (?, ?, ?, ?)",
(id, timestamp, name, screen_name))
for text in tagit:
cur.execute("INSERT INTO twitter_tags (tweetID, hashtag) VALUES (?, ?)",
(id, text["text"]))
con.commit()
print((id ,screen_name))
# print tagit
return True
def on_error(self, status):
if status == 420:
#returning False in on_data disconnects the stream
return False
print status
#auth = OAuthHandler(ckey, csecret)
#auth.set_access_token(atoken, asecret)
#twitterStream = Stream(auth, Listener())
#twitterStream.filter(track=["#car"])
def runStream():
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, Listener())
twitterStream.filter(track=["#car"])
if __name__ == '__main__':
runStream()
|
wdzhou/mantid
|
Testing/SystemTests/tests/analysis/WishCalibrate.py
|
Python
|
gpl-3.0
| 8,447 | 0.003315 |
from __future__ import (absolute_import, division, print_function)
import filecmp
import numpy as np
import os
import stresstesting
import tempfile
import mantid.simpleapi as mantid
import mantid.kernel as kernel
from tube_calib_fit_params import TubeCalibFitParams
from tube_calib import getCalibratedPixelPositions, getPoints
from tube_spec import TubeSpec
from ideal_tube import IdealTube
import tube
class WishCalibration(stresstesting.MantidStressTest):
"""
Runs the WISH calibration script and checks the result produced is sane
"""
def __init__(self):
super(WishCalibration, self).__init__()
self.calibration_table = None
self.correction_table = None
self.calibration_ref_name = "WishCalibrate_correction_table.csv"
self.correction_ref_name = "WishCalibrate_calibration_table.csv"
self.calibration_out_path = tempfile.NamedTemporaryFile().name
self.correction_out_path = tempfile.NamedTemporaryFile().name
def skipTests(self):
return True
def cleanup(self):
mantid.mtd.clear()
try:
os.remove(self.calibration_out_path)
os.remove(self.correction_out_path)
except OSError:
print("Failed to remove an temp output file in WishCalibration")
def requiredFiles(self):
return [self.calibration_ref_name, self.correction_ref_name]
def validate(self):
calibration_ref_path = mantid.FileFinder.getFullPath(self.calibration_ref_name)
correction_ref_path = mantid.FileFinder.getFullPath(self.correction_ref_name)
cal_result = filecmp.cmp(calibration_ref_path, self.calibration_out_path, False)
cor_result = filecmp.cmp(correction_ref_path, self.correction_out_path, False)
if not cal_result:
print("Calibration did not match in WishCalibrate")
if not cor_result:
print("Correction did not match in WishCalibrate")
return cal_result and cor_result
def runTest(self):
# This script calibrates WISH using known peak positions from
# neutron absorbing bands. The workspace with suffix "_calib"
        # contains calibrated data. The workspace with suffix "_corrected"
# contains calibrated data with known problematic tubes also corrected
ws = mantid.LoadNexusProcessed(Filename="WISH30541_integrated.nxs")
# This array defines the positions of peaks on the detector in
# meters from the center (0)
# For wish this is calculated as follows:
        # Height of all 7 bands = 0.26m => each band is separated by 0.260 / 6 = 0.0433m
        # The bands are on a cylinder diameter 0.923m. So we can work out the angle as
        # (0.0433 * n) / (0.923 / 2) where n is the number of bands above (or below) the
        # center band.
        # Putting this together with the distance to the detector tubes (2.2m) we get
        # the following: (0.0433 * n) / 0.4615 * 2.2 = expected peak positions (in m)
# From this we can show there should be 5 peaks (peaks 6 + 7 are too high/low)
# at: 0, 0.206, 0.413 respectively (this is symmetrical so +/-)
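        # A quick check of that arithmetic (a sketch only, not used by the test):
        #   band_spacing = 0.260 / 6                  # ~0.0433 m
        #   angle_n      = band_spacing * n / 0.4615  # radians, small-angle
        #   position_n   = angle_n * 2.2              # ~0.206 m for n=1, ~0.413 m for n=2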
peak_positions = np.array([-0.413, -0.206, 0, 0.206, 0.413])
funcForm = 5 * [1] # 5 gaussian peaks
fitPar = TubeCalibFitParams([59, 161, 258, 353, 448])
fitPar.setAutomatic(True)
instrument = ws.getInstrument()
spec = TubeSpec(ws)
spec.setTubeSpecByString(instrument.getFullName())
idealTube = IdealTube()
idealTube.setArray(peak_positions)
# First calibrate all of the detectors
calibrationTable, peaks = tube.calibrate(ws, spec, peak_positions, funcForm, margin=15,
outputPeak=True, fitPar=fitPar)
self.calibration_table = calibrationTable
def findBadPeakFits(peaksTable, threshold=10):
""" Find peaks whose fit values fall outside of a given tolerance
        of the mean peak centers across all tubes.
Tubes are defined as have a bad fit if the absolute difference
between the fitted peak centers for a specific tube and the
mean of the fitted peak centers for all tubes differ more than
the threshold parameter.
@param peakTable: the table containing fitted peak centers
@param threshold: the tolerance on the difference from the mean value
        @return A list of expected peak positions and a list of indices of tubes
to correct
"""
n = len(peaksTable)
num_peaks = peaksTable.columnCount() - 1
column_names = ['Peak%d' % i for i in range(1, num_peaks + 1)]
data = np.zeros((n, num_peaks))
for i, row in enumerate(peaksTable):
data_row = [row[name] for name in column_names]
data[i, :] = data_row
# data now has all the peaks positions for each tube
# the mean value is the expected value for the peak position for each tube
expected_peak_pos = np.mean(data, axis=0)
# calculate how far from the expected position each peak position is
distance_from_expected = np.abs(data - expected_peak_pos)
check = np.where(distance_from_expected > threshold)[0]
problematic_tubes = list(set(check))
print("Problematic tubes are: " + str(problematic_tubes))
return expected_peak_pos, problematic_tubes
def correctMisalignedTubes(ws, calibrationTable, peaksTable, spec, idealTube, fitPar, threshold=10):
""" Correct misaligned tubes due to poor fitting results
during the first round of calibration.
Misaligned tubes are first identified according to a tolerance
applied to the absolute difference between the fitted tube
positions and the mean across all tubes.
The FindPeaks algorithm is then used to find a better fit
with the ideal tube positions as starting parameters
for the peak centers.
From the refitted peaks the positions of the detectors in the
tube are recalculated.
@param ws: the workspace to get the tube geometry from
        @param calibrationTable: the calibration table output from running calibration
@param peaksTable: the table containing the fitted peak centers from calibration
@param spec: the tube spec for the instrument
@param idealTube: the ideal tube for the instrument
@param fitPar: the fitting parameters for calibration
        @param threshold: tolerance defining whether a peak is outside of the acceptable range
@return table of corrected detector positions
"""
table_name = calibrationTable.name() + 'Corrected'
corrections_table = mantid.CreateEmptyTableWorkspace(OutputWorkspace=table_name)
corrections_table.addColumn('int', "Detector ID")
corrections_table.addColumn('V3D', "Detector Position")
mean_peaks, bad_tubes = findBadPeakFits(peaksTable, threshold)
for index in bad_tubes:
print("Refitting tube %s" % spec.getTubeName(index))
tube_dets, _ = spec.getTube(index)
getPoints(ws, idealTube.getFunctionalForms(), fitPar, tube_dets)
tube_ws = mantid.mtd['TubePlot']
fit_ws = mantid.FindPeaks(InputWorkspace=tube_ws, WorkspaceIndex=0,
PeakPositions=fitPar.getPeaks(), PeaksList='RefittedPeaks')
centers = [row['centre'] for row in fit_ws]
detIDList, detPosList = getCalibratedPixelPositions(ws, centers, idealTube.getArray(), tube_dets)
for id, pos in zip(detIDList, detPosList):
corrections_table.addRow({'Detector ID': id, 'Detector Position': kernel.V3D(*pos)})
return corrections_table
corrected_calibration_table = correctMisalignedTubes(ws, calibrationTable, peaks, spec, idealTub
|
gpaOliveira/SuperDiffer
|
SuperDiffer/routes.py
|
Python
|
mit
| 2,533 | 0.013423 |
from SuperDiffer import app, db
from SuperDiffer.id import controllers as ID
from flask import Flask, render_template, request, abort, jsonify
import json,base64,pdb
"""Routes to allow clients to add left and right base64 encoded on JSON values and fetch their diff"""
#References: https://blog.miguelgrinberg.com/post/designing-a-restful-api-with-python-and-flask
@app.route('/v1/diff/<int:id>', methods=['GET'])
def diff_right_left(id):
"""Calculates the diff between left and right descriptors of a given ID and remove those descriptors if they're found (even if the data lenght is not the same and no diff is made)"""
all_diff_data = ID.diff(id, ["left","right"])
if not all_diff_data or not all_diff_data["left_right"]:
abort(400)
ID.remove_all(id, ["left","right"])
return jsonify(all_diff_data["left_right"])
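# Example interaction with the routes above (a sketch; the host and port depend
# on how the Flask app is served, e.g. the default http://localhost:5000):
#   curl -X POST -H "Content-Type: application/json" -d '{"data": "dGVzdA=="}' http://localhost:5000/v1/diff/1/left
#   curl -X POST -H "Content-Type: application/json" -d '{"data": "dGVzdA=="}' http://localhost:5000/v1/diff/1/right
#   curl http://localhost:5000/v1/diff/1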
@app.route('/v1/diff/<int:id>/left', methods=['POST'])
def add_left_to_id(id):
"""Add a JSON base64 value (in the format: {"data":"base64value"}) to the left descriptor of a given ID"""
return _add_data_to_id_description(id, "left", request.json)
@app.route('/v1/diff/<int:id>/right', methods=['POST'])
def add_right_to_id(id):
"""Add a JSON base64 value (in the format: {"data":"base64value"}) to the right descriptor of a given ID"""
return _add_data_to_id_description(id, "right", request.json)
def _is_base64(value):
"""Returns true only if value only has base64 chars (A-Z,a-z,0-9,+ or /)"""
#http://stackoverflow.com/questions/12315398/verify-is-a-string-is-encoded-in-base64-python
try:
enc = base64.b64encode(base64.b64decode(value)).strip()
return enc == value
except TypeError:
return False
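# For example (a sketch): _is_base64("dGVzdA==") returns True because the value
# round-trips to itself, while a value with broken padding such as "dGVzdA="
# raises during decoding and so returns False.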
def _add_data_to_id_description(id, descriptor, request_json):
"""Add a base64 value obtained from a JSON in the format {"data":"base64value"}) to the given descriptor of a given ID"""
if not "data" in request_json:# no data key on json ? abort !
abort(400)
try:#arrays or other objects that doesnt have encode methods should not be accepted - abort !
no_unicode_data = request_json["data"].encode("utf-8")
except:
abort(400)
if not _is_base64(no_unicode_data):# no base64 value on data key ? abort !
abort(400)
if not ID.add(id, descriptor, no_unicode_data):# add failed due to some database problem ? yeah, abort !
abort(400)
return "Created", 201 #yey!
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
|
araisrobo/linuxcnc
|
configs/gladevcp/probe/probe.py
|
Python
|
lgpl-2.1
| 7,589 | 0.011859 |
#!/usr/bin/env python
# vim: sts=4 sw=4 et
# This is a component of EMC
# probe.py Copyright 2010 Michael Haberler
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA''''''
'''
gladevcp probe demo example
Michael Haberler 11/2010
'''
import os,sys
from gladevcp.persistence import IniFile,widget_defaults,set_debug,select_widgets
import hal
import hal_glib
import gtk
import glib
import linuxcnc
debug = 0
class EmcInterface(object):
def __init__(self):
try:
emcIniFile = linuxcnc.ini(os.environ['INI_FILE_NAME'])
linuxcnc.nmlfile = os.path.join(os.path.dirname(os.environ['INI_FILE_NAME']), emcIniFile.find("EMC", "NML_FILE"))
self.s = linuxcnc.stat();
self.c = linuxcnc.command()
except Exception, msg:
print "cant initialize EmcInterface: %s - EMC not running?" %(msg)
def running(self,do_poll=True):
if do_poll: self.s.poll()
return self.s.task_mode == linuxcnc.MODE_AUTO and self.s.interp_state != linuxcnc.INTERP_IDLE
def manual_ok(self,do_poll=True):
if do_poll: self.s.poll()
if self.s.task_state != linuxcnc.STATE_ON: return False
return self.s.interp_state == linuxcnc.INTERP_IDLE
def ensure_mode(self,m, *p):
'''
If emc is not already in one of the modes given, switch it to the first mode
example:
ensure_mode(linuxcnc.MODE_MDI)
ensure_mode(linuxcnc.MODE_AUTO, linuxcnc.MODE_MDI)
'''
self.s.poll()
if self.s.task_mode == m or self.s.task_mode in p: return True
if self.running(do_poll=False): return False
self.c.mode(m)
self.c.wait_complete()
return True
def active_codes(self):
self.s.poll()
return self.s.gcodes
def get_current_system(self):
for i in self.active_codes():
if i >= 540 and i <= 590:
return i/10 - 53
elif i >= 590 and i <= 593:
return i - 584
return 1
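    # For example (a sketch of the mapping above): an active 550 (G55) gives
    # 550/10 - 53 = 2, and 591 (G59.1) gives 591 - 584 = 7.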
def mdi_command(self,command, wait=True):
#ensure_mode(emself.c.MODE_MDI)
self.c.mdi(command)
if wait: self.c.wait_complete()
def emc_status(self):
'''
return tuple (task mode, task state, exec state, interp state) as strings
'''
self.s.poll()
task_mode = ['invalid', 'MANUAL', 'AUTO', 'MDI'][self.s.task_mode]
task_state = ['invalid', 'ESTOP', 'ESTOP_RESET', 'OFF', 'ON'][self.s.task_state]
exec_state = ['invalid', 'ERROR', 'DONE',
'WAITING_FOR_MOTION',
'WAITING_FOR_MOTION_QUEUE',
'WAITING_FOR_IO',
'WAITING_FOR_PAUSE',
'WAITING_FOR_MOTION_AND_IO',
'WAITING_FOR_DELAY',
'WAITING_FOR_SYSTEM_CMD' ][self.s.exec_state]
interp_state = ['invalid', 'IDLE', 'READING', 'PAUSED', 'WAITING'][self.s.interp_state]
return (task_mode, task_state, exec_state, interp_state)
class HandlerClass:
def on_manual_mode(self,widget,data=None):
if self.e.ensure_mode(linuxcnc.MODE_MANUAL):
print "switched to manual mode"
else:
print "cant switch to manual in this state"
def on_mdi_mode(self,widget,data=None):
if self.e.ensure_mode(linuxcnc.MODE_MDI):
print "switched to MDI mode"
else:
print "cant switch to MDI in this state"
def _query_emc_status(self,data=None):
(task_mode, task_state, exec_state, interp_state) = self.e.emc_status()
self.builder.get_object('task_mode').set_label("Task mode: " + task_mode)
self.builder.get_object('task_state').set_label("Task state: " + task_state)
self.builder.get_object('exec_state').set_label("Exec state: " + exec_state)
self.builder.get_object('interp_state').set_label("Interp state: " + interp_state)
return True
def on_probe(self,widget,data=None):
label = widget.get_label()
axis = ord(label[0].lower()) - ord('x')
direction = 1.0
if label[1] == '-':
direction = -1.0
self.e.s.poll()
self.start_feed = self.e.s.settings[1]
# determine system we are touching off - 1...g54 etc
self.current_system = self.e.get_current_system()
# remember current abs or rel mode - g91
self.start_relative = (910 in self.e.active_codes())
self.previous_mode = self.e.s.task_mode
if self.e.s.task_state != linuxcnc.STATE_ON:
print "machine not turned on"
return
if not self.e.s.homed[axis]:
print "%s axis not homed" %(chr(axis + ord('X')))
return
if self.e.running(do_poll=False):
print "cant do that now - intepreter running"
return
self.e.ensure_mode(linuxcnc.MODE_MDI)
self.e.mdi_command("#<_Probe_System> = %d " % (self.current_system ),wait=False)
self.e.mdi_command("#<_Probe_Axis> = %d " % (axis),wait=False)
self.e.mdi_command("#<_Probe_Speed> = %s " % (self.builder.get_object('probe_feed').get_value()),wait=False)
self.e.mdi_command("#<_Probe_Diameter> = %s " % (self.builder.get_object('probe_diameter').get_value() ),wait=False)
self.e.mdi_command("#<_Probe_Distance> = %s " % (self.builder.get_object('probe_travel').get_value() * direction),wait=False)
self.e.mdi_command("#<_Probe_Retract> = %s " % (self.builder.get_object('retract').get_value() * direction * -1.0),wait=False)
self.e.mdi_command("O<probe> call",wait=False)
self.e.mdi_command('F%f' % (self.start_feed),wait=False)
self.e.mdi_command('G91' if self.start_relative else 'G90',wait=False)
# self.e.ensure_mode(self.previous_mode)
def on_destroy(self,obj,data=None):
self.ini.save_state(self)
def on_restore_defaults(self,button,data=None):
'''
        example callback for 'Reset to defaults' button
        currently unused
'''
self.ini.create_default_ini()
self.ini.restore_state(self)
def __init__(self, halcomp,builder,useropts):
self.halcomp = halcomp
self.builder = builder
self.ini_filename = __name__ + '.ini'
self.defaults = { IniFile.vars: dict(),
IniFile.widgets : widget_defaults(select_widgets(self.builder.get_objects(), hal_only=False,output_only = True))
}
self.ini = IniFile(self.ini_filename,self.defaults,self.builder)
self.ini.restore_state(self)
self.e = EmcInterface()
glib.timeout_add_seconds(1, self._query_emc_status)
def get_handlers(halcomp,builder,useropts):
global debug
for cmd in useropts:
exec cmd in globals()
set_debug(debug)
return [HandlerClass(halcomp,builder,useropts)]
|
jmvrbanac/barbican
|
bin/barbican-keystone-listener.py
|
Python
|
apache-2.0
| 2,403 | 0.002913 |
#!/usr/bin/env python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Barbican Keystone notification listener server.
"""
import eventlet
import os
import sys
# Oslo messaging notification server uses eventlet.
#
# To have remote debugging, thread module needs to be disabled.
# eventlet.monkey_patch(thread=False)
eventlet.monkey_patch()
# 'Borrowed' from the Glance project:
# If ../barbican/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'barbican', '__init__.py')):
sys.path.insert(0, possible_topdir)
from barbican.common import config
from barbican.openstack.common import log
from barbican.openstack.common import service
from barbican import queue
from barbican.queue import keystone_listener
from oslo_config import cfg
def fail(returncode, e):
sys.stderr.write("ERROR: {0}\n".format(e))
sys.exit(returncode)
if __name__ == '__main__':
try:
config.parse_args()
config.setup_remote_pydev_debug()
# Import and configure logging.
log.setup('barbican')
LOG = log.getLogger(__name__)
LOG.info("Booting up Barbican Keystone listener node...")
# Queuing initialization
CONF = cfg.CONF
queue.init(CONF)
        if getattr(getattr(CONF, queue.KS_NOTIFICATIONS_GRP_NAME), 'enable'):
service.launch(
keystone_listener.MessageServer(CONF)
).wait()
        else:
            LOG.info("Exiting as Barbican Keystone listener is not enabled...")
except RuntimeError as e:
fail(1, e)
|
AltSchool/django-allauth
|
allauth/socialaccount/providers/fake/views.py
|
Python
|
mit
| 910 | 0 |
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import FakeProvider
class FakeOAuth2Adapter(OAuth2Adapter):
provider_id = FakeProvider.id
access_token_url = 'https://localhost/o/oauth2/token'
authorize_url = 'https://localhost/o/oauth2/auth'
profile_url = 'https://localhost/oauth2/v1/userinfo'
def complete_login(self, request, app, token, **kwargs):
resp = requests.get(self.profile_url,
params={'access_token': token.token,
'alt': 'json'})
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(
request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(FakeOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(FakeOAuth2Adapter)
|
heysion/deepin-auto-build
|
dab/webui/taskctrl.py
|
Python
|
gpl-3.0
| 2,297 | 0.01219 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@date: 2017-04-06
@author: Heysion Yuan
@copyright: 2017, Heysion Yuan <heysions@gmail.com>
@license: GPLv3
'''
from dab.webui import WebBase
from dab.core.db.models import Task
class TaskIndex(WebBase):
def get(self):
dataset = self.get_task_top_all()
self.render("taskindex.html",tasklist=dataset)
# task_items = [
# {"id":1,"name":"deepin-auto-build","createtime":"2017","state":"success","resultinfo":"info"},
# {"id":2,"name":"deepin-auto-build","createtime":"2017","state":"success","resultinfo":"info"}
# ]
# self.render("task.html", tasklist=task_items)
# pass
def get_task_top_all(self):
dataset = Task.select(Task.id,Task.src_name,
Task.create_time,Task.state,
Task.owner_name)
datalist = []
if dataset :
for data in dataset:
datalist.append({"id":data.id,"name":data.name,
"ceatetime":data.createtime,"state":data.state,
"resultinfo":data.ower_name})
return datalist
class TaskInfo(WebBase):
pass
class TaskNew(WebBase):
def get(self):
self.render("tasknew.html")
pass
def post(self):
req_data = { k: self.get_argument(k) for k in self.request.arguments }
if not ("arches" in req_data.keys()):
self.render("404.html")
if not ("name" in req_data and req_data["name"] is not None) :
self.render("404.html")
        self.save_new_task(req_data)
self.render("/taskindex")
def save_new_task(self,data):
new_task = Task.select(Task.name).where(Task.name==data["name"])
if not new_task :
new_task = Task.create(name=data["name"],
suite=data["suite"],
codename=data["codename"],
architectures=data["arches"],
workdir=data["workbase"],
description=data["description"])
new_task.save()
else:
return None
return new_task
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/Service/DataIntegrityHandler.py
|
Python
|
gpl-3.0
| 6,511 | 0.004147 |
"""
:mod: DataIntegrityHandler
.. module: DataIntegrityHandler
:synopsis: DataIntegrityHandler is the implementation of the Data Integrity service in
the DISET framework
"""
# from DIRAC
from DIRAC import S_OK
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.DataManagementSystem.DB.DataIntegrityDB import DataIntegrityDB
class DataIntegrityHandlerMixin:
"""
.. class:: DataIntegrityHandler
Implementation of the Data Integrity service in the DISET framework.
"""
@classmethod
def initializeHandler(cls, serviceInfoDict):
"""Initialization of DB object"""
cls.dataIntegrityDB = DataIntegrityDB()
return S_OK()
types_removeProblematic = [[int, list]]
def export_removeProblematic(self, fileID):
"""Remove the file with the supplied FileID from the database"""
if isinstance(fileID, list):
fileIDs = fileID
else:
fileIDs = [int(fileID)]
self.log.info("DataIntegrityHandler.removeProblematic: Attempting to remove problematic.")
res = self.dataIntegrityDB.removeProblematic(fileIDs)
if not res["OK"]:
self.log.error("DataIntegrityHandler.removeProblematic: Failed to remove problematic.", res["Message"])
return res
types_getProblematic = []
def export_getProblematic(self):
"""Get the next problematic to resolve from the IntegrityDB"""
self.log.info("DataIntegrityHandler.getProblematic: Getting file to resolve.")
res = self.dataIntegrityDB.getProblematic()
if not res["OK"]:
self.log.error(
"DataIntegrityHandler.getProblematic: Failed to get problematic file to resolve.", res["Message"]
)
return res
types_getPrognosisProblematics = [str]
def export_getPrognosisProblematics(self, prognosis):
"""Get problematic files from the problematics table of the IntegrityDB"""
        self.log.info("DataIntegrityHandler.getPrognosisProblematics: Getting files with %s prognosis." % prognosis)
res = self.dataIntegrityDB.getPrognosisProblematics(prognosis)
if not res["OK"]:
self.log.error(
"DataIntegrityHandler.getPrognosisProblematics: Failed to get prognosis files.", res["M
|
essage"]
)
return res
types_setProblematicStatus = [int, str]
def export_setProblematicStatus(self, fileID, status):
"""Update the status of the problematics with the provided fileID"""
self.log.info("DataIntegrityHandler.setProblematicStatus: Setting file %s status to %s." % (fileID, status))
res = self.dataIntegrityDB.setProblematicStatus(fileID, status)
if not res["OK"]:
self.log.error("DataIntegrityHandler.setProblematicStatus: Failed to set status.", res["Message"])
return res
types_incrementProblematicRetry = [int]
def export_incrementProblematicRetry(self, fileID):
"""Update the retry count for supplied file ID."""
self.log.info("DataIntegrityHandler.incrementProblematicRetry: Incrementing retries for file %s." % (fileID))
res = self.dataIntegrityDB.incrementProblematicRetry(fileID)
if not res["OK"]:
self.log.error(
"DataIntegrityHandler.incrementProblematicRetry: Failed to increment retries.", res["Message"]
)
return res
types_insertProblematic = [str, dict]
def export_insertProblematic(self, source, fileMetadata):
"""Insert problematic files into the problematics table of the IntegrityDB"""
self.log.info("DataIntegrityHandler.insertProblematic: Inserting problematic file to integrity DB.")
res = self.dataIntegrityDB.insertProblematic(source, fileMetadata)
if not res["OK"]:
self.log.error("DataIntegrityHandler.insertProblematic: Failed to insert.", res["Message"])
return res
types_changeProblematicPrognosis = []
def export_changeProblematicPrognosis(self, fileID, newPrognosis):
"""Change the prognosis for the supplied file"""
self.log.info("DataIntegrityHandler.changeProblematicPrognosis: Changing problematic prognosis.")
res = self.dataIntegrityDB.changeProblematicPrognosis(fileID, newPrognosis)
if not res["OK"]:
self.log.error("DataIntegrityHandler.changeProblematicPrognosis: Failed to update.", res["Message"])
return res
types_getTransformationProblematics = [int]
def export_getTransformationProblematics(self, transID):
"""Get the problematics for a given transformation"""
self.log.info("DataIntegrityHandler.getTransformationProblematics: Getting problematics for transformation.")
res = self.dataIntegrityDB.getTransformationProblematics(transID)
if not res["OK"]:
self.log.error("DataIntegrityHandler.getTransformationProblematics: Failed.", res["Message"])
return res
types_getProblematicsSummary = []
def export_getProblematicsSummary(self):
"""Get a summary from the Problematics table from the IntegrityDB"""
self.log.info("DataIntegrityHandler.getProblematicsSummary: Getting problematics summary.")
res = self.dataIntegrityDB.getProblematicsSummary()
if res["OK"]:
for prognosis, statusDict in res["Value"].items():
self.log.info("DataIntegrityHandler.getProblematicsSummary: %s." % prognosis)
for status, count in statusDict.items():
self.log.info("DataIntegrityHandler.getProblematicsSummary: \t%-10s %-10s." % (status, str(count)))
else:
self.log.error("DataIntegrityHandler.getProblematicsSummary: Failed to get summary.", res["Message"])
return res
types_getDistinctPrognosis = []
def export_getDistinctPrognosis(self):
"""Get a list of the distinct prognosis from the IntegrityDB"""
self.log.info("DataIntegrityHandler.getDistinctPrognosis: Getting distinct prognosis.")
res = self.dataIntegrityDB.getDistinctPrognosis()
if res["OK"]:
for prognosis in res["Value"]:
self.log.info("DataIntegrityHandler.getDistinctPrognosis: \t%s." % prognosis)
else:
self.log.error("DataIntegrityHandler.getDistinctPrognosis: Failed to get unique prognosis.", res["Message"])
return res
class DataIntegrityHandler(DataIntegrityHandlerMixin, RequestHandler):
pass
|
jolevq/odoopub
|
extra-addons/customize/res_company.py
|
Python
|
agpl-3.0
| 1,213 | 0.003298 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Cubic ERP SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
class res_company(osv.osv):
_name = "res.company"
_inherit = 'res.company'
_columns = {
'currency2_id' : fields.many2one('res.currency', string="Secondary Currency"),
}
|
dnaextrim/django_adminlte_x
|
adminlte/static/plugins/datatables/extensions/ColReorder/examples/predefined.html.py
|
Python
|
mit
| 16,794 | 0.037752 |
|
Seek/LaTechNumeric
|
linearalg/linalg.py
|
Python
|
mit
| 8,666 | 0.006577 |
import numpy as np
import pdb
from scipy import linalg as splinalg
# A = np.array([
# [1, 1, -2, 1, 3, -1],
# [2, -1, 1, 2, 1, -3],
# [1, 3, -3, -1, 2, 1],
# [5, 2, -1, -1, 2, 1],
# [-3, -1, 2, 3, 1, 3],
# [4, 3, 1, -6, -3, -2]
# ], dtype=float)
# b = np.array([4, 20, -15, -3, 16, -27], dtype=float)
A = np.array([
[8,4,4],
[2,-4,1],
[2,-1,3]
], dtype = float)
b = np.array([
80, 7, 22
], dtype=float)
# A = np.array([
# [3,-0.1,-0.2],
# [0.1,7,-0.3],
# [0.3,-0.2,10]
# ], dtype = float)
# b = np.array([
# 7.85, -19.3, 71.4
# ], dtype=float)
# Simplest version
def gauss1(A, b):
assert A.shape[0] == len(b), "A and b must have the same length"
dim = A.shape[0]
x = np.zeros(dim)
# Elimination
for i in range(dim - 1):
for j in range(i + 1, dim):
c = A[j, i] / A[i, i]
A[j, :] -= (c * A[i, :])
b[j] -= (c * b[i])
# Substitution
x[-1] = b[-1] / A[-1, -1]
for i in range(dim - 2, -1, -1):
sum = b[i]
for j in range(dim - 1, i - 1, -1):
sum -= x[j] * A[i, j]
x[i] = sum / A[i, i]
return x
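# A small usage sketch for gauss1 (note it modifies A and b in place, so pass copies):
#   A0, b0 = A.copy(), b.copy()
#   x = gauss1(A0, b0)
#   print(np.allclose(np.dot(A, x), b))   # True for a well-conditioned system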
def gauss(A, b, tol, err):
assert A.shape[0] == len(b), "A and b must have the same length"
dim = A.shape[0]
x = np.zeros(dim)
pv = np.arange(0, dim, 1)
err = 0
# Eliminate everything but the last row (dim-1)
for i in range(dim - 1):
# Store the current pivot from the pivot list
pvt = pv[i]
# Store the value of the current pivot
pvv = A[pvt, i]
# Search the other row specified in the pivot list
for k in pv:
# Check if the other rows have larger pivot values
val = A[k, i]
# print("val ({0}) > pvv({1})".format(val, pvv))
if val > pvv:
# We found a larger row, store the value and so we can check the others
pvv = val
pvt = k
# Did we find a new pivot that is in a row below us?
if pvt > pv[i]:
# If we did switch the indices in the pivot list
#print("We switched row {0} with pivot {1} for row {2} with pivot {3}".format(pv[i], A[pv[i], i], pvt, A[pvt,i]))
tmp = pv[i]
pv[i] = pvt
pv[pvt] = tmp
# print(pv)
# Check if the current pivot is close to 0
# if it is, break and set the error flag
if np.abs(A[pv[i], i]) < tol:
err = -1
break
# Here we actually perform the actual elimination
for j in range(i + 1, dim):
# print("c = {0}/{1}".format(A[pv[j], i], A[pv[i], i]))
c = A[pv[j], i] / A[pv[i], i]
# print(A[pv[j], i:])
# print((c * A[pv[i], i:]))
A[pv[j], i:] -= (c * A[pv[i], i:])
# print(A[pv[j], :])
b[pv[j]] -= (c * b[pv[i]])
# print(A)
#print(b)
# Quit here is the system is singular
if err == -1:
return x
# Now we begin back substitution by calculating the last x value
    x[-1] = b[pv[-1]] / A[pv[-1], -1]
# Now we solve the remaining equations
# dim-2 starts means we begin at second row from the end and go until the 0th row
    for i in range(dim - 2, -1, -1):
# Grab the corresponding b value
sum = b[pv[i]]
# Now we sum from the last column (dim -1 ) to the current column (i-1)
for j in range(dim - 1, i - 1, -1):
sum -= x[j] * A[pv[i], j]
x[i] = sum / A[pv[i], i]
return x
def lu_factor(A, tol, err):
"""Returns the matrix A with the LU matrices and a pivot vector containing information on how the matrix was eliminated.
Passing these values to to lu_solve with a b vector will solve the equation"""
dim = A.shape[0]
pv = np.arange(0, dim, 1)
err = 0
# Eliminate everything but the last row (dim-1)
for i in range(dim - 1):
# Store the current pivot from the pivot list
pvt = pv[i]
# Store the value of the current pivot
pvv = A[pvt, i]
# Search the other row specified in the pivot list
for k in pv:
# Check if the other rows have larger pivot values
val = A[k, i]
# print("val ({0}) > pvv({1})".format(val, pvv))
if val > pvv:
# We found a larger row, store the value and so we can check the others
pvv = val
pvt = k
# Did we find a new pivot?
if pvt > pv[i]:
# If we did switch the indices in the pivot list
# print("We switched row {0} with pivot {1} for row {2} with pivot {3}".format(pv[i], A[pv[i], i], pvt, A[pvt,i]))
tmp = pv[i]
pv[i] = pvt
pv[pvt] = tmp
# print(pv)
# Check if the current pivot is close to 0
# if it is, break and set the error flag
if np.abs(A[pv[i], i]) < tol:
err = -1
break
# Here we actually perform the actual elimination
for j in range(i + 1, dim):
# print("c = {0}/{1}".format(A[pv[j], i], A[pv[i], i]))
c = A[pv[j], i] / A[pv[i], i]
# print(A[pv[j], i:])
# print((c * A[pv[i], i:]))
A[pv[j], i:] -= (c * A[pv[i], i:])
# print(A[pv[j], :])
#print("Replacing index {0},{1} with value {2} with {3}".format(pv[j], i, A[pv[j], i], c))
A[pv[j], i] = c
# print(A)
# Quit here if the system is singular
if err == -1:
return None
else:
return (A, pv)
def lu_solve(A, pv, b):
""" Solves the system Ax=b given the output from lu_factor"""
dim = A.shape[0]
x = np.zeros(dim)
for i in range(dim - 1):
for j in range(i + 1, dim):
#All of our c's are stored in A from the output of LU factor
c = A[pv[j], i]
#Calculate the b vector that would result from the typical elimination procedure
b[pv[j]] -= (c * b[pv[i]])
#print(d)
x[-1] = b[pv[-1]] / A[pv[-1], -1]
# Now we solve the remaining equations, this is the same as Gaussian back substitution
# dim-2 starts means we begin at second row from the end and go until the 0th row
for i in range(dim - 2, -1, -1):
# Grab the corresponding b value
sum = b[pv[i]]
# Now we sum from the last column (dim -1 ) to the current column (i-1)
for j in range(dim - 1, i - 1, -1):
sum -= x[j] * A[pv[i], j]
x[i] = sum / A[pv[i], i]
return x
def inv(A, tol, err):
"""We always assume square matrices"""
dim = A.shape[0]
A1 = np.zeros(A.shape)
A, pvt = lu_factor(A, tol, err)
if err == -1:
return None
for i in range(dim):
b = np.zeros(dim)
b[i] = 1
x = lu_solve(A, pvt, b)
A1[:, i] = np.copy(x)
return A1
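# Usage sketch for the LU routines above (they also modify A in place, so copy first):
#   lu, piv = lu_factor(A.copy(), 1.0e-12, 0)
#   x = lu_solve(lu, piv, b.copy())
#   Ainv = inv(A.copy(), 1.0e-12, 0)      # column-by-column solve against the identity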
def gauss_seidel(A, b, x, tol, maxi, lam):
""" x should contain initial guesses (can be 0)"""
dim = A.shape[0]
    #Divide each row by its diagonal element
for i in range(dim):
tmp = A[i,i]
for j in range(dim):
A[i,j] /= tmp
b[i] /= tmp
# print(A)
for i in range(dim):
acc = b[i]
for j in range(dim):
if i == j:
# print("Skipping i = {0} and j = {1}".format(i, j))
continue
else:
acc -= A[i, j] * x[j]
# print("Old x = {0}, new x = {1}".format(x[i], acc))
x[i] = acc
for i in range(maxi):
flag = 1
for k in range(dim):
acc = b[k]
oldx = x[k]
for j in range(dim):
if k == j:
continue
else:
# print('k = {0}, j={1}'.format(k, j))
acc -= (A[k,j] * x[j])
# print(acc)
# print("Old x = {0}, new x = {1}".format(oldx, (lam * acc) + ((1-lam) * oldx)))
x[k] = (lam * acc) + ((1-lam) * oldx)
if flag ==1 and x[k] != 0:
ea = abs((x[k] - oldx)/x[k]) * 100
# print("Error is equal
|
suutari-ai/shoop
|
shuup/admin/modules/users/views/permissions.py
|
Python
|
agpl-3.0
| 5,257 | 0.001332 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group as PermissionGroup
from django.forms.models import modelform_factory
from django.http.response import HttpResponseRedirect
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import UpdateView
from shuup.admin.forms.fields import Select2MultipleField
from shuup.admin.toolbar import get_default_edit_toolbar
from shuup.admin.utils.urls import get_model_url
class PermissionChangeFormBase(forms.ModelForm):
old_password = forms.CharField(
label=_("Your Password"),
widget=forms.PasswordInput,
help_text=_("For security purposes, we need your current password.")
)
def __init__(self, changing_user, *args, **kwargs):
super(PermissionChangeFormBase, self).__init__(*args, **kwargs)
self.changing_user = changing_user
if getattr(self.instance, 'is_superuser', False) and not getattr(self.changing_user, 'is_superuser', False):
self.fields.pop("is_superuser")
if not (
self.changing_user == self.instance or
getattr(self.instance, 'is_superuser', False)
):
# Only require old password when editing
self.fields.pop("old_password")
initial_groups = self._get_initial_groups()
permission_groups_field = Select2MultipleField(
model=PermissionGroup,
initial=[group.pk for group in initial_groups],
required=False,
label=_("Permission Groups"),
help_text=_(
"The permission groups that th
|
is user belongs to. "
"Permission groups are configured through Contacts - Permission Groups."
)
)
permission_groups_field.widget.choices = [(group.pk, force_text(group)) for group in initial_groups]
self.fields["permission_groups"] = permission_groups_field
def _get_initial_groups(self):
if self.instance.pk and hasattr(self.instance, "groups"):
return self.instance.groups.all()
        else:
return []
def clean_old_password(self):
"""
Validates that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.changing_user.check_password(old_password):
raise forms.ValidationError(
_("Your old password was entered incorrectly. Please enter it again."),
code='password_incorrect',
)
return old_password
def clean_members(self):
members = self.cleaned_data.get("members", [])
return get_user_model().objects.filter(pk__in=members).all()
def clean_permission_groups(self):
permission_groups = self.cleaned_data.get("permission_groups", [])
return PermissionGroup.objects.filter(pk__in=permission_groups)
def clean(self):
for field in ("is_staff", "is_superuser"):
flag = self.cleaned_data[field]
if self.changing_user == self.instance and not flag:
self.add_error(field, _("You can't unset this status for yourself."))
return self.cleaned_data
def save(self):
obj = super(PermissionChangeFormBase, self).save()
obj.groups.clear()
obj.groups = self.cleaned_data["permission_groups"]
class UserChangePermissionsView(UpdateView):
template_name = "shuup/admin/users/change_permissions.jinja"
model = settings.AUTH_USER_MODEL
title = _("Change User Permissions")
def get_form_class(self):
return modelform_factory(
model=get_user_model(),
form=PermissionChangeFormBase,
fields=("is_staff", "is_superuser")
)
def get_queryset(self):
return get_user_model().objects.all()
def get_toolbar(self):
toolbar = get_default_edit_toolbar(
self,
"permissions_form",
discard_url=get_model_url(self.object),
with_split_save=False
)
return toolbar
def get_form_kwargs(self):
kwargs = super(UserChangePermissionsView, self).get_form_kwargs()
kwargs["changing_user"] = self.request.user
return kwargs
def get_context_data(self, **kwargs):
context = super(UserChangePermissionsView, self).get_context_data(**kwargs)
context["toolbar"] = self.get_toolbar()
context["title"] = _("Change Permissions: %s") % self.object
return context
def form_valid(self, form):
form.save()
messages.success(self.request, _("Permissions changed for %s.") % self.object)
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return get_model_url(self.object)
|
mr-uuid/snippets
|
python/sockets/servers/blocking.py
|
Python
|
mit
| 1,330 | 0.001504 |
import socket
import random
import time
# A blocking server that simply sends hello to anyone who connects to it
def blocking_server(bind='0.0.0.0', port=8080, queued_connections=5):
"""
    This sets up a blocking socket. We will be listening for incoming
    connections.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((bind, port))
sock.listen(queued_connections) # max num of queued connections
return sock
def handle_connections(server):
"""
    To accept connections and send data on a server socket that is already set
    up. Accepting a connection creates a separate socket connected to the
    client we accepted.
"""
    # Accept a connection. The socket must be bound to an address and listening
    # for connections. The return value is a pair (conn, address) where conn is
# a new socket object usable to send and receive data on the connection,
# and address is the address bound to the socket on the other end of the
# connection.
sock, addr = server.accept() # this is a blocking call
time.sleep(random.randint(1,5))
sock.sendall("Hello") # this is a blocking call
sock.close()
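# A minimal client sketch for manual testing (assumes the server below is
# running on localhost:8080); it should receive "Hello" after a short delay:
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(('127.0.0.1', 8080))
#   print client.recv(1024)
#   client.close()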
server = blocking_server()
while True:
handle_connections(server)
|
CZ-NIC/knot
|
tests-extra/tests/ddns/forward/test.py
|
Python
|
gpl-3.0
| 777 | 0 |
#!/usr/bin/env python3
'''Test for DDNS forwarding'''
from dnstest.test import Test
t = Test()
master = t.server("knot")
slave = t.server("knot")
zone = t.zone("example.com.")
t.link(zone, master, slave, ddns=True)
t.start()
master.zones_wait(zone)
seri = slave.zones_wait(zone)
# OK
update = slave.update(zone)
update.add("forwarded.example.com.", 1, "TXT", "forwarded")
update.send("NOERROR")
resp = master.dig("forwarded.example.com.", "TXT")
resp.check("forwarded")
slave.zones_wait(zone, seri)
t.xfr_diff(master, slave, zone)
# NAME out of zone
update = slave.update(zone)
update.add("forwarded.", 1, "TXT", "forwarded")
update.send("NOTZONE")
resp = master.dig("forwarded.", "TXT")
resp.check(rcode="REFUSED")
t.sleep(3)
t.xfr_diff(master, slave, zone)
t.end()
|
mazaclub/tate-server
|
src/networks.py
|
Python
|
agpl-3.0
| 413 | 0.004843 |
# Main network and testnet3 definitions
params = {
'bitcoin_main': {
'pubkey_address': 50,
'script_address': 9,
        'genesis_hash': '00000c7c73d8ce604178dae13f0fc6ec0be3275614366d44b1b4b5c6e238c60c'
},
    'bitcoin_test': {
'pubkey_address': 88,
'script_address': 188,
'genesis_hash': '000003ae7f631de18a457fa4fa078e6fa8aff38e258458f8189810de5d62cede'
}
}
|
matham/sniffer
|
sniffer/__init__.py
|
Python
|
mit
| 97 | 0 |
'''
'''
__version__ = '0.1-dev'
device_config_name = 'Devices'
exp_config_name = 'experiment'
|
0101a/Valid-IP-checker-
|
Ipv4.py
|
Python
|
gpl-2.0
| 918 | 0.051198 |
# Valid-IP-checker-
#This program check whether a given IP is valid or not IPV4
def ip_checkv4(ip):
parts=ip.split(".")
if len(parts)<4 or len(parts)>4:
return "invalid IP length should be 4 not greater or less than 4"
else:
while len(parts)== 4:
a=int(parts[0])
b=int(parts[1])
c=int(parts[2])
d=int(parts[3])
if a<= 0 or a == 127 :
return "invalid IP address"
elif d == 0:
return "host id should not be 0 or less than zero "
elif a>=255:
return "should not be 255 or greater than 255 or less than 0 A"
elif b>=255 or b<0:
return "should not be 255 or greater than 255 or less than 0 B"
elif c>=255 or c<0:
                return "should not be 255 or greater than 255 or less than 0 C"
            elif d>=255 or d<0:
return "should not be 255 or greater than 255 or less than 0 D"
else:
return "Valid IP address ", ip
p=raw_input("Enter IP address")
print ip_checkv4(p)
|
rsvip/Django
|
django/forms/models.py
|
Python
|
bsd-3-clause
| 55,046 | 0.001508 |
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory',
'BaseInlineFormSet', 'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
def save_instance(form, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into model instance ``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed and
just needs to be saved.
"""
if construct:
instance = construct_instance(form, instance, fields, exclude)
opts = instance._meta
if form.errors:
raise ValueError("The %s could not be %s because the data didn't"
" validate." % (opts.object_name, fail_message))
# Wrap up the saving of m2m data as a function.
def save_m2m():
cleaned_data = form.cleaned_data
# Note that for historical reasons we want to include also
# virtual_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.virtual_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(instance, cleaned_data[f.name])
if commit:
# If we are committing, save the instance and the m2m data immediately.
instance.save()
save_m2m()
else:
# We're not committing. Add a method to the form to allow deferred
# saving of m2m data.
form.save_m2m = save_m2m
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
    the ``fields`` argument.
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.virtual_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if isinstance(f, ManyToManyField):
# If the object doesn't have a primary key yet, just use an empty
# list for its m2m fields. Calling f.value_from_object will raise
# an exception.
if instance.pk is None:
data[f.name] = []
else:
# MultipleChoiceWidget needs a list of pks, not object instances.
qs = f.value_from_object(instance)
if qs._result_cache is not None:
data[f.name] = [item.pk for item in qs]
else:
data[f.name] = list(qs.values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(instance)
return data
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Returns a ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_virtual_fields = [f for f in opts.virtual_fields
if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_virtual_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if field_classes and f.name in field_classes:
kwargs['form_class'] = field_classes[f.name]
if formfield_callback is
|
Yam-cn/potato
|
stratlib/thrSMA.py
|
Python
|
apache-2.0
| 5,091 | 0.004953 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 03 13:06:56 2015
@author: Eunice
"""
if __name__ == '__main__':
import sys
sys.path.append("..")
from engine import bar
# The modules above are only used for testing
from engine.broker.fillstrategy import DefaultStrategy
from engine.broker.backtesting import TradePercentage
from engine import strategy
from engine.technical import ma
from engine.technical import cross
class thrSMA(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, short_l, mid_l, long_l, up_cum):
strategy.BacktestingStrategy.__init__(self, feed)
self.__instrument = instrument
self.getBroker().setFillStrategy(DefaultStrategy(None))
self.getBroker().setCommission(TradePercentage(0.001))
self.__position = None
self.__prices = feed[instrument].getPriceDataSeries()
self.__malength1 = int(short_l)
self.__malength2 = int(mid_l)
self.__malength3 = int(long_l)
self.__circ = int(up_cum)
self.__ma1 = ma.SMA(self.__prices, self.__malength1)
self.__ma2 = ma.SMA(self.__prices, self.__malength2)
self.__ma3 = ma.SMA(self.__prices, self.__malength3)
def getPrice(self):
return self.__prices
def getSMA(self):
return self.__ma1,self.__ma2, self.__ma3
def onEnterCanceled(self, position):
self.__position = None
def onEnterOK(self):
pass
def onExitOk(self, position):
self.__position = None
#self.info("long close")
def onExitCanceled(self, position):
self.__position.exitMarket()
def buyCon1(self):
if cross.cross_above(self.__ma1, self.__ma2) > 0:
return True
def buyCon2(self):
m1 = 0
m2 = 0
for i in range(self.__circ):
if self.__ma1[-i-1] > self.__ma3[-i-1]:
m1 += 1
if self.__ma2[-i-1] > self.__ma3[-i-1]:
m2 += 1
        if m1 >= self.__circ and m2 >= self.__circ:
return True
def sellCon1(self):
if cross.cross_below(self.__ma1, self.__ma2) > 0:
return True
def onBars(self, bars):
# If a position was not opened, check if we should enter a long position.
        if self.__ma2[-1] is None:
return
if self.__position is not None:
if not self.__position.exitActive() and cross.cross_below(self.__ma1, self.__ma2) > 0:
self.__position.exitMarket()
#self.info("sell %s" % (bars.getDateTime()))
if self.__position is None:
if self.buyCon1() and self.buyCon2():
shares = int(self.getBroker().getCash() * 0.2 / bars[self.__instrument].getPrice())
self.__position = self.enterLong(self.__instrument, shares)
print bars[self.__instrument].getDateTime(), bars[self.__instrument].getPrice()
#self.info("buy %s" % (bars.getDateTime()))
def testStrategy():
from engine import bar
strat = thrSMA
instrument = '600288'
market = 'SH'
fromDate = '20150101'
toDate ='20150601'
frequency = bar.Frequency.MINUTE
paras = [2, 20, 60, 10]
plot = True
#############################################path set ############################33
import os
print os.path.split(os.path.realpath(__file__))
if frequency == bar.Frequency.MINUTE:
path = os.path.join(os.environ.get('STRATEGYPATH'), '..', 'histdata', 'minute')
elif frequency == bar.Frequency.DAY:
path = os.path.join(os.environ.get('STRATEGYPATH'), '..', 'histdata', 'day')
filepath = os.path.join(path, instrument + market + ".csv")
#############################################don't change ############################33
from engine.barfeed.csvfeed import Feed
barfeed = Feed(frequency)
barfeed.setDateTimeFormat('%Y-%m-%d %H:%M:%S')
barfeed.loadBars(instrument, market, fromDate, toDate, filepath)
engine_id = instrument + '.' + market
strat = strat(barfeed, engine_id, *paras)
from engine.stratanalyzer import returns
from engine.stratanalyzer import sharpe
from engine.stratanalyzer import drawdown
from engine.stratanalyzer import trades
retAnalyzer = returns.Returns()
strat.attachAnalyzer(retAnalyzer)
sharpeRatioAnalyzer = sharpe.SharpeRatio()
strat.attachAnalyzer(sharpeRatioAnalyzer)
drawDownAnalyzer = drawdown.DrawDown()
strat.attachAnalyzer(drawDownAnalyzer)
tradesAnalyzer = trades.Trades()
strat.attachAnalyzer(tradesAnalyzer)
strat.run()
    # Sharpe ratio
sharp = sharpeRatioAnalyzer.getSharpeRatio(0.05)
    # Maximum drawdown
maxdd = drawDownAnalyzer.getMaxDrawDown()
    # Cumulative return
return_ = retAnalyzer.getCumulativeReturns()[-1]
    # Return curve
return_list = []
for item in retAnalyzer.getCumulativeReturns():
return_list.append(item)
def run_strategy(ticker, account_id, paras):
print ticker
print account_id
print paras
strat = testStrategy()
if __name__ == "__main__":
testStrategy()
|
viaregio/django-simple-captcha
|
captcha/views.py
|
Python
|
mit
| 4,373 | 0.002058 |
from captcha.conf import settings
from captcha.helpers import captcha_image_url
from captcha.models import CaptchaStore
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
import random
import re
import tempfile
import os
import subprocess
try:
from cStringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
try:
from PIL import Image, ImageDraw, ImageFont
except ImportError:
import Image
import ImageDraw
import ImageFont
try:
import json
except ImportError:
from django.utils import simplejson as json
NON_DIGITS_RX = re.compile('[^\d]')
# Distance of the drawn text from the top of the captcha image
from_top = 4
def getsize(font, text):
if hasattr(font, 'getoffset'):
return [x + y + z for x, y, z in zip(font.getsize(text), font.getoffset(text), (0, from_top))]
else:
return font.getsize(text)
def captcha_image(request, key, scale=1):
    store = get_object_or_404(CaptchaStore, hashkey=key)
text = store.challenge
if settings.CAPTCHA_FONT_PATH.lower().strip().endswith('ttf'):
font = ImageFont.truetype(settings.CAPTCHA_FONT_PATH, settings.CAPTCHA_FONT_SIZE * scale)
else:
font = ImageFont.load(settings.CAPTCHA_FONT_PATH)
size = getsize(font, text)
size = (size[0] * 2, int(size[1] * 1.4))
image = Image.new('RGB', size, settings.CAPTCHA_BACKGROUND_COLOR)
try:
PIL_VERSION = int(NON_DIGITS_RX.sub('', Image.VERSION))
except:
PIL_VERSION = 116
xpos = 2
charlist = []
for char in text:
if char in settings.CAPTCHA_PUNCTUATION and len(charlist) >= 1:
charlist[-1] += char
else:
charlist.append(char)
for char in charlist:
fgimage = Image.new('RGB', size, settings.CAPTCHA_FOREGROUND_COLOR)
charimage = Image.new('L', getsize(font, ' %s ' % char), '#000000')
chardraw = ImageDraw.Draw(charimage)
chardraw.text((0, 0), ' %s ' % char, font=font, fill='#ffffff')
if settings.CAPTCHA_LETTER_ROTATION:
if PIL_VERSION >= 116:
charimage = charimage.rotate(random.randrange(*settings.CAPTCHA_LETTER_ROTATION), expand=0, resample=Image.BICUBIC)
else:
charimage = charimage.rotate(random.randrange(*settings.CAPTCHA_LETTER_ROTATION), resample=Image.BICUBIC)
charimage = charimage.crop(charimage.getbbox())
maskimage = Image.new('L', size)
maskimage.paste(charimage, (xpos, from_top, xpos + charimage.size[0], from_top + charimage.size[1]))
size = maskimage.size
image = Image.composite(fgimage, image, maskimage)
xpos = xpos + 2 + charimage.size[0]
image = image.crop((0, 0, xpos + 1, size[1]))
draw = ImageDraw.Draw(image)
for f in settings.noise_functions():
draw = f(draw, image)
for f in settings.filter_functions():
image = f(image)
out = StringIO()
image.save(out, "PNG")
out.seek(0)
response = HttpResponse(content_type='image/png')
response.write(out.read())
response['Content-length'] = out.tell()
return response
def captcha_audio(request, key):
if settings.CAPTCHA_FLITE_PATH:
store = get_object_or_404(CaptchaStore, hashkey=key)
text = store.challenge
if 'captcha.helpers.math_challenge' == settings.CAPTCHA_CHALLENGE_FUNCT:
text = text.replace('*', 'times').replace('-', 'minus')
else:
text = ', '.join(list(text))
path = str(os.path.join(tempfile.gettempdir(), '%s.wav' % key))
subprocess.call([settings.CAPTCHA_FLITE_PATH, "-t", text, "-o", path])
if os.path.isfile(path):
response = HttpResponse()
f = open(path, 'rb')
response['Content-Type'] = 'audio/x-wav'
response.write(f.read())
f.close()
os.unlink(path)
return response
raise Http404
def captcha_refresh(request):
""" Return json with new captcha for ajax refresh request """
if not request.is_ajax():
raise Http404
new_key = CaptchaStore.generate_key()
to_json_response = {
'key': new_key,
'image_url': captcha_image_url(new_key),
}
return HttpResponse(json.dumps(to_json_response), content_type='application/json')
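# Response-shape sketch for captcha_refresh (values illustrative; the concrete
# image URL depends on how captcha.urls is mounted in the project):
#   resp = captcha_refresh(ajax_request)
#   json.loads(resp.content)  # -> {"key": "<new hashkey>",
#                             #     "image_url": captcha_image_url("<new hashkey>")}
# A client is expected to swap its hidden key field and <img> src with these values.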
|
Freso/listenbrainz-server
|
listenbrainz/db/dump.py
|
Python
|
gpl-2.0
| 24,816 | 0.001531 |
""" This module contains data dump creation and import functions.
Read more about the data dumps in our documentation here:
https://listenbrainz.readthedocs.io/en/production/dev/listenbrainz-dumps.html
"""
# listenbrainz-server - Server for the ListenBrainz project
#
# Copyright (C) 2017 MetaBrainz Foundation Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
import os
import shutil
import sqlalchemy
import subprocess
import sys
import tarfile
import tempfile
import time
import ujson
from datetime import datetime
from flask import current_app
from listenbrainz import DUMP_LICENSE_FILE_PATH
import listenbrainz.db as db
from listenbrainz.db import DUMP_DEFAULT_THREAD_COUNT
from listenbrainz.utils import create_path, log_ioerrors
from listenbrainz import config
# this dict contains the tables dumped in public dump as keys
# and a tuple of columns that should be dumped as values
PUBLIC_TABLES = {
'"user"': (
'id',
'created',
'musicbrainz_id',
'musicbrainz_row_id',
# the following are dummy values for columns that we do not want to
# dump in the public dump
'\'\'', # auth token
'to_timestamp(0)', # last_login
'to_timestamp(0)', # latest_import
),
'statistics.user': (
'user_id',
'artist',
'release',
'recording',
'last_updated',
),
'statistics.artist': (
'id',
'msid',
'name',
'release',
'recording',
'listener',
'listen_count',
'last_updated',
),
'statistics.release': (
'id',
'msid',
'name',
'recording',
'listener',
'listen_count',
'last_updated',
),
'statistics.recording': (
'id',
'msid',
'name',
'listener',
'listen_count',
'last_updated',
),
'recording_feedback': (
'id',
'user_id',
'recording_msid',
'score',
'created'
),
}
# this dict contains the tables dumped in the private dump as keys
# and a tuple of columns that should be dumped as values
PRIVATE_TABLES = {
'"user"': (
'id',
'created',
'musicbrainz_id',
'auth_token',
'last_login',
'latest_import',
'musicbrainz_row_id',
'gdpr_agreed',
),
'api_compat.token': (
'id',
'user_id',
'token',
'api_key',
'ts',
),
'api_compat.session': (
'id',
'user_id',
'sid',
'api_key',
'ts',
),
}
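# Sketch (not necessarily the exact SQL the helpers below build) of how a
# table -> columns mapping such as PUBLIC_TABLES/PRIVATE_TABLES can be rendered
# into a COPY statement when dumping:
def _example_copy_statement(table, columns):
    return 'COPY (SELECT {columns} FROM {table}) TO STDOUT'.format(
        columns=', '.join(columns), table=table)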
def dump_postgres_db(location, dump_time=datetime.today(), threads=None):
""" Create postgres database dump in the specified location
Arguments:
location: Directory where the final dump will be stored
dump_time: datetime object representing when the dump was started
threads: Maximal number of threads to run during compression
Returns:
a tuple: (path to private dump, path to public dump)
"""
current_app.logger.info('Beginning dump of PostgreSQL database...')
current_app.logger.info('dump path: %s', location)
current_app.logger.info('Creating dump of private data...')
try:
private_dump = create_private_dump(location, dump_time, threads)
except IOError as e:
current_app.logger.critical(
'IOError while creating private dump: %s', str(e), exc_info=True)
current_app.logger.info('Removing created files and giving up...')
shutil.rmtree(location)
return
except Exception as e:
current_app.logger.critical(
'Unable to create private db dump due to error %s', str(e), exc_info=True)
current_app.logger.info('Removing created files and giving up...')
shutil.rmtree(location)
return
current_app.logger.info(
'Dump of private data created at %s!', private_dump)
current_app.logger.info('Creating dump of public data...')
try:
public_dump = create_public_dump(location, dump_time, threads)
except IOError as e:
current_app.logger.critical(
'IOError while creating public dump: %s', str(e), exc_info=True)
current_app.logger.info('Removing created files and giving up...')
shutil.rmtree(location)
return
except Exception as e:
current_app.logger.critical(
'Unable to create public dump due to error %s', str(e), exc_info=True)
current_app.logger.info('Removing created files and giving up...')
shutil.rmtree(location)
return
current_app.logger.info('Dump of public data created at %s!', public_dump)
current_app.logger.info(
'ListenBrainz PostgreSQL data dump created at %s!', location)
return private_dump, public_dump
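# Invocation sketch (hypothetical path and thread count; assumes an active Flask
# app context, since everything here logs through current_app):
#   private_path, public_path = dump_postgres_db('/tmp/listenbrainz-export', threads=4)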
def dump_feedback_for_spark(location, dump_time=datetime.today(), threads=None):
""" Dump user/recommendation feedback from postgres into spark format.
Arguments:
location: Directory where the final dump will be stored
dump_time: datetime object representing when the dump was started
threads: Maximal number of threads to run during compression
Returns:
path to feedback dump
"""
current_app.logger.info('Beginning dump of feedback data...')
current_app.logger.info('dump path: %s', location)
try:
feedback_dump = create_feedback_dump(location, dump_time, threads)
except IOError as e:
current_app.logger.critical(
'IOError while creating feedback dump: %s', str(e), exc_info=True)
current_app.logger.info('Removing created files and giving up...')
shutil.rmtree(location)
return
except Exception as e:
current_app.logger.critical(
'Unable to create feedback dump due to error %s', str(e), exc_info=True)
current_app.logger.info('Removing created files and giving up...')
shutil.rmtree(location)
return
current_app.logger.info(
'Dump of feedback data created at %s!', feedback_dump)
return feedback_dump
def _create_dump(location, dump_type, tables, dump_time, threads=DUMP_DEFAULT_THREAD_COUNT):
""" Creates a dump of the provided tables at the location passed
Arguments:
location: the path where the dump should be created
dump_type: the type of data dump being made - private or public
tables: a dict containing the names of the tables to be dumped as keys and the columns
to be dumped as values
dump_time: the time at which the dump process was started
threads: the maximum number of threads to use for compression
Returns:
the path to the archive file created
"""
archive_name = 'listenbrainz-{dump_type}-dump-{time}'.format(
dump_type=dump_type,
time=dump_time.strftime('%Y%m%d-%H%M%S')
)
    archive_path = os.path.join(location, '{archive_name}.tar.xz'.format(
archive_name=archive_name,
))
with open(archive_path, 'w') as archive:
pxz_command = ['pxz', '--compress',
'-T{threads}'.format(threads=threads)]
pxz = subprocess.Popen(
pxz_command, stdin=subprocess.PIPE, stdout=archive)
with tarfile.open(fileobj=pxz.stdin, mode='w|') as tar:
temp_dir = tempfile.mkdtemp()
|
t
|
dask/distributed
|
distributed/queues.py
|
Python
|
bsd-3-clause
| 9,788 | 0.001124 |
import asyncio
import logging
import uuid
from collections import defaultdict
from dask.utils import parse_timedelta, stringify
from .client import Client, Future
from .worker import get_client, get_worker
logger = logging.getLogger(__name__)
class QueueExtension:
"""An extension for the scheduler to manage queues
This adds the following routes to the scheduler
* queue_create
* queue_release
* queue_put
* queue_get
* queue_size
"""
def __init__(self, scheduler):
self.scheduler = scheduler
self.queues = dict()
self.client_refcount = dict()
self.future_refcount = defaultdict(lambda: 0)
self.scheduler.handlers.update(
{
"queue_create": self.create,
"queue_put": self.put,
"queue_get": self.get,
"queue_qsize": self.qsize,
}
)
self.scheduler.stream_handlers.update(
{"queue-future-release": self.future_release, "queue_release": self.release}
)
self.scheduler.extensions["queues"] = self
def create(self, name=None, client=None, maxsize=0):
logger.debug(f"Queue name: {name}")
if name not in self.queues:
self.queues[name] = asyncio.Queue(maxsize=maxsize)
self.client_refcount[name] = 1
else:
self.client_refcount[name] += 1
def release(self, name=None, client=None):
if name not in self.queues:
return
self.client_refcount[name] -= 1
if self.client_refcount[name] == 0:
del self.client_refcount[name]
futures = self.queues[name]._queue
del self.queues[name]
keys = [d["value"] for d in futures if d["type"] == "Future"]
if keys:
self.scheduler.client_releases_keys(keys=keys, client="queue-%s" % name)
async def put(self, name=None, key=None, data=None, client=None, timeout=None):
if key is not None:
record = {"type": "Future", "value": key}
self.future_refcount[name, key] += 1
self.scheduler.client_desires_keys(keys=[key], client="queue-%s" % name)
else:
record = {"type": "msgpack", "value": data}
await asyncio.wait_for(self.queues[name].put(record), timeout=timeout)
def future_release(self, name=None, key=None, client=None):
self.future_refcount[name, key] -= 1
if self.future_refcount[name, key] == 0:
self.scheduler.client_releases_keys(keys=[key], client="queue-%s" % name)
del self.future_refcount[name, key]
async def get(self, name=None, client=None, timeout=None, batch=False):
def process(record):
"""Add task status if known"""
if record["type"] == "Future":
record = record.copy()
key = record["value"]
ts = self.scheduler.tasks.get(key)
state = ts.state if ts is not None else "lost"
record["state"] = state
if state == "erred":
record["exception"] = ts.exception_blame.exception
record["traceback"] = ts.exception_blame.traceback
return record
if batch:
q = self.queues[name]
out = []
if batch is True:
while not q.empty():
record = await q.get()
out.append(record)
else:
if timeout is not None:
msg = (
"Dask queues don't support simultaneous use of "
"integer batch sizes and timeouts"
)
raise NotImplementedError(msg)
for i in range(batch):
record = await q.get()
out.append(record)
out = [process(o) for o in out]
return out
else:
record = await asyncio.wait_for(self.queues[name].get(), timeout=timeout)
record = process(record)
return record
def qsize(self, name=None, client=None):
return self.queues[name].qsize()
class Queue:
"""Distributed Queue
This allows multiple clients to share futures or small bits of data between
each other with a multi-producer/multi-consumer queue. All metadata is
sequentialized through the scheduler.
Elements of the Queue must be either Futures or msgpack-encodable data
(ints, strings, lists, dicts). All data is sent through the scheduler so
it is wise not to send large objects. To share large objects scatter the
data and share the future instead.
.. warning::
This object is experimental
Parameters
----------
name: string (optional)
Name used by other clients and the scheduler to identify the queue. If
not given, a random name will be generated.
    client: Client (optional)
Client used for communication with the scheduler.
If not given, the default global client will be used.
maxsize: int (optional)
Number of items allowed in the queue. If 0 (the default), the queue
size is unbounded.
Examples
--------
>>> from dask.distributed import Client, Queue # doctest: +SKIP
>>> client = Client() # doctest: +SKIP
>>> queue = Queue('x') # doctest: +SKIP
>>> future = client.submit(f, x) # doctest: +SKIP
    >>> queue.put(future) # doctest: +SKIP
See Also
--------
Variable: shared variable between clients
"""
def __init__(self, name=None, client=None, maxsize=0):
try:
self.client = client or Client.current()
except ValueError:
# Initialise new client
self.client = get_worker().client
self.name = name or "queue-" + uuid.uuid4().hex
self.maxsize = maxsize
if self.client.asynchronous:
self._started = asyncio.ensure_future(self._start())
else:
self.client.sync(self._start)
async def _start(self):
await self.client.scheduler.queue_create(name=self.name, maxsize=self.maxsize)
return self
def __await__(self):
if hasattr(self, "_started"):
return self._started.__await__()
else:
async def _():
return self
return _().__await__()
async def _put(self, value, timeout=None):
if isinstance(value, Future):
await self.client.scheduler.queue_put(
key=stringify(value.key), timeout=timeout, name=self.name
)
else:
await self.client.scheduler.queue_put(
data=value, timeout=timeout, name=self.name
)
def put(self, value, timeout=None, **kwargs):
"""Put data into the queue
Parameters
----------
timeout : number or string or timedelta, optional
Time in seconds to wait before timing out.
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
"""
timeout = parse_timedelta(timeout)
return self.client.sync(self._put, value, timeout=timeout, **kwargs)
def get(self, timeout=None, batch=False, **kwargs):
"""Get data from the queue
Parameters
----------
timeout : number or string or timedelta, optional
Time in seconds to wait before timing out.
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
batch : boolean, int (optional)
If True then return all elements currently waiting in the queue.
If an integer than return that many elements from the queue
If False (default) then return one item at a time
"""
timeout = parse_timedelta(timeout)
return self.client.sync(self._get, timeout=timeout, batch=batch, **kwargs)
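    # Usage sketch (assumes a running scheduler/client). As the docstrings above
    # describe, timeouts accept plain seconds or a timedelta string:
    #   q = Queue('work-items')
    #   q.put({'job': 1}, timeout=5)      # five seconds
    #   item = q.get(timeout='200ms')     # parsed by dask.utils.parse_timedelta
    #   drained = q.get(batch=True)       # everything currently queued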
def qsize(self, **kwargs):
"""Current number of elements in the queue""
|
sparkfun/usb-scale
|
scaleServer.py
|
Python
|
gpl-3.0
| 3,755 | 0.001065 |
#! /usr/bin/env python
# This will run as a production ready server if something like eventlet is installed
import argparse
import json
import os
import threading
import time
import sys
from gevent import ssl
from geventwebsocket import WebSocketServer, WebSocketApplication, Resource
from readscale import set_scale
clients = 0
scale = None
class WeightApp(WebSocketApplication):
def setup_scale(self):
global scale
try:
scale = set_scale()
except ValueError:
scale = None
            sys.stdout.write("\rPlease ensure that scale is connected and not in use by another process")
sys.stdout.flush()
def on_open(self):
print "Connected!"
global clients
if clients:
clients += 1
return
clients += 1
self.send_weight()
def on_message(self, message, *args, **kwargs):
if message:
print message
def on_close(self, reason):
print 'Disconnected'
global clients
clients -= 1
if reason:
print reason
def send_weight(self, reschedule=0.2):
"""
broadcast the weight on the scale to listeners
:param weight: dictionary with
:param reschedule: time delay to reschedule the function
:return: None
"""
global scale
fakeweight = {
'lbs': 'Please connect scale',
'ozs': 'Please connect scale',
}
if not scale:
self.setup_scale()
if scale:
try:
scale.update()
weight = {
'lbs': scale.pounds,
'ozs': scale.ounces
}
except IOError:
self.setup_scale()
weight = fakeweight
else:
weight = fakeweight
if clients:
self.ws.send(json.dumps(weight))
if reschedule and clients:
threading.Timer(reschedule, self.send_weight).start()
def static_wsgi_app(environ, start_response):
"""
Serve a test page
:param environ:
:param start_response:
:return:
"""
    start_response("200 OK", [('Content-Type', 'text/html')])
with open("templates/index.html", 'r') as f:
retval = [bytes(line) for line in f.readlines()]
return retval
def parse_args():
"""
Parse cmd line arguments
:return: arguments
"""
parser = argparse.ArgumentParser(description='Serve USB scale weights over WebSockets')
parser.add_argument('-k', '--key', help='Server private key for SSL')
parser.add_argument('-c', '--cert', help='Server certificate for SSL')
return parser.parse_args()
def validate_file(_file):
"""
Check to see if a file exists
:param _file: path to file
:return: True for file exists, Raises RuntimeError if doesn't exist
"""
if not os.path.isfile(_file):
raise RuntimeError("The file provided does not exist! {}".format(_file))
return True
if __name__ == '__main__':
args = parse_args()
server_args = []
server_kwargs = dict()
try:
scale = set_scale()
except ValueError:
print "ERROR: Unable to connect to the scale!!"
scale = None
if not args.cert and not args.key:
pass
elif validate_file(args.cert) and validate_file(args.key):
server_kwargs.update({'keyfile': args.key,
'certfile': args.cert})
server_args.append(('localhost', 8000))
server_args.append(
Resource([
('/', static_wsgi_app),
('/data', WeightApp)
])
)
WebSocketServer(*server_args, **server_kwargs).serve_forever()
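# Payload sketch: every message pushed by WeightApp.send_weight() is JSON of the
# form {"lbs": <number or message string>, "ozs": <number or message string>}.
# Minimal Python consumer (uses the third-party 'websocket-client' package;
# host/port are illustrative):
#   from websocket import create_connection
#   ws = create_connection('ws://localhost:8000/data')
#   print json.loads(ws.recv())['lbs']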
|
vodkina/GlobaLeaks
|
backend/globaleaks/tests/jobs/test_notification_sched.py
|
Python
|
agpl-3.0
| 1,811 | 0.000552 |
from twisted.internet.defer import inlineCallbacks, fail, succeed
from globaleaks import models
from globaleaks.orm import transact
from globaleaks.tests import helpers
from globaleaks.jobs.delivery_sched import DeliverySchedule
from globaleaks.jobs.notification_sched import NotificationSchedule, MailGenerator
class TestNotificationSchedule(helpers.TestGLWithPopulatedDB):
@inlineCallbacks
def setUp(self):
yield helpers.TestGLWithPopulatedDB.setUp(self)
yield self.perform_full_submission_actions()
@transact
def get_scheduled_email_count(self, store):
return store.find(models.Mail).count()
@inlineCallbacks
    def test_notification_schedule_success(self):
count = yield self.get_scheduled_email_count()
self.assertEqual(count, 0)
yield DeliverySchedule().run()
notification_schedule = NotificationSchedule()
        notification_schedule.skip_sleep = True
yield notification_schedule.run()
count = yield self.get_scheduled_email_count()
self.assertEqual(count, 0)
@inlineCallbacks
def test_notification_schedule_failure(self):
count = yield self.get_scheduled_email_count()
self.assertEqual(count, 0)
yield DeliverySchedule().run()
notification_schedule = NotificationSchedule()
notification_schedule.skip_sleep = True
def sendmail(x, y, z):
return fail(True)
notification_schedule.sendmail = sendmail
for i in range(0, 10):
yield notification_schedule.run()
count = yield self.get_scheduled_email_count()
self.assertEqual(count, 40)
yield notification_schedule.run()
count = yield self.get_scheduled_email_count()
self.assertEqual(count, 0)
|
ChinaMassClouds/copenstack-server
|
openstack/src/ceilometer-2014.2.2/ceilometer/tests/ipmi/notifications/ipmi_test_data.py
|
Python
|
gpl-2.0
| 30,224 | 0 |
#
# Copyright 2014 Red Hat, Inc
#
# Author: Chris Dent <chdent@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Sample data for test_ipmi.
This data is provided as a sample of the data expected from the ipmitool
driver in the Ironic project, which is the publisher of the notifications
being tested.
"""
TEMPERATURE_DATA = {
'DIMM GH VR Temp (0x3b)': {
'Status': 'ok',
'Deassertions Enabled': 'unc+ ucr+ unr+',
'Sensor Reading': '26 (+/- 0.500) degrees C',
'Entity ID': '20.6 (Power Module)',
'Assertions Enabled': 'unc+ ucr+ unr+',
'Positive Hysteresis': '4.000',
'Assertion Events': '',
'Upper non-critical': '95.000',
'Event Message Control': 'Per-threshold',
'Upper non-recoverable': '105.000',
'Normal Maximum': '112.000',
'Maximum sensor range': 'Unspecified',
'Sensor Type (Analog)': 'Temperature',
'Readable Thresholds': 'unc ucr unr',
'Negative Hysteresis': 'Unspecified',
'Threshold Read Mask': 'unc ucr unr',
'Upper critical': '100.000',
'Sensor ID': 'DIMM GH VR Temp (0x3b)',
'Settable Thresholds': '',
'Minimum sensor range': 'Unspecified',
'Nominal Reading': '16.000'
},
'CPU1 VR Temp (0x36)': {
'Status': 'ok',
'Deassertions Enabled': 'unc+ ucr+ unr+',
'Sensor Reading': '32 (+/- 0.500) degrees C',
'Entity ID': '20.1 (Power Module)',
'Assertions Enabled': 'unc+ ucr+ unr+',
'Positive Hysteresis': '4.000',
'Assertion Events': '',
'Upper non-critical': '95.000',
'Event Message Control': 'Per-threshold',
'Upper non-recoverable': '105.000',
'Normal Maximum': '112.000',
'Maximum sensor range': 'Unspecified',
'Sensor Type (Analog)': 'Temperature',
'Readable Thresholds': 'unc ucr unr',
'Negative Hysteresis': 'Unspecified',
'Threshold Read Mask': 'unc ucr unr',
'Upper critical': '100.000',
'Sensor ID': 'CPU1 VR Temp (0x36)',
'Settable Thresholds': '',
'Minimum sensor range': 'Unspecified',
'Nominal Reading': '16.000'
},
'DIMM EF VR Temp (0x3a)': {
'Status': 'ok',
'Deassertions Enabled': 'unc+ ucr+ unr+',
'Sensor Reading': '26 (+/- 0.500) degrees C',
'Entity ID': '20.5 (Power Module)',
'Assertions Enabled': 'unc+ ucr+ unr+',
'Positive Hysteresis': '4.000',
'Assertion Events': '',
'Upper non-critical': '95.000',
'Event Message Control': 'Per-threshold',
'Upper non-recoverable': '105.000',
'Normal Maximum': '112.000',
'Maximum sensor range': 'Unspecified',
'Sensor Type (Analog)': 'Temperature',
'Readable Thresholds': 'unc ucr unr',
'Negative Hysteresis': 'Unspecified',
'Threshold Read Mask': 'unc ucr unr',
'Upper critical': '100.000',
'Sensor ID': 'DIMM EF VR Temp (0x3a)',
'Settable Thresholds': '',
'Minimum sensor range': 'Unspecified',
'Nominal Reading': '16.000'
},
'CPU2 VR Temp (0x37)': {
'Status': 'ok',
'Deassertions Enabled': 'unc+ ucr+ unr+',
'Sensor Reading': '31 (+/- 0.500) degrees C',
'Entity ID': '20.2 (Power Module)',
'Assertions Enabled': 'unc+ ucr+ unr+',
'Positive Hysteresis': '4.000',
'Assertion Events': '',
'Upper non-critical': '95.000',
'Event Message Control': 'Per-threshold',
'Upper non-recoverable': '105.000',
'Normal Maximum': '112.000',
'Maximum sensor range': 'Unspecified',
'Sensor Type (Analog)': 'Temperature',
'Readable Thresholds': 'unc ucr unr',
'Negative Hysteresis': 'Unspecified',
'Threshold Read Mask': 'unc ucr unr',
'Upper critical': '100.000',
'Sensor ID': 'CPU2 VR Temp (0x37)',
'Settable Thresholds': '',
'Minimum sensor range': 'Unspecified',
'Nominal Reading': '16.000'
},
'Ambient Temp (0x32)': {
'Status': 'ok',
'Sensor Reading': '25 (+/- 0) degrees C',
'Entity ID': '12.1 (Front Panel Board)',
'Assertions Enabled': 'unc+ ucr+ unr+',
'Event Message Control': 'Per-threshold',
'Assertion Events': '',
'Upper non-critical': '43.000',
'Deassertions Enabled': 'unc+ ucr+ unr+',
'Upper non-recoverable': '50.000',
'Positive Hysteresis': '4.000',
'Maximum sensor range': 'Unspecified',
'Sensor Type (Analog)': 'Temperature',
'Readable Thresholds': 'unc ucr unr',
'Negative Hysteresis': 'Unspecified',
'Threshold Read Mask': 'unc ucr unr',
'Upper critical': '46.000',
'Sensor ID': 'Ambient Temp (0x32)',
'Settable Thresholds': '',
'Minimum sensor range': 'Unspecified',
'Nominal Reading': '25.000'
},
'Mezz Card Temp (0x35)': {
'Status': 'Disabled',
'Sensor Reading': 'Disabled',
'Entity ID': '44.1 (I/O Module)',
'Event Message Control': 'Per-threshold',
'Upper non-critical': '70.000',
'Upper non-recoverable': '85.000',
'Positive Hysteresis': '4.000',
'Maximum sensor range': 'Unspecified',
'Sensor Type (Analog)': 'Temperature',
'Readable Thresholds': 'unc ucr unr',
'Negative Hysteresis': 'Unspecified',
'Threshold Read Mask': 'unc ucr unr',
'Upper critical': '80.000',
'Sensor ID': 'Mezz Card Temp (0x35)',
'Settable Thresholds': '',
'Minimum sensor range': 'Unspecified',
'Nominal Reading': '25.000'
},
'PCH Temp (0x3c)': {
'Status': 'ok',
'Deassertions Enabled': 'unc+ ucr+ unr+',
'Sensor Reading': '46 (+/- 0.500) degrees C',
'Entity ID': '45.1 (Processor/IO Module)',
'Assertions Enabled': 'unc+ ucr+ unr+',
'Positive Hysteresis': '4.000',
'Assertion Events': '',
'Upper non-critical': '93.000',
'Event Message Control': 'Per-threshold',
'Upper non-recoverable': '103.000',
'Normal Maximum': '112.000',
'Maximum sensor range': 'Unspecified',
'Sensor Type (Analog)': 'Temperature',
'Readable Thresholds': 'unc ucr unr',
'Negative Hysteresis': 'Unspecified',
'Threshold Read Mask': 'unc ucr unr',
'Upper critical': '98.000',
'Sensor ID': 'PCH Temp (0x3c)',
'Settable Thresholds': '',
'Minimum sensor range': 'Unspecified',
'Nominal Reading': '16.000'
},
'DIMM CD VR Temp (0x39)': {
'Status': 'ok',
'Deassertions Enabled': 'unc+ ucr+ unr+',
        'Sensor Reading': '27 (+/- 0.500) degrees C',
'Entity ID': '20.4 (Power Module)',
'Assertions Enabled': 'unc+ ucr+ unr+',
'Positive Hysteresis': '4.000',
'Assertion Events': '',
'Upper non-critical': '95.000',
'Event Message Control': 'Per-threshold',
'Upper non-recoverable': '105.000',
'Normal Maximum': '112.000',
'Maximum sensor range': 'Unspecified',
'Sensor Type (Analog)': 'Temperature',
'Readable Thresholds': 'unc ucr unr',
'Negative Hysteresis': 'Unspecified',
'Threshold Read Mask': 'unc ucr unr',
'Upper critical': '100.000',
'Sensor ID': 'DIMM CD VR Temp (0x39)',
'Settable Thresholds': '',
'Minimum sensor range': 'Unspecified',
'Nominal Reading': '16.000'
},
'PCI Riser 2 Temp (0x34)': {
'Status': 'ok',
'Deassertions Enabled': 'unc+ ucr
|
danieljb/django-hybrid-filefield
|
hybrid_filefield/forms.py
|
Python
|
gpl-3.0
| 3,393 | 0.001768 |
# -*- coding: utf-8 -*-
import os
import re
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.forms.util import ErrorList
from django.forms.fields import MultiValueField, FilePathField, \
FileField, CharField
from django.utils.translation import ugettext as _
from hybrid_filefield.widgets import FileSelectOrUploadWidget
class FileSelectOrUploadField(MultiValueField):
widget = FileSelectOrUploadWidget
default_error_messages = {
'optional_required': _('At least one value is required.'),
}
def __init__(self, upload_to='', path='', match='',
recursive=False, widget=None, initial=None,
optional=False, *args, **kwargs):
self.upload_to, self.path, self.match, \
self.recursive, self.initial, self.optional = \
upload_to, path, match, recursive, initial, optional
self.max_length = kwargs.pop('max_length', None)
        self.required = kwargs.get('required', True)  # kwargs is a dict; getattr() would always fall back to the default
fields = (
FilePathField(
path=self.path,
match=self.match,
recursive=self.recursive,
initial=self.initial,
required=self.required,
),
FileField(
max_length=self.max_length,
initial=self.initial,
required=self.required,
),
)
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
self.widget = widget
super(FileSelectOrUploadField, self).__init__(
fields,
widget=self.widget,
*args, **kwargs
)
self.choices = [('', 'Use upload')] + fields[0].choices
self.widget.is_required = self.required
def _get_choices(self):
return self._choices
def _set_choices(self, value):
self._choices = self.widget.choices = list(value)
choices = property(_get_choices, _set_choices)
def clean(self, value):
clean_data = []
errors = ErrorList()
if value in validators.EMPTY_VALUES and self.required:
raise ValidationError(self.error_messages['required'])
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in validators.EMPTY_VALUES:
if (self.required and not self.optional):
raise ValidationError(self.error_messages['required'])
else:
field_value = field_value
try:
clean_data.append(field.clean(field_value))
except ValidationError, e:
errors.extend(e.messages)
            # After the last field, nothing valid was collected at all
            if i == len(self.fields) - 1 and len(clean_data) == 0:
raise ValidationError(self.error_messages['optional_required'])
if errors:
raise ValidationError(errors)
return self.compress(clean_data)
def compress(self, data_list):
if len(data_list) > 1 and data_list[1] not in validators.EMPTY_VALUES:
return data_list[1]
elif len(data_list) > 0 and data_list[0] not in validators.EMPTY_VALUES:
return data_list[0]
return None
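# Usage sketch (hypothetical directory and pattern; the path must exist when the
# form class is defined, because FilePathField enumerates it immediately).
# compress() above means a freshly uploaded file wins over a selected existing path.
#   class AttachmentForm(django.forms.Form):
#       source = FileSelectOrUploadField(path='/srv/media/docs', match=r'.*\.pdf$',
#                                        recursive=True, optional=True)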
|
has2k1/plotnine
|
plotnine/positions/position_nudge.py
|
Python
|
gpl-2.0
| 698 | 0 |
from .position import position
class position_nudge(position):
"""
Nudge points
Useful to nudge labels away from the points
being labels.
Parameters
----------
x : float
Horizontal nudge
y : float
Vertical nudge
"""
def __init__(self, x=0, y=0):
self.params = {'x': x, 'y': y}
    @classmethod
def compute_layer(cls, data, params, layout):
trans_x, trans_y = None, None
if params['x']:
def trans_x(x):
return x + params['x']
if params['y']:
def trans_y(y):
return y + params['y']
return cls.transform_position(data, trans_x, trans_y)
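# Usage sketch (assumes a dataframe ``df`` with x/y/label columns and the usual
# plotnine imports); nudges text labels a fixed amount above their points:
#   (ggplot(df, aes('x', 'y', label='label'))
#    + geom_point()
#    + geom_text(position=position_nudge(y=0.15)))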
|
PearsonIOKI/compose-forum
|
askbot/migrations/0146_auto__add_field_threadtogroup_visibility.py
|
Python
|
gpl-3.0
| 33,877 | 0.00797 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ThreadToGroup.visibility'
db.add_column('askbot_thread_groups', 'visibility',
self.gf('django.db.models.fields.SmallIntegerField')(default=1),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ThreadToGroup.visibility'
db.delete_column('askbot_thread_groups', 'visibility')
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Post']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
        'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.askwidget': {
'Meta': {'object_name': 'AskWidget'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_text_field': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'inner_style': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'outer_style': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'askbot.draftanswer': {
'Meta': {'object_name': 'DraftAnswer'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draft_answers'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draft_answers'", 'to': "orm['askbot.Thread']"})
},
'askbot.draftquestion': {
'Meta': {'object_name': 'DraftQuestion'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'})
},
'askbot.emailfeedsetting': {
'Meta': {'unique_together': "(('subscriber', 'feed_type'),)", 'object_nam
|
blr246/traffic
|
phone_display_demo.py
|
Python
|
mit
| 16,853 | 0.002492 |
"""
Copyright (C) 2011-2012 Brandon L. Reiss
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Displays either day or nighttime traffic image processing in a mock-up UI
based on the HTC Desire smartphone.
"""
import numpy as np
import scipy
import scipy.ndimage as ndimg
from collections import deque
from copy import *
import PIL
import ImageOps
import pylab
import cv2
import os
import fnmatch
import sys
import pymorph
import night
import day
import argparse
class PhoneDemo(object):
""" Object to run the phone UI demo. """
TYPE_DAY = "DAY"
TYPE_NIGHT = "NIGHT"
HISTORY_FRAMES = 600
class DayProcessor(object):
""" Object used to process day sequences. """
GMM_K = 3
GMM_NUM_FRAMES = 25
GMM_W_INIT = 0.1
GMM_VAR_INIT = 20
GMM_MAHA_THRESH = 3
MASK_OVERLAY_ALPHA = 0.4
def __init__(self, rgb):
assert(rgb.dtype == 'uint8')
self._gmm = day.GaussianMixtureModelUV(self.GMM_K, rgb.shape,
self.GMM_NUM_FRAMES,
self.GMM_W_INIT,
self.GMM_VAR_INIT,
self.GMM_MAHA_THRESH)
self._ycbcr = np.zeros(rgb.shape, dtype='uint8')
self._mask = np.zeros(rgb.shape[:2], dtype='uint8')
self._red_mask = np.zeros(rgb.shape, dtype='uint8')
self._rgb_red_masked = np.zeros(rgb.shape, dtype='uint8')
self._process_count = 0
def next(self, rgb):
""" Process the next file and return the results. """
# Do GMM steps.
self._gmm.rgb2ycbcr(rgb, self._ycbcr)
self._gmm.segment_cl(self._mask)
self._gmm.update_cl(self._ycbcr)
# Save total pixels in foreground.
fg_pixel_count = np.sum(self._mask)
# Pull alpha and render red overlay
# (channels are reversed RGB = BGR).
self._red_mask[:,:,2] = self._mask * 255
self._rgb_red_masked[:,:] = \
(self.MASK_OVERLAY_ALPHA * self._red_mask) + \
((1. - self.MASK_OVERLAY_ALPHA) * rgb)
# Ignore the first GMM_NUM_FRAMES / 2 frames.
self._process_count = self._process_count + 1
if self._process_count > self.GMM_NUM_FRAMES / 2:
return fg_pixel_count, self._rgb_red_masked
else:
return 0, self._rgb_red_masked
class NightProcessor(object):
""" Object used to process day sequences. """
def __init__(self, rgb):
pass
def next(self, rgb):
""" Process the next file and return the results. """
def blackout_date_regions(image, blackout_rects):
""" Black out specified regions. """
for rect in blackout_rects:
image[rect[1]:rect[3], rect[0]:rect[2]] = 0
# Do bright object detection.
blackout_date_regions(rgb, night.BLACKOUT_RECTS)
steps = night.bright_object_detection(rgb)
# Return results (channels are reversed RGB = BGR).
label_img = pymorph.overlay(steps['luminance'].astype('uint8'),
blue=steps['detect_dilate'])
return steps['bright_blob_count'], label_img
def __init__(self):
# Initialize plotting parameters.
self._history_raw = deque()
self._history_filtered = deque()
self._max_sample = 0.001
self._ui = PhoneDisplay()
self._filter_exp = 0.1
self._sample_exp_filter = 0.
def run_sequence(self, type, filepath, seq_range=None, filter_exp=None):
""" Run a TYPE_DAY or TYPE_NIGHT sequence. """
QUIT_KEY_CODES = [ 27, 113, 81 ]
PAUSE_KEY_CODES = [ 32, 112, 80 ]
def pause():
""" Poll input until the pause key is pressed. """
while True:
key = cv2.waitKey(100)
if PAUSE_KEY_CODES.count(key) > 0:
break
def bound_queue_push(val, q, maxlen=None):
""" Push to bounded queue. """
q.append(val)
if maxlen is not None and len(q) > maxlen:
q.popleft()
assert(type == self.TYPE_DAY or type == self.TYPE_NIGHT)
# TODO(reissb) -- The history frames and filtering need to become
# parameterized in some way. The history frames is fixed by the
# camera framerate. The filtering is fixed by the required
# detection sensitivity.
if filter_exp is not None:
self._filter_exp = filter_exp
else:
self._filter_exp = 0.1
# Clear state.
self._ui.clear()
self._history_raw = deque()
self._history_filtered = deque()
self._max_sample = 0.001
self._sample_exp_filter = 0.
# Extract command-line parameters. This is the name of one file in the
# series.
path, filename = os.path.split(filepath)
file_name, file_ext = os.path.splitext(os.path.basename(filename))
series_name_end = file_name.rindex('_')
series_name = file_name[:series_name_end]
print "Processing image series {0} in path {1}.".format(series_name,
path)
files_in_path = os.listdir(path)
series_pattern = series_name + '_[0-9]*' + file_ext
print "Processing files matching pattern {0}.".format(series_pattern)
series_suffixes = [int(os.path.splitext(fn)[0].split('_')[-1]) \
for fn in files_in_path \
if fnmatch.fnmatch(fn, series_pattern)]
series_suffixes.sort()
num_files = len(series_suffixes)
print "Found {0} files in image series {1}.".format(num_files,
series_name)
# Check for limited range.
if seq_range is not None:
assert(seq_range[1] > seq_range[0] and seq_range[0] >= 0)
print "Filtering series range [{},{}).".format(seq_range[0],
seq_range[1])
series_suffixes = np.array(series_suffixes)
f = (series_suffixes >= seq_range[0]) * \
(series_suffixes < seq_range[1])
series_suffixes = np.sort(series_suffixes * f)
remove_count = len(series_suffixes) - np.sum(f)
series_suffixes = np.delete(series_suffixes, range(remove_count))
# Load first file and process.
series_filename = series_name + '_' + str(series_suffixes[0]) + \
file_ext
rgb = ndimg.imread(os.path.join(path, series_filename))
# Initilaize the processor.
type_processor = self.DayProcessor(rgb) if type
|
infobloxopen/infoblox-netmri
|
infoblox_netmri/api/remote/models/network_network_explorer_summaries_summary_grid_remote.py
|
Python
|
apache-2.0
| 1,486 | 0 |
from ..remote import RemoteModel
class NetworkNetworkExplorerSummariesSummaryGridRemote(RemoteModel):
"""
| ``id:`` none
| ``attribute type:`` string
| ``DeviceID:`` none
| ``attribute type:`` string
| ``DeviceIPNumeric:`` none
| ``attribute type:`` string
| ``DeviceIPDotted:`` none
| ``attribute type:`` string
| ``VirtualNetworkID:`` none
| ``attribute type:`` string
| ``Network:`` none
| ``attribute type:`` string
    | ``DeviceName:`` none
| ``attribute type:`` string
| ``DeviceType:`` none
| ``attribute type:`` string
| ``ifIndex:`` none
| ``attribute type:`` string
| ``ifName:`` none
| ``attribute type:`` string
| ``VirtualNetworkMemberName:`` none
| ``attribute type:`` string
| ``ifType:`` none
| ``attribute type:`` string
| ``ifIPDotted:`` none
| ``attribute type:`` string
| ``ifIPNumeric:`` none
| ``attribute type:`` string
"""
properties = ("id",
"DeviceID",
"DeviceIPNumeric",
"DeviceIPDotted",
"VirtualNetworkID",
"Network",
"DeviceName",
"DeviceType",
"ifIndex",
"ifName",
"VirtualNetworkMemberName",
"ifType",
"ifIPDotted",
"ifIPNumeric",
)
|
agile-geoscience/bruges
|
setup.py
|
Python
|
apache-2.0
| 117 | 0.008547 |
from setuptools import setup
# Really only required so setup.cfg can pick up __version__
setup(
name="bru
|
ges",
)
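# Sketch of the setup.cfg side this comment refers to (declarative setuptools
# metadata; the attribute path is an assumption about where __version__ lives):
#   [metadata]
#   name = bruges
#   version = attr: bruges.__version__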
|
MaplePlan/djwp
|
oauth2app/token.py
|
Python
|
lgpl-3.0
| 16,845 | 0.00089 |
#-*- coding: utf-8 -*-
"""OAuth 2.0 Token Generation"""
from base64 import b64encode
from django.http import HttpResponse
from django.contrib.auth import authenticate
from django.views.decorators.csrf import csrf_exempt
try: import simplejson as json
except ImportError: import json
from .exceptions import OAuth2Exception
from .consts import ACCESS_TOKEN_EXPIRATION, REFRESH_TOKEN_LENGTH, ACCESS_TOKEN_LENGTH
from .consts import AUTHENTICATION_METHOD, MAC, BEARER, MAC_KEY_LENGTH
from .consts import REFRESHABLE
from .lib.uri import normalize
from .models import Client, AccessRange, Code, AccessToken, TimestampGenerator
from .models import KeyGenerator
class AccessTokenException(OAuth2Exception):
"""Access Token exception base class."""
pass
class UnvalidatedRequest(OAuth2Exception):
"""The method requested requires a validated request to continue."""
pass
class InvalidRequest(AccessTokenException):
"""The request is missing a required parameter, includes an
    unsupported parameter or parameter value, repeats a
parameter, includes multiple credentials, utilizes more
than one mechanism for authenticating the client, or is
otherwise malformed."""
error = 'invalid_request'
class InvalidClient(AccessTokenException):
"""Client authentication failed (e.g. unknown client, no
client credentials included, multiple client credentials
included, or unsupported credentials type)."""
error = 'invalid_client'
class UnauthorizedClient(AccessTokenException):
"""The client is not authorized to request an authorization
code using this method."""
error = 'unauthorized_client'
class InvalidGrant(AccessTokenException):
"""The provided authorization grant is invalid, expired,
revoked, does not match the redirection URI used in the
authorization request, or was issued to another client."""
error = 'invalid_grant'
class UnsupportedGrantType(AccessTokenException):
"""The authorization grant type is not supported by the
authorization server."""
error = 'unsupported_grant_type'
class InvalidScope(AccessTokenException):
"""The requested scope is invalid, unknown, malformed, or
exceeds the scope granted by the resource owner."""
error = 'invalid_scope'
@csrf_exempt
def handler(request):
"""Token access handler. Conveneince function that wraps the Handler()
callable.
**Args:**
* *request:* Django HttpRequest object.
"""
return TokenGenerator()(request)
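# Wiring sketch (hypothetical project urls.py, using the URL conf style of the
# Django versions this module targets): expose the token endpoint via the
# convenience handler above.
#   from django.conf.urls import patterns, url
#   urlpatterns = patterns('',
#       url(r'^oauth2/token/$', 'oauth2app.token.handler'),
#   )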
class TokenGenerator(object):
"""Token access handler. Validates authorization codes, refresh tokens,
username/password pairs, and generates a JSON formatted authorization code.
**Args:**
* *request:* Django HttpRequest object.
**Kwargs:**
* *scope:* An iterable of oauth2app.models.AccessRange objects representing
the scope the token generator will grant. *Default None*
* *authentication_method:* Type of token to generate. Possible
values are: oauth2app.consts.MAC and oauth2app.consts.BEARER
*Default oauth2app.consts.BEARER*
* *refreshable:* Boolean value indicating whether issued tokens are
refreshable. *Default True*
"""
valid = False
code = None
client = None
access_token = None
user = None
error = None
request = None
def __init__(
self,
scope=None,
authentication_method=AUTHENTICATION_METHOD,
refreshable=REFRESHABLE):
self.refreshable = refreshable
if authentication_method not in [BEARER, MAC]:
raise OAuth2Exception("Possible values for authentication_method"
" are oauth2app.consts.MAC and oauth2app.consts.BEARER")
self.authentication_method = authentication_method
if scope is None:
self.authorized_scope = None
elif isinstance(scope, AccessRange):
self.authorized_scope = set([scope.key])
else:
self.authorized_scope = set([x.key for x in scope])
@csrf_exempt
def __call__(self, request):
"""Django view that handles the token endpoint. Returns a JSON formatted
authorization code.
**Args:**
* *request:* Django HttpRequest object.
"""
self.grant_type = request.REQUEST.get('grant_type')
self.client_id = request.REQUEST.get('client_id')
self.client_secret = request.POST.get('client_secret')
self.scope = request.REQUEST.get('scope')
if self.scope is not None:
self.scope = set(self.scope.split())
# authorization_code, see 4.1.3. Access Token Request
self.code_key = request.REQUEST.get('code')
self.redirect_uri = request.REQUEST.get('redirect_uri')
# refresh_token, see 6. Refreshing an Access Token
self.refresh_token = request.REQUEST.get('refresh_token')
# password, see 4.3.2. Access Token Request
self.email = request.REQUEST.get('email')
self.username = request.REQUEST.get('username')
self.password = request.REQUEST.get('password')
# Optional json callback
self.callback = request.REQUEST.get('callback')
self.request = request
try:
self.validate()
except AccessTokenException:
return self.error_response()
return self.grant_response()
def validate(self):
"""Validate the request. Raises an AccessTokenException if the
request fails authorization.
*Returns None*"""
try:
self._validate()
except AccessTokenException as e:
self.error = e
raise e
self.valid = True
def _validate(self):
"""Validate the request."""
# Check response type
if self.grant_type is None:
raise InvalidRequest('No grant_type provided.')
if self.grant_type not in [
"authorization_code",
"refresh_token",
"password",
"client_credentials"]:
raise UnsupportedGrantType('No grant type: %s' % self.grant_type)
if self.client_id is None:
raise InvalidRequest('No client_id')
try:
self.client = Client.objects.get(key=self.client_id)
except Client.DoesNotExist:
raise InvalidClient("client_id %s doesn't exist" % self.client_id)
# Scope
if self.scope is not None:
access_ranges = AccessRange.objects.filter(key__in=self.scope)
access_ranges = set(access_ranges.values_list('key', flat=True))
difference = access_ranges.symmetric_difference(self.scope)
if len(difference) != 0:
raise InvalidScope("Following access ranges doesn't exist: "
"%s" % ', '.join(difference))
if self.grant_type == "authorization_code":
self._validate_authorization_code()
elif self.grant_type == "refresh_token":
self._validate_refresh_token()
elif self.grant_type == "password":
self._validate_password()
elif self.grant_type == "client_credentials":
self._validate_client_credentials()
else:
raise UnsupportedGrantType('Unable to validate grant type.')
def _validate_access_credentials(self):
"""Validate the request's access credentials."""
if self.client_secret is None and "HTTP_AUTHORIZATION" in self.request.META:
authorization = self.request.META["HTTP_AUTHORIZATION"]
auth_type, auth_value = authorization.split()[0:2]
if auth_type.lower() == "basic":
credentials = "%s:%s" % (self.client.key, self.client.secret)
if auth_value != b64encode(credentials):
raise InvalidClient('Client authentication failed.')
else:
raise InvalidClient('Client authentication failed.')
elif self.client_secret != self.client.secret:
raise InvalidClient('Client authentication failed.')
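# Illustrative sketch (not in the original source): how a client would build
# the HTTP Basic header that the branch above accepts when no client_secret
# form field is sent. The key/secret values are placeholders.
#
#   from base64 import b64encode
#   credentials = "%s:%s" % (client_key, client_secret)
#   headers = {"Authorization": "Basic %s" % b64encode(credentials)}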
def _validate_client_credentials(self):
"""Validate a client
ghchinoy/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/softplus.py | Python | apache-2.0 | 5,563 | 0.003775
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Softplus bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"Softplus
|
",
]
class Softplus(bijector.Bijector):
"""Bijector which computes `Y = g(X) = Log[1 + exp(X)]`.
The softplus `Bijector` has the following two useful properties:
* The domain is the positive real numbers
* `softplus(x) approx x`, for large `x`, so it does not overflow as easily as
the `Exp` `Bijector`.
The optional nonzero `hinge_softness` parameter changes the transition at
zero. With `hinge_softness = c`, the bijector is:
```f_c(x) := c * g(x / c) = c * Log[1 + exp(x / c)].```
For large `x >> 1`, `c * Log[1 + exp(x / c)] approx c * Log[exp(x / c)] = x`,
so the behavior for large `x` is the same as the standard softplus.
As `c > 0` approaches 0 from the right, `f_c(x)` becomes less and less soft,
approaching `max(0, x)`.
* `c = 1` is the default.
* `c > 0` but small means `f(x) approx ReLu(x) = max(0, x)`.
* `c < 0` flips sign and reflects around the `y-axis`: `f_{-c}(x) = -f_c(-x)`.
* `c = 0` results in a non-bijective transformation and triggers an exception.
Example Use:
```python
# Create the Y=g(X)=softplus(X) transform which works only on Tensors with 1
# batch ndim and 2 event ndims (i.e., vector of matrices).
softplus = Softplus()
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
log(1 + exp(x)) == softplus.forward(x)
log(exp(x) - 1) == softplus.inverse(x)
```
Note: log(.) and exp(.) are applied element-wise but the Jacobian is a
reduction over the event space.
"""
@distribution_util.AppendDocstring(
kwargs_dict={
"hinge_softness": (
"Nonzero floating point `Tensor`. Controls the softness of what "
"would otherwise be a kink at the origin. Default is 1.0")})
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
hinge_softness=None,
validate_args=False,
name="softplus"):
with ops.name_scope(name, values=[hinge_softness]):
if hinge_softness is not None:
self._hinge_softness = ops.convert_to_tensor(
hinge_softness, name="hinge_softness")
else:
self._hinge_softness = None
if validate_args:
nonzero_check = check_ops.assert_none_equal(
ops.convert_to_tensor(
0, dtype=self.hinge_softness.dtype),
self.hinge_softness,
message="hinge_softness must be non-zero")
self._hinge_softness = control_flow_ops.with_dependencies(
[nonzero_check], self.hinge_softness)
super(Softplus, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
if self.hinge_softness is None:
return nn_ops.softplus(x)
hinge_softness = math_ops.cast(self.hinge_softness, x.dtype)
return hinge_softness * nn_ops.softplus(x / hinge_softness)
def _inverse(self, y):
if self.hinge_softness is None:
return distribution_util.softplus_inverse(y)
hinge_softness = math_ops.cast(self.hinge_softness, y.dtype)
return hinge_softness * distribution_util.softplus_inverse(
y / hinge_softness)
def _inverse_log_det_jacobian(self, y):
# Could also do:
# ildj = math_ops.reduce_sum(y - distribution_util.softplus_inverse(y),
# axis=event_dims)
# but the following is more numerically stable. Ie,
# Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
# ==> dX/dY = exp{Y} / (exp{Y} - 1)
# = 1 / (1 - exp{-Y}),
# which is the most stable for large Y > 0. For small Y, we use
# 1 - exp{-Y} approx Y.
if self.hinge_softness is not None:
y /= math_ops.cast(self.hinge_softness, y.dtype)
return -math_ops.log(-math_ops.expm1(-y))
def _forward_log_det_jacobian(self, x):
if self.hinge_softness is not None:
x /= math_ops.cast(self.hinge_softness, x.dtype)
return -nn_ops.softplus(-x)
@property
def hinge_softness(self):
return self._hinge_softness
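# Illustrative sketch (not part of the original file): a quick numeric check of
# the hinge_softness formula f_c(x) = c * Log[1 + exp(x / c)] from the class
# docstring, done with plain NumPy rather than the bijector itself.
#
#   import numpy as np
#   c, x = 0.1, np.array([-2.0, 0.0, 2.0])
#   f = c * np.log1p(np.exp(x / c))
#   # With small c the transform approaches max(0, x): f ~ [0.0, 0.069, 2.0]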
awsdocs/aws-doc-sdk-examples | lambda_functions/secretsmanager/RDSPostgreSQL-Multiuser.py | Python | apache-2.0 | 16,740 | 0.004421
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[Lambda rotation for AWS Secrets Manager - RDS PostgreSQL with separate Master secret]
# snippet-service:[secretsmanager]
# snippet-keyword:[rotation function]
# snippet-keyword:[python]
# snippet-keyword:[RDS PostgreSQL]
# snippet-keyword:[AWS Lambda]
# snippet-keyword:[AWS Secrets Manager]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourceauthor:[AWS]
# snippet-sourcedate:[2018-08-22]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import boto3
import json
import logging
import os
import pg
import pgdb
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
"""Secrets Manager RDS PostgreSQL Handler
This handler uses the master-user rotation scheme to rotate an RDS PostgreSQL user credential. During the first rotation, this
scheme logs into the database as the master user, creates a new user (appending _clone to the username), and grants the
new user all of the permissions from the user being rotated. Once the secret is in this state, every subsequent rotation
simply creates a new secret with the AWSPREVIOUS user credentials, adds any missing permissions that are in the current
secret, changes that user's password, and then marks the latest secret as AWSCURRENT.
The Secret SecretString is expected to be a JSON string with the following format:
{
'engine': <required: must be set to 'postgres'>,
'host': <required: instance host name>,
'username': <required: username>,
'password': <required: password>,
'dbname': <optional: database name, default to 'postgres'>,
'port': <optional: if not specified, default port 5432 will be used>,
'masterarn': <required: the arn of the master secret which will be used to create users/change passwords>
}
Args:
event (dict): Lambda dictionary of event parameters. These keys must include the following:
- SecretId: The secret ARN or identifier
- ClientRequestToken: The ClientRequestToken of the secret version
- Step: The rotation step (one of createSecret, setSecret, testSecret, or finishSecret)
context (LambdaContext): The Lambda runtime information
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not properly configured for rotation
KeyError: If the secret json does not contain the expected keys
"""
arn = event['SecretId']
token = event['ClientRequestToken']
step = event['Step']
# Setup the client
service_client = boto3.client('secretsmanager', endpoint_url=os.environ['SECRETS_MANAGER_ENDPOINT'])
# Make sure the version is staged correctly
metadata = service_client.describe_secret(SecretId=arn)
if "RotationEnabled" in metadata and not metadata['RotationEnabled']:
logger.error("Secret %s is not enabled for rotation" % arn)
raise ValueError("Secret %s is not enabled for rotation" % arn)
versions = metadata['VersionIdsToStages']
if token not in versions:
logger.error("Secret version %s has no stage for rotation of secret %s." % (token, arn))
raise ValueError("Secret version %s has no stage for rotation of secret %s." % (token, arn))
if "AWSCURRENT" in versions[token]:
logger.info("Secret version %s already set as AWSCURRENT for secret %s." % (token, arn))
return
elif "AWSPENDING" not in versions[token]:
logger.error("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
raise ValueError("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
# Call the appropriate step
if step == "createSecret":
create_secret(service_client, arn, token)
elif step == "setSecret":
set_secret(service_client, arn, token)
elif step == "testSecret":
test_secret(service_client, arn, token)
elif step == "finishSecret":
finish_secret(service_client, arn, token)
else:
logger.error("lambda_handler: Invalid step parameter %s for secret %s" % (step, arn))
raise ValueError("Invalid step parameter %s for secret %s"
|
% (step, arn))
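# Illustrative sketch (values are placeholders, not from the original file):
# the shape of the rotation event Secrets Manager delivers to lambda_handler,
# using the three keys documented above.
#
#   event = {
#       "SecretId": "arn:aws:secretsmanager:us-east-1:123456789012:secret:example",
#       "ClientRequestToken": "EXAMPLE1-90ab-cdef-fedc-ba987EXAMPLE",
#       "Step": "createSecret",
#   }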
def create_secret(service_client, arn, token):
"""Generate a new secret
This method first checks for the existence of a secret for the passed in token. If one does not exist, it will generate a
new secret and put it with the passed in token.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ValueError: If the current secret is not valid JSON
KeyError: If the secret json does not contain the expected keys
"""
# Make sure the current secret exists
current_dict = get_secret_dict(service_client, arn, "AWSCURRENT")
# Now try to get the secret version, if that fails, put a new secret
try:
get_secret_dict(service_client, arn, "AWSPENDING", token)
logger.info("createSecret: Successfully retrieved secret for %s." % arn)
except service_client.exceptions.ResourceNotFoundException:
# Get the alternate username swapping between the original user and the user with _clone appended to it
current_dict['username'] = get_alt_username(current_dict['username'])
# Generate a random password
passwd = service_client.get_random_password(ExcludeCharacters='/@"\'\\')
current_dict['password'] = passwd['RandomPassword']
# Put the secret
service_client.put_secret_value(SecretId=arn, ClientRequestToken=token, SecretString=json.dumps(current_dict), VersionStages=['AWSPENDING'])
logger.info("createSecret: Successfully put secret for ARN %s and version %s." % (arn, token))
def set_secret(service_client, arn, token):
"""Set the pending secret in the database
This method tries to login to the database with the AWSPENDING secret and returns on success. If that fails, it
tries to login with the master credentials from the masterarn in the current secret. If this succeeds, it adds all
grants for AWSCURRENT user to the AWSPENDING user, creating the user and/or setting the password in the process.
Else, it throws a ValueError.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not valid JSON or master credentials could not be used to login to DB
KeyError: If the secret json does not contain the expected keys
"""
# First try to login with the pending secret, if it succeeds, return
pending_dict = get_secret_dict(service_client, arn, "AWSPENDING", token)
conn = get_connection(pending_dict)
if conn:
conn.close()
logger.info("setSecret: AWSPENDING secret is already set as password in PostgreSQL DB for secret arn %s." % arn)
return
# Before we do anything with the secret, make sure the AWSCURRENT secret is valid by logging in to the db
current_dict = get_secret_dict(service_client, arn, "AWSCURRENT")
conn = get_connection(current_dict)
if not conn:
logger.error("setSecret: Unable to log into database using current credentials for secret %s" % arn)
raise ValueError("Unable to log into database using current credentials for secret %s" % arn)
conn.close()
# Now get the mast
Mte90/LearnHotkeys | cheatsheet.py | Python | gpl-3.0 | 5,629 | 0.007817
#!/usr/bin/env python
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtXml import *
import sys, os
from ui_cheatsheet import Ui_CSDialog
class CSWindow(QDialog, Ui_CSDialog):
settings = QSettings('Mte90','LearnHotkeys')
settings.setFallbacksEnabled(False)
theme_path = "./style"
theme_folder = theme_path+'/'
hotkeys_path = "./hotkeys"
hotkeys_folder = hotkeys_path+'/'
html_cs = ""
html_style = "<html>\n<head>\n<style>\n%s\n</style>\n</head>\n<body>\n"
html_thead = "\n<table><tr style='font-weight:bold'><td>Action</td><td>HotKey</td></tr>"
html_def = ""
def __init__ ( self, parent = None ):
QDialog.__init__( self, parent, Qt.CustomizeWindowHint )
self.ui = Ui_CSDialog()
self.ui.setupUi( self )
self.ui.saveButton.clicked.connect(self.saveHTML)
self.ui.closeButton.clicked.connect(self.accept)
for root, dirs, files in os.walk(self.theme_path):
files.sort()
for name in files:
filename = os.path.join(root, name)
self.ui.themeChooser.addItem(os.path.basename(filename))
if sys.version_info < (3, 0):
if self.ui.themeChooser.findText(self.settings.value('theme').toString()) != -1:
self.ui.themeChooser.setCurrentIndex(self.ui.themeChooser.findText(self.settings.value('theme').toString()))
self.saveConfig()
else:
self.settings.setValue("theme", 'soft-grey.css')
else:
if self.ui.themeChooser.findText(self.settings.value('theme')) != -1:
self.ui.themeChooser.setCurrentIndex(self.ui.themeChooser.findText(self.settings.value('theme')))
self.saveConfig()
else:
self.settings.setValue("theme", 'soft-grey.css')
self.ui.themeChooser.currentIndexChanged.connect(self.saveConfig)
self.loadHotkeys()
self.show()
def loadHotkeys(self):
if sys.version_info < (3, 0):
if self.settings.value('file_name_default').toString() != "":
fname = self.hotkeys_folder+self.settings.value('file_name_default').toString()
else:
if self.settings.value('file_name_default') != "":
fname = self.hotkeys_folder+self.settings.value('file_name_default')
dom = QDomDocument()
error = None
fh = None
try:
fh = QFile(fname)
if not fh.open(QIODevice.ReadOnly):
print(IOError, unicode(fh.errorString()))
if not dom.setContent(fh):
print(ValueError, "could not parse XML")
except (IOError, OSError, ValueError) as e:
error = "Failed to import: {0}".format(e)
finally:
if fh is not None:
fh.close()
if error is not None:
return False, error
root = dom.documentElement()
if not root.hasAttribute('fileversion'):
QMessageBox.information(self.window(), "LearnHotkeys","The file %s is not an LearnHotkeys definition file." % self.settings.value('file_name_default').toString())
return False
self.html_def += root.attribute('software')+" - "+root.attribute('softwareversion')+" - "+root.attribute('def')+"<br>\n<a href='"+root.attribute('softwaresite')+"'>" \
+root.attribute('softwaresite')+"</a><br> CheatSheet version: "+root.attribute('fileversion')+"<br><br>"
child = root.firstChildElement('hotkey')
while not child.isNull():
self.html_cs += "\n<tr><td>%s</td><td>%s</td></tr>" % (child.firstChildElement('question').text(),child.firstChildElement('key').text())
child = child.nextSiblingElement('hotkey')
self.html_cs += "</table></body></html>"
if sys.version_info < (3, 0):
self.ui.csView.setHtml((self.html_style % self.get_file_content(self.theme_folder+self.settings.value('theme').toString()))+self.html_thead+self.html_cs)
else:
self.ui.csView.setHtml((self.html_style % self.get_file_content(self.theme_folder+self.settings.value('theme')))+self.html_thead+self.html_cs)
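# Illustrative sketch (assumed layout, not shipped with this excerpt) of a
# definition file that loadHotkeys() can parse; the attribute and element names
# mirror those read above, but the root element name is a guess.
#
#   <?xml version="1.0"?>
#   <hotkeys fileversion="1.0" software="ExampleApp" softwareversion="1.0"
#            softwaresite="http://example.org" def="Default keys">
#       <hotkey>
#           <question>Open a file</question>
#           <key>Ctrl+O</key>
#       </hotkey>
#   </hotkeys>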
def saveHTML(self):
if sys.version_info < (3, 0):
filename = QFileDialog.getSaveFileName(self, 'Save HTML CheatSheet', self.settings.value('file_name_default').toString()[:-4]+'.html')
fname = open(filename, 'w')
html = (self.html_style% self.get_file_content(self.theme_folder+self.settings.value('theme').toString()))+self.html_def+self.html_thead+self.html_cs
else:
filename = QFileDialog.getSaveFileName(self, 'Save HTML CheatSheet', self.settings.value('file_name_default')[:-4]+'.html')
fname = open(filename, 'w')
html = (self.html_style% self.get_file_content(self.theme_folder+self.settings.value('theme')))+self.html_def+self.html_thead+self.html_cs
fname.write(html.toUtf8()+"\n")
fname.close()
def get_file_content(self,file):
f = open(file, 'r')
c = f.read()
f.close()
return c
def saveConfig(self):
self.settings.setValue("theme", self.ui.themeChooser.currentText())
if sys.version_info < (3, 0):
self.ui.csView.setHtml((self.html_style % self.get_file_content(self.theme_folder+self.settings.value('theme').toString()))+self.html_thead+self.html_cs)
else:
self.ui.csView.setHtml((self.html_style % self.get_file_content(self.theme_folder+self.settings.value('theme')))+self.html_thead+self.html_cs)
codewarrior0/Shiboken | tests/samplebinding/virtualmethods_test.py | Python | gpl-2.0 | 5,040 | 0.002381
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Shiboken Python Bindings Generator project.
#
# Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
#
# Contact: PySide team <contact@pyside.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation. Please
# review the following information to ensure the GNU Lesser General
# Public License version 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
'''Test cases for virtual methods.'''
import sys
import unittest
from sample import *
import warnings
class ExtendedVirtualMethods(VirtualMethods):
def __init__(self):
VirtualMethods.__init__(self)
self.virtual_method0_called = False
def virtualMethod0(self, pt, val, cpx, b):
self.virtual_method0_called = True
return VirtualMethods.virtualMethod0(self, pt, val, cpx, b) * -1.0
def strListToStdList(self, arg):
warnings.simplefilter('error')
# returning wrong type for test purposes.
return True
def recursionOnModifiedVirtual(self, arg):
# check if recursion is caused by injected code that calls C++.
return VirtualMethods.recursionOnModifiedVirtual(self, arg) + 10
class ExtendedVirtualDaughter(VirtualDaughter):
def __init__(self, name):
VirtualDaughter.__init__(self, name)
self.grand_daughter_name_called = False
def name(self):
self.grand_daughter_name_called = True
return VirtualDaughter.name(self).prepend('Extended')
class ExtendedExtendedVirtualDaughter(ExtendedVirtualDaughter):
def __init__(self, name):
ExtendedVirtualDaughter.__init__(self, name)
self.grand_grand_daughter_name_called = False
def name(self):
self.grand_grand_daughter_name_called = True
return ExtendedVirtualDaughter.name(self).prepend('Extended')
class VirtualMethodsTest(unittest.TestCase):
'''Test case for virtual methods'''
def setUp(self):
self.prefix_from_codeinjection = Str('Pimped')
def tearDown(self):
del self.prefix_from_codeinjection
def testReimplementedVirtualMethod0(self):
'''Test Python override of a virtual method with various different parameters is correctly called from C++.'''
vm = VirtualMethods()
evm = ExtendedVirtualMethods()
pt = Point(1.1, 2.2)
val = 4
cpx = complex(3.3, 4.4)
b = True
result0 = vm.callVirtualMethod0(pt, val, cpx, b)
result1 = evm.callVirtualMethod0(pt, val, cpx, b)
self.assertEqual(result0 * -1.0, result1)
def testRecursionOnModifiedVirtual(self):
evm = ExtendedVirtualMethods()
self.assertEqual(evm.recursionOnModifiedVirtual(''), 10)
self.assertEqual(evm.callRecursionOnModifiedVirtual(''), 10)
def testReimplementedVirtualMethodInheritedFromGrandParent(self):
'''Test Python override of a virtual method inherited from a grand parent.'''
original_name = 'Foo'
evd = ExtendedVirtualDaughter(original_name)
self.assertEqual(VirtualDaughter.name(evd), original_name)
self.assertEqual(VirtualMethods.name(evd), original_name)
self.assertFalse(evd.grand_daughter_name_called)
name = evd.callName()
self.assertTrue(evd.grand_daughter_name_called)
self.assertEqual(evd.name().prepend(self.prefix_from_codeinjection), name)
def testReimplementedVirtualMethodInheritedFromGrandGrandParent(self):
'''Test Python override of a virtual method inherited from a grand grand parent.'''
original_name = 'Foo'
eevd = ExtendedExtendedVirtualDaughter(original_name)
self.assertEqual(VirtualDaughter.name(eevd), original_name)
self.assertEqual(VirtualMethods.name(eevd), original_name)
self.assertFalse(eevd.grand_daughter_name_called)
self.assertFalse(eevd.grand_grand_daughter_name_called)
name = eevd.callName()
self.assertTrue(eevd.grand_daughter_name_called)
self.assertTrue(eevd.grand_grand_daughter_name_called)
self.assertEqual(eevd.name().prepend(self.prefix_from_codeinjection), name)
class PrettyErrorMessageTest(unittest.TestCase):
def testIt(self):
obj = ExtendedVirtualMethods()
self.assertRaises(RuntimeWarning, obj.callStrListToStdList, StrList())
if __name__ == '__main__':
unittest.main()
tensorflow/tensor2tensor | tensor2tensor/data_generators/image_utils.py | Python | apache-2.0 | 14,495 | 0.007934
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes and utilities for image datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import contrib
from tensor2tensor.utils import metrics
import tensorflow.compat.v1 as tf
def matplotlib_pyplot():
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
return plt
def image_to_tf_summary_value(image, tag):
"""Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object.
"""
curr_image = np.asarray(image, dtype=np.uint8)
height, width, n_channels = curr_image.shape
# If monochrome image, then reshape to [height, width]
if n_channels == 1:
curr_image = np.reshape(curr_image, [height, width])
s = io.BytesIO()
matplotlib_pyplot().imsave(s, curr_image, format="png")
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=height, width=width,
colorspace=n_channels)
return tf.Summary.Value(tag=tag, image=img_sum)
def convert_predictions_to_image_summaries(hook_args):
"""Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hparams.display_decoded_images is True, else an empty list.
"""
decode_hparams = hook_args.decode_hparams
if not decode_hparams.display_decoded_images:
return []
predictions = hook_args.predictions[0]
# Display ten random inputs and outputs so that tensorboard does not hang.
all_summaries = []
rand_predictions = np.random.choice(predictions, size=10)
for ind, prediction in enumerate(rand_predictions):
output_summary = image_to_tf_summary_value(
prediction["outputs"], tag="%d_output" % ind)
input_summary = image_to_tf_summary_value(
prediction["inputs"], tag="%d_input" % ind)
all_summaries.append(input_summary)
all_summaries.append(output_summary)
return all_summaries
def resize_by_area(img, size):
"""image resize function used by quite a few image problems."""
return tf.to_int64(
tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA))
def make_multiscale(image, resolutions,
resize_method=tf.image.ResizeMethod.BICUBIC,
num_channels=3):
"""Returns list of scaled images, one for each resolution.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to.
resize_method: tf.image.ResizeMethod.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels].
"""
scaled_images = []
for height in resolutions:
scaled_image = tf.image.resize_images(
image,
size=[height, height], # assuming that height = width
method=resize_method)
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([height, height, num_channels])
scaled_images.append(scaled_image)
return scaled_images
def make_multiscale_dilated(image, resolutions, num_channels=3):
"""Returns list of scaled images, one for each resolution.
Resizes by skipping every nth pixel.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to. The function
assumes VALID padding, so the original image's height must be divisible
by each resolution's height to return the exact resolution size.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels] if resolutions properly
divide the original image's height; otherwise shape height and width is up
to valid skips.
"""
image_height = common_layers.shape_list(image)[0]
scaled_images = []
for height in resolutions:
dilation_rate = image_height // height # assuming height = width
scaled_image = image[::dilation_rate, ::dilation_rate]
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([None, None, num_channels])
scaled_images.append(scaled_image)
return scaled_images
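# Illustrative sketch (not part of the original module): for a 32x32 image and
# resolutions [16, 8], the slicing above keeps every 2nd and then every 4th
# pixel, shown here with plain NumPy.
#
#   import numpy as np
#   image = np.zeros((32, 32, 3))
#   for height in [16, 8]:
#       dilation_rate = image.shape[0] // height   # 2, then 4
#       scaled = image[::dilation_rate, ::dilation_rate]
#       # scaled.shape -> (16, 16, 3), then (8, 8, 3)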
class ImageProblem(problem.Problem):
"""Base class for problems with images."""
@property
def num_channels(self):
"""Number of color channels."""
return 3
@property
def vocab_size(self):
"""Number of pixel values."""
return 256
def example_reading_spec(self):
data_fields = {
"image/encoded": tf.FixedLenFeature((), tf.string),
"image/format": tf.FixedLenFeature((), tf.string),
}
data_items_to_decoders = {
"inputs":
contrib.slim().tfexample_decoder.Image(
image_key="image/encoded",
format_key="image/format",
channels=self.num_channels),
}
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
if not self._was_reversed:
example["inputs"] = tf.image.per_image_standardization(example["inputs"])
return example
def eval_metrics(self):
eval_metrics = [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
]
if self._was_reversed:
eval_metrics += [metrics.Metrics.IMAGE_SUMMARY]
return eval_metrics
@property
def decode_hooks(self):
return [convert_predictions_to_image_summaries]
class Image2ClassProblem(ImageProblem):
"""Base class for image classification problems."""
@property
def is_small(self):
raise NotImplementedError()
@property
def num_classes(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
return 1
@property
def class_labels(self):
return ["ID_%d" % i for i in range(self.num_classes)]
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.ImageEncoder(channels=self.num_channels),
"targets": text_encoder.ClassLabelEncoder(self.class_labels)
}
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2ClassProblem, self).example_reading_spec())
data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64)
data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor(
label_key)
return data_fields, data_items_to_decoders
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.CLASS_LABEL}
p.vocab_size = {"inputs": 256,
"targets": self.num_classes}
p.batch_size_multiplier = 4 if self.is_small else 256
p.loss_mu
yaroslavprogrammer/django-filebrowser-no-grappelli | filebrowser/widgets.py | Python | bsd-3-clause | 3,673 | 0.004901
# coding: utf-8
# DJANGO IMPORTS
from django.template.loader import render_to_string
from django.forms.widgets import FileInput as DjangoFileInput
from django.forms.widgets import ClearableFileInput as DjangoClearableFileInput
from django.forms.widgets import CheckboxInput
from django.forms.fields import FilePathField
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.safestring import mark_safe
# FILEBROWSER IMPORTS
from filebrowser.base import FileObject
from filebrowser.settings import ADMIN_THUMBNAIL
class FileInput(DjangoClearableFileInput):
initial_text = ugettext_lazy('Currently')
input_text = ugettext_lazy('Change')
clear_checkbox_label = ugettext_lazy('Clear')
template_with_initial = u'%(input)s %(preview)s'
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'preview': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = u'%(input)s'
substitutions['input'] = super(DjangoClearableFileInput, self).render(name, value, attrs)
if value and hasattr(value, "url"):
template = self.template_with_initial
preview_template = render_to_string('filebrowser/widgets/fileinput.html', {
'value': FileObject(value.name),
'ADMIN_THUMBNAIL': ADMIN_THUMBNAIL,
})
substitutions["preview"] = preview_template
return mark_safe(template % substitutions)
class ClearableFileInput(DjangoClearableFileInput):
"""
A FileField Widget that shows its current value if it has one.
If value is an Image, a thumbnail is shown.
"""
initial_text = ugettext_lazy('Currently')
input_text = ugettext_lazy('Change')
clear_checkbox_label = ugettext_lazy('Clear')
template_with_initial = u'%(clear_template)s<br />%(input)s %(preview)s'
template_with_clear = u'%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'preview': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = u'%(input)s'
substitutions['input'] = super(DjangoClearableFileInput, self).render(name, value, attrs)
if value and hasattr(value, "url"):
template = self.template_with_initial
substitutions['initial'] = (u'<a target="_blank" href="%s">%s</a>' % (value.url, value))
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = checkbox_name
substitutions['clear_checkbox_id'] = checkbox_id
substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
substitutions['clear_template'] = self.template_with_clear % substitutions
if value and hasattr(value, "url"):
preview_template = render_to_string('filebrowser/widgets/clearablefileinput.html', {
'value': FileObject(value.name),
'ADMIN_THUMBNAIL': ADMIN_THUMBNAIL,
})
substitutions["preview"] = preview_template
return mark_safe(template % substitutions)
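# Illustrative sketch (model and field names are hypothetical): wiring the
# widget above into a ModelForm so the thumbnail preview is rendered for an
# existing value.
#
#   from django import forms
#
#   class DocumentForm(forms.ModelForm):
#       class Meta:
#           model = Document
#           fields = ["attachment"]
#           widgets = {"attachment": ClearableFileInput()}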
gkotian/zulip | tools/deprecated/finbot/money.py | Python | apache-2.0 | 7,737 | 0.003231
#!/usr/bin/env python2.7
import datetime
import monthdelta
def parse_date(date_str):
return datetime.datetime.strptime(date_str, "%Y-%m-%d")
def unparse_date(date_obj):
return date_obj.strftime("%Y-%m-%d")
class Company(object):
def __init__(self, name):
self.name = name
self.flows = []
self.verbose = False
def __str__(self):
return self.name
def add_flow(self, flow):
self.flows.append(flow)
def cash_at_date_internal(self, start_date, end_date):
cash = 0
for flow in self.flows:
delta = flow.cashflow(start_date, end_date, (end_date - start_date).days)
cash += delta
if self.verbose:
print flow.name, round(delta, 2)
return round(cash, 2)
def cash_at_date(self, start, end):
start_date = parse_date(start)
end_date = parse_date(end)
return self.cash_at_date_internal(start_date, end_date)
def cash_monthly_summary(self, start, end):
start_date = parse_date(start)
cur_date = parse_date(start)
end_date = parse_date(end)
while cur_date <= end_date:
print cur_date, self.cash_at_date_internal(start_date, cur_date)
cur_date += monthdelta.MonthDelta(1)
if self.verbose:
print
# CashFlow objects fundamentally just provide a function that says how
# much cash has been spent by that source at each time
#
# The API is that one needs to define a function .cashflow(date)
class CashFlow(object):
def __init__(self, name):
self.name = name
class FixedCost(CashFlow):
def __init__(self, name, amount):
super(FixedCost, self).__init__(name)
self.cost = -amount
def cashflow(self, start, end, days):
return self.cost
class ConstantCost(CashFlow):
def __init__(self, name, amount):
super(ConstantCost, self).__init__(name)
self.rate = -amount
def cashflow(self, start, end, days):
return self.rate * days / 365.
class PeriodicCost(CashFlow):
def __init__(self, name, amount, start, interval):
super(PeriodicCost, self).__init__(name)
self.amount = -amount
self.start = parse_date(start)
self.interval = interval
def cashflow(self, start, end, days):
cur = self.start
delta = 0
while (cur <= end):
if cur >= start:
delta += self.amount
cur += datetime.timedelta(days=self.interval)
return delta
class MonthlyCost(CashFlow):
def __init__(self, name, amount, start):
super(MonthlyCost, self).__init__(name)
self.amount = -amount
self.start = parse_date(start)
def cashflow(self, start, end, days):
cur = self.start
delta = 0
while (cur <= end):
if cur >= start:
delta += self.amount
cur += monthdelta.MonthDelta(1)
return delta
class TotalCost(CashFlow):
def __init__(self, name, *args):
self.name = name
self.flows = args
def cashflow(self, start, end, days):
return sum(cost.cashflow(start, end, days) for cost in self.flows)
class SemiMonthlyCost(TotalCost):
def __init__(self, name, amount, start1, start2 = None):
if start2 is None:
start2 = unparse_date(parse_date(start1) + datetime.timedelta(days=14))
super(SemiMonthlyCost, self).__init__(name,
MonthlyCost(name, amount, start1),
MonthlyCost(name, amount, start2)
)
class SemiMonthlyWagesNoTax(SemiMonthlyCost):
def __init__(self, name, wage, start):
super(SemiMonthlyWagesNoTax, self).__init__(name, self.compute_wage(wage), start)
def compute_wage(self, wage):
return wage / 24.
class SemiMonthlyWages(SemiMonthlyWagesNoTax):
def compute_wage(self, wage):
fica_tax = min(wage, 110100) * 0.062 + wage * 0.0145
unemp_tax = 450
return (wage + fica_tax + unemp_tax) / 24.
def __init__(self, name, wage, start):
super(SemiMonthlyWages, self).__init__(name, wage, start)
class DelayedCost(CashFlow):
def __init__(self, start, base_model):
super(DelayedCost, self).__init__("Delayed")
self.base_model = base_model
self.start = parse_date(start)
def cashflow(self, start, end, days):
start = max(start, self.start)
if start > end:
return 0
time_delta = (end-start).days
return self.base_model.cashflow(start, end, time_delta)
class BiweeklyWagesNoTax(PeriodicCost):
def __init__(self, name, wage, start):
super(BiweeklyWagesNoTax, self).__init__(name, self.compute_wage(wage), start, 14)
def compute_wage(self, wage):
# You would think this calculation would be (wage * 14 /
# 365.24), but you'd be wrong -- companies paying biweekly
# wages overpay by about 0.34% by doing the math this way
return wage / 26.
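# Worked check of the comment above: 26 paychecks cover 26 * 14 = 364 days, so
# paying wage/26 every 14 days disburses wage * 365.24/364 per year, roughly a
# 0.34% overpayment compared with wage * 14/365.24 per check.
#
#   365.24 / 364 - 1   # -> 0.0034..., i.e. about 0.34%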
class BiweeklyWages(BiweeklyWagesNoTax):
def compute_wage(self, wage):
fica_tax = min(wage, 110100) * 0.062 + wage * 0.0145
unemp_tax = 450
# You would think this calculation would be (wage * 14 /
# 365.24), but you'd be wrong -- companies paying biweekly
# wages overpay by about 0.34% by doing the math this way
return (wage + fica_tax + unemp_tax) / 26.
def __init__(self, name, wage, start):
super(BiweeklyWages, self).__init__(name, wage, start)
if __name__ == "__main__":
# Tests
c = Company("Example Inc")
c.add_flow(FixedCost("Initial Cash", -500000))
c.add_flow(FixedCost("Incorporation", 500))
assert(c.cash_at_date("2012-01-01", "2012-03-01") == 500000 - 500)
c.add_flow(FixedCost("Incorporation", -500))
c.add_flow(ConstantCost("Office", 50000))
assert(c.cash_at_date("2012-01-01", "2012-01-02") == 500000 - round(50000*1/365., 2))
c.add_flow(ConstantCost("Office", -50000))
c.add_flow(PeriodicCost("Payroll", 4000, "2012-01-05", 14))
assert(c.cash_at_date("2012-01-01", "2012-01-02") == 500000)
assert(c.cash_at_date("2012-01-01", "2012-01-06") == 500000 - 4000)
c.add_flow(PeriodicCost("Payroll", -4000, "2012-01-05", 14))
c.add_flow(DelayedCost("2012-02-01", ConstantCost("Office", 50000)))
assert(c.cash_at_date("2012-01-01", "2012-01-05") == 500000)
assert(c.cash_at_date("2012-01-01", "2012-02-05") == 500000 - round(50000*4/365., 2))
c.add_flow(DelayedCost("2012-02-01", ConstantCost("Office", -50000)))
c.add_flow(DelayedCost("2012-02-01", FixedCost("Financing", 50000)))
assert(c.cash_at_date("2012-01-01", "2012-01-15") == 500000)
c.add_flow(DelayedCost("2012-02-01", FixedCost("Financing", -50000)))
c.add_flow(SemiMonthlyCost("Payroll", 4000, "2012-01-01"))
assert(c.cash_at_date("2012-01-01", "2012-01-01") == 500000 - 4000)
assert(c.cash_at_date("2012-01-01", "2012-01-14") == 500000 - 4000)
assert(c.cash_at_date("2012-01-01", "2012-01-15") == 500000 - 4000 * 2)
assert(c.cash_at_date("2012-01-01", "2012-01-31") == 500000 - 4000 * 2)
assert(c.cash_at_date("2012-01-01", "2012-02-01") == 500000 - 4000 * 3)
assert(c.cash_at_date("2012-01-01", "2012-02-15") == 500000 - 4000 * 4)
c.add_flow(SemiMonthlyCost("Payroll", -4000, "2012-01-01"))
c.add_flow(SemiMonthlyWages("Payroll", 4000, "2012-01-01"))
assert(c.cash_at_date("2012-01-01", "2012-02-15") == 499207.33)
c.add_flow(SemiMonthlyWages("Payroll", -4000, "2012-01-01"))
print c
c.cash_monthly_summary("2012-01-01", "2012-07-01")
veusz/veusz | veusz/document/emf_export.py | Python | gpl-2.0 | 14,778 | 0.002977
# Copyright (C) 2009 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""A paint engine to produce EMF exports.
Requires: PyQt-x11-gpl-4.6-snapshot-20090906.tar.gz
sip-4.9-snapshot-20090906.tar.gz
pyemf
"""
import struct
import pyemf
from .. import qtall as qt
inch_mm = 25.4
scale = 100
def isStockObject(obj):
"""Is this a stock windows object."""
return (obj & 0x80000000) != 0
class _EXTCREATEPEN(pyemf._EMR._EXTCREATEPEN):
"""Extended pen creation record with custom line style."""
typedef = [
('i','handle',0),
('i','offBmi',0),
('i','cbBmi',0),
('i','offBits',0),
('i','cbBits',0),
('i','style'),
('i','penwidth'),
('i','brushstyle'),
('i','color'),
('i','brushhatch',0),
('i','numstyleentries')
]
def __init__(self, style=pyemf.PS_SOLID, width=1, color=0,
styleentries=[]):
"""Create pen.
styleentries is a list of dash and space lengths."""
pyemf._EMR._EXTCREATEPEN.__init__(self)
self.style = style
self.penwidth = width
self.color = pyemf._normalizeColor(color)
self.brushstyle = 0x0 # solid
if style & pyemf.PS_STYLE_MASK != pyemf.PS_USERSTYLE:
styleentries = []
self.numstyleentries = len(styleentries)
if styleentries:
self.unhandleddata = struct.pack(
"i"*self.numstyleentries, *styleentries)
def hasHandle(self):
return True
class EMFPaintEngine(qt.QPaintEngine):
"""Custom EMF paint engine."""
def __init__(self, width_in, height_in, dpi=75):
qt.QPaintEngine.__init__(
self,
qt.QPaintEngine.Antialiasing |
qt.QPaintEngine.PainterPaths |
qt.QPaintEngine.PrimitiveTransform |
qt.QPaintEngine.PaintOutsidePaintEvent |
qt.QPaintEngine.PatternBrush
)
self.width = width_in
self.height = height_in
self.dpi = dpi
def begin(self, paintdevice):
self.emf = pyemf.EMF(self.width, self.height, int(self.dpi*scale))
self.pen = self.emf.GetStockObject(pyemf.BLACK_PEN)
self.pencolor = (0, 0, 0)
self.brush = self.emf.GetStockObject(pyemf.NULL_BRUSH)
self.paintdevice = paintdevice
return True
def drawLines(self, lines):
"""Draw lines to emf output."""
for line in lines:
self.emf.Polyline(
[ (int(line.x1()*scale), int(line.y1()*scale)),
(int(line.x2()*scale), int(line.y2()*scale)) ] )
def drawPolygon(self, points, mode):
"""Draw polygon on output."""
# print "Polygon"
pts = [(int(p.x()*scale), int(p.y()*scale)) for p in points]
if mode == qt.QPaintEngine.PolylineMode:
self.emf.Polyline(pts)
else:
self.emf.SetPolyFillMode({
qt.QPaintEngine.WindingMode: pyemf.WINDING,
qt.QPaintEngine.OddEvenMode: pyemf.ALTERNATE,
qt.QPaintEngine.ConvexMode: pyemf.WINDING
}[mode])
self.emf.Polygon(pts)
def drawEllipse(self, rect):
"""Draw an ellipse."""
# print "ellipse"
args = (
int(rect.left()*scale), int(rect.top()*scale),
int(rect.right()*scale), int(rect.bottom()*scale),
int(rect.left()*scale), int(rect.top()*scale),
int(rect.left()*scale), int(rect.top()*scale),
)
self.emf.Pie(*args)
self.emf.Arc(*args)
def drawPoints(self, points):
"""Draw points."""
# print "points"
for pt in points:
x, y = (pt.x()-0.5)*scale, (pt.y()-0.5)*scale
self.emf.Pie(
int(x), int(y),
int((pt.x()+0.5)*scale), int((pt.y()+0.5)*scale),
int(x), int(y), int(x), int(y) )
def drawPixmap(self, r, pixmap, sr):
"""Draw pixmap to display."""
# convert pixmap to BMP format
bytearr = qt.QByteArray()
buf = qt.QBuffer(bytearr)
buf.open(qt.QIODevice.WriteOnly)
pixmap.save(buf, "BMP")
# chop off bmp header to get DIB
bmp = bytes(buf.data())
dib = bmp[0xe:]
hdrsize, = struct.unpack('<i', bmp[0xe:0x12])
dataindex, = struct.unpack('<i', bmp[0xa:0xe])
datasize, = struct.unpack('<i', bmp[0x22:0x26])
epix = pyemf._EMR._STRETCHDIBITS()
epix.rclBounds_left = int(r.left()*scale)
epix.rclBounds_top = int(r.top()*scale)
epix.rclBounds_right = int(r.right()*scale)
epix.rclBounds_bottom = int(r.bottom()*scale)
epix.xDest = int(r.left()*scale)
epix.yDest = int(r.top()*scale)
epix.cxDest = int(r.width()*scale)
epix.cyDest = int(r.height()*scale)
epix.xSrc = int(sr.left())
epix.ySrc = int(sr.top())
epix.cxSrc = int(sr.width())
epix.cySrc = int(sr.height())
epix.dwRop = 0xcc0020 # SRCCOPY
offset = epix.format.minstructsize + 8
epix.offBmiSrc = offset
epix.cbBmiSrc = hdrsize
epix.offBitsSrc = offset + dataindex - 0xe
epix.cbBitsSrc = datasize
epix.iUsageSrc = 0x0 # DIB_RGB_COLORS
epix.unhandleddata = dib
self.emf._append(epix)
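# Note on the magic offsets used above (standard BMP layout, added here as
# explanation): the 14-byte BITMAPFILEHEADER occupies bytes 0x00-0x0d, so the
# DIB header (BITMAPINFOHEADER) begins at 0x0e and its first 4 bytes give the
# header size; the offset of the pixel array (bfOffBits) is stored at 0x0a, and
# the pixel-array size (biSizeImage) lands at absolute offset 0x22. Chopping
# the first 0x0e bytes therefore leaves exactly the DIB that EMF expects.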
def _createPath(self, path):
"""Convert qt path to emf path"""
self.emf.BeginPath()
count = path.elementCount()
i = 0
#print "Start path"
while i < count:
e = path.elementAt(i)
if e.type == qt.QPainterPath.MoveToElement:
self.emf.MoveTo( int(e.x*scale), int(e.y*scale) )
#print "M", e.x*scale, e.y*scale
elif e.type == qt.QPainterPath.LineToElement:
self.emf.LineTo( int(e.x*scale), int(e.y*scale) )
#print "L", e.x*scale, e.y*scale
elif e.type == qt.QPainterPath.CurveToElement:
e1 = path.elementAt(i+1)
e2 = path.elementAt(i+2)
params = (
( int(e.x*scale), int(e.y*scale) ),
( int(e1.x*scale), int(e1.y*scale) ),
( int(e2.x*scale), int(e2.y*scale) ),
)
self.emf.PolyBezierTo(params)
#print "C", params
i += 2
else:
assert False
i += 1
ef = path.elementAt(0)
el = path.elementAt(count-1)
if ef.x == el.x and ef.y == el.y:
self.emf.CloseFigure()
#print "clos
|
ing"
self.emf.EndPath()
def drawPath(self, path):
"""Draw a path on the output."""
# print "path"
self._createPath(path)
self.emf.StrokeAndFillPath()
def drawTextItem(self, pt, textitem):
"""Convert text to a path and draw it.
"""
# print "text", pt, textitem.text()
path = qt.QPainterPath()
path.addText(pt, textitem.font(), textitem.text())
fill = self.emf.CreateSolidBrush(self.pencolor)
self.emf.SelectObject(fill)
self._createPath(path)
self.emf.FillPath()
self.emf.SelectObject(self.brush)
self.emf.DeleteObject(fill)
def end(sel
hugovk/diff-cover | diff_cover/violations_reporter.py | Python | agpl-3.0 | 15,153 | 0.000396
"""
Classes for querying the information in a test coverage report.
"""
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from collections import namedtuple, defaultdict
import re
import subprocess
import sys
import six
from diff_cover.git_path import GitPathTool
Violation = namedtuple('Violation', 'line, message')
class BaseViolationReporter(object):
"""
Query information from a coverage report.
"""
__metaclass__ = ABCMeta
def __init__(self, name):
"""
Provide a name for the coverage report, which will be included
in the generated diff report.
"""
self._name = name
@abstractmethod
def violations(self, src_path):
"""
Return a list of Violations recorded in `src_path`.
"""
pass
def measured_lines(self, src_path):
"""
Return a list of the lines in src_path that were measured
by this reporter.
Some reporters will always consider all lines in the file "measured".
As an optimization, such violation reporters
can return `None` to indicate that all lines are measured.
The diff reporter generator will then use all changed lines
provided by the diff.
"""
return None
def name(self):
"""
Retrieve the name of the report, which may be
included in the generated diff coverage report.
For example, `name()` could return the path to the coverage
report file or the type of reporter.
"""
return self._name
class XmlCoverageReporter(BaseViolationReporter):
"""
Query information from a Cobertura XML coverage report.
"""
def __init__(self, xml_roots):
"""
Load the Cobertura XML coverage report represented
by the lxml.etree with root element `xml_root`.
"""
super(XmlCoverageReporter, self).__init__("XML")
self._xml_roots = xml_roots
# Create a dict to cache violations dict results
# Keys are source file paths, values are output of `violations()`
self._info_cache = defaultdict(list)
def _get_src_path_line_nodes(self, xml_document, src_path):
"""
Returns a list of nodes containing line information for `src_path`
in `xml_document`.
If file is not present in `xml_document`, return None
"""
# Remove git_root from src_path for searching the correct filename
# If cwd is `/home/user/work/diff-cover/diff_cover`
# and src_path is `diff_cover/violations_reporter.py`
# search for `violations_reporter.py`
src_rel_path = GitPathTool.relative_path(src_path)
# If cwd is `/home/user/work/diff-cover/diff_cover`
# and src_path is `other_package/some_file.py`
# search for `/home/user/work/diff-cover/other_package/some_file.py`
src_abs_path = GitPathTool.absolute_path(src_path)
xpath_template = ".//class[@filename='{0}']/lines/line"
xpath = None
src_node_xpath = ".//class[@filename='{0}']".format(src_rel_path)
if xml_document.find(src_node_xpath) is not None:
xpath = xpath_template.format(src_rel_path)
src_node_xpath = ".//class[@filename='{0}']".format(src_abs_path)
if xml_document.find(src_node_xpath) is not None:
xpath = xpath_template.format(src_abs_path)
if xpath is None:
return None
return xml_document.findall(xpath)
def _cache_file(self, src_path):
"""
Load the data from `self._xml_roots`
for `src_path`, if it hasn't been already.
"""
# If we have not yet loaded this source file
if src_path not in self._info_cache:
# We only want to keep violations that show up in each xml source.
# Thus, each time, we take the intersection. However, to do this
# we must treat the first time as a special case and just add all
# the violations from the first xml report.
violations = None
# A line is measured if it is measured in any of the reports, so
# we take set union each time and can just start with the empty set
measured = set()
# Loop through the files that contain the xml roots
for xml_document in self._xml_roots:
line_nodes = self._get_src_path_line_nodes(xml_document,
src_path)
if line_nodes is None:
continue
# First case, need to define violations initially
if violations is None:
violations = set(
Violation(int(line.get('number')), None)
for line in line_nodes
if int(line.get('hits', 0)) == 0)
# If we already have a violations set,
# take the intersection of the new
# violations set and its old self
else:
violations = violations & set(
Violation(int(line.get('number')), None)
for line in line_nodes
if int(line.get('hits', 0)) == 0
)
# Measured is the union of itself and the new measured
measured = measured | set(
int(line.get('number')) for line in line_nodes
)
# If we don't have any information about the source file,
# don't report any violations
if violations is None:
violations = set()
self._info_cache[src_path] = (violations, measured)
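# Illustrative sketch (hypothetical numbers): given two Cobertura reports for
# the same file where report A misses lines {3, 5, 9} and report B misses
# {5, 9, 12}, the loop above caches violations {5, 9} (intersection: a line is
# a violation only if no report covered it) and the measured lines as the union
# of everything either report instrumented.
#
#   missed_a, missed_b = {3, 5, 9}, {5, 9, 12}
#   missed_a & missed_b   # -> {5, 9}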
def violations(self, src_path):
"""
See base class comments.
"""
self._cache_file(src_path)
# Yield all lines not covered
return self._info_cache[src_path][0]
def measured_lines(self, src_path):
"""
See base class docstring.
"""
self._cache_file(src_path)
return self._info_cache[src_path][1]
class BaseQualityReporter(BaseViolationReporter):
"""
Abstract class to report code quality
information, using `COMMAND`
(provided by subclasses).
"""
COMMAND = ''
OPTIONS = []
# Encoding of the stdout from the command
# This is application-dependent
STDOUT_ENCODING = 'utf-8'
# A list of file types to run on.
EXTENSIONS = []
def __init__(self, name, input_reports, user_options=None):
"""
Create a new quality reporter.
`name` is an identifier for the reporter
(usually the name of the tool used to generate
the report).
`input_reports` is an list of
file-like objects representing pre-generated
violation reports. The list can be empty.
If these are provided, the reporter will
use the pre-generated reports instead of invoking
the tool directly.
'user_options' is a string of options passed in.
This string contains options that are passed forward
to the reporter being used
"""
super(BaseQualityReporter, self).__init__(name)
self._info_cache = defaultdict(list)
self.user_options = user_options
# If we've been given input report files, use those
# to get the source information
if len(input_reports) > 0:
self.use_tool = False
self._load_reports(input_reports)
else:
self.use_tool = True
def violations(self, src_path):
"""
See base class comments.
"""
# If we've been given pre-generated pylint/pep8 reports,
# then we've already loaded everything we need into the cache.
# Otherwise, call pylint/pep8 ourselves
if self.use_tool:
if not any(src_path.endswith(ext) for ext in self.EXTENSIONS):
return []
if src_path not in self._info_cache:
output = self._run_command(src_path)
yeyanchao/calibre | src/calibre/devices/mtp/defaults.py | Python | gpl-3.0 | 1,628 | 0.006143
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import traceback, re
from calibre.constants import iswindows
class DeviceDefaults(object):
def __init__(self):
self.rules = (
# Amazon devices
({'vendor':0x1949}, {
'format_map': ['azw3', 'mobi', 'azw',
'azw1', 'azw4', 'pdf'],
'send_to': ['documents', 'books', 'kindle'],
}
),
)
def __call__(self, device, driver):
if iswindows:
vid = pid = 0xffff
m = re.search(r'(?i)vid_([0-9a-fA-F]+)&pid_([0-9a-fA-F]+)', device)
if m is not None:
try:
vid, pid = int(m.group(1), 16), int(m.group(2), 16)
except:
traceback.print_exc()
else:
vid, pid = device.vendor_id, device.product_id
for rule in self.rules:
tests = rule[0]
matches = True
for k, v in tests.iteritems():
if k == 'vendor' and v != vid:
matches = False
break
if k == 'product' and v != pid:
matches = False
break
if matches:
return rule[1]
return {}
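# Illustrative sketch (the device string is a made-up example): on Windows the
# vendor/product ids are pulled out of the PnP device id with the regex above,
# so an Amazon device (vendor 0x1949) matches the first rule in self.rules.
#
#   import re
#   device = r"USB\VID_1949&PID_0004\123456"
#   m = re.search(r'(?i)vid_([0-9a-fA-F]+)&pid_([0-9a-fA-F]+)', device)
#   vid, pid = int(m.group(1), 16), int(m.group(2), 16)   # 0x1949, 0x0004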
AdamISZ/electrum-joinmarket-plugin | joinmarket/__init__.py | Python | gpl-3.0 | 721 | 0.011096
from electrum.i18n import _
fullname = 'Joinmarket coinjoins'
description = _(" ".join(["Ability to send payments as coinjoins with counterparties.",
"Paying minimal fees, you can immediately send your coins",
"with much better privacy. See https://github.com/joinmarket-org/joinmarket",
"for more details."]))
requires = [('jmclient','github.com/Joinmarket-Org/joinmarket-clientserver'),
('twisted', 'twistedmatrix.com')]
#TODO: setting it here results in Joinmarket never loading.
#It seems that Electrum will not load a plugin on startup if
#it has any setting here.
#requires_wallet_type = ['standard']
available_for = ['qt']
ralhei/PyHDB | pyhdb/auth.py | Python | apache-2.0 | 3,055 | 0
# Copyright 2014, 2015 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import struct
import hashlib
import hmac
from io import BytesIO
###
from pyhdb.protocol.segments import RequestSegment
from pyhdb.protocol.constants import message_types
from pyhdb.protocol.parts import Authentication, Fields
from pyhdb.protocol.message import RequestMessage
from pyhdb.compat import iter_range
CLIENT_PROOF_SIZE = 32
CLIENT_KEY_SIZE = 64
class AuthManager(object):
def __init__(self, connection, user, password):
self.connection = connection
self.user = user
self.password = password
self.method = b"SCRAMSHA256"
self.client_key = os.urandom(CLIENT_KEY_SIZE)
self.client_proof = None
def perform_handshake(self):
request = RequestMessage.new(
self.connection,
RequestSegment(
message_types.AUTHENTICATE,
Authentication(self.user, {self.method: self.client_key})
)
)
response = self.connection.send_request(request)
auth_part = response.segments[0].parts[0]
if self.method not in auth_part.methods:
raise Exception(
"Only unknown authentication methods available: %s" %
b",".join(auth_part.methods.keys())
)
salt, server_key = Fields.unpack_data(
BytesIO(auth_part.methods[self.method])
)
self.client_proof = self.calculate_client_proof([salt], server_key)
return Authentication(self.user, {'SCRAMSHA256': self.client_proof})
def calculate_client_proof(self, salts, server_key):
proof = b"\x00"
        proof += struct.pack('b', len(salts))
for salt in salts:
proof += struct.pack('b', CLIENT_PROOF_SIZE)
proof += self.scramble_salt(salt, server_key)
return proof
def scramble_salt(self, salt, server_key):
msg = salt + server_key + self.client_key
key = hashlib.sha256(
hmac.new(
self.password.encode('cesu-8'), salt, hashlib.sha256
).digest()
).digest()
key_hash = hashlib.sha256(key).digest()
sig = hmac.new(
key_hash, msg, hashlib.sha256
).digest()
return self._xor(sig, key)
@staticmethod
def _xor(a, b):
a = bytearray(a)
b = bytearray(b)
result = bytearray(len(a))
for i in iter_range(len(a)):
result[i] += a[i] ^ b[i]
return bytes(result)
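# --- Illustrative check (not part of the original module) ---
# The byte-wise XOR helper is a staticmethod, so it can be sanity-checked
# without a live SAP HANA connection; the byte values below are arbitrary.
sig = b'\x0f\xf0\xaa'
key = b'\xff\x0f\xaa'
assert AuthManager._xor(sig, key) == b'\xf0\xff\x00'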
|
dims/glance
|
glance/async/flows/convert.py
|
Python
|
apache-2.0
| 3,951 | 0 |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from taskflow.patterns import linear_flow as lf
from taskflow import task
from glance.i18n import _, _LW
LOG = logging.getLogger(__name__)
convert_task_opts = [
cfg.StrOpt('conversion_format',
choices=('qcow2', 'raw', 'vmdk'),
help=_("The format to which images will be automatically "
"converted. When using the RBD backend, this should be "
"set to 'raw'")),
]
CONF = cfg.CONF
# NOTE(flaper87): Registering under the taskflow_executor section
# for now. It seems a waste to have a whole section dedicated to a
# single task with a single option.
CONF.register_opts(convert_task_opts, group='taskflow_executor')
class _Convert(task.Task):
conversion_missing_warned = False
def __init__(self, task_id, task_type, image_repo):
self.task_id = task_id
self.task_type = task_type
self.image_repo = image_repo
super(_Convert, self).__init__(
name='%s-Convert-%s' % (task_type, task_id))
def execute(self, image_id, file_path):
# NOTE(flaper87): A format must be explicitly
# specified. There's no "sane" default for this
# because the dest format may work differently depending
# on the environment OpenStack is running in.
conversion_format = CONF.taskflow_executor.conversion_format
if conversion_format is None:
if not _Convert.conversion_missing_warned:
msg = (_LW('The conversion format is None, please add a value '
'for it in the config file for this task to '
'work: %s') %
self.task_id)
LOG.warn(msg)
_Convert.conversion_missing_warned = True
return
# TODO(flaper87): Check whether the image is in the desired
# format already. Probably using `qemu-img` just like the
# `Introspection` task.
        dest_path = os.path.join(CONF.task.work_dir, "%s.converted" % image_id)
stdout, stderr = putils.trycmd('qemu-img', 'convert', '-O',
conversion_format, file_path, dest_path,
log_errors=putils.LOG_ALL_ERRORS)
if stderr:
raise RuntimeError(stderr)
os.rename(dest_path, file_path.split("file://")[-1])
return file_path
    def revert(self, image_id, result=None, **kwargs):
# NOTE(flaper87): If result is None, it probably
# means this task failed. Otherwise, we would have
# a result from its execution.
if result is None:
return
fs_path = result.split("file://")[-1]
if os.path.exists(fs_path):
os.remove(fs_path)
def get_flow(**kwargs):
"""Return task flow for converting images to different formats.
:param task_id: Task ID.
:param task_type: Type of the task.
:param image_repo: Image repository used.
"""
task_id = kwargs.get('task_id')
task_type = kwargs.get('task_type')
image_repo = kwargs.get('image_repo')
return lf.Flow(task_type).add(
_Convert(task_id, task_type, image_repo),
)
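# --- Illustrative usage (not part of the original module) ---
# Hypothetical wiring of the flow factory; in Glance the task_id, task_type
# and image_repo arguments are supplied by the task engine, not hard-coded.
flow = get_flow(task_id='1234', task_type='import', image_repo=None)
print(flow)  # a linear taskflow flow containing a single _Convert task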
|
Jumpscale/jumpscale6_core
|
lib/JumpScale/baselib/units/units.py
|
Python
|
bsd-2-clause
| 787 | 0.005083 |
order = ['','K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
class Sizes(object):
_BASE = 1000.
def toSize(self, value, input='', output='K'):
"""
Convert value in other measurement
"""
input = order.index(input)
output = order.index(output)
factor = input - output
return value * (self._BASE ** factor)
def converToBestUnit(self, value, input=''):
devider = len(str(int(self._BASE))) - 1
output = (len(str(value)) -2) / devider
output += order.index(input)
        if output > len(order):
output = len(order) - 1
elif output < 0:
output = 0
output = order[output]
        return self.toSize(value, input, output), output
class Bytes(Sizes):
_BASE = 1024.
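# --- Illustrative usage (not part of the original module) ---
# Sizes converts between decimal multiples (base 1000), Bytes between binary
# multiples (base 1024).
sizes = Sizes()
byte_sizes = Bytes()
print(sizes.toSize(1500, input='', output='K'))       # 1.5   (1500 / 1000)
print(byte_sizes.toSize(2048, input='', output='K'))  # 2.0   (2048 / 1024)
print(byte_sizes.toSize(1, input='G', output='M'))    # 1024.0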
|
openkamer/openkamer
|
website/views.py
|
Python
|
mit
| 7,095 | 0.001409 |
import os
import json
import datetime
from django.http import HttpResponse
from django.views.generic import TemplateView
from django.utils.safestring import mark_safe
from django.utils import timezone
from django.template.loader import render_to_string
from person.models import Person
from document.models import Dossier
from document.models import Submitter
from document.models import Kamervraag
from document.models import Kamerstuk
from document.views import TimelineKamervraagItem
from document.views import TimelineKamerstukItem
from government.models import Government
from website import settings
from stats.views import get_example_plot_html
class HomeView(TemplateView):
template_name = "website/index.html"
context_object_name = "homepage"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
class ContactView(TemplateView):
template_name = "website/contact.html"
context_object_name = "contact"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['contact_email'] = settings.CONTACT_EMAIL
return context
def create_timeline_date(date):
return {
'year': date.year,
'month': date.month,
'day': date.day
}
def get_dossier_timeline_json(request):
governments = Government.objects.all()
eras = []
for government in governments:
if government.date_dissolved:
end_date = government.date_dissolved
else:
end_date = timezone.now()
text = {
'headline': government.name,
'text': government.name
}
era = {
            'start_date': create_timeline_date(government.date_formed),
'end_date': create_timeline_date(end_date),
'text': text
}
eras.append(era)
events = []
if 'dossier_pk' in request.GET:
dossier = Dossier.objects.get(id=request.GET['dossier_pk'])
for kamerstuk in dossier.kamerstukken:
text = {
'headline': kamerstuk.type_short,
'text': kamerstuk.type_long
}
event = {
'start_date': create_timeline_date(kamerstuk.document.date_published),
'text': text
}
events.append(event)
timeline_info = {
'events': events,
'eras': eras
}
timeline_json = json.dumps(timeline_info, sort_keys=True, indent=4)
# print(timeline_json)
return HttpResponse(timeline_json, content_type='application/json')
class PlotExampleView(TemplateView):
template_name = "website/plot_examples.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['plot_html'] = mark_safe(get_example_plot_html())
return context
class DatabaseDumpsView(TemplateView):
template_name = "website/database_dumps.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
backup_files = self.get_files(settings.DBBACKUP_STORAGE_OPTIONS['location'])
context['backup_files'] = sorted(backup_files, key=lambda backup: backup['datetime_created'], reverse=True)
return context
@staticmethod
def get_files(path):
files = []
for (dirpath, dirnames, filenames) in os.walk(path):
for file in filenames:
if '.gitignore' in file or 'readme.txt' in file:
continue
filepath = os.path.join(dirpath, file)
size = os.path.getsize(filepath)
datetime_created = os.path.getctime(filepath)
files.append({
'file': file,
'size': int(size)/1024/1024,
'datetime_created': datetime.datetime.fromtimestamp(datetime_created)
})
return files
class CSVExportsView(TemplateView):
template_name = "website/csv_exports.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
files = DatabaseDumpsView.get_files(settings.CSV_EXPORT_PATH)
context['files'] = sorted(files, key=lambda file: file['datetime_created'], reverse=True)
return context
class PersonTimelineView(TemplateView):
template_name = "website/items/person_timeline.html"
@staticmethod
def get_timeline_items(person, year=None):
if year:
year = int(year)
submitters = Submitter.objects.filter(person=person, document__date_published__range=[datetime.date(year=year, day=1, month=1), datetime.date(year=year, day=31, month=12)])
else:
submitters = Submitter.objects.filter(person=person)
submitter_ids = list(submitters.values_list('id', flat=True))
timeline_items = []
kamervragen = Kamervraag.objects.filter(document__submitter__in=submitter_ids).select_related('document', 'kamerantwoord')
for kamervraag in kamervragen:
timeline_items.append(TimelineKamervraagItem(kamervraag))
kamerstukken = Kamerstuk.objects.filter(document__submitter__in=submitter_ids).select_related('document')
for kamerstuk in kamerstukken:
timeline_items.append(TimelineKamerstukItem(kamerstuk))
timeline_items = sorted(timeline_items, key=lambda items: items.date, reverse=True)
return timeline_items
def get_context_data(self, slug, year, **kwargs):
year = int(year)
context = super().get_context_data(**kwargs)
person = Person.objects.get(slug=slug)
timeline_items = []
has_next = True
while len(timeline_items) == 0:
timeline_items = PersonTimelineView.get_timeline_items(person, year)
if timeline_items:
break
if year < 1996:
has_next = False
break
year -= 1
if year == datetime.date.today().year:
next_year = None
else:
next_year = year + 1
context['timeline_items'] = timeline_items
context['person'] = person
context['is_person_timeline'] = True
context['previous_year'] = year - 1
context['next_year'] = next_year
context['has_next'] = has_next
return context
def get_person_timeline_html(request):
person = Person.objects.get(id=request.GET['person_id'])
year = int(request.GET['year'])
timeline_items = PersonTimelineView.get_timeline_items(person, year)
if year == datetime.date.today().year:
next_year = None
else:
next_year = year + 1
html = render_to_string('website/items/person_timeline.html', {
'timeline_items': timeline_items,
'person': person,
'is_person_timeline': True,
'previous_year': year-1,
'year': next_year,
'has_next': True
})
response = json.dumps({'html': html})
return HttpResponse(response, content_type='application/json')
|
penzance/hdt_monitor
|
hdt_monitor/settings/gunicorn_config.py
|
Python
|
mit
| 530 | 0.003774 |
# gunicorn configuration
bind = '0.0.0.0:8000'
workers = 3
# These log settings assume that gunicorn log config will be included in the django base.py logging configuration
accesslog = '-'
errorlog = '-'
access_log_format = '{"request": "%(r)s", "http_status_code": "%(s)s", "http_request_url": "%(U)s", "http_query_string": "%(q)s", "http_verb": "%(m)s", "http_version": "%(H)s", "http_referer": "%(f)s", "x_forwarded_for": "%({x-forwarded-for}i)s", "remote_address": "%(h)s", "request_usec": "%(D)s", "request_sec": "%(L)s"}'
|
rasathus/pigredients
|
pigredients/ics/lpd6803.py
|
Python
|
mit
| 7,299 | 0.01932 |
import spidev
import time
import random
# All set commands set the state only, and so require a write command to be displayed.
class LPD6803_Chain(object):
def __init__(self, ics_in_chain=25, spi_address_hardware=0, spi_address_output=0):
# default to 25 ics in the chain, so it works with no params with the Adafruit RGB LED Pixels - http://www.adafruit.com/products/738
self.number_of_ics = ics_in_chain
self.spi = spidev.SpiDev()
self.spi.open(spi_address_hardware, spi_address_output)
self.ics = {}
for ic in range(self.number_of_ics):
self.ics[ic] = { 'R' : 0 , 'G' : 0, 'B' : 0}
#Write out the current zero'd state to the chain.
self.write()
def two_byte_pack(self, rgb_dict):
# take in our RGB values in the form of 1 int per component, and transform to 2 bytes in the structure of ...
# 1<5 bits Red><5 bits Green><5 Bits Blue>
return_bytes = bytearray(2)
# creating 16bits to allow us to bitshift values into place.
temp_16bit = 0b0000000000000000
# Set our most significant bit to on.
temp_16bit += 32768
# take 5 most significant bits from each component, by shifting 3 pos to the right. Then shift into their appropriate place.
temp_16bit |= (rgb_dict['R'] >>3) << 10 # Red into bits 11-15
temp_16bit |= (rgb_dict['G'] >>3) << 5 # Green into bits 6-10
temp_16bit |= (rgb_dict['B'] >>3) # Blue into bits 1-5
#return_bytes initialised as zeros, going to mask the bits were interested in, and then bitshift the values to acces the bits we need.
return_bytes[0] = (temp_16bit & 0xFF00) >> 8
return_bytes[1] = (temp_16bit & 0x00FF) >> 0
return return_bytes
def write(self):
# Iterate through our IC states, and write out 2 bytes for each, representing 1<5 bits Red><5 bits Green><5 Bits Blue>
# pre charging our output bytes with 32bit start frame.
byte_list = []
# write out our 32bit start frame
self.spi.xfer2([0,0,0,0])
for ic in self.ics:
byte_pair = self.two_byte_pack(self.ics[ic])
byte_list.append(byte_pair[0])
byte_list.append(byte_pair[1])
self.spi.xfer2(byte_list)
# send out 'append pulse', one for each pixel.
append_pulses = []
for ic in self.ics:
append_pulses.append(0)
self.spi.xfer2(append_pulses)
def set(self):
# Alias of write
return self.write()
def print_ics(self):
print self.ics
def set_ic(self, ic_id, rgb_value=[]):
# Check we've been given a valid rgb_value.
if ic_id > self.number_of_ics -1:
raise Exception("Invalid ic_id : ic_id given is greater than the number number of ics in the chain.")
if len(rgb_value) < 3:
raise Exception("Invalid rgb_value : %s , for pin : %s, please pass a list containing three state values eg. [255,255,255]" % (rgb_value, ic_id))
try:
# Null op to ensure we've been given an integer.
int(ic_id)
self.ics[ic_id]= {'R' : rgb_value[0], 'G' : rgb_value[1], 'B' : rgb_value[2]}
except ValueError:
raise Exception("Pin number is not a valid integer.")
def set_rgb(self, rgb_value):
if len(rgb_value) != 3:
raise Exception("Invalid rgb_value: %s, please pass a list containing three state values eg. [255,255,255]" % rgb_value)
for ic in range(self.number_of_ics):
self.ics[ic] = {'R' : rgb_value[0], 'G' : rgb_value[1], 'B' : rgb_value[2]}
def all_on(self):
# !! NOTE !!
# This does not affect pin state
byte_list = []
# write out our 32bit start frame
self.spi.xfer2([0,0,0,0])
for ic in self.ics:
            byte_pair = self.two_byte_pack({'R' : 255, 'G' : 255, 'B' : 255})
byte_list.append(byte_pair[0])
byte_list.append(byte_pair[1])
self.spi.xfer2(byte_list)
# send out 'append pulse', one for each pixel.
append_pulses = []
for ic in self.ics:
append_pulses.append(0)
self.spi.xfer2(append_pulses)
def all_off(self):
# !! NOTE !!
        # This does not affect pin state
byte_list = []
# write out our 32bit start frame
self.spi.xfer2([0,0,0,0])
for ic in self.ics:
byte_pair = self.two_byte_pack({'R' : 0, 'G' : 0, 'B' : 0})
byte_list.append(byte_pair[0])
byte_list.append(byte_pair[1])
self.spi.xfer2(byte_list)
# send out 'append pulse', one for each pixel.
append_pulses = []
for ic in self.ics:
append_pulses.append(0)
self.spi.xfer2(append_pulses)
def set_white(self):
for ic in range(self.number_of_ics):
self.ics[ic] = {'R' : 255, 'G' : 255, 'B' : 255}
def set_red(self):
for ic in range(self.number_of_ics):
self.ics[ic] = {'R' : 255, 'G' : 0, 'B' : 0}
def set_green(self):
for ic in range(self.number_of_ics):
self.ics[ic] = {'R' : 0, 'G' : 255, 'B' : 0}
def set_blue(self):
for ic in range(self.number_of_ics):
self.ics[ic] = {'R' : 0, 'G' : 0, 'B' : 255}
def set_off(self):
for ic in range(self.number_of_ics):
self.ics[ic] = {'R' : 0, 'G' : 0, 'B' : 0}
def all_random(self):
byte_list = []
# write out our 32bit start frame
self.spi.xfer2([0,0,0,0])
for ic in range(self.number_of_ics):
byte_pair = self.two_byte_pack({'R' : random.randint(0,255), 'G' : random.randint(0,255), 'B' : random.randint(0,255)})
byte_list.append(byte_pair[0])
byte_list.append(byte_pair[1])
self.spi.xfer2(byte_list)
# send out 'append pulse', one for each pixel.
append_pulses = []
for ic in self.ics:
append_pulses.append(0)
self.spi.xfer2(append_pulses)
def cycle(self, delay=0.01):
inc_vals = {}
for ic in range(self.number_of_ics):
inc_vals[ic] = {'R' : True, 'G' : True, 'B' : True}
self.ics[ic]['R'] = random.randint(0,255)
self.ics[ic]['G'] = random.randint(0,255)
self.ics[ic]['B'] = random.randint(0,255)
for i in range(512):
for ic in range(self.number_of_ics):
for val in ['R','G','B']:
if self.ics[ic][val] >= 255:
inc_vals[ic] = False
elif self.ics[ic][val] <= 0:
inc_vals[ic] = True
if inc_vals[ic] == True :
self.ics[ic][val] = self.ics[ic][val] + 5
else :
self.ics[ic][val] = self.ics[ic][val] - 5
self.write()
time.sleep(delay)
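# --- Illustrative check (not part of the original driver) ---
# The 5-5-5 packing used by two_byte_pack can be verified without SPI
# hardware by reproducing the same bit arithmetic on plain integers.
def _pack_rgb555(r, g, b):
    word = 0x8000 | ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3)
    return [(word & 0xFF00) >> 8, word & 0x00FF]
assert _pack_rgb555(255, 0, 0) == [0xFC, 0x00]  # red occupies bits 11-15
assert _pack_rgb555(0, 0, 255) == [0x80, 0x1F]  # blue occupies bits 1-5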
|
ukhas/habitat
|
habitat/tests/test_parser_modules/test_ukhas_parser.py
|
Python
|
gpl-3.0
| 18,589 | 0.000592 |
# Copyright 2010, 2011, 2013 (C) Adam Greig
#
# This file is part of habitat.
#
# habitat is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# habitat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with habitat. If not, see <http://www.gnu.org/licenses/>.
"""
Test the UKHAS protocol parser.
"""
from nose.tools import assert_raises
from copy import deepcopy
# Mocking the LoadableManager is a heck of a lot of effort. Not worth it.
from ...loadable_manager import LoadableManager
from ...parser import CantParse
from ...parser_modules.ukhas_parser import UKHASParser
# Provide the sensor functions to the parser
fake_sensors_config = {
"loadables": [
{"name": "sensors.base", "class": "habitat.sensors.base"},
{"name": "sensors.stdtelem", "class": "habitat.sensors.stdtelem"}
]
}
class FakeParser:
def __init__(self):
self.loadable_manager = LoadableManager(fake_sensors_config)
# A 'standard' config. Other configs can copy this and change parts.
base_config = {
"protocol": "UKHAS",
"checksum": "crc16-ccitt",
"fields": [
{
"name": "sentence_id",
"sensor": "base.ascii_int"
}, {
"name": "time",
"sensor": "stdtelem.time"
}, {
"name": "latitude",
"sensor": "stdtelem.coordinate",
"format": "dd.dddd"
}, {
"name": "longitude",
"sensor": "stdtelem.coordinate",
"format": "dd.dddd"
}, {
"name": "altitude",
"sensor": "base.ascii_int"
}, {
"name": "speed",
"sensor": "base.ascii_float"
}, {
"name": "custom_string",
"sensor": "base.string"
}
]
}
class TestUKHASParser:
"""UKHAS Parser"""
def setup(self):
self.p = UKHASParser(FakeParser())
def output_append_sentence(self, output, sentence):
"""Helper function to put a sentence in a pre-made output dictionary
for easy comparison with parser results."""
output_copy = deepcopy(output)
output_copy["_sentence"] = sentence
return output_copy
def test_pre_parse_rejects_bad_sentences(self):
# Each of these is a totally invalid stub that should just fail. The
# last one might be valid but has non-hexadecimal checksum characters.
bad_sentences = ["", "\n", "bad\n", "$$bad*\n", "bad*CC\n",
"bad*CCCC\n", "bad,bad,bad,bad\n", "$$bad*GH\n",
"$$bad,bad*GHIJ\n", "$$@invalid@,data*CCCC\n",
"$$good,data,\x01\n", "$$missing,newline*CCCC"]
for sentence in bad_sentences:
assert_raises(CantParse, self.p.pre_parse, sentence)
assert_raises(ValueError, self.p.parse, sentence, base_config)
def test_pre_parse_accepts_good_setences(self):
# Each of these short stubs should pass pre-parsing and return a
# callsign
good_sentences = ["$$good,data\n", "$$good,data*CC\n",
"$$good,data*CCCC\n",
"$$good,lots,of,1234,5678.90,data*CCCC\n"]
for sentence in good_sentences:
assert self.p.pre_parse(sentence) == "good"
def test_pre_parse_rejects_bad_callsigns(self):
bad_callsigns = ["abcdef@123", "ABC\xFA", "$$", "almost good"]
callsign_template = "$${0},data*CC\n"
for callsign in bad_callsigns:
sentence = callsign_template.format(callsign)
assert_raises(CantParse, self.p.pre_parse, sentence)
def test_pre_parse_accepts_good_callsigns(self):
good_callsigns = ["good", "g0_0d", "G0--0D", "abcde/f", "ABCDEF",
"012345", "abcDEF123"]
callsign_template = "$${0},data*CC\n"
for callsign in good_callsigns:
sentence = callsign_template.format(callsign)
assert self.p.pre_parse(sentence) == callsign
def test_pre_parse_rejects_bad_checksums(self):
bad_checksums = ["abcg", "123G", "$$", "*ABC", "defG", "123\xFA"]
checksum_template = "$$good,data*{0}\n"
for checksum in bad_checksums:
sentence = checksum_template.format(checksum)
assert_raises(CantParse, self.p.pre_parse, sentence)
def test_pre_parse_accepts_good_checksums(self):
good_checksums = ["abcd", "ABCD", "abCD", "ab12", "AB12", "aB12", "ab",
"aB", "AB", "a0", "A0"]
checksum_template = "$$good,data*{0}\n"
for checksum in good_checksums:
sentence = checksum_template.format(checksum)
assert self.p.pre_parse(sentence) == "good"
def test_parse_rejects_invalid_configs(self):
# A valid sentence for testing the configs with
sentence = "$$habitat,1,00:00:00,0.0,0.0,0,0.0,hab\n"
# A configuration with no checksum
config_checksum_none = deepcopy(base_config)
config_checksum_none["checksum"] = "none"
# A configuration without a protocol key (should fail)
config_no_protocol = deepcopy(config_checksum_none)
del config_no_protocol["protocol"]
assert_raises(ValueError, self.p.parse, sentence, config_no_protocol)
# A configuration without a checksum key (should fail)
config_no_checksum = deepcopy(config_checksum_none)
del config_no_checksum["checksum"]
assert_raises(ValueError, self.p.parse, sentence, config_no_checksum)
# A configuration without a fields dictionary (should fail)
config_no_fields = deepcopy(config_checksum_none)
del config_no_fields["fields"]
assert_raises(ValueError, self.p.parse, sentence, config_no_fields)
# A configuration with an empty fields dictionary (should fail)
config_empty_fields = deepcopy(config_checksum_none)
config_empty_fields["fields"] = {}
assert_raises(ValueError, self.p.parse, sentence, config_empty_fields)
# A configuration where a field has no name (should fail)
config_field_without_name = deepcopy(config_checksum_none)
del config_field_without_name["fields"][0]["name"]
assert_raises(ValueError, self.p.parse, sentence,
config_field_without_name)
# A configuration where a field has no sensor (should fail)
config_field_without_sensor = deepcopy(config_checksum_none)
del config_field_without_sensor["fields"][0]["sensor"]
        assert_raises(ValueError, self.p.parse, sentence,
                      config_field_without_sensor)
# A configuration where a coordinate field lacks a format (should fail)
config_field_without_format = deepcopy(config_checksum_none)
del config_field_without_format["fields"][2]["format"]
assert_raises(ValueError, self.p.parse, sentence,
config_field_without_format)
# A configuration with an invalid checksum (should fail)
config_checksum_invalid = deepcopy(config_checksum_none)
        config_checksum_invalid["checksum"] = "invalid"
assert_raises(ValueError, self.p.parse, sentence,
config_checksum_invalid)
# A configuration with an invalid protocol key (should fail)
config_invalid_protocol = deepcopy(config_checksum_none)
config_invalid_protocol["protocol"] = "invalid"
assert_raises(ValueError, self.p.parse, sentence,
config_invalid_protocol)
# A configuration with an invalid field sensor (should fail)
config_field_sensor_invalid = deepcopy(config_checksum_none)
config_field_sensor_invalid["fields"][0]["sensor"] = "invalid"
assert_raises(ValueError, self.p.pars
|
m00nlight/hackerrank
|
algorithm/Number-Theory/nCr/main.py
|
Python
|
gpl-2.0
| 7,426 | 0.005252 |
from __future__ import division
from operator import add, mul
import cProfile
def memo(f):
cache = {}
def _f(*args):
try:
return cache[args]
except KeyError:
result = cache[args] = f(*args)
return result
except TypeError:
return f(*args)
return _f
def mod(a, b):
"""
Type :: (Int, Int) -> Int
Return modulo of a over b, make sure to return an positive number
when b is great than zero
"""
return (a % b + b) % b
def gcd(a, b):
"""
Type :: (Int, Int) -> Int
Return :: Greatest Common divisor
"""
while b is not 0:
a, b = b, a % b
return a
def exgcd(a, b):
"""
Type :: (Int, Int) -> (Int, Int, Int)
Return :: (g, x, y), g is gcd of a and b and
x * a + y * b = g
"""
if b is 0:
return (a, 1, 0)
else:
g, x, y = exgcd(b, a % b)
return (g, y, x - (a // b) * y)
@memo
def modinv(a, m):
"""
Type :: (Int, Int) -> Int
Return :: Return module inverse of a * x = 1 (mod m)
"""
if gcd(a, m) is not 1: raise Exception("Not coprime")
_, x, y = exgcd(a, m)
return (m + x % m) % m
def sieve(m):
"""
    Type :: Int -> [Int]
Generate primes number up to m, and return a list
"""
ret, judge = [], [True] * (m + 1)
judge[0] = judge[1] = False
ret.append(2)
for i in xrange(4, m + 1, 2): judge[i] = False
for i in xrange(3, m + 1, 2):
if judge[i]:
ret.append(i)
for j in xrange(i * i, m + 1, i): judge[j] = False
return ret
MAXN = 1000
primes = sieve(MAXN)
primes_set = set(primes)
def factor(n):
"""
Type :: Int -> [(Int, Int)]
Return the factorizatoin result of decompose number n
>>> factor(12)
[(2, 2), (3, 1)]
>>> factor(10007)
[(10007, 1)]
>>> factor(0)
Traceback (most recent call last):
...
Exception: Should be nonzero number
"""
if n is 0: raise Exception("Should be nonzero number")
ret, i = [], 0
while n is not 1 and i < len(primes):
if n % primes[i] == 0:
c = 0
while n % primes[i] == 0:
c += 1
n //= primes[i]
ret.append((primes[i], c))
i += 1
if n is not 1: ret.append((n, 1))
return ret
def euler_phi(n):
"""
Type :: Int -> Int
Calculate the Euler phi result of number n in around log(n) of time
>>> euler_phi(12)
4
>>> euler_phi(17)
16
>>> euler_phi(33)
20
"""
facts = factor(n)
return reduce(lambda acc, x: acc * (x[0] - 1) // x[0], facts, n)
def euler_phi2(n):
"""
Type :: Int -> [Int]
Generate the Euler phi result up to number n, and return the result
as a list
>>> euler_phi2(20) == [0] + [euler_phi(i) for i in range(1, 21)]
True
>>> euler_phi2(100) == [0] + [euler_phi(i) for i in range(1, 101)]
True
>>> euler_phi2(1000) == [0] + [euler_phi(i) for i in range(1, 1001)]
True
"""
ret = [i for i in range(n + 1)]
for i in range(2, n + 1):
if ret[i] == i:
for j in range(i, n + 1, i): ret[j] = ret[j] // i * (i - 1)
return ret
def gen_fact_mod_prime(p):
"""
Type :: Int -> [Int]
Generate the fact of i(mod p) for 1 <= i < p, p should be a prime number
>>> gen_fact_mod_prime(3)
[1, 1, 2]
>>> gen_fact_mod_prime(7)
[1, 1, 2, 6, 3, 1, 6]
"""
ret = [1] * p
for i in range(2, p): ret[i] = ret[i - 1] * i % p
return ret
def fact_mod(n, p, facts):
"""
Type :: (Int, Int, [Int]) -> (Int, Int)
Suppose n! = a * p^e (mod p), then the function return (a mod p, e)
facts is i!(mod p) for 0 <= i < p, use Lucas theory
>>> facts = gen_fact_mod_prime(7)
>>> fact_mod(5, 7, facts)
(1, 0)
>>> fact_mod(15, 7, facts)
(2, 2)
"""
if (n == 0): return (1, 0)
(a, e) = fact_mod(n // p, p, facts)
e += n // p
if (n // p % 2 != 0): return (a * (p - facts[n % p]) % p, e)
return (a * facts[n % p] % p, e)
def comb_mod(n, k, p):
"""
Type :: (Int, Int, Int) -> Int
Return C(n, k) mod p, p is a prime number.
>>> comb_mod(5, 3, 7)
3
>>> comb_mod(6, 2, 7)
1
"""
if n < 0 or k < 0 or n < k: return 0
facts = gen_fact_mod_prime(p)
a1, e1 = fact_mod(n, p, facts)
a2, e2 = fact_mod(k, p, facts)
a3, e3 = fact_mod(n - k, p, facts)
if (e1 > e2 + e3):
return 0
else:
return a1 * modinv(a2 * a3 % p, p) % p
def chinese_remainder_theory_for2(x, a, y, b):
"""
Type :: (Int, Int, Int, Int) -> Int
Return z for z = a (mod x) and z = b (mod y). Here z is unique modulo
M = lcm(x, y), return (z, M). On failure return, M = -1
"""
g, s, t = exgcd(x, y)
if (a % g) != (b % g):
return (0, -1)
else:
return (mod(s * b * x + t * a * y, x * y) // g, x * y // g)
def chinese_remainder_theory(xs, ass):
"""
Type :: ([Int], [Int]) -> Int
Return : z that z[i] = a[i] (mod xs[i]) for 0 <= i < n
Require : Require a[i] to be relative coprime to each other
>>> chinese_remainder_theory([3, 5, 7], [2,3,2])
(23, 105)
"""
ret = (ass[0], xs[0])
for i in xrange(1, len(xs)):
ret = chinese_remainder_theory_for2(ret[1], ret[0], xs[i], ass[i])
if ret[1] == -1: break
return ret
def comb_mod2(n, r, m, pa, facts1):
"""
Type :: (Int, Int, Int) -> Int
m is of form p^a, and n is very large
"""
p, a = pa
def n_fact_fact(n):
if n is 0 or n is 1:
return 1
elif n < m:
return facts1[n] * n_fact_fact(n // p) % m
else:
a = facts1[m - 1]
b = facts1[n % m]
c = n_fact_fact(n // p)
# print 'n = %d a = %d b = %d c = %d' % (n, a, b, c)
return pow(a, n // m, m) * b * c % m
def get_power(n, p):
ret = 0
while n > 0:
ret += n // p
n //= p
return ret
b = get_power(n, p) - get_power(r, p) - get_power(n - r, p)
if b >= a: return 0
m1 = n_fact_fact(n)
m2 = n_fact_fact(r)
m3 = n_fact_fact(n - r)
return (p ** b) * m1 * modinv_table[(m2, m)] * modinv_table[(m3, m)] % m
def solve(n, r, fs1, fs2, xss):
xs = [27, 11, 13, 37]
ass = [comb_mod2(n, r, xs[i], xss[i], fs1[i]) for i in xrange(4)]
return chinese_remainder_theory(xs, ass)
def init(xs):
ret1, ret2 = [], []
for i in xrange(len(xs)):
p, a = xs[i]
m = p ** a
t1, t2 = [1],[1]
for i in xrange(1, p):
t2.append(t2[-1] * i % p)
for i in xrange(1, m):
if gcd(i, m) == 1:
t1.append(t1[-1] * i % m)
else:
t1.append(t1[-1])
ret1.append(t1)
ret2.append(t2)
return ret1, ret2
modinv_table = {}
modulo = 142857
for x in {27, 11, 13, 37}:
for y in xrange(1, x):
if gcd(x, y) == 1:
modinv_table[(y, x)] = modinv(y, x)
def main():
n = int(raw_input())
xss = [(3,3), (11,1), (13, 1), (37, 1)]
facts1, facts2 = init(xss)
for _ in xrange(n):
n, r = map(int, raw_input().strip().split())
print solve(n, r, facts1, facts2, xss)[0]
if __name__ == '__main__':
main()
# cProfile.run('main()')
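# --- Illustrative check (not part of the original solution) ---
# The target modulus is composite, 142857 = 27 * 11 * 13 * 37, which is why
# C(n, r) is computed per prime-power factor and recombined with the Chinese
# Remainder Theorem. A brute-force check for a small case:
import math
assert 27 * 11 * 13 * 37 == 142857
def _small_ncr_mod(n, r, m):
    return math.factorial(n) // (math.factorial(r) * math.factorial(n - r)) % m
assert _small_ncr_mod(10, 3, 142857) == 120  # C(10, 3) = 120, below the modulus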
|
cwolferh/heat-scratch
|
heat/engine/resources/openstack/neutron/lbaas/listener.py
|
Python
|
apache-2.0
| 7,961 | 0 |
#
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
from heat.engine import translation
class Listener(neutron.NeutronResource):
"""A resource for managing LBaaS v2 Listeners.
This resource creates and manages Neutron LBaaS v2 Listeners,
which represent a listening endpoint for the vip.
"""
support_status = support.SupportStatus(version='6.0.0')
required_service_extension = 'lbaasv2'
PROPERTIES = (
PROTOCOL_PORT, PROTOCOL, LOADBALANCER, NAME,
ADMIN_STATE_UP, DESCRIPTION, DEFAULT_TLS_CONTAINER_REF,
SNI_CONTAINER_REFS, CONNECTION_LIMIT, TENANT_ID
) = (
'protocol_port', 'protocol', 'loadbalancer', 'name',
'admin_state_up', 'description', 'default_tls_container_ref',
'sni_container_refs', 'connection_limit', 'tenant_id'
)
PROTOCOLS = (
TCP, HTTP, HTTPS, TERMINATED_HTTPS,
) = (
'TCP', 'HTTP', 'HTTPS', 'TERMINATED_HTTPS',
)
ATTRIBUTES = (
LOADBALANCERS_ATTR, DEFAULT_POOL_ID_ATTR
) = (
'loadbalancers', 'default_pool_id'
)
properties_schema = {
PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('TCP or UDP port on which to listen for client traffic.'),
required=True,
constraints=[
constraints.Range(1, 65535),
]
),
PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Protocol on which to listen for the client traffic.'),
required=True,
constraints=[
constraints.AllowedValues(PROTOCOLS),
]
),
LOADBALANCER: properties.Schema(
properties.Schema.STRING,
_('ID or name of the load balancer with which listener '
'is associated.'),
required=True,
constraints=[
constraints.CustomConstraint('neutron.lbaas.loadbalancer')
]
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of this listener.'),
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of this listener.'),
update_allowed=True,
default=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of this listener.'),
update_allowed=True,
default=''
),
DEFAULT_TLS_CONTAINER_REF: properties.Schema(
properties.Schema.STRING,
_('Default TLS container reference to retrieve TLS '
'information.'),
update_allowed=True
),
SNI_CONTAINER_REFS: properties.Schema(
properties.Schema.LIST,
_('List of TLS container references for SNI.'),
update_allowed=True
),
        CONNECTION_LIMIT: properties.Schema(
properties.Schema.INTEGER,
_('The maximum number of connections permitted for this '
'load balancer. Defaults to -1, which is infinite.'),
update_allowed=True,
default=-1,
constraints=[
constraints.Range(min=-1),
]
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the tenant who owns the listener.')
),
}
attributes_schema = {
LOADBALANCERS_ATTR: attributes.Schema(
_('ID of the load balancer this listener is associated to.'),
type=attributes.Schema.LIST
),
DEFAULT_POOL_ID_ATTR: attributes.Schema(
_('ID of the default pool this listener is associated to.'),
type=attributes.Schema.STRING
)
}
def translation_rules(self, props):
return [
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.LOADBALANCER],
client_plugin=self.client_plugin(),
finder='find_resourceid_by_name_or_id',
entity='loadbalancer'
),
]
def validate(self):
res = super(Listener, self).validate()
if res:
return res
if self.properties[self.PROTOCOL] == self.TERMINATED_HTTPS:
if self.properties[self.DEFAULT_TLS_CONTAINER_REF] is None:
msg = (_('Property %(ref)s required when protocol is '
'%(term)s.') % {'ref': self.DEFAULT_TLS_CONTAINER_REF,
'term': self.TERMINATED_HTTPS})
raise exception.StackValidationFailed(message=msg)
def _check_lb_status(self):
lb_id = self.properties[self.LOADBALANCER]
return self.client_plugin().check_lb_status(lb_id)
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
properties['loadbalancer_id'] = properties.pop(self.LOADBALANCER)
return properties
def check_create_complete(self, properties):
if self.resource_id is None:
try:
listener = self.client().create_listener(
{'listener': properties})['listener']
self.resource_id_set(listener['id'])
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def _show_resource(self):
return self.client().show_listener(
self.resource_id)['listener']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
self._update_called = False
return prop_diff
def check_update_complete(self, prop_diff):
if not prop_diff:
return True
if not self._update_called:
try:
self.client().update_listener(self.resource_id,
{'listener': prop_diff})
self._update_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def handle_delete(self):
self._delete_called = False
def check_delete_complete(self, data):
if self.resource_id is None:
return True
if not self._delete_called:
try:
self.client().delete_listener(self.resource_id)
self._delete_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
elif self.client_plugin().is_not_found(ex):
return True
raise
return self._check_lb_status()
def resource_mapping():
return {
'OS::Neutron::LBaaS::Listener': Listener,
}
|
cossacklabs/acra
|
tests/test.py
|
Python
|
apache-2.0
| 375,193 | 0.003038 |
# Copyright 2016, Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import asyncio
import collections
import collections.abc
import contextlib
import http
import json
import logging
import os
import os.path
import random
import re
import shutil
import signal
import socket
import ssl
import stat
import subprocess
import tempfile
import traceback
import unittest
from base64 import b64decode, b64encode
from distutils.dir_util import copy_tree
from tempfile import NamedTemporaryFile
from urllib.parse import urlparse
from urllib.request import urlopen
import asyncpg
import grpc
import mysql.connector
import psycopg2
import psycopg2.errors
import psycopg2.extras
import pymysql
import redis
import requests
import semver
import sqlalchemy as sa
import sys
import time
import yaml
from ddt import ddt, data
from hvac import Client
from prometheus_client.parser import text_string_to_metric_families
from sqlalchemy.dialects import mysql as mysql_dialect
from sqlalchemy.dialects import postgresql as postgresql_dialect
from sqlalchemy.dialects.postgresql import BYTEA
from sqlalchemy.exc import DatabaseError
import api_pb2
import api_pb2_grpc
import utils
from random_utils import random_bytes, random_email, random_int32, random_int64, random_str
from utils import (read_storage_public_key, read_storage_private_key,
read_zone_public_key, read_zone_private_key,
read_poison_public_key, read_poison_private_key,
destroy_server_storage_key,
decrypt_acrastruct, deserialize_and_decrypt_acrastruct,
load_random_data_config, get_random_data_files,
clean_test_data, safe_string, prepare_encryptor_config,
get_encryptor_config, abs_path, get_test_encryptor_config, send_signal_by_process_name,
load_yaml_config, dump_yaml_config, BINARY_OUTPUT_FOLDER)
# add to path our wrapper until not published to PYPI
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'wrappers/python'))
from acrawriter import create_acrastruct
# log python logs with time format as in golang
format = u"%(asctime)s - %(message)s"
handler = logging.StreamHandler(stream=sys.stderr)
handler.setFormatter(logging.Formatter(fmt=format, datefmt="%Y-%m-%dT%H:%M:%S%z"))
handler.setLevel(logging.DEBUG)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
DB_HOST = os.environ.get('TEST_DB_HOST', 'localhost')
DB_NAME = os.environ.get('TEST_DB_NAME', 'postgres')
DB_PORT = os.environ.get('TEST_DB_PORT', 5432)
TEST_TLS_CA = abs_path(os.environ.get('TEST_TLS_CA', 'tests/ssl/ca/ca.crt'))
TEST_TLS_SERVER_CERT = abs_path(os.environ.get('TEST_TLS_SERVER_CERT', os.path.join(os.path.dirname(__file__), 'ssl/acra-server/acra-server.crt')))
TEST_TLS_SERVER_KEY = abs_path(os.environ.get('TEST_TLS_SERVER_KEY', os.path.join(os.path.dirname(__file__), 'ssl/acra-server/acra-server.key')))
# keys copied to tests/* with modified rights to 0400 because keys in docker/ssl/ has access from groups/other but some
# db drivers prevent usage of keys with global rights
TEST_TLS_CLIENT_CERT = abs_path(os.environ.get('TEST_TLS_CLIENT_CERT', os.path.join(os.path.dirname(__file__), 'ssl/acra-writer/acra-writer.crt')))
TEST_TLS_CLIENT_KEY = abs_path(os.environ.get('TEST_TLS_CLIENT_KEY', os.path.join(os.path.dirname(__file__), 'ssl/acra-writer/acra-writer.key')))
TEST_TLS_CLIENT_2_CERT = abs_path(os.environ.get('TEST_TLS_CLIENT_2_CERT', os.path.join(os.path.dirname(__file__), 'ssl/acra-writer-2/acra-writer-2.crt')))
TEST_TLS_CLIENT_2_KEY = abs_path(os.environ.get('TEST_TLS_CLIENT_2_KEY', os.path.join(os.path.dirname(__file__), 'ssl/acra-writer-2/acra-writer-2.key')))
TEST_TLS_OCSP_CA = abs_path(os.environ.get('TEST_TLS_OCSP_CA', os.path.join(os.path.dirname(__file__), 'ssl/ca/ca.crt')))
TEST_TLS_OCSP_CERT = abs_path(os.environ.get('TEST_TLS_OCSP_CERT', os.path.join(os.path.dirname(__file__), 'ssl/ocsp-responder/ocsp-responder.crt')))
TEST_TLS_OCSP_KEY = abs_path(os.environ.get('TEST_TLS_OCSP_KEY', os.path.join(os.path.dirname(__file__), 'ssl/ocsp-responder/ocsp-responder.key')))
TEST_TLS_OCSP_INDEX = abs_path(os.environ.get('TEST_TLS_OCSP_INDEX', os.path.join(os.path.dirname(__file__), 'ssl/ca/index.txt')))
TEST_TLS_CRL_PATH = abs_path(os.environ.get('TEST_TLS_CRL_PATH', os.path.join(os.path.dirname(__file__), 'ssl/ca')))
TEST_WITH_TLS = os.environ.get('TEST_TLS', 'off').lower() == 'on'
OCSP_SERVER_PORT = int(os.environ.get('TEST_OCSP_SERVER_PORT', 8888))
CRL_HTTP_SERVER_PORT = int(os.environ.get('TEST_HTTP_SERVER_PORT', 8889))
TEST_WITH_TRACING = os.environ.get('TEST_TRACE', 'off').lower() == 'on'
TEST_WITH_REDIS = os.environ.get('TEST_REDIS', 'off').lower() == 'on'
TEST_TRACE_TO_JAEGER = os.environ.get('TEST_TRACE_JAEGER', 'off').lower() == 'on'
TEST_RANDOM_DATA_CONFIG = load_random_data_config()
TEST_RANDOM_DATA_FILES = get_random_data_files()
NoClientCert, RequestClientCert, RequireAnyClientCert, VerifyClientCertIfGiven, RequireAndVerifyClientCert = range(5)
if TEST_WITH_TLS:
ACRA_TLS_AUTH = RequireAndVerifyClientCert # verify if provided https://golang.org/pkg/crypto/tls/#ClientAuthType
else:
ACRA_TLS_AUTH = VerifyClientCertIfGiven
# 200 is overhead of encryption (chosen manually)
# multiply 2 because tested acrastruct in acrastruct
COLUMN_DATA_SIZE = (TEST_RANDOM_DATA_CONFIG['data_max_size'] + 200) * 2
metadata = sa.MetaData()
test_table = sa.Table('test', metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('data', sa.LargeBinary(length=COLUMN_DATA_SIZE)),
sa.Column('raw_data', sa.Text),
sa.Column('nullable_column', sa.Text, nullable=True),
sa.Column('empty', sa.LargeBinary(length=COLUMN_DATA_SIZE), nullable=False, default=b''),
)
acrarollback_output_table = sa.Table('acrarollback_output', metadata,
sa.Column('data', sa.LargeBinary),
)
# keys of json objects that return acra-addzone tool
ZONE_ID = 'id'
ZONE_PUBLIC_KEY = 'public_key'
zones = []
poison_record = None
poison_record_acrablock = None
master_key = None
KEYS_FOLDER = None
ACRA_MASTER_KEY_VAR_NAME = 'ACRA_MASTER_KEY'
MASTER_KEY_PATH = '/tmp/acra-test-master.key'
TEST_WITH_VAULT = os.environ.get('TEST_WITH_VAULT', 'off').lower() == 'on'
TEST_SSL_VAULT = os.environ.get('TEST_SSL_VAULT', 'off').lower() == 'on'
TEST_VAULT_TLS_CA = abs_path(os.environ.get('TEST_VAULT_TLS_CA', 'tests/ssl/ca/ca.crt'))
VAULT_KV_ENGINE_VERSION=os.environ.get('VAULT_KV_ENGINE_VERSION', 'v1')
CRYPTO_ENVELOPE_HEADER = b'%%%'
# TLS_CERT_CLIENT_* represent two different ClientIDs are used in tests, initialized in setupModule function
TLS_CERT_CLIENT_ID_1 = None
TLS_CERT_CLIENT_ID_2 = None
TLS_CLIENT_ID_SOURCE_DN = 'distinguished_name'
TLS_CLIENT_ID_SOURCE_SERIAL = 'serial_number'
POISON_KEY_PATH = '.poison_key/poison_key'
STATEMENT_TIMEOUT = 5 * 1000 # 5 sec
SETUP_SQL_COMMAND_TIMEOUT = 0.1
# how long wait forked process to respond
FORK_TIMEOUT = 2
# seconds for sleep call after failed polling forked process
FORK_FAIL_SLEEP = 0.1
CONNECTION_FAIL_SLEEP = 0.1
SOCKET_CONNECT_TIMEOUT = 3
KILL_WAIT_TIMEOUT = 2
CONNECT_TRY_COUNT = 3
SQL_EXECUTE_TRY_COUNT = 5
# http://docs.python-requests.org/en/master/user/advanced/#timeouts
# use only for requests.* methods
REQUEST_TIMEOUT = (5, 5) # connect_timeout, read_timeout
PG_UNIX_HOST = '/tmp'
DB_USER = os.environ.get('TEST_DB_USER', 'postgres')
DB_USER_PASSWORD = os.environ.get('TEST_DB_USER_PASSWORD', 'postgres')
SSLMODE = os.environ.get('TEST_SSL_MODE', 'require' if TEST_WITH_TLS else 'disable')
TEST_M
|
vagonbar/GNUnetwork
|
gwn/blocks/libio/gnuradio/new/gwnChannelqpsk.py
|
Python
|
gpl-3.0
| 4,398 | 0.007731 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of GNUWiNetwork,
# Copyright (C) 2014 by
# Pablo Belzarena, Gabriel Gomez Sena, Victor Gonzalez Barbone,
# Facultad de Ingenieria, Universidad de la Republica, Uruguay.
#
# GNUWiNetwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GNUWiNetwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNUWiNetwork. If not, see <http://www.gnu.org/licenses/>.
#
'''PSK modulation transmit / receive block.
'''
import sys
sys.path +=['..']
from gnuradio import digital
import gwnevents.api_events as api_events
import gwnTxRxL1_channel as TxRxLayer1
import gwnblocks.gwnblock as gwn
import math
class ChannelQPSK(gwn.GWNBlock):
'''PSK modulation block.
'''
def __init__(self,noise_voltage=0.01,frequency_offset=0.01,epsilon=1.001,taps=(1+0.5j, ),):
'''Constructor.
'''
super(ChannelQPSK,self).__init__(1, 'GNURadioChannelQPSK', 2, 2,1)
#super(TunTapInterface, self).__init__(1,'TunTapInterface', 2, 2)
self.set_timer(0, False, 3, 10)
self.rx_conn = gwn.gwninport.AQueueConnector()
self.tx_conn = gwn.gwninport.AQueueConnector()
self.rx_queue = self.rx_conn.lsevents
self.tx_queue = self.tx_conn.lsevents
self.set_connection_in(self.rx_conn, 1)
self.set_connection_out(self.tx_conn, 1)
self.tb = TxRxLayer1.gwn_sim_top_block(self.rx_queue,self.tx_queue,noise_voltage,frequency_offset,epsilon,taps,)
self.tb.start() # start flow graph
return
def process_data(self, port_type, port_nr, ev):
'''Process data function for PSK block.
'''
# print " ------------------------------------"
# print ev
# print port_type,port_nr
# print "-------------------------------------"
# PRUEBA: EL TIMER ESTA PUESTO SOLO PARA PROBAR EL SENSADO
if port_type == "intimer":
self.sense_carrier()
if port_type == 'inport' and port_nr == 0:
frame = ev.frmpkt
self.write_out(1, frame) # 1, to GNU radio
elif port_type == 'inport' and port_nr == 1:
frame = ev # ev is a frame received
if not frame:
print 'PSK: an empty frame from L1'
else:
event = api_events.mkevent("DataData")
event.frmpkt = frame
self.write_out(0, event)
return
def set_rx_freq(self, value):
'''Set receive frequency.
'''
self.tb_rx.set_freq(value)
def set_tx_freq(self, value):
'''Set transmit frequency.
'''
self.tb.set_freq(value)
def sense_carrier(self):
'''Sense carrier function.
'''
print " channel dbs sensed : "
aux = self.tb.hier_rx_0.analog_probe_avg_mag_sqrd_x_0.level()
if aux >0:
print 10*math.log10(aux)
def stop(self):
'''PSK block stop function.
        This stop function is required to stop GNU Radio threads. Overwrites generic block stop function; first stops locally started threads, waits on them, and finally invokes the generic stop function in PSK super class (generic block).
'''
self.tb_tx.stop()
self.tb_tx.wait()
print("tx top block stopped")
self.tb_rx.stop() # wait for it to finish
        self.tb_rx.wait() # wait for it to finish
print("rx top block stopped")
super(ChannelQPSK, self).stop()
class dotdict(dict):
'''dot.notation access to dictionary attributes.
'''
def __getattr__(self, attr):
return self.get(attr)
__setattr__= dict.__setitem__
__delattr__= dict.__delitem__
def main():
g = PSK()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
admiyo/keystone
|
keystone/routers/admin.py
|
Python
|
apache-2.0
| 6,755 | 0.009326 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import routes
from keystone.common import wsgi
from keystone.controllers.token import TokenController
from keystone.controllers.roles import RolesController
from keystone.controllers.staticfiles import StaticFilesController
from keystone.controllers.tenant import TenantController
from keystone.controllers.user import UserController
from keystone.controllers.version import VersionController
from keystone.controllers.extensions import ExtensionsController
import keystone.contrib.extensions.admin as extension
logger = logging.getLogger(__name__) # pylint: disable=C0103
class AdminApi(wsgi.Router):
"""WSGI entry point for admin Keystone API requests."""
def __init__(self):
mapper = routes.Mapper()
# Load extensions first so they can override core if they need to
extension.get_extension_configurer().configure(mapper)
# Token Operations
auth_controller = TokenController()
mapper.connect("/tokens", controller=auth_controller,
action="authenticate",
conditions=dict(method=["POST"]))
mapper.connect("/tokens/{token_id}", controller=auth_controller,
action="validate_token",
conditions=dict(method=["GET"]))
mapper.connect("/tokens/{token_id}", controller=auth_controller,
action="check_token",
conditions=dict(method=["HEAD"]))
# Do we need this. API doesn't have delete token.
mapper.connect("/tokens/{token_id}", controller=auth_controller,
action="delete_token",
conditions=dict(method=["DELETE"]))
mapper.connect("/tokens/{token_id}/endpoints",
controller=auth_controller,
action="endpoints",
conditions=dict(method=["GET"]))
# Tenant Operations
tenant_controller = TenantController()
mapper.connect("/tenants", controller=tenant_controller,
action="get_tenants", conditions=dict(method=["GET"]))
mapper.connect("/tenants/{tenant_id}",
controller=tenant_controller,
action="get_tenant", conditions=dict(method=["GET"]))
roles_controller = RolesController()
mapper.connect("/tenants/{tenant_id}/users/{user_id}/roles",
controller=roles_controller, action="get_user_roles",
conditions=dict(method=["GET"]))
# User Operations
user_controller = UserController()
mapper.connect("/users/{user_id}",
controller=user_controller,
action="get_user",
conditions=dict(method=["GET"]))
mapper.connect("/users/{user_id}/roles",
controller=roles_controller, action="get_user_roles",
conditions=dict(method=["GET"]))
# Miscellaneous Operations
version_controller = VersionController()
mapper.connect("/", controller=version_controller,
action="get_version_info", file="admin/version",
conditions=dict(method=["GET"]))
extensions_controller = ExtensionsController()
mapper.connect("/extensions",
controller=extensions_controller,
action="get_extensions_info",
conditions=dict(method=["GET"]))
# Static Files Controller
static_files_controller = StaticFilesController()
mapper.connect("/identityadminguide.pdf",
controller=static_files_controller,
action="get_pdf_contract",
root="content/admin/", pdf="identityadminguide.pdf",
conditions=dict(method=["GET"]))
mapper.connect("/identity-admin.wadl",
controller=static_files_controller,
action="get_wadl_contract",
root="content/admin/", wadl="identity-admin.wadl",
conditions=dict(method=["GET"]))
mapper.connect("/common.ent",
controller=static_files_controller,
action="get_wadl_contract",
root="content/common/", wadl="common.ent",
conditions=dict(method=["GET"]))
mapper.connect("/xsd/{xsd}",
controller=static_files_controller,
action="get_xsd_contract",
root="content/common/",
conditions=dict(method=["GET"]))
mapper.connect("/xsd/atom/{xsd}",
controller=static_files_controller,
action="get_xsd_atom_contract",
root="content/common/",
conditions=dict(method=["GET"]))
mapper.connect("/xslt/{file:.*}",
controller=static_files_controller,
action="get_static_file",
root="content/common/", path="xslt/",
mimetype="application/xml",
conditions=dict(method=["GET"]))
mapper.connect("/js/{file:.*}",
controller=static_files_controller,
action="get_static_file",
root="content/common/", path="js/",
mimetype="application/javascript",
conditions=dict(method=["GET"]))
mapper.connect("/style/{file:.*}",
controller=static_files_controller,
action="get_static_file",
root="content/common/", path="style/",
mimetype="application/css",
conditions=dict(method=["GET"]))
mapper.connect("/samples/{file:.*}",
controller=static_files_controller,
action="get_static_file",
root="content/common/", path="samples/",
conditions=dict(method=["GET"]))
super(AdminApi, self).__init__(mapper)
|
ethanaward/mycroft-core
|
mycroft/skills/naptime/__init__.py
|
Python
|
gpl-3.0
| 1,477 | 0 |
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from os.path import dirname
from adapt.intent import IntentBuilder
from mycroft.messagebus.message import Message
from mycroft.skills.core import MycroftSkill
__author__ = 'seanfitz'
class NapTimeSkill(MycroftSkill):
def __init__(self):
super(NapTimeSkill, self).__init__(name="NapTimeSkill")
def initialize(self):
self.load_data_files(dirname(__file__))
naptime_intent = IntentBuilder("NapTimeIntent").require(
"SleepCommand").build()
self.register_intent(naptime_intent, self.handle_intent)
def handle_intent(self, message):
self.emitter.emit(Message('recognizer_loop:sleep'))
self.speak_dialog("sleep")
def stop(self):
pass
def create_skill():
return NapTimeSkill()
|
alexgorban/models
|
research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_test.py
|
Python
|
apache-2.0
| 9,782 | 0.002249 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v1_fpn_feature_extractor.
By using parameterized test decorator, this test serves for both Slim-based and
Keras-based Mobilenet V1 FPN feature extractors in SSD.
"""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_fpn_feature_extractor
from object_detection.models import ssd_mobilenet_v1_fpn_keras_feature_extractor
slim = contrib_slim
@parameterized.parameters(
{'use_keras': False},
{'use_keras': True},
)
class SsdMobilenetV1FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True, use_explicit_padding=False,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
if use_keras:
return (ssd_mobilenet_v1_fpn_keras_feature_extractor.
SSDMobileNetV1FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
use_depthwise=True,
name='MobilenetV1_FPN'))
else:
return (ssd_mobilenet_v1_fpn_feature_extractor.
SSDMobileNetV1FpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_depthwise=True,
use_explicit_padding=use_explicit_padding))
def test_extract_features_returns_correct_shapes_256(self, use_keras):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=use_keras)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_returns_correct_shapes_384(self, use_keras):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=use_keras)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_with_dynamic_image_shape(self, use_keras):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=use_keras)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self, use_keras):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=use_keras)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self, use_keras):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
                                  (2, 8, 8, 32), (2, 4, 4, 32),
                                  (2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=use_keras)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_raises_error_with_invalid_image_size(
self, use_keras):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=use_keras)
def test_preprocess_returns_correct_value_range(self, use_keras):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
|
capturePointer/or-tools
|
examples/tests/issue4.py
|
Python
|
apache-2.0
| 1,031 | 0.022308 |
from constraint_solver import pywrapcp
def main():
solver = pywrapcp.Solver("time limit test")
n = 10
x = [solver.IntVar(1, n, "x[%i]" % i) for i in range(n)]
solver.Add(solver.AllDifferent(x, True))
solution = solver.Assignment()
solution.Add(x)
db = solver.Phase(x,
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
time_limit = 10000
branch_limit = 100000000
failures_limit = 100000000
solutions_limit = 10000000
  limits = (
solver.Limit(
time_limit, branch_limit, failures_limit, solutions_limit, True))
search_log = solver.SearchLog(1000)
solver.NewSearch(db, [limits, search_log])
num_solutions = 0
while solver.NextSolution():
print "x:", [x[i].Value() for i in range(n)]
num_solutions += 1
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.failures()
print "branches:", solver.branches()
print "wall_time:", solver.wall_time()
if __name__ == "__main__":
main()
|
mheap/ansible
|
lib/ansible/modules/cloud/openstack/os_flavor_facts.py
|
Python
|
gpl-3.0
| 6,809 | 0.000587 |
#!/usr/bin/python
# Copyright (c) 2015 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_flavor_facts
short_description: Retrieve facts about one or more flavors
author: "David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Retrieve facts about available OpenStack instance flavors. By default,
facts about ALL flavors are retrieved. Filters can be applied to get
facts for only matching flavors. For example, you can filter on the
amount of RAM available to the flavor, or the number of virtual CPUs
available to the flavor, or both. When specifying multiple filters,
*ALL* filters must match on a flavor before that flavor is returned as
a fact.
notes:
- This module creates a new top-level C(openstack_flavors) fact, which
contains a list of unsorted flavors.
requirements:
- "python >= 2.6"
- "openstacksdk"
options:
name:
description:
- A flavor name. Cannot be used with I(ram) or I(vcpus) or I(ephemeral).
ram:
description:
- "A string used for filtering flavors based on the amount of RAM
(in MB) desired. This string accepts the following special values:
'MIN' (return flavors with the minimum amount of RAM), and 'MAX'
(return flavors with the maximum amount of RAM)."
- "A specific amount of RAM may also be specified. Any flavors with this
exact amount of RAM will be returned."
- "A range of acceptable RAM may be given using a special syntax. Simply
prefix the amount of RAM with one of these acceptable range values:
'<', '>', '<=', '>='. These values represent less than, greater than,
less than or equal to, and greater than or equal to, respectively."
type: bool
default: 'no'
vcpus:
description:
- A string used for filtering flavors based on the number of virtual
CPUs desired. Format is the same as the I(ram) parameter.
type: bool
default: 'no'
limit:
description:
- Limits the number of flavors returned. All matching flavors are
returned by default.
ephemeral:
description:
- A string used for filtering flavors based on the amount of ephemeral
storage. Format is the same as the I(ram) parameter
type: bool
default: 'no'
version_added: "2.3"
availability_zone:
description:
- Ignored. Present for backwards compatibility
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about all available flavors
- os_flavor_facts:
cloud: mycloud
# Gather facts for the flavor named "xlarge-flavor"
- os_flavor_facts:
cloud: mycloud
name: "xlarge-flavor"
# Get all flavors that have exactly 512 MB of RAM.
- os_flavor_facts:
cloud: mycloud
ram: "512"
# Get all flavors that have 1024 MB or more of RAM.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
# Get a single flavor that has the minimum amount of RAM. Using the 'limit'
# option will guarantee only a single flavor is returned.
- os_flavor_facts:
cloud: mycloud
ram: "MIN"
limit: 1
# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
# Get all flavors with 1024 MB of RAM or more, exactly 2 virtual CPUs, and
# less than 30gb of ephemeral storage.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
ephemeral: "<30"
'''
RETURN = '''
openstack_flavors:
description: Dictionary describing the flavors.
returned: On success.
type: complex
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
description: Make flavor accessible to the public.
returned: success
type: bool
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
ram=dict(required=False, default=None),
vcpus=dict(required=False, default=None),
limit=dict(required=False, default=None, type='int'),
ephemeral=dict(required=False, default=None),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['name', 'ram'],
['name', 'vcpus'],
['name', 'ephemeral']
]
)
module = AnsibleModule(argument_spec, **module_kwargs)
name = module.params['name']
vcpus = module.params['vcpus']
ram = module.params['ram']
ephemeral = module.params['ephemeral']
limit = module.params['limit']
filters = {}
if vcpus:
filters['vcpus'] = vcpus
if ram:
filters['ram'] = ram
if ephemeral:
filters['ephemeral'] = ephemeral
sdk, cloud = openstack_cloud_from_module(module)
try:
if name:
flavors = cloud.search_flavors(filters={'name': name})
else:
flavors = cloud.list_flavors()
if filters:
flavors = cloud.range_search(flavors, filters)
if limit is not None:
flavors = flavors[:limit]
module.exit_json(changed=False,
ansible_facts=dict(openstack_flavors=flavors))
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
mflu/openvstorage_centos
|
openstack/tests/__init__.py
|
Python
|
apache-2.0
| 694 | 0 |
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package contains the unit tests for OVS Cinder Plugin for OpenStack
Tested on Plugin version 1.0.2a
"""
|
vsajip/django
|
django/views/i18n.py
|
Python
|
bsd-3-clause
| 9,782 | 0.002556 |
import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.utils import importlib
from django.utils.translation import check_for_language, activate, to_locale, get_language
from django.utils.text import javascript_quote
from django.utils.encoding import smart_unicode
from django.utils.formats import get_format_modules, get_format
from django.utils import six
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.REQUEST.get('next', None)
if not next:
next = request.META.get('HTTP_REFERER', None)
if not next:
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session['django_language'] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
return response
def get_formats():
"""
Returns all formats strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
src = []
for k, v in result.items():
if six.PY3:
k = k.encode('ascii')
if isinstance(v, six.string_types + (int,)):
src.append("formats['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(smart_unicode(v))))
elif isinstance(v, (tuple, list)):
v = [javascript_quote(smart_unicode(value)) for value in v]
src.append("formats['%s'] = ['%s'];\n" % (javascript_quote(k), "', '".join(v)))
return ''.join(src)
NullSource = """
/* gettext identity library */
function gettext(msgid) { return msgid; }
function ngettext(singular, plural, count) { return (count == 1) ? singular : plural; }
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) { return msgid; }
function npgettext(context, singular, plural, count) { return (count == 1) ? singular : plural; }
"""
LibHead = """
/* gettext library */
var catalog = new Array();
"""
LibFoot = """
function gettext(msgid) {
var value = catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
}
function ngettext(singular, plural, count) {
value = catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[pluralidx(count)];
}
}
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) {
var value = gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
}
function npgettext(context, singular, plural, count) {
var value = ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = ngettext(singular, plural, count);
}
return value;
}
"""
LibFormatHead = """
/* formatting library */
var formats = new Array();
"""
LibFormatFoot = """
function get_format(format_type) {
var value = formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
}
"""
SimplePlural = """
function pluralidx(count) { return (count == 1) ? 0 : 1; }
"""
InterPolate = r"""
function interpolate(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
}
"""
PluralIdx = r"""
function pluralidx(n) {
var v=%s;
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
}
"""
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
src = [NullSource, InterPolate, LibFormatHead, get_formats(), LibFormatFoot]
return http.HttpResponse(''.join(src), 'text/javascript')
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
if request.GET:
if 'language' in request.GET:
if check_for_language(request.GET['language']):
activate(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, six.string_types):
packages = packages.split('+')
packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
default_locale = to_locale(settings.LANGUAGE_CODE)
locale = to_locale(get_language())
t = {}
paths = []
en_selected = locale.startswith('en')
en_catalog_missing = True
# paths of requested packages
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(p.__file__), 'locale')
paths.append(path)
# add the filesystem paths listed in the LOCALE_PATHS setting
paths.extend(list(reversed(settings.LOCALE_PATHS)))
# first load all english languages files for defaults
for path in paths:
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
pass
else:
# 'en' is the selected language and at least one of the packages
# listed in `packages` has an 'en' catalog
if en_selected:
en_catalog_missing = False
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the currently selected language is English but it doesn't have a
# translation catalog (presumably due to being the language translated
# from) then a wrong language catalog might have been loaded in the
# previous step. It needs to be discarded.
if en_selected and en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
|
leifos/boxes
|
treasure-houses/asg/admin.py
|
Python
|
mit
| 178 | 0 |
__author__ = 'leif'
from django.contrib import admin
from models import *
admin.site.register(GameExperiment)
admin.site.register(UserProfile)
admin.site.register(MaxHighScore)
|
rrustia/code-katas
|
src/test_is_thue_morse.py
|
Python
|
mit
| 318 | 0 |
"""Test."""
import pytest
TM_TABLE = [
([0, 1, 1, 0, 1], True),
([0], True),
([1], False),
([0, 1, 0, 0], False),
]
@pytest.mark.parametrize("n, result", TM_TABLE)
def test_is_thue_morse(n, result):
"""Test."""
    from is_thue_morse import is_thue_morse
assert is_thue_morse(n) == result
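# Editor's note: the is_thue_morse module under test is not included in this
# record. A hypothetical reference implementation consistent with TM_TABLE
# above uses the fact that the Thue-Morse value at index i is the parity of
# the number of set bits in i.
def _reference_is_thue_morse(seq):
    """Return True if seq is a prefix of the Thue-Morse sequence."""
    return list(seq) == [bin(i).count('1') % 2 for i in range(len(seq))]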
|
astaninger/speakout
|
venv/lib/python3.6/site-packages/setuptools/command/upload.py
|
Python
|
mit
| 1,493 | 0 |
import getpass
from distutils import log
from distutils.command import upload as orig
class upload(orig.upload):
"""
Override default upload behavior to obtain password
in a variety of different ways.
"""
def run(self):
try:
orig.upload.run(self)
finally:
self.announce(
"WARNING: Uploading via this command is deprecated, use twine "
"to upload instead (https://pypi.org/p/twine/)",
log.WARN
)
def finalize_options(self):
orig.upload.finalize_options(self)
self.username = (
self.username or
getpass.getuser()
)
# Attempt to obtain password. Short circuit evaluation at the first
# sign of success.
self.password = (
self.password or
self._load_password_from_keyring() or
self._prompt_for_password()
)
def _load_password_from_keyring(self):
"""
        Attempt to load password from keyring. Suppress Exceptions.
"""
try:
keyring = __import__('keyring')
return keyring.get_password(self.repository, self.username)
except Exception:
pass
def _prompt_for_password(self):
"""
Prompt for a password on the tty. Suppress Exceptions.
"""
try:
return getpass.getpass()
except (Exception, KeyboardInterrupt):
pass
|
varunarya10/ironic
|
ironic/drivers/fake.py
|
Python
|
apache-2.0
| 2,806 | 0 |
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake drivers used in testing.
"""
from ironic.common import exception
from ironic.drivers import base
from ironic.drivers.modules import fake
from ironic.drivers.modules import ipminative
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules import pxe
from ironic.drivers.modules import seamicro
from ironic.drivers.modules import ssh
from ironic.drivers import utils
from ironic.openstack.common import importutils
class FakeDriver(base.BaseDriver):
"""Example implementation of a Driver."""
def __init__(self):
self.power = fake.FakePower()
self.deploy = fake.FakeDeploy()
self.a = fake.FakeVendorA()
self.b = fake.FakeVendorB()
self.mapping = {'first_method': self.a,
'second_method': self.b}
self.vendor = utils.MixinVendorInterface(self.mapping)
self.console = fake.FakeConsole()
class FakeIPMIToolDriver(base.BaseDriver):
"""Example implementation of a Driver."""
def __init__(self):
self.power = ipmitool.IPMIPower()
self.deploy = fake.FakeDeploy()
self.vendor = ipmitool.VendorPassthru()
class FakePXEDriver(base.BaseDriver):
"""Example implementation of a Driver."""
def __init__(self):
self.power = fake.FakePower()
self.deploy = pxe.PXEDeploy()
self.vendor = pxe.VendorPassthru()
class FakeSSHDriver(base.BaseDriver):
"""Example implementation of a Driver."""
def __init__(self):
self.power = ssh.SSHPower()
self.deploy = fake.FakeDeploy()
class FakeIPMINativeDriver(base.BaseDriver):
"""Example implementation of a Driver."""
def __init__(self):
self.power = ipminative.NativeIPMIPower()
self.deploy = fake.FakeDeploy()
self.vendor = ipminative.VendorPassthru()
class FakeSeaMicroDriver(base.BaseDriver):
"""Fake SeaMicro driver."""
def __init__(self):
if not importutils.try_import('seamicroclient'):
raise exception.DriverNotFound('FakeSeaMicroDriver')
self.power = seamicro.Power()
self.deploy = fake.FakeDeploy()
self.vendor = seamicro.VendorPassthru()
|
anhstudios/swganh
|
data/scripts/templates/object/weapon/ranged/vehicle/shared_vehicle_atst_ranged.py
|
Python
|
mit
| 454 | 0.048458 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Weapon()
	result.template = "object/weapon/ranged/vehicle/shared_vehicle_atst_ranged.iff"
result.attribute_template_id = 10
result.stfName("obj_n","unknown_weapon")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
felixbb/forseti-security
|
google/cloud/security/common/data_access/violation_dao.py
|
Python
|
apache-2.0
| 3,393 | 0 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the data access object (DAO) for Organizations."""
import MySQLdb
from google.cloud.security.common.data_access import dao
from google.cloud.security.common.data_access import errors as db_errors
from google.cloud.security.common.data_access import violation_map as vm
from google.cloud.security.common.util import log_util
LOGGER = log_util.get_logger(__name__)
class ViolationDao(dao.Dao):
"""Data access object (DAO) for rule violations."""
def insert_violations(self, violations, resource_name,
snapshot_timestamp=None):
"""Import violations into database.
Args:
violations: An iterator of RuleViolations.
resource_name: String that defines a resource
snapshot_timestamp: The snapshot timestamp to associate these
violations with.
Return:
A tuple of (int, list) containing the count of inserted rows and
a list of violations that encountered an error during insert.
Raise:
MySQLError if snapshot table could not be created.
"""
try:
# Make sure to have a reasonable timestamp to use.
if not snapshot_timestamp:
snapshot_timestamp = self.get_latest_snapshot_timestamp(
('PARTIAL_SUCCESS', 'SUCCESS'))
# Create the violations snapshot table.
snapshot_table = self._create_snapshot_table(
resource_name, snapshot_timestamp)
except MySQLdb.Error, e:
raise db_errors.MySQLError(resource_name, e)
inserted_rows = 0
violation_errors = []
for violation in violations:
for formatted_violation in _format_violation(violation,
resource_name):
try:
self.execute_sql_with_commit(
resource_name,
vm.VIOLATION_INSERT_MAP[resource_name](snapshot_table),
formatted_violation)
inserted_rows += 1
except MySQLdb.Error, e:
LOGGER.error('Unable to insert violation %s due to %s',
formatted_violation, e)
violation_errors.append(formatted_violation)
return (inserted_rows, violation_errors)
def _format_violation(violation, resource_name):
    """Violation formatting stub that uses a map to call the formatting
    function for the resource.
Args:
violation: An iterator of RuleViolations.
        resource_name: String that defines a resource
Returns:
Formatted violations
"""
formatted_output = vm.VIOLATION_MAP[resource_name](violation)
return formatted_output
|
RackHD/RackHD
|
test/tests/amqp/test_amqp_node_rediscover.py
|
Python
|
apache-2.0
| 18,627 | 0.003382 |
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
Author(s):
Norton Luo
This test validates the AMQP messages sent out in the workflow, along with node delete and rediscovery.
It also validates the web hook API and the node registration function.
The test chooses a node and resets it. Once the reset starts, it deletes the node and lets the node
run through the discovery workflow. AMQP and webhook listeners are launched beforehand in separate worker threads to monitor the messages.
'''
from sm_plugin import smp_get_stream_monitor
from time import sleep
import gevent
import gevent.queue
import random
import flogging
import unittest
import json
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import fit_common
import env_ip_helpers
import test_api_utils
from nose.plugins.attrib import attr
from nosedep import depends
logs = flogging.get_loggers()
WEBHOOK_PORT = 9889
class RequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
logs.debug('POST came in on test-http-server')
request_headers = self.headers
content_length = request_headers.getheaders('content-length')
if content_length:
length = int(content_length[0])
else:
            length = 0
webhook_body = str(self.rfile.read(length))
logs.tdl.debug('body is: %s', webhook_body)
self.send_response(200)
self.server.do_post_queue.put(webhook_body)
class HttpWorker(gevent.Greenlet):
def __init__(self, port, timeout=10):
super(HttpWorker, self).__init__()
self.__server = HTTPServer(('', port), RequestHandler)
self.__server.timeout = timeout
self.__server.do_post_queue = gevent.queue.Queue()
testhost_ipv4 = env_ip_helpers.get_testhost_ip()
self.ipv4_address = testhost_ipv4
self.ipv4_port = port
@property
def do_post_queue(self):
return self.__server.do_post_queue
def dispose(self):
logs.debug('http service shutdown')
def _run(self):
self.__server.handle_request()
@attr(all=True, regression=False, smoke=False)
class test_node_rediscover_amqp_message(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Get the stream-monitor plugin for AMQP
cls._amqp_sp = smp_get_stream_monitor('amqp')
# Create the "all events" tracker
cls._on_events_tracker = cls._amqp_sp.create_tracker('on-events-all', 'on.events', '#')
# We have context information that needs to be passed from test-to-test. Set up the template
# space.
cls._run_context = {
'start_nodeid': None,
'start_node_uuid': None,
'reboot_graphid': None,
'rediscovered_nodeid': None
}
# Set up the web-serverlet to get the callback from the hooks part
# of the api. We do this here so thee server stays up for the required
# tests!
cls._serverworker = HttpWorker(WEBHOOK_PORT, 300)
cls._serverworker.start()
@classmethod
def tearDownClass(cls):
cls._serverworker.dispose()
def setUp(self):
# attach a processor to the on-events-tracker amqp tracker. Then we can
# attach indiviual match-clauses to this in each test-case.
self.__qproc = self._amqp_sp.get_tracker_queue_processor(self._on_events_tracker)
def __set_run_context(self, key, value):
assert key in self._run_context, \
'{} not a run-context variable'.format(key)
assert self._run_context[key] is None, \
'attempt to set existing run-context for {} to {}, was already {}'.format(
key, value, self._run_context[key])
self._run_context[key] = value
def __get_run_context(self, key):
assert key in self._run_context, \
'{} not a run-context variable'.format(key)
assert self._run_context[key] is not None, \
'attempt to get unset run-context for {}'.format(key)
return self._run_context[key]
def __set_web_hook(self):
mondata = fit_common.rackhdapi('/api/current/hooks')
self.assertTrue(
mondata['status'] < 209,
'Incorrect HTTP return code, could not check hooks. expected<209, got:' + str(mondata['status']))
ip = self._serverworker.ipv4_address
# ip = '172.31.110.34'
port = self._serverworker.ipv4_port
hookurl = "http://" + str(ip) + ":" + str(port)
for hooks in mondata['json']:
if hooks['url'] == hookurl:
logs.debug("Hook URL already exist in RackHD")
return
response = fit_common.rackhdapi(
'/api/current/hooks',
action='post',
payload={
"name": "FITdiscovery",
"url": hookurl,
"filters": [{"type": "node",
"action": "discovered"}]})
self.assertTrue(
response['status'] < 209,
'Incorrect HTTP return code, expected<209, got:' + str(response['status']))
def __apply_obmsetting_to_node(self, nodeid):
usr = None
# pwd = ''
response = fit_common.rackhdapi(
'/api/2.0/nodes/' + nodeid + '/catalogs/bmc')
bmcip = response['json']['data']['IP Address']
# Try credential record in config file
for creds in fit_common.fitcreds()['bmc']:
if fit_common.remote_shell(
'ipmitool -I lanplus -H ' + bmcip + ' -U ' + creds['username'] + ' -P ' +
creds['password'] + ' fru')['exitcode'] == 0:
usr = creds['username']
pwd = creds['password']
break
# Put the credential to OBM settings
if usr is not None:
payload = {
"service": "ipmi-obm-service",
"config": {
"host": bmcip,
"user": usr,
"password": pwd},
"nodeId": nodeid}
api_data = fit_common.rackhdapi("/api/2.0/obms", action='put', payload=payload)
if api_data['status'] == 201:
return True
return False
def __check_skupack(self):
sku_installed = fit_common.rackhdapi('/api/2.0/skus')['json']
if len(sku_installed) < 2:
return False
else:
return True
def __process_web_message(self, webhook_body):
try:
webhook_body_json = json.loads(webhook_body)
except ValueError:
self.fail("FAILURE - The message body is not json format!")
self.assertIn("action", webhook_body_json, "action field is not contained in the discover message")
self.assertEquals(
webhook_body_json['action'], "discovered",
"action field not correct! expect {0}, get {1}"
.format("discovered", webhook_body_json['action']))
self.assertIn("data", webhook_body_json, "data field is not contained in the discover message")
self.assertIn("nodeId", webhook_body_json["data"], "nodeId is not contained in the discover message")
self.assertNotEquals(
webhook_body_json["data"]["nodeId"], "", "nodeId generated in discovery doesn't include valid data ")
self.assertIn(
"ipMacAddresses", webhook_body_json["data"], "ipMacAddresses is not contained in the discover message")
self.assertNotEquals(
webhook_body_json["data"]["ipMacAddresses"], "",
"ipMacAddresses generated during node discovery doesn't include valid data ")
def __build_info_vblock(self, message_type, action, typeid, nodeid):
expected_payload = {
"type": message_type,
"action": action,
"typeId": typeid,
"nodeId": nodeid,
"severity": "information",
"createdAt": "<<present>>",
"data": "<<present>>",
|
clarkkarenl/brautbot
|
wordcount.py
|
Python
|
artistic-2.0
| 1,390 | 0 |
#!/usr/bin/env python
from __future__ import print_function
from collections import Counter
from operator import itemgetter
import os
_path = os.path.abspath(os.path.dirname(__file__))
SOURCE = os.path.join(_path, 'poems_for_wordcount.txt')
DESTINATION = os.path.join(_path, 'poem_words_out.txt')
def sort_word_counts(word_dict):
# first sort to get k by alpha
sorted_by_key = sorted(word_dict.items(), key=itemgetter(0))
# then reverse sort on number of occurrences (v) to get list in desc order
    return sorted(sorted_by_key, key=itemgetter(1), reverse=1)
def main():
with open(SOURCE, 'rb') as source, open(DESTINATION, 'wb') as destination:
word_counts = Counter(source.read().lower().split())
for item in sort_word_counts(word_counts):
print("{} {}".format(*item), file=destination)
def test_sort_word_counts():
word_list = 'you watch the brown fox jumped over the fence'.split()
word_counts = Counter(word_list)
sorted_list = sort_word_counts(word_counts)
assert sorted_list[0][0] == 'the'
assert sorted_list[1][0] == 'brown'
assert sorted_list[-1][0] == 'you'
def test_output():
main()
output = open(DESTINATION, 'rb').readlines()
word, count = output[0].split()
assert len(output) == 3518
assert word == 'the'
assert int(count) == 1085
if __name__ == '__main__':
main()
|
napperley/OpenVPN_Tunneler
|
openvpn.py
|
Python
|
apache-2.0
| 1,290 | 0.003101 |
__author__ = 'Nick Apperley'
# -*- coding: utf-8 -*-
#
# Establishes an OpenVPN connection using an OVPN file. Based on a Hacking Lab Python script
# (http://media.hacking-lab.com/largefiles/livecd/z_openvpn_config/backtrack/vpn-with-python.py). Requires Python 3
# and the pexpect library (module).
import pexpect
from invalid_credentials_error import InvalidCredentialsError
# Set the timeout in seconds.
timeout = 15
def open_vpn_connection(username, password, conf_dir, ovpn_file):
    process = pexpect.spawn('openvpn %s' % ovpn_file, cwd=conf_dir, timeout=timeout)
try:
process.expect('Enter Auth Username:')
process.sendline(username)
process.expect('Enter Auth Password:')
process.sendline(password)
print('Connecting...')
process.expect('Initialization Sequence Completed')
print('Connected')
except pexpect.EOF:
print('Invalid username and/or password')
raise InvalidCredentialsError('Invalid OpenVPN username and/or password')
except pexpect.TIMEOUT:
print('Connection failed!')
raise TimeoutError('Cannot connect to OpenVPN server')
return process
def close_vpn_connection(process):
if process is not None:
process.kill(0)
print('Disconnected')
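# Editor's note: illustrative usage sketch, not part of the original module.
# The credentials, config directory and OVPN file name are placeholders.
if __name__ == '__main__':
    connection = open_vpn_connection('user', 'secret', '/etc/openvpn', 'client.ovpn')
    try:
        pass  # The tunnel is up at this point; do work over the VPN here.
    finally:
        close_vpn_connection(connection)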
|
chrisfilo/NeuroVault
|
scripts/prepare_sdr_package.py
|
Python
|
mit
| 1,853 | 0.013492 |
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
import json, requests
import os, errno
import urllib.request, urllib.parse, urllib.error
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
collections = []
next_url_url = "http://neurovault.org/api/collections/?format=json"
target_folder = "D:/scratch/neurovault_backup"
while next_url_url:
print("fetching %s"%next_url_url)
resp = requests.get(url=next_url_url)
data = json.loads(resp.text)
collections += [res for res in data['results'] if res['DOI'] != None]
next_url_url = data['next']
print("Fetched metadata of %d collections"%len(collections))
images_url_template = "http://neurovault.org/api/collections/%d/images/"
for collection in collections:
next_url = images_url_template%collection['id']
images = []
while next_url:
print("fetching %s"%next_url)
resp = requests.get(url=next_url)
data = json.loads(resp.text)
images += data['results']
next_url = data['next']
if len(images) == 0:
collections.remove(collection)
continue
mkdir_p(target_folder + "/%d"%collection['id'])
json.dump(images, open(target_folder + "/%d/images.json"%collection['id'], "w"), indent=4, sort_keys=True)
for image in images:
print("fetching %s"%image['file'])
try:
urllib.request.urlretrieve(image['file'], target_folder + "/%d/"%collection['id'] + str(image['id']) + ".nii.gz")
except:
print("failed to download %s"%image['file'])
json.dump(collections, open(target_folder + "/collections.json", "w"), indent=4, sort_keys=True)
|
esikachev/scenario
|
sahara/tests/tempest/scenario/data_processing/client_tests/test_jobs.py
|
Python
|
apache-2.0
| 2,479 | 0 |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest.scenario.data_processing.client_tests import base
from tempest import test
class JobTest(base.BaseDataProcessingTest):
def _check_create_job(self):
job_binary = {
'name': data_utils.rand_name('sahara-job-binary'),
'url': 'swift://sahara-container.sahara/example.jar',
'description': 'Test job binary',
'extra': {
'user': 'test',
'password': '123'
}
}
# create job_binary
job_binary = self.create_job_binary(**job_binary)
self.job = {
'job_type': 'Pig',
'mains': [job_binary.id]
}
job_name = data_utils.rand_name('sahara-job')
# create job
job = self.create_job(job_name, **self.job)
# check that job created successfully
self.assertEqual(job_name, job.name)
return job.id, job.name
def _check_job_list(self, job_id, job_name):
# check for job in list
job_list = self.client.jobs.list()
jobs_info = [(job.id, job.name) for job in job_list]
self.assertIn((job_id, job_name), jobs_info)
def _check_get_job(self, job_id, job_name):
# check job fetch by id
job = self.client.jobs.get(job_id)
self.assertEqual(job_name, job.name)
def _check_delete_job(self, job_id):
# delete job by id
self.client.jobs.delete(job_id)
# check that job really deleted
job_list = self.client.jobs.list()
self.assertNotIn(job_id, [job.id for job in job_list])
@test.services('data_processing')
def test_job(self):
job_id, job_name = self._check_create_job()
        self._check_job_list(job_id, job_name)
self._check_get_job(job_id, job_name)
self._check_delete_job(job_id)
|
netsec-ethz/scion
|
acceptance/cert_renewal/test.py
|
Python
|
apache-2.0
| 9,210 | 0.000326 |
#!/usr/bin/env python3
# Copyright 2021 Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import pathlib
import subprocess
import time
from typing import List
import sys
from http import client
from plumbum import cli
from acceptance.common import base
from acceptance.common import docker
from acceptance.common import scion
from python.lib import scion_addr
import toml
logger = logging.getLogger(__name__)
class Test(base.TestBase):
"""
Test that in a topology with multiple ASes, every AS is capable of
requesting renewed certificates. The test verifies that each AS has loaded
the renewed certificate.
The test is split into multiple steps:
1. Start the topology.
2. For each AS in the topology, create a new private key and request
certificate chain renewal. The renewed chain is verified against the
TRC.
3. Remove the previous private key from the control servers.
4. Ensure that the new private key and certificate are loaded by observing
the http endpoint.
5. Check connectivity with an end to end test.
6. Stop all control servers and purge the state. This includes deleting
all databases with cached data, including the path and trust database.
7. Restart control servers and check connectivity again.
"""
end2end = cli.SwitchAttr(
"end2end_integration",
str,
default="./bin/end2end_integration",
help="The end2end_integration binary " +
"(default: ./bin/end2end_integration)",
)
def main(self):
if not self.nested_command:
try:
self.setup()
# Give some time for the topology to start.
time.sleep(10)
self._run()
finally:
self.teardown()
def _run(self):
isd_ases = scion.ASList.load("%s/gen/as_list.yml" %
self.test_state.artifacts).all
cs_configs = self._cs_configs()
logger.info("==> Start renewal process")
for isd_as in isd_ases:
logging.info("===> Start renewal: %s" % isd_as)
self._renewal_request(isd_as)
logger.info("==> Check key and certificate reloads")
self._check_key_cert(cs_configs)
logger.info("==> Check connectivity")
subprocess.run(
[self.end2end, "-d", "-outDir", self.test_state.artifacts],
check=True)
logger.info("==> Shutting down control servers and purging caches")
for container in self.list_containers("scion_sd.*"):
self.test_state.dc("rm", container)
for container in self.list_containers("scion_cs.*"):
self.stop_container(container)
for cs_config in cs_configs:
files = list((pathlib.Path(self.test_state.artifacts) /
"gen-cache").glob("%s*" % cs_config.stem))
for db_file in files:
db_file.unlink()
logger.info("Deleted files: %s" % [file.name for file in files])
logger.info("==> Restart containers")
self.setup_start()
time.sleep(5)
logger.info("==> Check connectivity")
subprocess.run(
[self.end2end, "-d", "-outDir", self.test_state.artifacts],
check=True)
logger.info("==> Backup mode")
for isd_as in isd_ases:
logging.info("===> Start renewal: %s" % isd_as)
self._renewal_request(isd_as, mode="--backup")
def _renewal_request(
self,
isd_as: scion_addr.ISD_AS,
mode: str = "--force",
):
as_dir = self._to_as_dir(isd_as)
docker_dir = pathlib.Path("/share") / self._rel(as_dir)
def read_file(filename: str) -> str:
with open(as_dir / "crypto/as" / filename) as f:
return f.read()
chain_name = "ISD%s-AS%s.pem" % (isd_as.isd_str(),
isd_as.as_file_fmt())
old_chain = read_file(chain_name)
old_key = read_file("cp-as.key")
chain = docker_dir / "crypto/as" / chain_name
args = [
chain,
docker_dir / "crypto/as/cp-as.key",
mode,
"--trc",
docker_dir / "certs/ISD1-B1-S1.trc",
"--sciond",
self.execute("tester_%s" % isd_as.file_fmt(), "sh", "-c",
"echo $SCION_DAEMON").strip(),
*self._local_flags(isd_as),
]
logger.info("Requesting certificate chain renewal: %s" %
chain.relative_to(docker_dir))
logger.info(
self.execute("tester_%s" % isd_as.file_fmt(), "./bin/scion-pki",
"certificate", "renew", *args))
logger.info("Verify renewed certificate chain")
verify_out = self.execute("tester_%s" % isd_as.file_fmt(),
"./bin/scion-pki", "certificate", "verify",
chain, "--trc",
"/share/gen/trcs/ISD1-B1-S1.trc")
logger.info(str(verify_out).rstrip("\n"))
renewed_chain = read_file(chain_name)
renewed_key = read_file("cp-as.key")
if renewed_chain == old_chain:
raise Exception(
"renewed chain does not differ from previous chain")
if renewed_key == old_key:
raise Exception("renewed key does not differ from previous key")
def _check_key_cert(self, cs_configs: List[pathlib.Path]):
not_ready = [*cs_configs]
for _ in range(5):
logger.info(
"Checking if all control servers have reloaded the key and certificate..."
)
for cs_config in not_ready:
conn = client.HTTPConnection(self._http_endpoint(cs_config))
conn.request("GET", "/signer")
resp = conn.getresponse()
if resp.status != 200:
logger.info("Unexpected response: %d %s", resp.status,
resp.reason)
continue
isd_as = scion_addr.ISD_AS(cs_config.stem[2:-2])
as_dir = self._to_as_dir(isd_as)
chain_name = "ISD%s-AS%s.pem" % (isd_as.isd_str(),
isd_as.as_file_fmt())
pld = json.loads(resp.read().decode("utf-8"))
if pld["subject_key_id"] != self._extract_skid(
as_dir / "crypto/as" / chain_name):
continue
logger.info(
"Control server successfully loaded new key and certificate: %s"
% self._rel(cs_config))
not_ready.remove(cs_config)
if not not_ready:
break
time.sleep(3)
else:
logger.error(
"Control servers without reloaded key and certificate: %s" %
[cs_config.name for cs_config in not_ready])
sys.exit(1)
def _http_endpoint(self, cs_config: pathlib.Path):
with open(cs_config, "r") as f:
cfg = toml.load(f)
return cfg["metrics"]["prometheus"]
def _extract_skid(self, file: pathlib.Path):
out = subprocess.check_output(
['openssl', 'x509', '-in', file, '-noout', '-text'])
lines = out.splitlines()
for i, v in enumerate(lines):
if v.decode("utf-8").find("Subject Key Identifier") > -1:
skid = lines[i + 1].decode("utf-8").split()[-1].repl
|
toddself/beerlog2
|
runserver.py
|
Python
|
apache-2.0
| 44 | 0 |
from beerlog import app
app.run(debug=True)
|
hiidef/pylogd
|
pylogd/twisted/socket.py
|
Python
|
mit
| 1,335 | 0.001498 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A twisted UDP interface that is similar to the built-in socket interface."""
import traceback
from twisted.internet import reactor
from twisted.internet.protocol import DatagramProtocol
class UDPSocket(DatagramProtocol):
def __init__(self, host, port):
self.host = host
self.port = port
self.task = None
reactor.callWhenRunning(self.connect)
def connect(self):
self.task = reactor.listenUDP(0, self)
def connectTransport(self, ip):
self.transport.connect(ip, self.port)
def startProtocol(self):
"""Start the protocol. Resolve the host in case it is a hostname,
then call connect on the resulting ip and configured port."""
reactor.resolve(self.host).addCallback(self.connectTransport)
def sendto(self, msg, addr):
        # ignore the addr, because we only send to one place
try:
self.transport.write(msg)
except AttributeError:
# trying to log before twisted is running, nothing we can really do
pass
except AssertionError:
# trying to log before connection yields an assertion error
pass
def stopProtocol(self):
self.task.stopListening()
DatagramProtocol.stopProtocol(self)
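# Editor's note: illustrative usage sketch, not part of the original module.
# Host and port are placeholders; sends are silently dropped until the UDP
# transport has been set up and connected (see sendto/startProtocol above).
if __name__ == '__main__':
    sock = UDPSocket('localhost', 8126)
    reactor.callLater(1.0, sock.sendto, b'hello pylogd', None)
    reactor.callLater(2.0, reactor.stop)
    reactor.run()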
|
cjaymes/pyscap
|
src/scap/model/oval_5/var/OvalVariablesElement.py
|
Python
|
gpl-3.0
| 1,169 | 0.004277 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
logger = logging.getLogger(__name__)
class OvalVariablesElement(Model):
MODEL_MAP = {
'tag_name' : 'oval_variables',
'elements': [
{'tag_name': 'generator', 'class': 'scap.model.oval_5.GeneratorType'},
{'tag_name': 'variables', 'class': 'VariablesType', 'min': 0, 'max': 1},
{'xmlns': 'http://www.w3.org/2000/09/xmldsig#', 'tag_name': 'Signature', 'min': 0, 'max': 1},
],
}
|
goyal-sidd/BLT
|
comments/migrations/0004_auto_20170727_1308.py
|
Python
|
agpl-3.0
| 598 | 0.001672 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-27 13:08
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comments', '0003_auto_20170726_1348'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='parent',
            field=models.ForeignKey(default=-1, null=True, on_delete=django.db.models.deletion.CASCADE,
to='comments.Comment'),
),
]
|
theavey/ParaTemp
|
tests/test_sim_setup/conftest.py
|
Python
|
apache-2.0
| 584 | 0 |
"""
fixtures and setup for testing the sim_setup subpackage
"""
import pytest
@pytest.fixture(scope='function') # the created directories should be unique
def molecule(path_test_data, tmp_path):
from paratemp import cd
from paratemp.sim_setup import Molecule
path_gro = path_test_data / 'water.mol2'
# Note: this instantiation will make a new directory!
with cd(tmp_path):
mol = Molecule(path_gro)
return mol, tmp_path
@pytest.fixture
def molecule_w_params(molecule):
mol, tmp_path = molecule
mol.parameterize()
return mol, tmp_path
| |
kudrom/lupulo
|
lupulo/descriptors/date.py
|
Python
|
gpl-2.0
| 883 | 0 |
# -*- encoding: utf-8 -*-
# Copyright (C) 2015 Alejandro López Espinosa (kudrom)
import datetime
import random
class Date(object):
"""
Descriptor for a date datum
"""
def __init__(self, variance, **kwargs):
"""
@param variance is the maximum variance of time
allowed for the generation of random data.
"""
self.variance = variance
def generate(self):
"""
Generates random data for the descriptor.
        This is called by the DataSchemaManager.generate
"""
now = datetime.datetime.now().strftime("%s")
return int(now) + random.randrange(0, self.variance)
def validate(self, data):
"""
Validates @param data against the descriptor.
This is called by the DataSchemaManager.validate
"""
return True
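# Editor's note: minimal usage sketch, not part of the original file. The
# descriptor returns the current Unix timestamp shifted forward by up to
# `variance` seconds, as described in the docstrings above.
if __name__ == '__main__':
    date = Date(variance=60)
    sample = date.generate()
    assert date.validate(sample)
    print(sample)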
|
Onager/plaso
|
tests/parsers/plist_plugins/timemachine.py
|
Python
|
apache-2.0
| 1,644 | 0.001825 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the timemachine plist plugin."""
import unittest
from plaso.parsers.plist_plugins import timemachine
from tests.parsers.plist_plugins import test_lib
class TimeMachinePluginTest(test_lib.PlistPluginTestCase):
"""Tests for the timemachine plist plugin."""
def testProcess(self):
"""Tests the Process function."""
plist_name = 'com.apple.TimeMachine.plist'
plugin = timemachine.TimeMachinePlugin()
storage_writer = self._ParsePlistFileWithPlugin(
plugin, [plist_name], plist_name)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 13)
# The order in which PlistParser generates events is nondeterministic
# hence we sort the events.
events = list(storage_writer.GetSortedEvents())
    expected_timestamps = [
1379165051000000, 1380098455000000, 1380810276000000, 1381883538000000,
1382647890000000, 1383351739000000, 1384090020000000, 1385130914000000,
1386265911000000, 1386689852000000, 1387723091000000, 1388840950000000,
1388842718000000]
timestamps = sorted(
[event.timestamp for event in events])
self.assertEqual(timestamps, expected_timestamps)
expected_event_values = {
'data_type': 'plist:key',
'desc': (
'TimeMachine Backup in BackUpFast '
'(5B33C22B-A4A1-4024-A2F5-C9979C4AAAAA)'),
'key': 'item/SnapshotDates',
'root': '/Destinations'}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
iw3hxn/LibrERP
|
data_migration/tests/__init__.py
|
Python
|
agpl-3.0
| 70 | 0 |
# from . import test_partner_import
from . import test_product_import
| |
b3j0f/simpleneed
|
www/simpleneed/urls.py
|
Python
|
mit
| 1,471 | 0 |
"""simpleneed URL Configuration.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
    1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from .views import (
NeedLocationViewSet, ContactViewSet, MoodViewSet, GenderViewSet,
NeedViewSet, RoamViewSet, StatsViewSet, LocatedElementViewSet,
MessageViewSet, SupplyLocationViewSet
)
from rest_framework import routers
router = routers.DefaultRouter()
router.register(r'moods', MoodViewSet)
router.register(r'needs', NeedViewSet)
router.register(r'genders', GenderViewSet)
router.register(r'needlocations', NeedLocationViewSet)
router.register(r'contacts', ContactViewSet)
router.register(r'roams', RoamViewSet)
router.register(r'stats', StatsViewSet)
router.register(r'messages', MessageViewSet)
router.register(r'locatedelements', LocatedElementViewSet)
router.register(r'supplylocations', SupplyLocationViewSet)
urlpatterns = [url(r'', include(router.urls))]
|
abshkd/benzene
|
torrents/utils/__init__.py
|
Python
|
bsd-3-clause
| 254 | 0.031496 |
from django.utils.datastructures import SortedDict
from bencode import bencode, bdecode
def sort_dict(D):
result = SortedDict()
for key in sorted(D.keys()):
if type(D[key]) is dict:
D[key] = sort_dict(D[key])
        result[key] = D[key]
return result
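# Editor's note: illustrative sketch, not part of the original module. After
# sort_dict, keys are ordered alphabetically at every nesting level, so the
# bencoded output below is deterministic regardless of the input ordering.
if __name__ == '__main__':
    meta = {'info': {'name': 'file.bin', 'length': 12}, 'announce': 'http://tracker.example'}
    print(bencode(sort_dict(meta)))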
|
TimMaylon/corecoin
|
contrib/bitrpc/bitrpc.py
|
Python
|
mit
| 7,838 | 0.038147 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:4496")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:4496")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Corecoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
            print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
    except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Corecoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
yephper/django
|
django/urls/exceptions.py
|
Python
|
bsd-3-clause
| 167 | 0 |
from __future__ import unicode_literals
from django.http import Http404
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
pass
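For illustration only (not part of this module; the path and view name below are invented): Django's resolve() and reverse() raise these exceptions when a lookup fails.
from django.urls import resolve, reverse

def safe_lookup():
    # resolve() raises Resolver404 when no URL pattern matches the path.
    try:
        resolve('/no/such/path/')
    except Resolver404:
        pass
    # reverse() raises NoReverseMatch when the view name is unknown.
    try:
        reverse('no-such-view-name')
    except NoReverseMatch:
        pass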
|
shaunstanislaus/lemur
|
lemur/decorators.py
|
Python
|
apache-2.0
| 1,945 | 0.000514 |
"""
.. module: lemur.decorators
    :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
"""
from builtins import str
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
# this is only used for dev
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True): # pragma: no cover
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Headers'] = "Origin, X-Requested-With, Content-Type, Accept, Authorization "
h['Access-Control-Allow-Credentials'] = 'true'
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
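A minimal usage sketch, assuming a Flask app that is not part of lemur; it shows the decorator attaching CORS headers to an ordinary route.
from flask import Flask

app = Flask(__name__)

@app.route('/healthcheck')
@crossdomain(origin='*', methods=['GET', 'OPTIONS'])
def healthcheck():
    # The decorator adds the Access-Control-* headers to this response.
    return 'ok'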
|
alfred82santa/tarrabme2
|
src/events/adminForms.py
|
Python
|
gpl-3.0
| 298 | 0.010067 |
from models import Event, EventRole
from common.adminForms import CommonAdminForm
class EventAdminForm(CommonAdminForm):
class Meta(CommonAdminForm.Meta):
model = Event
class EventRoleAdminForm(CommonAdminForm):
class Meta(CommonAdminForm.Meta):
model = EventRole
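A hypothetical registration sketch (the ModelAdmin classes are assumptions, not part of this repository) showing how such forms are normally attached in the Django admin:
from django.contrib import admin

class EventAdmin(admin.ModelAdmin):
    form = EventAdminForm

class EventRoleAdmin(admin.ModelAdmin):
    form = EventRoleAdminForm

admin.site.register(Event, EventAdmin)
admin.site.register(EventRole, EventRoleAdmin)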
|