repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
---|---|---|---|---|---|---|---|---|
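Each row below splits one source file into prefix, middle and suffix around a short span, the layout commonly used for fill-in-the-middle code tasks. Below is a minimal sketch of reading such rows and reassembling a file; it assumes the table has been exported to a hypothetical `rows.parquet` file, and the file name and use of pandas are illustrative assumptions, not part of the table itself.

```python
# Sketch: reassemble prefix + middle + suffix for one row of the table above.
# "rows.parquet" is a hypothetical export path, not something the table defines.
import pandas as pd

df = pd.read_parquet("rows.parquet")

row = df.iloc[0]
reconstructed = row["prefix"] + row["middle"] + row["suffix"]

# Row metadata mirrors the header columns: repo_name, path, language, license, size, score.
print(row["repo_name"], row["path"], row["language"], row["license"], row["size"], row["score"])
print(reconstructed[:200])  # first 200 characters of the reassembled source file
```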
AlexRiina/django-s3direct | example/example/settings.py | Python | mit | 4,212 | 0.000237 |
"""
Django settings for hello project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')nw@1z2xt-dy2f$1mfpzyuohxv-tmu4+5-q55)*(e6obam-p=4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
's3direct',
'cat',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# If AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are not defined,
# django-s3direct will attempt to use the EC2 instance profile instead.
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', '')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME', 'test-bucket')
S3DIRECT_REGION = os.environ.get('S3DIRECT_REGION', 'us-east-1')
def create_filename(filename):
import uuid
ext = filename.split('.')[-1]
filename = '%s.%s' % (uuid.uuid4().hex, ext)
return os.path.join('custom', filename)
S3DIRECT_DESTINATIONS = {
# Allow anybody to upload any MIME type
'misc': {
'key': '/'
},
# Allow staff users to upload any MIME type
'pdfs': {
'key': 'uploads/pdfs',
'auth': lambda u: u.is_staff
},
# Allow anybody to upload jpeg's and png's. Limit sizes to 5kb - 20mb
'images': {
'key': 'uploads/images',
'auth': lambda u: True,
'allowed': [
'image/jpeg',
'image/png'
],
'content_length_range': (5000, 20000000),
},
# Allow authenticated users to upload mp4's
'videos': {
'key': 'uploads/videos',
'auth': lambda u: u.is_authenticated(),
'allowed': ['video/mp4']
},
# Allow anybody to upload any MIME type with a custom name function
'custom_filename': {
'key': create_filename
},
}
|
lcy-seso/models | fluid/image_classification/caffe2fluid/kaffe/custom_layers/__init__.py | Python | apache-2.0 | 2,996 | 0.001335 |
"""
"""
from .register import get_registered_layers
#custom layer import begins
import axpy
import flatten
import argmax
import reshape
import roipooling
import priorbox
import permute
import detection_out
import normalize
import select
import crop
import reduction
#custom layer import ends
custom_layers = get_registered_layers()
def set_args(f, params, node=None):
""" set args for function 'f' using the parameters in node.layer.parameters
Args:
f (function): a python function object
params (object): a object contains attributes needed by f's arguments
Returns:
arg_names (list): a list of argument names
kwargs (dict): a dict contains needed arguments
"""
from ..protobuf_to_dict import protobuf_to_dict
argc = f.__code__.co_argcount
arg_list = f.__code__.co_varnames[0:argc]
kwargs = {}
for arg_name in arg_list:
if arg_name in params:
kwargs[arg_name] = params[arg_name]
if node is not None and len(node.metadata):
kwargs.update(node.metadata)
return arg_list, kwargs
def has_layer(kind):
""" test whether this layer exists in custom layer
"""
return kind in custom_layers
def compute_output_shape(kind, node):
assert kind in custom_layers, "layer[%s] not exist in custom layers" % (
kind)
shape_func = custom_layers[kind]['shape']
parents = node.parents
inputs = [list(p.output_shape) for p in parents]
arg_names, kwargs = set_args(shape_func, node.params)
if len(inputs) == 1:
inputs = inputs[0]
return shape_func(inputs, **kwargs)
def make_node(template, kind, node):
""" make a PaddleNode for custom layer which means construct
a piece of code to define a layer implemented in 'custom_layers'
Args:
@template (PaddleNode): a factory to new a instance of PaddleNode
@kind (str): type of custom layer
@node (graph.Node): a layer in the net
Returns:
instance of PaddleNode
"""
assert kind in custom_layers, "layer[%s] not exist in custom layers" % (
kind)
layer_func = custom_layers[kind]['layer']
#construct arguments needed by custom layer function from node's parameters
arg_names, kwargs = set_args(layer_func, node.params, node)
return template('custom_layer', kind, **kwargs)
def make_custom_layer(kind, inputs, name, *args, **kwargs):
""" execute a custom layer which is implemented by users
Args:
@kind (str): type name of this layer
@inputs (vars): variable list created by fluid
@name (str): name for this layer
@args (tuple): other positional arguments
@kwargs (dict): other kv arguments
Returns:
output (var): output variable for this layer
"""
assert kind in custom_layers, "layer[%s] not exist in custom layers" % (
kind)
layer_func = custom_layers[kind]['layer']
return layer_func(inputs, name, *args, **kwargs)
|
anarang/robottelo | tests/foreman/ui/test_location.py | Python | gpl-3.0 | 34,753 | 0 |
# -*- encoding: utf-8 -*-
"""Test class for Locations UI"""
from fauxfactory import gen_ipaddr, gen_string
from nailgun import entities
from robottelo.config import settings
from robottelo.datafactory import generate_strings_list, invalid_values_list
from robottelo.decorators import run_only_on, tier1, tier2
from robottelo.constants import (
ANY_CONTEXT,
INSTALL_MEDIUM_URL,
LIBVIRT_RESOURCE_URL,
OS_TEMPLATE_DATA_FILE,
)
from robottelo.helpers import get_data_file
from robottelo.test import UITestCase
from robottelo.ui.factory import make_loc, make_templates, set_context
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.session import Session
def valid_org_loc_data():
"""Returns a list of valid org/location data"""
return [
{'org_name': gen_string('alpha', 10),
'loc_name': gen_string('alpha', 10)},
{'org_name': gen_string('numeric', 10),
'loc_name': gen_string('numeric', 10)},
{'org_name': gen_string('alphanumeric', 10),
'loc_name': gen_string('alphanumeric', 10)},
{'org_name': gen_string('utf8', 10),
'loc_name': gen_string('utf8', 10)},
{'org_name': gen_string('latin1', 20),
'loc_name': gen_string('latin1', 10)},
{'org_name': gen_string('html', 20),
'loc_name': gen_string('html', 10)}
]
def valid_env_names():
"""Returns a list of valid environment names"""
return [
gen_string('alpha'),
gen_string('numeric'),
gen_string('alphanumeric'),
]
class LocationTestCase(UITestCase):
"""Implements Location tests in UI"""
location = None
# Auto Search
@run_only_on('sat')
@tier1
def test_positive_auto_search(self):
"""Can auto-complete search for location by partial name
@feature: Locations
@assert: Created location can be auto search by its partial name
"""
loc_name = gen_string('alpha')
with Session(self.browser) as session:
page = session.nav.go_to_loc
make_loc(session, name=loc_name)
auto_search = self.location.auto_complete_search(
page,
locators['location.select_name'],
loc_name[:3],
loc_name,
search_key='name'
)
self.assertIsNotNone(auto_search)
# Positive Create
@run_only_on('sat')
@tier1
def test_positive_create_with_name(self):
"""Create Location with valid name only
@feature: Locations
@assert: Location is created, label is auto-generated
"""
with Session(self.browser) as session:
for loc_name in generate_strings_list():
with self.subTest(loc_name):
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
@run_only_on('sat')
@tier1
def test_negative_create_with_invalid_names(self):
"""Create location with invalid name
@feature: Locations
@assert: location is not created
"""
with Session(self.browser) as session:
for loc_name in invalid_values_list(interface='ui'):
with self.subTest(loc_name):
make_loc(session, name=loc_name)
error = session.nav.wait_until_element(
common_locators['name_haserror'])
self.assertIsNotNone(error)
@run_only_on('sat')
@tier1
def test_negative_create_with_same_name(self):
"""Create location with valid values, then create a new one
with same values.
@feature: Locations
@assert: location is not created
"""
loc_name = gen_string('utf8')
with Session(self.browser) as session:
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
make_loc(session, name=loc_name)
error = session.nav.wait_until_element(
common_locators['name_haserror'])
self.assertIsNotNone(error)
@run_only_on('sat')
@tier2
def test_positive_create_with_location_and_org(self):
"""Create and select both organization and location.
@feature: Locations
@assert: Both organization and location are selected.
"""
with Session(self.browser) as session:
for test_data in valid_org_loc_data():
with self.subTest(test_data):
org_name = test_data['org_name']
loc_name = test_data['loc_name']
org = entities.Organization(name=org_name).create()
self.assertEqual(org.name, org_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
location = session.nav.go_to_select_loc(loc_name)
organization = session.nav.go_to_select_org(org_name)
self.assertEqual(location, loc_name)
self.assertEqual(organization, org_name)
# Positive Update
@run_only_on('sat')
@tier1
def test_positive_update_name(self):
"""Create Location with valid values then update its name
@feature: Locations
@assert: Location name is updated
"""
loc_name = gen_string('alpha')
with Session(self.browser) as session:
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
for new_name in generate_strings_list():
with self.subTest(new_name):
self.location.update(loc_name, new_name=new_name)
self.assertIsNotNone(self.location.search(new_name))
loc_name = new_name # for next iteration
# Negative Update
@run_only_on('sat')
@tier1
def test_negative_update_with_too_long_name(self):
"""Create Location with valid values then fail to update
its name
@feature: Locations
@assert: Location name is not updated
"""
loc_name = gen_string('alphanumeric')
with Session(self.browser) as session:
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
new_name = gen_string('alpha', 247)
self.location.update(loc_name, new_name=new_name)
error = session.nav.wait_until_element(
common_locators['name_haserror'])
self.assertIsNotNone(error)
@run_only_on('sat')
@tier1
def test_positive_delete(self):
"""Create location with valid values then delete it.
@feature: Location Positive Delete test.
@assert: Location is deleted
"""
with Session(self.browser) as session:
for loc_name in generate_strings_list():
with self.subTest(loc_name):
entities.Location(name=loc_name).create()
session.nav.go_to_loc()
self.location.delete(loc_name)
@run_only_on('sat')
@tier2
def test_positive_add_subnet(self):
"""Add a subnet by using location name and subnet name
@feature: Locations
@assert: subnet is added
"""
strategy, value = common_locators['entity_deselect']
with Session(self.browser) as session:
for subnet_name in generate_strings_list():
with self.subTest(subnet_name):
loc_name = gen_string('alpha')
subnet = entities.Subnet(
name=subnet_name,
network=gen_ipaddr(ip3=True),
mask='255.255.255.0',
).create()
self.assertEqual(subnet.name, subnet_name)
make_loc(session, name=loc_name)
self.assertIsNotNone(self.location.search(loc_name))
self.location.update(loc_name, new_subnets=[subnet_n
|
Linkid/numpy | numpy/core/tests/test_regression.py | Python | bsd-3-clause | 78,626 | 0.002073 |
from __future__ import division, absolute_import, print_function
import copy
import pickle
import sys
import platform
import gc
import copy
import warnings
import tempfile
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec
)
from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
"""Ticket #3"""
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self,level=rlevel):
"""Ticket #7"""
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self,level=rlevel):
"""Ticket #16"""
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
f = BytesIO()
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a, b)
def test_typeNA(self,level=rlevel):
"""Ticket #31"""
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self,level=rlevel):
"""Ticket #35"""
dt = np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self,level=rlevel):
"""Ticket #40"""
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
"""Ticket #43"""
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self,level=rlevel):
"""Check that nothing is done when order='F' and array C/F-contiguous"""
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self,level=rlevel):
"""Check that ravel works when order='F' and array C/F-contiguous"""
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self,level=rlevel):
"""Ticket #47"""
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
"""Ticket #49"""
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
"""Ticket #50"""
f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self,level=rlevel):
"""Ticket #58."""
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError, rs)
def test_bool(self,level=rlevel):
"""Ticket #60"""
x = np.bool_(1)
def test_indexing1(self,level=rlevel):
"""Ticket #64"""
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
"""Ticket #65"""
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
"""Ticket #67"""
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
"""Ticket #79"""
ulen = 1
ucs_value = sixu('\U0010FFFF')
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua2 = ua.newbyteorder()
def test_object_array_fill(self,level=rlevel):
"""Ticket #86"""
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
"""Ticket #93"""
self.assertRaises(TypeError, np.dtype,
{'names':['a'],'formats':['foo']}, align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
(sys.platform == "win32" and
platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
"""Ticket #99"""
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width, 16)
self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
self.assertRaises(ValueError, np.intp, '0x1', 32)
assert_equal(255, np.intp('0xFF', 16))
assert_equal(1024, np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
"""Ticket #105"""
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
xa = np.where((a>2) & (a<6))
xb = np.where((b>2) & (b<6))
ya = ((a>2) & (a<6))
yb = ((b>2) & (b<6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self,level=rlevel):
"""GitHub issue #369"""
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self,level=rlevel):
"""Ticket #2185"""
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
def test_mem_dot(self,level=rlevel):
"""Ticket #106"""
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
# Dummy array to detect bad memory access:
_z = np.ones(10)
_dummy = np.empty((0, 10))
z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
np.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
# Do the same for the built-in dot:
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
def test_arange_endian(self,level=rlevel):
"""Ticket #111"""
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
# Longfloat support is not consistent enough across
# platforms for this test to be meaningful.
# def test_longfloat_repr(self,level=rlevel):
# """Ticket #112"""
# if np.longfloat(0).itemsize > 8:
# a = np.exp(np.array([1000],dtype=np.longfloat))
# assert_(str(a)[1:9] == str(a[0])[:8])
def test_argmax(self,level=rlevel):
"""Ticket #119"""
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a
|
EduJGURJC/elastest-service-manager | src/esm/models/binding_request.py | Python | apache-2.0 | 6,152 | 0.004714 |
# coding: utf-8
from __future__ import absolute_import
#
from esm.models.bind_resource import BindResource
from .base_model_ import Model
from ..util import deserialize_model
class BindingRequest(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, app_guid: str=None, plan_id: str=None, service_id: str=None, parameters: object=None, bind_resource: BindResource=None):
"""
BindingRequest - a model defined in Swagger
:param app_guid: The app_guid of this BindingRequest.
:type app_guid: str
:param plan_id: The plan_id of this BindingRequest.
:type plan_id: str
:param service_id: The service_id of this BindingRequest.
:type service_id: str
:param parameters: The parameters of this BindingRequest.
:type parameters: object
:param bind_resource: The bind_resource of this BindingRequest.
:type bind_resource: BindResource
"""
self.swagger_types = {
'app_guid': str,
'plan_id': str,
'service_id': str,
'parameters': object,
'bind_resource': BindResource
}
self.attribute_map = {
'app_guid': 'app_guid',
'plan_id': 'plan_id',
'service_id': 'service_id',
'parameters': 'parameters',
'bind_resource': 'bind_resource'
}
self._app_guid = app_guid
self._plan_id = plan_id
self._service_id = service_id
self._parameters = parameters
self._bind_resource = bind_resource
@classmethod
def from_dict(cls, dikt) -> 'BindingRequest':
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The BindingRequest of this BindingRequest.
:rtype: BindingRequest
"""
return deserialize_model(dikt, cls)
@property
def app_guid(self) -> str:
"""
Gets the app_guid of this BindingRequest.
Deprecated in favor of bind_resource.app_guid. GUID of an application associated with the binding to be created. If present, MUST be a non-empty string.
:return: The app_guid of this BindingRequest.
:rtype: str
"""
return self._app_guid
@app_guid.setter
def app_guid(self, app_guid: str):
"""
Sets the app_guid of this BindingRequest.
Deprecated in favor of bind_resource.app_guid. GUID of an application associated with the binding to be created. If present, MUST be a non-empty string.
:param app_guid: The app_guid of this BindingRequest.
:type app_guid: str
"""
self._app_guid = app_guid
@property
def plan_id(self) -> str:
"""
Gets the plan_id of this BindingRequest.
ID of the plan from the catalog. MUST be a non-empty string.
:return: The plan_id of this BindingRequest.
:rtype: str
"""
return self._plan_id
@plan_id.setter
def plan_id(self, plan_id: str):
"""
Sets the plan_id of this BindingRequest.
ID of the plan from the catalog. MUST be a non-empty string.
:param plan_id: The plan_id of this BindingRequest.
:type plan_id: str
"""
if plan_id is None:
raise ValueError("Invalid value for `plan_id`, must not be `None`")
self._plan_id = plan_id
@property
def service_id(self) -> str:
"""
Gets the service_id of this BindingRequest.
ID of the service from the catalog. MUST be a non-empty string.
:return: The service_id of this BindingRequest.
:rtype: str
"""
return self._service_id
@service_id.setter
def service_id(self, service_id: str):
"""
Sets the service_id of this BindingRequest.
ID of the service from the catalog. MUST be a non-empty string.
:param service_id: The service_id of this BindingRequest.
:type service_id: str
"""
if service_id is None:
raise ValueError("Invalid value for `service_id`, must not be `None`")
self._service_id = service_id
@property
def parameters(self) -> object:
"""
Gets the parameters of this BindingRequest.
Configuration options for the service binding. An opaque object, controller treats this as a blob. Brokers SHOULD ensure that the client has provided valid configuration parameters and values for the operation.
:return: The parameters of this BindingRequest.
:rtype: object
"""
return self._parameters
@parameters.setter
def parameters(self, parameters: object):
"""
Sets the parameters of this BindingRequest.
Configuration options for the service binding. An opaque object, controller treats this as a blob. Brokers SHOULD ensure that the client has provided valid configuration parameters and values for the operation.
:param parameters: The parameters of this BindingRequest.
:type parameters: object
"""
self._parameters = parameters
@property
def bind_resource(self) -> BindResource:
"""
Gets the bind_resource of this BindingRequest.
A JSON object that contains data for platform resources associated with the binding to be created. See Bind Resource Object for more information.
:return: The bind_resource of this BindingRequest.
:rtype: BindResource
"""
return self._bind_resource
@bind_resource.setter
def bind_resource(self, bind_resource: BindResource):
"""
Sets the bind_resource of this BindingRequest.
A JSON object that contains data for platform resources associated with the binding to be created. See Bind Resource Object for more information.
:param bind_resource: The bind_resource of this BindingRequest.
:type bind_resource: BindResource
"""
self._bind_resource = bind_resource
|
carlvlewis/detective.io | app/detective/management/commands/parseowl.py | Python | lgpl-3.0 | 16,471 | 0.006982 |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from lxml import etree
from app.detective.utils import to_class_name, to_camelcase, to_underscores
import re
# Defines the owl and rdf namespaces
namespaces = {
'owl': 'http://www.w3.org/2002/07/owl#',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#'
}
# transform property name
pron = lambda name: to_underscores(to_camelcase(name))
# get local tag
def get(sets, el):
if hasattr(sets, "iterchildren"):
props = [ e for e in sets.iterchildren() if re.search('#}%s$' % el, e.tag) ]
return props[0].text if len(props) else ''
else:
return ""
# Merge 2 list and remove duplicates using the given field as reference
def merge(first_list, second_list, field):
refs = [ x[field] for x in second_list ]
return second_list + [ x for x in first_list if x[field] not in refs ]
class Command(BaseCommand):
help = "Parse the given OWL file to generate its neo4django models."
args = 'filename.owl'
root = None
def handle(self, *args, **options):
if not args:
raise CommandError('Please specify path to ontology file.')
# Gives the ontology URI. Only needed for documentation purposes
ontologyURI = "http://www.semanticweb.org/nkb/ontologies/2013/6/impact-investment#"
# This string will contain the models.py file
headers = [
"# -*- coding: utf-8 -*-",
"# The ontology can be found in its entirety at %s" % ontologyURI,
"from neo4django.db import models",
"from neo4django.graph_auth.models import User",
""
]
# This array contains the correspondance between data types
correspondanceTypes = {
"string" : "StringProperty",
"anyURI" : "URLProperty",
"int" : "IntegerProperty",
"nonNegativeInteger" : "IntegerProperty",
"nonPositiveInteger" : "IntegerProperty",
"PositiveInteger" : "IntegerProperty",
"NegativeInteger" : "IntegerProperty",
# Looking forward the neo4django float support!
# See also: https://github.com/scholrly/neo4django/issues/197
"float" : "StringProperty",
"integer" : "IntegerProperty",
"dateTimeStamp" : "DateTimeProperty",
"dateTime" : "DateTimeProperty",
"boolean" : "BooleanProperty"
}
try :
# Parses the file with etree
tree = etree.parse(args[0])
except:
raise CommandError('Unable to parse the given file.')
self.root = tree.getroot()
models = []
# Finds all the Classes
for ontologyClassElement in self.root.findall("owl:Class", namespaces):
# Finds the URI of the class
classURI = ontologyClassElement.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}about"]
#Finds the name of the class
className = to_class_name(classURI.split("#")[1])
# By default, the class has no parent
parentClass = "models.NodeModel"
# Declares an array to store the relationships and properties from this class
relations = []
properties = []
scope = get(ontologyClassElement, "scope").replace("'", "\\'")
# Class help text
help_text = get(ontologyClassElement, "help_text").replace("'", "\\'")
# Verbose names
verbose_name = get(ontologyClassElement, "verbose_name").replace("'", "\\'")
verbose_name_plural = get(ontologyClassElement, "verbose_name_plural").replace("'", "\\'")
# Finds all the subClasses of the Class
for subClassElement in ontologyClassElement.findall("rdfs:subClassOf", namespaces):
# If the Class is actually an extension of another Class
if "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource" in subClassElement.attrib:
parentClassURI = subClassElement.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
parentClass = to_class_name(parentClassURI.split("#")[1])
else:
for restriction in subClassElement.findall("owl:Restriction", namespaces):
# If there is a relationship defined in the subclass
if restriction.find("owl:onClass", namespaces) is not None:
# Finds the relationship and its elements
# (destination Class and type)
relationClass = restriction.find("owl:onClass", namespaces)
relation = {}
relation["URI"] = relationClass.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
relation["name"] = to_class_name(relation["URI"].split("#")[1])
# Exception when the relation's destination is
# an individual from the same class
if relation["name"] == className:
relation["name"] = '"self"'
else:
relation["name"] = '"%s"' % relation["name"]
relationType = restriction.find("owl:onProperty", namespaces)
relationTypeURI = relationType.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
relation["type"] = relationTypeURI.split("#")[1]
# Guesses the destination of the relation based on the name.
# Name should be "has_..."
if relation["type"].find('has') == 0:
relation["destination"] = pron(relation["type"][3:])
# Get the property's options
options = self.propOptions(relation["type"])
# Help text
relation["help_text"] = get(options, "help_text").replace("'", "\\'")
# Verbose name
relation["verbose_name"] = get(options, "verbose_name")
relation["type"] = relation["type"]
# Adds the relationship to the array containing all relationships for the class only
# if the relation has a destination
if "destination" in relation:
relations.append(relation)
# If there is a property defined in the subclass
elif restriction.find("owl:onDataRange", namespaces) is not None or restriction.find("owl:someValuesFrom", namespaces) is not None:
propertyTypeElement = restriction.find("owl:onProperty", namespaces)
propertyTypeURI = propertyTypeElement.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
propertyType = propertyTypeURI.split("#")[1]
if restriction.find("owl:onDataRange", namespaces) is not None:
dataTypeElement = restriction.find("owl:onDataRange", namespaces)
else:
dataTypeElement = restriction.find("owl:someValuesFrom", namespaces)
dataTypeURI = dataTypeElement.attrib["{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
t = dataTypeURI.split("#")[1]
if t in correspondanceTypes:
dataType = correspondanceTypes[t]
# Get the property's options
options = self.propOptions(propertyType)
|
les69/calvin-base | calvin/calvinsys/events/timer.py | Python | apache-2.0 | 2,309 | 0.002599 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.plugins.async import async
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class TimerEvent(async.DelayedCall):
def __init__(self, actor_id, delay, trigger_loop, repeats=False):
super(TimerEvent, self).__init__(delay, callback=self.trigger)
self._actor_id = actor_id
self._triggered = False
self.trigger_loop = trigger_loop
self.repeats = repeats
_log.debug("Set calvinsys timer %f %s on %s" % (delay, "repeat" if self.repeats else "", self._actor_id))
@property
def triggered(self):
return self._triggered
def ack(self):
self._triggered = False
def trigger(self):
_log.debug("Trigger calvinsys timer on %s" % (self._actor_id))
self._triggered = True
if self.repeats:
self.reset()
self.trigger_loop(actor_ids=[self._actor_id])
class TimerHandler(object):
def __init__(self, node, actor):
super(TimerHandler, self).__init__()
self._actor = actor
self.node = node
def once(self, delay):
return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop)
def repeat(self, delay):
return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop, repeats=True)
def register(node, actor, events=None):
"""
register() is called when the Event-system object is created.
Place an object in the event object - in this case the
nodes only timer object.
Also register any hooks for actor migration.
@TODO: Handle migration (automagically and otherwise.)
"""
return TimerHandler(node=node, actor=actor)
|
Mr-Robots/Gesture-controlled-surveillance-vehicle | Ti_Monitor/Gesture_serial.py | Python | gpl-2.0 | 251 | 0.015936 |
import serial
ser = serial.Serial('/dev/ttyUSB2',38400)
while True:
try:
x = ser.read()
f=open('gesture_command.txt','w')
f.write(x)
f.close()
except:
print "Gesture serial : port error!"
break
|
praekelt/molo-iogt | iogt/migrations/0003_convert_recomended_articles.py | Python | bsd-2-clause | 2,316 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.core.models import ArticlePage, ArticlePageRecommendedSections
from wagtail.wagtailcore.blocks import StreamValue
def create_recomended_articles(main_article, article_list):
'''
Creates recommended article objects from article_list
and _prepends_ to existing recommended articles.
'''
existing_recommended_articles = [
ra.recommended_article.specific
for ra in main_article.recommended_articles.all()]
ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
for hyperlinked_article in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=hyperlinked_article).save()
# re-create existing recommended articles
for article in existing_recommended_articles:
if article not in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=article).save()
def convert_articles(apps, schema_editor):
'''
Derived from https://github.com/wagtail/wagtail/issues/2110
'''
articles = ArticlePage.objects.all().exact_type(ArticlePage)
for article in articles:
stream_data = []
linked_articles = []
for block in article.body.stream_data:
if block['type'] == 'page':
if ArticlePage.objects.filter(id=block['value']):
linked_articles.append(ArticlePage.objects.get(
id=block['value']))
else:
# add block to new stream_data
stream_data.append(block)
if linked_articles:
create_recomended_articles(article, linked_articles)
stream_block = article.body.stream_block
article.body = StreamValue(stream_block, stream_data, is_lazy=True)
article.save()
section = article.get_parent().specific
section.enable_recommended_section = True
section.enable_next_section = True
section.save()
class Migration(migrations.Migration):
dependencies = [
('iogt', '0002_create_importers_group'),
]
operations = [
migrations.RunPython(convert_articles),
]
|
eayunstack/neutron | neutron/pecan_wsgi/hooks/policy_enforcement.py | Python | apache-2.0 | 11,798 | 0.000085 |
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
from oslo_utils import excutils
from pecan import hooks
import webob
from neutron._i18n import _
from neutron.common import constants as const
from neutron.extensions import quotasv2
from neutron import manager
from neutron.pecan_wsgi import constants as pecan_constants
from neutron.pecan_wsgi.controllers import quota
from neutron.pecan_wsgi.hooks import utils
from neutron import policy
LOG = logging.getLogger(__name__)
def _custom_getter(resource, resource_id):
"""Helper function to retrieve resources not served by any plugin."""
if resource == quotasv2.RESOURCE_NAME:
return quota.get_tenant_quotas(resource_id)[quotasv2.RESOURCE_NAME]
def fetch_resource(method, neutron_context, controller,
collection, resource, resource_id,
parent_id=None):
field_list = []
if method == 'PUT':
attrs = controller.resource_info
if not attrs:
# this isn't a request for a normal resource. it could be
# an action like removing a network from a dhcp agent.
# return None and assume the custom controller for this will
# handle the necessary logic.
return
field_list = [name for (name, value) in attrs.items()
if (value.get('required_by_policy') or
value.get('primary_key') or 'default' not in value)]
plugin = manager.NeutronManager.get_plugin_for_resource(collection)
if plugin:
if utils.is_member_action(controller):
getter = controller.parent_controller.plugin_shower
else:
getter = controller.plugin_shower
getter_args = [neutron_context, resource_id]
if parent_id:
getter_args.append(parent_id)
return getter(*getter_args, fields=field_list)
else:
# Some legit resources, like quota, do not have a plugin yet.
# Retrieving the original object is nevertheless important
# for policy checks.
return _custom_getter(resource, resource_id)
class PolicyHook(hooks.PecanHook):
priority = 140
def before(self, state):
# This hook should be run only for PUT,POST and DELETE methods and for
# requests targeting a neutron resource
resources = state.request.context.get('resources', [])
if state.request.method not in ('POST', 'PUT', 'DELETE'):
return
# As this routine will likely alter the resources, do a shallow copy
resources_copy = resources[:]
neutron_context = state.request.context.get('neutron_context')
resource = state.request.context.get('resource')
# If there is no resource for this request, don't bother running authZ
# policies
if not resource:
return
controller = utils.get_controller(state)
if not controller or utils.is_member_action(controller):
return
collection = state.request.context.get('collection')
needs_prefetch = (state.request.method == 'PUT' or
state.request.method == 'DELETE')
policy.init()
action = controller.plugin_handlers[
pecan_constants.ACTION_MAP[state.request.method]]
# NOTE(salv-orlando): As bulk updates are not supported, in case of PUT
# requests there will be only a single item to process, and its
# identifier would have been already retrieved by the lookup process;
# in the case of DELETE requests there won't be any item to process in
# the request body
original_resources = []
if needs_prefetch:
try:
item = resources_copy.pop()
except IndexError:
# Ops... this was a delete after all!
item = {}
resource_id = state.request.context.get('resource_id')
parent_id = state.request.context.get('parent_id')
method = state.request.method
resource_obj = fetch_resource(method, neutron_context, controller,
collection, resource, resource_id,
parent_id=parent_id)
if resource_obj:
original_resources.append(resource_obj)
obj = copy.copy(resource_obj)
obj.update(item)
obj[const.ATTRIBUTES_TO_UPDATE] = item.keys()
# Put back the item in the list so that policies could be
# enforced
resources_copy.append(obj)
# TODO(salv-orlando): as other hooks might need to prefetch resources,
# store them in the request context. However, this should be done in a
# separate hook which is conveniently called before all other hooks
state.request.context['original_resources'] = original_resources
for item in resources_copy:
try:
policy.enforce(
neutron_context, action, item,
pluralized=collection)
except oslo_policy.PolicyNotAuthorized:
with excutils.save_and_reraise_exception() as ctxt:
# If a tenant is modifying it's own object, it's safe to
# return a 403. Otherwise, pretend that it doesn't exist
# to avoid giving away information.
controller = utils.get_controller(state)
s_action = controller.plugin_handlers[controller.SHOW]
if not policy.check(neutron_context, s_action, item,
pluralized=collection):
ctxt.reraise = False
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def after(self, state):
neutron_context = state.request.context.get('neutron_context')
resource = state.request.context.get('resource')
collection = state.request.context.get('collection')
controller = utils.get_controller(state)
if not resource:
# can't filter a resource we don't recognize
return
# NOTE(kevinbenton): extension listing isn't controlled by policy
if resource == 'extension':
return
try:
data = state.response.json
except ValueError:
return
if state.request.method not in pecan_constants.ACTION_MAP:
return
if not data or (resource not in data and collection not in data):
return
policy.init()
is_single = resource in data
action_type = pecan_constants.ACTION_MAP[state.request.method]
if action_type == 'get':
action = controller.plugin_handlers[controller.SHOW]
else:
action = controller.plugin_handlers[action_type]
key = resource if is_single else collection
to_process = [data[resource]] if is_single else data[collection]
# in the single case, we enforce which raises on violation
# in the plural case, we just check so violating items are hidden
policy_method = policy.enforce if is_single else policy.check
plugin = manager.NeutronManager.get_plugin_for_resource(collection)
try:
resp = [self._get_filtered_item(state.request, controller,
resource, collection,
|
jcurbelo/networkx | networkx/algorithms/traversal/breadth_first_search.py | Python | bsd-3-clause | 3,994 | 0.002754 |
"""
====================
Breadth-first search
====================
Basic algorithms for breadth-first searching the nodes of a graph.
"""
import networkx as nx
from collections import defaultdict, deque
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['bfs_edges', 'bfs_tree', 'bfs_predecessors', 'bfs_successors']
def bfs_edges(G, source, reverse=False):
"""Produce edges in a breadth-first-search starting at source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
reverse : bool, optional
If True traverse a directed graph in the reverse direction
Returns
-------
edges: generator
A generator of edges in the breadth-first-search.
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(list(nx.bfs_edges(G,0)))
[(0, 1), (1, 2)]
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
if reverse and isinstance(G, nx.DiGraph):
neighbors = G.predecessors
else:
neighbors = G.neighbors
visited = set([source])
queue = deque([(source, neighbors(source))])
while queue:
parent, children = queue[0]
try:
child = next(children)
if child not in visited:
yield parent, child
visited.add(child)
queue.append((child, neighbors(child)))
except StopIteration:
queue.popleft()
def bfs_tree(G, source, reverse=False):
"""Return an oriented tree constructed from of a breadth-first-search
starting at source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
reverse : bool, optional
If True traverse a directed graph in the reverse direction
Returns
-------
T: NetworkX DiGraph
An oriented tree
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(list(nx.bfs_edges(G,0)))
[(0, 1), (1, 2)]
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
T = nx.DiGraph()
T.add_node(source)
T.add_edges_from(bfs_edges(G,source,reverse=reverse))
return T
def bfs_predecessors(G, source):
"""Return dictionary of predecessors in breadth-first-search from source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
Returns
-------
pred: dict
A dictionary with nodes as keys and predecessor nodes as values.
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(nx.bfs_predecessors(G,0))
{1: 0, 2: 1}
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
return dict((t,s) for s,t in bfs_edges(G,source))
def bfs_successors(G, source):
"""Return dictionary of successors in breadth-first-search from source.
Parameters
----------
G : NetworkX graph
source : node
Specify starting node for breadth-first search and return edges in
the component reachable from source.
Returns
-------
succ: dict
A dictionary with nodes as keys and list of successor nodes as values.
Examples
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2])
>>> print(nx.bfs_successors(G,0))
{0: [1], 1: [2]}
Notes
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
d = defaultdict(list)
for s,t in bfs_edges(G,source):
d[s].append(t)
return dict(d)
|
tredly/tredly | tests/testobjects/firewallchecks.py | Python | mit | 2,018 | 0.007433 |
# Performs network checks
from subprocess import Popen, PIPE
from includes.output import *
class FirewallChecks:
# Constructor
def __init__(self, uuid = None):
# if uuid == None then check the host
self.uuid = uuid
def checkIpfwRule(self, permission, fromIP, toIP, toPort, direction):
cmd = ['ipfw', 'list']
# add the jexec command if we're dealing with a container
if (self.uuid is not None):
cmd = ['jexec', 'trd-' + self.uuid] + cmd
process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdOut, stdErr = process.communicate()
stdOutString = stdOut.decode('utf-8')
stdErrString = stdErr.decode('utf-8')
for line in stdOutString.splitlines():
words = line.split()
# check against this line
if (words[1] == permission) and (words[7] == fromIP) and (words[9] == toIP) and (words[11] == toPort):
return True
return False
# checks that a value exists in an ipfw table
def checkIpfwTable(self, tableNum, value):
cmd = ['ipfw', 'table',str(tableNum), 'list']
# add the jexec command if we're dealing with a container
if (self.uuid is not None):
cmd = ['jexec', 'trd-' + self.uuid] + cmd
process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdOut, stdErr = process.communicate()
stdOutString = stdOut.decode('utf-8')
stdErrString = stdErr.decode('utf-8')
if (process.returncode != 0):
e_error("Failed to check ipfw table")
print(stdOutString)
print(stdErrString)
print('exitcode: ' + process.returncode)
exit(process.returncode)
# loop over the lines looking for our value
for line in stdOutString.splitlines():
if (line.split()[0] == value):
return True
return False
|
ThiefMaster/sqlalchemy | lib/sqlalchemy/orm/attributes.py | Python | mit | 57,173 | 0.000122 |
# orm/attributes.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation for class attributes and their interaction
with instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import operator
from .. import util, event, inspection
from . import interfaces, collections, exc as orm_exc
from .base import instance_state, instance_dict, manager_of_class
from .base import PASSIVE_NO_RESULT, ATTR_WAS_SET, ATTR_EMPTY, NO_VALUE,\
NEVER_SET, NO_CHANGE, CALLABLES_OK, SQL_OK, RELATED_OBJECT_OK,\
INIT_OK, NON_PERSISTENT_OK, LOAD_AGAINST_COMMITTED, PASSIVE_OFF,\
PASSIVE_RETURN_NEVER_SET, PASSIVE_NO_INITIALIZE, PASSIVE_NO_FETCH,\
PASSIVE_NO_FETCH_RELATED, PASSIVE_ONLY_PERSISTENT, NO_AUTOFLUSH
from .base import state_str, instance_str
@inspection._self_inspects
class QueryableAttribute(interfaces._MappedAttribute,
interfaces.InspectionAttr,
interfaces.PropComparator):
"""Base class for :term:`descriptor` objects that intercept
attribute events on behalf of a :class:`.MapperProperty`
object. The actual :class:`.MapperProperty` is accessible
via the :attr:`.QueryableAttribute.property`
attribute.
.. seealso::
:class:`.InstrumentedAttribute`
:class:`.MapperProperty`
:attr:`.Mapper.all_orm_descriptors`
:attr:`.Mapper.attrs`
"""
is_attribute = True
def __init__(self, class_, key, impl=None,
comparator=None, parententity=None,
of_type=None):
self.class_ = class_
self.key = key
self.impl = impl
self.comparator = comparator
self._parententity = parententity
self._of_type = of_type
manager = manager_of_class(class_)
# manager is None in the case of AliasedClass
if manager:
# propagate existing event listeners from
# immediate superclass
for base in manager._bases:
if key in base:
self.dispatch._update(base[key].dispatch)
@util.memoized_property
def _supports_population(self):
return self.impl.supports_population
def get_history(self, instance, passive=PASSIVE_OFF):
return self.impl.get_history(instance_state(instance),
instance_dict(instance), passive)
def __selectable__(self):
# TODO: conditionally attach this method based on clause_element ?
return self
@util.memoized_property
def info(self):
"""Return the 'info' dictionary for the underlying SQL element.
The behavior here is as follows:
* If the attribute is a column-mapped property, i.e.
:class:`.ColumnProperty`, which is mapped directly
to a schema-level :class:`.Column` object, this attribute
will return the :attr:`.SchemaItem.info` dictionary associated
with the core-level :class:`.Column` object.
* If the attribute is a :class:`.ColumnProperty` but is mapped to
any other kind of SQL expression other than a :class:`.Column`,
the attribute will refer to the :attr:`.MapperProperty.info`
dictionary associated directly with the :class:`.ColumnProperty`,
assuming the SQL expression itself does not have its own ``.info``
attribute (which should be the case, unless a user-defined SQL
construct has defined one).
* If the attribute refers to any other kind of
:class:`.MapperProperty`, including :class:`.RelationshipProperty`,
the attribute will refer to the :attr:`.MapperProperty.info`
dictionary associated with that :class:`.MapperProperty`.
* To access the :attr:`.MapperProperty.info` dictionary of the
:class:`.MapperProperty` unconditionally, including for a
:class:`.ColumnProperty` that's associated directly with a
:class:`.schema.Column`, the attribute can be referred to using
:attr:`.QueryableAttribute.property` attribute, as
``MyClass.someattribute.property.info``.
.. versionadded:: 0.8.0
.. seealso::
:attr:`.SchemaItem.info`
:attr:`.MapperProperty.info`
"""
return self.comparator.info
@util.memoized_property
def parent(self):
"""Return an inspection instance representing the parent.
This will be either an instance of :class:`.Mapper`
or :class:`.AliasedInsp`, depending upon the nature
of the parent entity which this attribute is associated
with.
"""
return inspection.inspect(self._parententity)
@property
def expression(self):
return self.comparator.__clause_element__()
def __clause_element__(self):
return self.comparator.__clause_element__()
def _query_clause_element(self):
"""like __clause_element__(), but called specifically
by :class:`.Query` to allow special behavior."""
return self.comparator._query_clause_element()
def adapt_to_entity(self, adapt_to_entity):
assert not self._of_type
return self.__class__(adapt_to_entity.entity,
self.key, impl=self.impl,
comparator=self.comparator.adapt_to_entity(
adapt_to_entity),
parententity=adapt_to_entity)
def of_type(self, cls):
return QueryableAttribute(
self.class_,
self.key,
self.impl,
self.comparator.of_type(cls),
self._parententity,
of_type=cls)
def label(self, name):
return self._query_clause_element().label(name)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def hasparent(self, state, optimistic=False):
return self.impl.hasparent(state, optimistic=optimistic) is not False
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object associated with %s '
'has an attribute %r' % (
type(self).__name__,
type(self.comparator).__name__,
self,
key)
)
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
@util.memoized_property
def property(self):
"""Return the :class:`.MapperProperty` associated with this
:class:`.QueryableAttribute`.
Return values here will commonly be instances of
:class:`.ColumnProperty` or :class:`.RelationshipProperty`.
"""
return self.comparator.property
class InstrumentedAttribute(QueryableAttribute):
"""Class bound instrumented attribute which adds basic
:term:`descriptor` methods.
See :class:`.QueryableAttribute` for a description of most features.
"""
def __set__(self, instance, value):
self.impl.set(instance_state(instance),
instance_dict(instance), value, None)
def __delete__(self, instance):
self.impl.delete(instance_state(instance), instance_dict(instance))
def __get__(self, instance, owner):
if instance is None:
return self
dict_ = instance_dict(instance)
if self._supports_population and self.key in dict_:
return dict_[self.key]
else:
return self.impl.get(instance_state(instance), dict_)
def create_proxied_attribute(descriptor):
"""Create an QueryableAttribute / user descriptor hybrid.
Returns a new QueryableAttribute type tha
|
olkku/tf-info | manager/tests.py | Python | bsd-3-clause | 3,567 | 0.001402 |
from django.test import TestCase
from manager.models import Page
from datetime import datetime, timedelta
from django.utils import timezone
class PageTestCase(TestCase):
def setUp(self):
now = timezone.now()
Page.objects.create(url="testurl", description="test description")
def test_regular_page_active(self):
"""Page with no pause or time/date range is active."""
page = Page.objects.get(url="/testurl")
self.assertFalse(page.is_paused())
self.assertTrue(page.is_active())
def test_paused_page_not_active(self):
"""Page that has been paused is not active."""
page = Page.objects.get(url="/testurl")
page.pause_at = timezone.now().replace(hour=12)
current_time = timezone.now().replace(hour=13)
self.assertTrue(page.is_paused(current_time))
self.assertFalse(page.is_active(current_time))
def test_previously_paused_page_active(self):
"""Page that has is not paused but has been in the past is active."""
page = Page.objects.get(url="/testurl")
page.paused_at = timezone.now() - timedelta(hours=48)
self.assertFalse(page.is_paused())
self.assertTrue(page.is_active())
page.paused_at = timezone.now()
morning = timezone.now().replace(hour=6)
self.assertFalse(page.is_paused(morning))
self.assertTrue(page.is_active(morning))
def test_page_active_time_of_day(self):
"""Page has certain times of day it should be visible."""
page = Page.objects.get(url="/testurl")
now = timezone.now().replace(hour=12)
# Default page has no times -> active
self.assertTrue(page.is_active(now))
# Set start time in the future
page.active_time_start = now.replace(hour=13).time()
self.assertFalse(page.is_active(now))
# Set time to be past start time
now = now.replace(hour=14)
self.assertTrue(page.is_active(now))
# Set end time in the future, still active
page.active_time_end = now.replace(hour=15).time()
self.assertTrue(page.is_active(now))
# Set time to be past end-time -> inactive
now = now.replace(hour=16)
self.assertFalse(page.is_active(now))
# Set start time in the future but bigger than end-time
page.active_time_start = now.replace(hour=17).time()
self.assertFalse(page.is_active(now))
# Time bigger than start time in the evening
now = now.replace(hour=19)
self.assertTrue(page.is_active(now))
def test_page_date_range(self):
"""Page has certains dates it should be visible."""
page = Page.objects.get(url="/testurl")
now = timezone.now()
today = now.date()
page.active_date_start = today
self.assertTrue(page.is_active(now))
page.active_date_start = today + timedelta(days=1)
self.assertFalse(page.is_active(now))
page.active_date_start = today - timedelta(days=7)
page.active_date_end = today - timedelta(days=3)
self.assertFalse(page.is_active(now))
def test_page_weekdays(self):
"""Page is active on certain weekdays"""
page = Page.objects.get(url="/testurl")
now = datetime(2014, 4, 28, 16, 53) # Monday
page.active_date_start = now.date()
self.assertTrue(page.is_active(now))
page.monday = False
self.assertFalse(page.is_active(now))
now = now + timedelta(days=1)
self.assertTrue(page.is_active(now))
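# Hypothetical sketch (added, not part of this repository): roughly the kind of
# Page.is_active() logic the tests above assume. It is simplified: the
# overnight time window and the per-weekday flags exercised above are omitted,
# and the field names are taken from the tests rather than the real model.
def _is_active_sketch(page, now):
    if page.paused_at is not None and page.paused_at.date() == now.date():
        return False                                  # paused earlier today
    if page.active_time_start and now.time() < page.active_time_start:
        return False                                  # before the daily window opens
    if page.active_time_end and now.time() > page.active_time_end:
        return False                                  # after the daily window closes
    if page.active_date_start and now.date() < page.active_date_start:
        return False                                  # before the date range starts
    if page.active_date_end and now.date() > page.active_date_end:
        return False                                  # after the date range ends
    return True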
|
Tinkerforge/brickv
|
src/brickv/plugin_system/plugins/red/red_tab_settings_brickd.py
|
Python
|
gpl-2.0
| 14,059 | 0.003272 |
# -*- coding: utf-8 -*-
"""
RED Plugin
Copyright (C) 2014 Ishraq Ibne Ashraf <ishraq@tinkerforge.com>
Copyright (C) 2014 Olaf Lüke <olaf@tinkerforge.com>
Copyright (C) 2014-2015 Matthias Bolte <matthias@tinkerforge.com>
red_tab_settings_brickd.py: RED settings brickd tab implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt5.QtWidgets import QMessageBox, QWidget
from brickv.plugin_system.plugins.red.ui_red_tab_settings_brickd import Ui_REDTabSettingsBrickd
from brickv.plugin_system.plugins.red.api import *
from brickv.plugin_system.plugins.red import config_parser
from brickv.async_call import async_call
from brickv.utils import get_main_window
# Constants
BRICKD_CONF_PATH = '/etc/brickd.conf'
CBOX_BRICKD_LOG_LEVEL_ERROR = 0
CBOX_BRICKD_LOG_LEVEL_WARN = 1
CBOX_BRICKD_LOG_LEVEL_INFO = 2
CBOX_BRICKD_LOG_LEVEL_DEBUG = 3
CBOX_BRICKD_LED_TRIGGER_CPU = 0
CBOX_BRICKD_LED_TRIGGER_GPIO = 1
CBOX_BRICKD_LED_TRIGGER_HEARTBEAT = 2
CBOX_BRICKD_LED_TRIGGER_MMC = 3
CBOX_BRICKD_LED_TRIGGER_OFF = 4
CBOX_BRICKD_LED_TRIGGER_ON = 5
class REDTabSettingsBrickd(QWidget, Ui_REDTabSettingsBrickd):
def __init__(self):
QWidget.__init__(self)
self.setupUi(self)
self.session = None # Set from REDTabSettings
self.script_manager = None # Set from REDTabSettings
self.image_version = None # Set from REDTabSettings
self.service_state = None # Set from REDTabSettings
self.brickd_conf = {}
self.cbox_brickd_ll.addItem('Error')
self.cbox_brickd_ll.addItem('Warn')
self.cbox_brickd_ll.addItem('Info')
self.cbox_brickd_ll.addItem('Debug')
self.cbox_brickd_rt.addItem('cpu')
self.cbox_brickd_rt.addItem('gpio')
self.cbox_brickd_rt.addItem('heartbeat')
self.cbox_brickd_rt.addItem('mmc')
self.cbox_brickd_rt.addItem('off')
self.cbox_brickd_rt.addItem('on')
self.cbox_brickd_gt.addItem('cpu')
self.cbox_brickd_gt.addItem('gpio')
self.cbox_brickd_gt.addItem('heartbeat')
self.cbox_brickd_gt.addItem('mmc')
self.cbox_brickd_gt.addItem('off')
self.cbox_brickd_gt.addItem('on')
# Signals/slots
self.pbutton_brickd_save.clicked.connect(self.slot_brickd_save_clicked)
self.pbutton_brickd_refresh.clicked.connect(self.slot_brickd_refresh_clicked)
self.sbox_brickd_la_ip1.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_la_ip2.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_la_ip3.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_la_ip4.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_lp.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_lwsp.valueChanged.connect(self.brickd_settings_changed)
self.ledit_brickd_secret.textEdited.connect(self.brickd_settings_changed)
self.cbox_brickd_ll.currentIndexChanged.connect(self.brickd_settings_changed)
self.cbox_brickd_rt.currentIndexChanged.connect(self.brickd_settings_changed)
self.cbox_brickd_gt.currentIndexChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_spi_dly.valueChanged.connect(self.brickd_settings_changed)
self.sbox_brickd_rs485_dly.valueChanged.connect(self.brickd_settings_changed)
def tab_on_focus(self):
self.brickd_conf_rfile = REDFile(self.session)
self.slot_brickd_refresh_clicked()
def tab_off_focus(self):
pass
def tab_destroy(self):
pass
    def brickd_button_refresh_enabled(self, state):
self.pbutton_brickd_refresh.setEnabled(state)
if state:
self.pbutton_brickd_refresh.setText('Refresh')
else:
self.pbutton_brickd_refresh.setText('Refreshing...')
def brickd_button_save_enabled(self, state):
self.pbutton_brickd_save.setEnabled(state)
def update_brickd_widget_data(self):
        if self.brickd_conf is None:
return
# Fill keys with default values if not available
if not 'listen.address' in self.brickd_conf:
self.brickd_conf['listen.address'] = '0.0.0.0'
if not 'listen.plain_port' in self.brickd_conf:
self.brickd_conf['listen.plain_port'] = '4223'
if not 'listen.websocket_port' in self.brickd_conf:
self.brickd_conf['listen.websocket_port'] = '0'
if not 'authentication.secret' in self.brickd_conf:
self.brickd_conf['authentication.secret'] = ''
if not 'log.level' in self.brickd_conf:
self.brickd_conf['log.level'] = 'info'
if not 'led_trigger.green' in self.brickd_conf:
self.brickd_conf['led_trigger.green'] = 'heartbeat'
if not 'led_trigger.red' in self.brickd_conf:
self.brickd_conf['led_trigger.red'] = 'off'
if not 'poll_delay.spi' in self.brickd_conf:
self.brickd_conf['poll_delay.spi'] = '50'
if not 'poll_delay.rs485' in self.brickd_conf:
self.brickd_conf['poll_delay.rs485'] = '4000'
l_addr = self.brickd_conf['listen.address'].split('.')
self.sbox_brickd_la_ip1.setValue(int(l_addr[0]))
self.sbox_brickd_la_ip2.setValue(int(l_addr[1]))
self.sbox_brickd_la_ip3.setValue(int(l_addr[2]))
self.sbox_brickd_la_ip4.setValue(int(l_addr[3]))
self.sbox_brickd_lp.setValue(int(self.brickd_conf['listen.plain_port']))
self.sbox_brickd_lwsp.setValue(int(self.brickd_conf['listen.websocket_port']))
self.ledit_brickd_secret.setText(self.brickd_conf['authentication.secret'])
log_level = self.brickd_conf['log.level']
if log_level == 'debug':
self.cbox_brickd_ll.setCurrentIndex(CBOX_BRICKD_LOG_LEVEL_DEBUG)
elif log_level == 'info':
self.cbox_brickd_ll.setCurrentIndex(CBOX_BRICKD_LOG_LEVEL_INFO)
elif log_level == 'warn':
self.cbox_brickd_ll.setCurrentIndex(CBOX_BRICKD_LOG_LEVEL_WARN)
elif log_level == 'error':
self.cbox_brickd_ll.setCurrentIndex(CBOX_BRICKD_LOG_LEVEL_ERROR)
trigger_green = self.brickd_conf['led_trigger.green']
if trigger_green == 'cpu':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_CPU)
elif trigger_green == 'gpio':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_GPIO)
elif trigger_green == 'heartbeat':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_HEARTBEAT)
elif trigger_green == 'mmc':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_MMC)
elif trigger_green == 'off':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_OFF)
elif trigger_green == 'on':
self.cbox_brickd_gt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_ON)
trigger_red = self.brickd_conf['led_trigger.red']
if trigger_red == 'cpu':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_CPU)
elif trigger_red == 'gpio':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_GPIO)
elif trigger_red == 'heartbeat':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_HEARTBEAT)
elif trigger_red == 'mmc':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_MMC)
elif trigger_red == 'off':
self.cbox_brickd_rt.setCurrentIndex(CBOX_BRICKD_LED_TRIGGER_OFF)
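# Hedged alternative sketch (added, not part of the original file): the long
# if/elif chains above that translate config strings into combo-box indices
# could equally be driven by lookup tables such as these (illustrative only).
_LOG_LEVEL_INDEX = {
    'error': CBOX_BRICKD_LOG_LEVEL_ERROR,
    'warn': CBOX_BRICKD_LOG_LEVEL_WARN,
    'info': CBOX_BRICKD_LOG_LEVEL_INFO,
    'debug': CBOX_BRICKD_LOG_LEVEL_DEBUG,
}
_LED_TRIGGER_INDEX = {
    'cpu': CBOX_BRICKD_LED_TRIGGER_CPU,
    'gpio': CBOX_BRICKD_LED_TRIGGER_GPIO,
    'heartbeat': CBOX_BRICKD_LED_TRIGGER_HEARTBEAT,
    'mmc': CBOX_BRICKD_LED_TRIGGER_MMC,
    'off': CBOX_BRICKD_LED_TRIGGER_OFF,
    'on': CBOX_BRICKD_LED_TRIGGER_ON,
}
# e.g. self.cbox_brickd_ll.setCurrentIndex(_LOG_LEVEL_INDEX[log_level])
# and  self.cbox_brickd_gt.setCurrentIndex(_LED_TRIGGER_INDEX[trigger_green])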
|
mayankjohri/LetsExplorePython
|
Section 2 - Advance Python/Chapter S2.04 - Database/code/sqlalchemy/runbook.py
|
Python
|
gpl-3.0
| 2,255 | 0.01155 |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 17 05:52:09 2016
@author: hclqaVirtualBox1
"""
from object_test import session
import random
import string
import model
test_page = model.Page()
N = 5
test_page.title = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(N))
test_page.content = u'Test content'
print(test_page.title)
session.add(test_page)
print("1 ----- TestPage ID")
print(test_page.id)
"""
At this point the test_page object is known to SQLAlchemy,
but not to the database. To send it to the database,
a flush operation can be forced:
"""
session.flush()
print("2 ----- TestPage ID")
print (test_page.id)
"""
Commits - Commits the changes in db
"""
session.commit()
"""
Delete - To delete the test_page object from the database you would use:
"""
session.delete(test_page)
session.flush()
print("3 ----- TestPage ID")
print(test_page.id)
"""
rollback - At this point you can either commit
the transaction or do a rollback.
Let’s do a rollback this time:
"""
session.rollback()
print("4 ----- TestPage ID")
print(test_page.id)
"""
Query - Queries are performed with query objects that are created from the
session. The simplest way to create and use a query object is like this:
"""
page_q = session.query(model.Page)
for page in page_q:
print(page.title)
print("---- page_q.all()")
print(page_q.all())
page = page_q.first()
print(page.title)
print(page_q[2:5])
print(page_q.get(1).title)
#
#
#"""
#Working with Objects
#-------------------
#Now let’s think about how you could add a comment to a page.
# One approach would be to insert a new row in the comment table using the
# SQL Expression API, ensuring that the pageid field contained the value 1
# so that the comment was associated with the correct page via a foreign key.
#
# The Object-Relational API provides a much better approach:
#"""
#
#comment1 = model.Comment()
#comment1.name= u'James'
#comment1.email = u'james@example.com'
#comment1.content = u'This page needs a bit more detail ;-)'
#comment2 = model.Comment()
#comment2.name = u'Mike'
#comment2.email = u'mike@example.com'
#page.comments.append(comment1)
#page.comments.append(comment2)
#session.commit()
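#
# Hedged sketch (added, not part of the files shown here): the kind of
# declarative model module the walkthrough above assumes. Table, column and
# relationship names are illustrative only.
#
# from sqlalchemy import Column, ForeignKey, Integer, Unicode, UnicodeText
# from sqlalchemy.orm import relationship
# from sqlalchemy.ext.declarative import declarative_base
#
# Base = declarative_base()
#
# class Page(Base):
#     __tablename__ = 'page'
#     id = Column(Integer, primary_key=True)
#     title = Column(Unicode(255))
#     content = Column(UnicodeText)
#     comments = relationship('Comment', backref='page')
#
# class Comment(Base):
#     __tablename__ = 'comment'
#     id = Column(Integer, primary_key=True)
#     pageid = Column(Integer, ForeignKey('page.id'))
#     name = Column(Unicode(255))
#     email = Column(Unicode(255))
#     content = Column(UnicodeText)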
|
shimniok/rockblock
|
mtrecv.py
|
Python
|
mit
| 576 | 0.006944 |
#!/usr/bin/env python
##################################################################################################
## mtrecv.py
##
## Receive message via RockBLOCK over serial
##################################################################################################
import sys
import os
from rbControl import RockBlockControl
if __name__ == '__main__':
if len(sys.argv) == 1:
# TODO: configurable serial device
RockBlockControl("/dev/ttyUSB0").mt_recv()
else:
print "usage: %s" % os.path.basename(sys.argv[0])
exit(1)
|
cjaymes/pyscap
|
src/scap/model/xal_2_0/EndorsementLineCodeType.py
|
Python
|
gpl-3.0
| 987 | 0.002026 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.Model import Model
import logging
logger = logging.getLogger(__name__)
class EndorsementLineCodeType(Model):
MODEL_MAP = {
'tag_name': 'EndorsementLineCode',
'attributes': {
'Type': {},
'Code': {}, # from grPostal
'*': {},
}
}
|
jtk1rk/xsubedit
|
view.py
|
Python
|
gpl-3.0
| 14,528 | 0.00351 |
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
gi.require_version('GObject', '2.0')
from gi.repository import Gtk, Gdk, GObject
from gcustom.audioWidget import cAudioWidget
from gcustom.progressBar import cProgressBar
class TimedStatusBar(Gtk.Statusbar):
def __init__(self, timeout):
super(TimedStatusBar, self).__init__()
self.timeout = timeout
self.last_tag = None
def clear(self):
self.pop(0)
self.last_tag = None
def del_timer(self):
if self.last_tag:
GObject.source_remove(self.last_tag)
def set_timer(self):
self.del_timer()
self.last_tag = GObject.timeout_add(self.timeout, self.clear)
def output(self, msg):
self.del_timer()
self.clear()
self.push(0, msg)
self.set_timer()
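# Hedged usage sketch (added, not part of the original file): TimedStatusBar
# shows a transient message that clears itself after the constructor's timeout
# (milliseconds, via GObject.timeout_add).
def _timed_statusbar_demo():
    bar = TimedStatusBar(timeout=4000)  # message disappears after ~4 seconds
    bar.output('Subtitles saved')       # pushes the text and arms the timer
    return bar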
class EventDrawingArea(Gtk.EventBox):
def __init__(self):
super(EventDrawingArea, self).__init__()
self.DrawingArea = Gtk.DrawingArea()
self.add(self.DrawingArea)
self.set_events(Gdk.EventMask.SCROLL_MASK | Gdk.EventMask.POINTER_MOTION_MASK)
self.show_all()
class View(Gtk.Window):
width = 1024
height = 768
audioViewSize = 0.8
subtitlesViewSize = 0.7
widgets = {}
def __init__(self, prog_title):
super(View, self).__init__(title = prog_title)
self.prog_title = prog_title
self.set_default_size(self.width, self.height)
black = Gdk.RGBA(0,0,0,1)
# Widgets
self.widgets['video'] = EventDrawingArea()
self.widgets['video'].override_background_color(0, black)
self.widgets['audio'] = cAudioWidget()
self.widgets['audio'].override_background_color(0, black)
self.widgets['subtitles'] = Gtk.TreeView()
self.widgets['video-eventbox'] = Gtk.EventBox()
self.widgets['scale'] = Gtk.HScale.new_with_range(0,100,1)
self.widgets['scale'].set_property('draw-value', False)
self.widgets['scale'].set_property('has-origin', False)
self.widgets['progress-bar'] = cProgressBar(height = 5)
#self.widgets['statusbar'] = TimedStatusBar(4000)
# Toolbar
self.widgets['toolbar'] = Gtk.Toolbar()
self.widgets['saveFileTB'] = Gtk.ToolButton()
self.widgets['saveFileTB'].set_tooltip_text('Save Project')
self.widgets['saveFileTB'].set_stock_id(Gtk.STOCK_SAVE)
self.widgets['newFileTB'] = Gtk.ToolButton()
self.widgets['newFileTB'].set_tooltip_text('Create Project')
self.widgets['newFileTB'].set_stock_id(Gtk.STOCK_NEW)
self.widgets['openFileTB'] = Gtk.ToolButton()
self.widgets['openFileTB'].set_tooltip_text('Open Project')
self.widgets['openFileTB'].set_stock_id(Gtk.STOCK_OPEN)
        self.widgets['separator1TB'] = Gtk.SeparatorToolItem()
self.widgets['undoTB'] = Gtk.ToolButton()
        self.widgets['undoTB'].set_tooltip_text('Undo')
self.widgets['undoTB'].set_stock_id(Gtk.STOCK_UNDO)
self.widgets['redoTB'] = Gtk.ToolButton()
self.widgets['redoTB'].set_tooltip_text('Redo')
self.widgets['redoTB'].set_stock_id(Gtk.STOCK_REDO)
self.widgets['preferencesTB'] = Gtk.ToolButton()
self.widgets['preferencesTB'].set_tooltip_text('Preferences')
self.widgets['preferencesTB'].set_stock_id(Gtk.STOCK_PROPERTIES)
self.widgets['importSRTTB'] = Gtk.ToolButton()
self.widgets['importSRTTB'].set_tooltip_text('Import/Merge Subtitles')
self.widgets['importSRTTB'].set_stock_id(Gtk.STOCK_ADD)
self.widgets['splitSubsTB'] = Gtk.ToolButton()
self.widgets['splitSubsTB'].set_tooltip_text('Split Subtitle')
self.widgets['splitSubsTB'].set_stock_id(Gtk.STOCK_CUT)
self.widgets['visualSyncTB'] = Gtk.ToolButton()
self.widgets['visualSyncTB'].set_tooltip_text('Visual Sync')
self.widgets['visualSyncTB'].set_stock_id(Gtk.STOCK_REFRESH)
self.widgets['autoSyncOtherVersionTB'] = Gtk.ToolButton()
self.widgets['autoSyncOtherVersionTB'].set_tooltip_text('Try to automatically sync another version.')
self.widgets['autoSyncOtherVersionTB'].set_stock_id(Gtk.STOCK_UNINDENT)
self.widgets['checkTB'] = Gtk.ToolButton()
self.widgets['checkTB'].set_tooltip_text('Check Subtitles')
self.widgets['checkTB'].set_stock_id(Gtk.STOCK_SPELL_CHECK)
self.widgets['separator2TB'] = Gtk.SeparatorToolItem()
self.widgets['separator3TB'] = Gtk.SeparatorToolItem()
self.widgets['separator4TB'] = Gtk.SeparatorToolItem()
self.widgets['position-label'] = Gtk.Label('Position: 00:00:00,000 ')
self.widgets['duration-label'] = Gtk.Label('Duration: 00:00:00,000\t\t')
self.widgets['MergeSplitTB'] = Gtk.ToolButton()
self.widgets['MergeSplitTB'].set_tooltip_text('Merge/Split (non project subs)')
self.widgets['MergeSplitTB'].set_stock_id(Gtk.STOCK_PAGE_SETUP)
self.widgets['toolbar'].add(self.widgets['newFileTB'])
self.widgets['toolbar'].add(self.widgets['openFileTB'])
self.widgets['toolbar'].add(self.widgets['saveFileTB'])
self.widgets['toolbar'].add(self.widgets['separator1TB'])
self.widgets['toolbar'].add(self.widgets['preferencesTB'])
self.widgets['toolbar'].add(self.widgets['separator2TB'])
self.widgets['toolbar'].add(self.widgets['undoTB'])
self.widgets['toolbar'].add(self.widgets['redoTB'])
self.widgets['toolbar'].add(self.widgets['separator3TB'])
self.widgets['toolbar'].add(self.widgets['importSRTTB'])
self.widgets['toolbar'].add(self.widgets['splitSubsTB'])
#self.widgets['toolbar'].add(self.widgets['autoSyncOtherVersionTB'])
self.widgets['toolbar'].add(self.widgets['checkTB'])
self.widgets['toolbar'].add(self.widgets['visualSyncTB'])
self.widgets['toolbar'].add(self.widgets['separator4TB'])
self.widgets['toolbar'].add(self.widgets['MergeSplitTB'])
# AudioView Context Menu
self.widgets['AudioContextMenu'] = Gtk.Menu()
self.widgets['ACM-SplitHere'] = Gtk.MenuItem('Split Subtitle')
self.widgets['ACM-CreateHere'] = Gtk.MenuItem('New Subtitle')
self.widgets['ACM-DeleteSub'] = Gtk.MenuItem('Delete Subtitle')
self.widgets['ACM-ResetAudioScale'] = Gtk.MenuItem('Reset Vertical Zoom')
self.widgets['ACM-StickZoom'] = Gtk.CheckMenuItem('Stick Zoom')
self.widgets['ACM-StickZoom'].set_tooltip_text('Keep current horizontal zoom size')
self.widgets['AudioContextMenu'].add(self.widgets['ACM-CreateHere'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-SplitHere'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-DeleteSub'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-ResetAudioScale'])
self.widgets['AudioContextMenu'].add(self.widgets['ACM-StickZoom'])
self.widgets['ACM-StickZoom'].show()
self.widgets['ACM-SplitHere'].show()
self.widgets['ACM-CreateHere'].show()
self.widgets['ACM-DeleteSub'].show()
# Header Context Menu
self.widgets['HeaderContextMenu'] = Gtk.Menu()
self.widgets['HCM-N'] = Gtk.CheckMenuItem('N')
self.widgets['HCM-StartTime'] = Gtk.CheckMenuItem('StartTime')
self.widgets['HCM-StopTime'] = Gtk.CheckMenuItem('StopTime')
self.widgets['HCM-Duration'] = Gtk.CheckMenuItem('Duration')
self.widgets['HCM-Reference'] = Gtk.CheckMenuItem('Reference')
self.widgets['HCM-RS'] = Gtk.CheckMenuItem('RS')
self.widgets['HCM-Count'] = Gtk.CheckMenuItem('Count')
self.widgets['HCM-Info'] = Gtk.CheckMenuItem('Info')
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-N'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-StartTime'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-StopTime'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-Duration'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-Reference'])
self.widgets['HeaderContextMenu'].add(self.widgets['HCM-RS'])
|
thinkAmi-sandbox/Django_separate_model_file-sample
|
myproject/settings.py
|
Python
|
unlicense
| 3,216 | 0.001244 |
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'le0az@o@j&x@5gl01_fp6&rj445lmxj15ngt2x^x#$ng71)^yd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.myapp',
'apps.outsideapp',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
emehrkay/Gizmo
|
gizmo/test/integration/tinkerpop.py
|
Python
|
mit
| 1,104 | 0.001812 |
import asyncio
import unittest
import random
from gremlinpy import Gremlin
from . import ConnectionTestCases, EntityTestCases, MapperTestCases
from gizmo import Mapper, Request, Collection, Vertex, Edge
from gizmo.mapper import EntityMapper
class BaseTests(unittest.TestCase):
def setUp(self):
self.request = Request('localhost', port=8182)
self.gremlin = Gremlin('gizmo_testing')
self.mapper = Mapper(self.request, self.gremlin)
        self.ioloop = asyncio.get_event_loop()
super(BaseTests, self).setUp()
def tearDown(self):
super(BaseTests, self).tearDown()
async def purge(self):
script = "%s.V().map{it.get().remove()}" % self.gremlin.gv
res = await self.mapper.query(script=script)
return res
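    # Illustrative note (added): with the graph name used in setUp, the rendered
    # purge script would read roughly
    #     gizmo_testing.V().map{it.get().remove()}
    # i.e. visit every vertex and remove it, leaving the test graph empty.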
class ConnectionTests(BaseTests, ConnectionTestCases):
pass
class EntityTests(EntityTestCases, BaseTests):
pass
class MapperTests(MapperTestCases, BaseTests):
pass
class CollectionTests(BaseTests):
pass
class TraversalTests(BaseTests):
pass
if __name__ == '__main__':
unittest.main()
|
vivescere/yajuu
|
yajuu/extractors/season_extractor.py
|
Python
|
gpl-3.0
| 1,048 | 0.000954 |
from yajuu.extractors.extractor import Extractor
from yajuu.media.sources.source_list import SourceList
class SeasonExtractor(Extractor):
def __init__(self, media, season, range_):
super().__init__(media)
self.seasons = {}
self.season = season
self.start, self.end = range_
# Overwrite
self.sources = {}
def _should_process(self, episode_identifier):
try:
episode_number = int(episode_identifier)
except ValueError:
return False
return self.start <= episode_number <= self.end
    def _add_source(self, identifier, source):
if identifier not in self.sources:
self.sources[identifier] = SourceList()
self.sources[identifier].add_source(source)
return True
def _add_sources(self, identifier, sources):
returned = []
if sources is None:
return
for source in sources:
returned.append(self._add_source(identifier, source))
return returned
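# Hedged illustration (added, not part of the original module): the range check
# in _should_process() above reduces to this; non-numeric identifiers are
# skipped and numeric ones must fall inside the inclusive [start, end] window.
def _in_range(identifier, start, end):
    try:
        number = int(identifier)
    except ValueError:
        return False
    return start <= number <= end
# e.g. _in_range('07', 5, 12) -> True, _in_range('OVA', 5, 12) -> False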
|
UNINETT/nav
|
tests/unittests/smsd/dispatcher_test.py
|
Python
|
gpl-2.0
| 2,660 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License version 3 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Unit tests for the dispatcher module."""
from __future__ import print_function
import types
import pytest
from nav.smsd import dispatcher
class TestDispatcherHandler(object):
"""Tests for the DispatcherHandler class.
Uses a subclass of the DispatcherHandler to provide a fake
dispatcher loader function. This loads a faked dispatcher
module/class that will cooperate with this unit test.
"""
config = {
'main': {'exit_on_permanent_error': 'yes'},
'dispatcher': {'dispatcherretry': '30',
'dispatcher1': 'FakeDispatcher'},
'FakeDispatcher': {}
}
def test_init_with_simple_config(self):
assert FakeDispatcherHandler(self.config)
def test_empty_message_list(self):
handler = FakeDispatcherHandler(self.config)
assert handler.sendsms('fakenumber', [])
def test_dispatcher_exception(self):
handler = FakeDispatcherHandler(self.config)
with pytest.raises(dispatcher.DispatcherError):
handler.sendsms('failure', [])
def test_dispatcher_unhandled_exception(self):
handler = FakeDispatcherHandler(self.config)
with pytest.raises(dispatcher.DispatcherError):
handler.sendsms('unhandled', [])
class FakeDispatcherHandler(dispatcher.DispatcherHandler):
def importbyname(self, name):
print("import by name: %r" % name)
        fakemodule = types.ModuleType('fakedispatcher')
fakemodule.FakeDispatcher = FakeDispatcher
return fakemodule
class FakeDispatcher(object):
def __init__(self, *args, **kwargs):
self.lastfailed = None
pass
def sendsms(self, phone, msgs):
print("got phone %r and msgs %r" % (phone, msgs))
if phone == 'failure':
raise dispatcher.DispatcherError('FakeDispatcher failed')
elif phone == 'unhandled':
raise Exception('This exception should be unknown')
return (None, 1, 0, 1, 1)
|
scikit-rf/scikit-rf
|
skrf/media/tests/test_media.py
|
Python
|
bsd-3-clause
| 11,278 | 0.011601 |
# -*- coding: utf-8 -*-
import unittest
import os
import numpy as npy
from skrf.media import DefinedGammaZ0, Media
from skrf.network import Network
from skrf.frequency import Frequency
import skrf
class DefinedGammaZ0TestCase(unittest.TestCase):
def setUp(self):
self.files_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'qucs_prj'
)
self.dummy_media = DefinedGammaZ0(
frequency = Frequency(1,100,21,'ghz'),
gamma=1j,
z0 = 50 ,
)
def test_impedance_mismatch(self):
"""
"""
fname = os.path.join(self.files_dir,\
'impedanceMismatch,50to25.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.thru(z0=50)**\
self.dummy_media.thru(z0=25)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_resistor(self):
"""
"""
fname = os.path.join(self.files_dir,\
'resistor,1ohm.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.resistor(1)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_capacitor(self):
"""
"""
fname = os.path.join(self.files_dir,\
'capacitor,p01pF.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.capacitor(.01e-12)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_inductor(self):
"""
"""
fname = os.path.join(self.files_dir,\
'inductor,p1nH.s2p')
qucs_ntwk = Network(fname)
self.dummy_media.frequency = qucs_ntwk.frequency
skrf_ntwk = self.dummy_media.inductor(.1e-9)
self.assertEqual(qucs_ntwk, skrf_ntwk)
def test_scalar_gamma_z0_media(self):
"""
test ability to create a Media from scalar quantities for gamma/z0
and change frequency resolution
"""
a = DefinedGammaZ0 (Frequency(1,10,101),gamma=1j,z0 = 50)
self.assertEqual(a.line(1),a.line(1))
# we should be able to re-sample the media
a.npoints = 21
self.assertEqual(len(a.gamma), len(a))
self.assertEqual(len(a.z0), len(a))
self.assertEqual(len(a.z0), len(a))
def test_vector_gamma_z0_media(self):
"""
test ability to create a Media from vector quantities for gamma/z0
"""
freq = Frequency(1,10,101)
a = DefinedGammaZ0(freq,
gamma = 1j*npy.ones(len(freq)) ,
z0 = 50*npy.ones(len(freq)),
)
self.assertEqual(a.line(1),a.line(1))
with self.assertRaises(NotImplementedError):
a.npoints=4
def test_write_csv(self):
fname = os.path.join(self.files_dir,\
'out.csv')
self.dummy_media.write_csv(fname)
os.remove(fname)
def test_from_csv(self):
fname = os.path.join(self.files_dir,\
'out.csv')
self.dummy_media.write_csv(fname)
a_media = DefinedGammaZ0.from_csv(fname)
self.assertEqual(a_media,self.dummy_media)
os.remove(fname)
class STwoPortsNetworkTestCase(unittest.TestCase):
"""
Check that S parameters of media base elements versus theoretical results.
"""
def setUp(self):
self.dummy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21, 'GHz'),
gamma=1j,
z0=50,
)
def test_s_series_element(self):
"""
Series elements of impedance Z:
○---[Z]---○
○---------○
have S matrix of the form:
[ Z/Z0 / (Z/Z0 + 2) 2/(Z/Z0 + 2) ]
[ 2/(Z/Z0 + 2) Z/Z0 / (Z/Z0 + 2) ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.resistor(R)
Z0 = self.dummy_media.z0
S11 = (R/Z0) / (R/Z0 + 2)
S21 = 2 / (R/Z0 + 2)
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
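        # Worked example (added for illustration): with R = 1 ohm and Z0 = 50 ohm
        # as above, Z/Z0 = 0.02, so S11 = 0.02/2.02 ≈ 0.0099 and
        # S21 = 2/2.02 ≈ 0.9901, i.e. a 1-ohm series resistor in a 50-ohm system
        # is very nearly a perfect thru.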
def test_s_shunt_element(self):
"""
Shunt elements of admittance Y:
○---------○
|
[Y]
|
○---------○
have S matrix of the form:
[ -Y Z0 / (Y Z0 + 2) 2/(Y Z0 + 2) ]
        [ 2/(Y Z0 + 2)      -Y Z0 / (Y Z0 + 2) ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.shunt(self.dummy_media.resistor(R)**self.dummy_media.short())
Z0 = self.dummy_media.z0
S11 = -(1/R*Z0) / (1/R*Z0 + 2)
S21 = 2 / (1/R*Z0 + 2)
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_lossless_line(self):
"""
Lossless transmission line of characteristic impedance z1, length l
and wavenumber beta
_______
○----- -----○
z0 z1 z0
○-----_______-----○
"""
l = 5.0
z1 = 30.0
z0 = self.dummy_media.z0
ntw = self.dummy_media.line(d=0, unit='m', z0=z0) \
** self.dummy_media.line(d=l, unit='m', z0=z1) \
** self.dummy_media.line(d=0, unit='m', z0=z0)
beta = self.dummy_media.beta
_z1 = z1/z0
S11 = 1j*(_z1**2 - 1)*npy.sin(beta*l) / \
(2*_z1*npy.cos(beta*l) + 1j*(_z1**2 + 1)*npy.sin(beta*l))
S21 = 2*_z1 / \
(2*_z1*npy.cos(beta*l) + 1j*(_z1**2 + 1)*npy.sin(beta*l))
npy.testing.assert_array_almost_equal(ntw.s[:,0,0], S11)
npy.testing.assert_array_almost_equal(ntw.s[:,0,1], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,0], S21)
npy.testing.assert_array_almost_equal(ntw.s[:,1,1], S11)
def test_s_lossy_line(self):
"""
Lossy transmission line of characteristic impedance Z0, length l
and propagation constant gamma = alpha + j beta
○---------○
○---------○
has ABCD matrix of the form:
[ cosh(gamma l) Z0 sinh(gamma l) ]
[ 1/Z0 sinh(gamma l) cosh(gamma l) ]
"""
class ABCDTwoPortsNetworkTestCase(unittest.TestCase):
"""
Check that ABCD parameters of media base elements (such as lumped elements)
versus theoretical results.
"""
def setUp(self):
self.dummy_media = DefinedGammaZ0(
frequency=Frequency(1, 100, 21,'GHz'),
gamma=1j,
z0=50 ,
)
def test_abcd_series_element(self):
"""
Series elements of impedance Z:
○---[Z]---○
○---------○
have ABCD matrix of the form:
[ 1 Z ]
[ 0 1 ]
"""
R = 1.0 # Ohm
ntw = self.dummy_media.resistor(R)
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], R)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_shunt_element(self):
"""
Shunt elements of admittance Y:
○---------○
|
[Y]
|
○---------○
        have ABCD matrix of the form:
[ 1 0 ]
[ Y 1 ]
"""
R = 1.0 # Ohm
        ntw = self.dummy_media.shunt(self.dummy_media.resistor(R)**self.dummy_media.short())
npy.testing.assert_array_almost_equal(ntw.a[:,0,0], 1.0)
npy.testing.assert_array_almost_equal(ntw.a[:,0,1], 0.0)
npy.testing.assert_array_almost_equal(ntw.a[:,1,0], 1.0/R)
npy.testing.assert_array_almost_equal(ntw.a[:,1,1], 1.0)
def test_abcd_series_shunt_elements(se
|
google/grr
|
grr/server/grr_response_server/gui/api_call_router_test.py
|
Python
|
apache-2.0
| 4,408 | 0.006352 |
#!/usr/bin/env python
"""Tests for API call routers."""
from absl import app
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_proto import tests_pb2
from grr_response_server import access_control
from grr_response_server.gui import api_call_router
from grr.test_lib import test_lib
class SingleMethodDummyApiCallRouter(api_call_router.ApiCallRouter):
"""Dummy ApiCallRouter implementation overriding just a single method."""
@api_call_router.Http("GET", "/api/foo/bar")
def SomeRandomMethod(self, args, context=None):
pass
def CreateFlow(self, args, context=None):
pass
class SingleMethodDummyApiCallRouterChild(SingleMethodDummyApiCallRouter):
pass
class EmptyRouter(api_call_router.ApiCallRouterStub):
pass
class ApiCallRouterTest(test_lib.GRRBaseTest):
"""Tests for ApiCallRouter."""
def testAllAnnotatedMethodsAreNotImplemented(self):
# We can't initialize ApiCallRouter directly because it's abstract.
router = EmptyRouter()
for name in api_call_router.ApiCallRouter.GetAnnotatedMethods():
with self.assertRaises(NotImplementedError):
getattr(router, name)(None, context=None)
def testGetAnnotatedMethodsReturnsNonEmptyDict(self):
methods = api_call_router.ApiCallRouterStub.GetAnnotatedMethods()
self.assertTrue(methods)
def testGetAnnotatedMethodsReturnsMethodsFromAllClassesInMroChain(self):
self.assertIn("SomeRandomMethod",
SingleMethodDummyApiCallRouter.GetAnnotatedMethods())
self.assertIn("SomeRandomMethod",
SingleMethodDummyApiCallRouterChild.GetAnnotatedMethods())
def testHttpUrlParametersMatchArgs(self):
"""Tests that URL params are actual fields of ArgsType in HTTP routes."""
# Example:
# @ArgsType(api_client.ApiGetClientArgs)
# @Http("GET", "/api/clients/<client_id>")
methods = api_call_router.ApiCallRouterStub.GetAnnotatedMethods()
for method in methods.values():
if method.args_type is None:
continue # Skip methods like ListOutputPluginDescriptors.
valid_parameters = method.args_type.type_infos.descriptor_names
for name in method.GetQueryParamsNames():
self.assertIn(
name, valid_parameters,
"Parameter {} in route {} is not found in {}. "
"Valid parameters are {}.".format(
name, method.name, compatibility.GetName(method.args_type),
valid_parameters))
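  # Illustrative example (added, not from the original file): the convention the
  # test above enforces is that every URL parameter names a field of the
  # method's args type, e.g.
  #
  #   @api_call_router.ArgsType(ApiSingleStringArgument)
  #   @api_call_router.Http("GET", "/api/test/<arg>")
  #   def SomeTestMethod(self, args, context=None):
  #     ...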
def testRouterMethodNamesAreInLengthLimit(self):
for name in api_call_router.ApiCallRouterStub.GetAnnotatedMethods():
self.assertLessEqual(
len(name), 128,
"Router method name {} exceeds MySQL length limit of 128.".format(
name))
class DisabledApiCallRouterTest(test_lib.GRRBaseTest):
"""Tests for ApiCallRouter."""
def testRaisesUnauthorizedAccess(self):
router = api_call_router.DisabledApiCallRouter()
with self.assertRaises(access_control.UnauthorizedAccess):
router.SearchClients(None)
class ApiSingleStringArgument(rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.ApiSingleStringArgument
class RouterMethodMetadataTest(test_lib.GRRBaseTest):
"""Tests for RouterMethodMetadata."""
def testGetQueryParamsNamesReturnsEmptyListsOnEmptyMetadata(self):
m = api_call_router.RouterMethodMetadata("SomeMethod")
self.assertEqual(m.GetQueryParamsNames(), [])
  def testGetQueryParamsNamesReturnsMandatoryParamsCorrectly(self):
m = api_call_router.RouterMethodMetadata(
"SomeMethod", http_methods=[("GET", "/a/<arg>/<bar:zoo>", {})])
self.assertEqual(m.GetQueryParamsNames(), ["arg", "zoo"])
def testGetQueryParamsNamesReturnsOptionalParamsForGET(self):
m = api_call_router.RouterMethodMetadata(
"SomeMethod",
args_type=ApiSingleStringArgument,
http_methods=[("GET", "/a/<foo>/<bar:zoo>", {})])
self.assertEqual(m.GetQueryParamsNames(), ["foo", "zoo", "arg"])
def testGetQueryParamsNamesReturnsNoOptionalParamsForPOST(self):
m = api_call_router.RouterMethodMetadata(
"SomeMethod",
args_type=ApiSingleStringArgument,
http_methods=[("POST", "/a/<foo>/<bar:zoo>", {})])
self.assertEqual(m.GetQueryParamsNames(), ["foo", "zoo"])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
tomprince/txgithub
|
txgithub/scripts/tests/test_gist.py
|
Python
|
mit
| 8,506 | 0 |
"""
Tests for L{txgithub.scripts.gist}
"""
import io
from collections import namedtuple
from twisted.python import usage
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.defer import Deferred, succeed
from txgithub.scripts import gist
from . _options import (_OptionsTestCaseMixin,
_FakeOptionsTestCaseMixin,
_FakePrintTestCaseMixin,
_FakeSystemExitTestCaseMixin,
_SystemExit)
class OptionsTestCase(_OptionsTestCaseMixin):
"""
Tests for L{gist.Options}
"""
files = ('files',)
required_args = files
options_factory = gist.Options
def test_single_file_ok(self):
"""
Files is an argument.
"""
self.config.parseOptions(self.files)
self.assertEqual(self.config['files'], self.files)
def test_files_ok(self):
"""
Multiple files are collected.
"""
self.config.parseOptions(["file1", "file2"])
self.assertEqual(self.config['files'], ("file1", "file2"))
def test_token_ok(self):
"""
--token is an option.
"""
token = 'some token'
self.assert_option(['--token=' + token], 'token', token)
def test_t_ok(self):
"""
-t is short for --token
"""
token = 'some token'
self.assert_option(['-t', token], 'token', token)
class RecordsFakeGistsEndpoint(object):
"""
Records and orchestrates L{FakeGistsEndpoint}.
"""
def __init__(self):
self.create_calls = []
self.create_returns = Deferred()
class FakeGistsEndpoint(object):
"""
    A fake implementation of the gists endpoint of L{txgithub.api.GithubApi}
    that records calls.
"""
def __init__(self, recorder):
self._recorder = recorder
def create(self, files):
self._recorder.create_calls.append(files)
return self._recorder.create_returns
class RecordsFakeGithubAPI(object):
"""
Records and orchestrates L{FakeGithubAPI}.
"""
def __init__(self):
self.init_calls = []
class FakeGithubAPI(object):
"""
A fake implementation of L{txgithub.api.GithubApi} that records
calls.
"""
def __init__(self, recorder, gists):
self._recorder = recorder
self.gists = gists
def _init(self, token):
self._recorder.init_calls.append(token)
return self
class PostGistTests(SynchronousTestCase):
"""
Tests for L{gist.postGist}.
"""
def setUp(self):
self.token = "token"
self.getToken_call_count = 0
self.getToken_returns = succeed(self.token)
        self.gists_recorder = RecordsFakeGistsEndpoint()
self.gists = FakeGistsEndpoint(self.gists_recorder)
self.api_recorder = RecordsFakeGithubAPI()
        self.fake_api = FakeGithubAPI(self.api_recorder, self.gists)
self.content = u"content"
self.stdin = io.StringIO(self.content)
self.open_calls = []
self.open_returns = io.StringIO(self.content)
self.print_calls = []
self.patch(gist, "getToken", self.fake_getToken)
self.patch(gist, "GithubApi", self.fake_api._init)
self.patch(gist, "_open", self.fake_open)
self.patch(gist, "stdin", self.stdin)
self.patch(gist, "_print", self.fake_print)
def fake_getToken(self):
"""
A fake get token implementation that records its calls.
"""
self.getToken_call_count += 1
return self.getToken_returns
def fake_open(self, filename):
"""
A fake L{open} that records its calls.
"""
self.open_calls.append(filename)
return self.open_returns
def fake_print(self, *args):
"""
A fake L{print} that records its calls.
"""
self.print_calls.append(args)
def test_getToken_by_default(self):
"""
When no token is provided, the get token implementation is
called to retrieve one.
"""
gist.postGist("reactor", token="", files=["something"])
self.assertEqual(self.getToken_call_count, 1)
self.assertEqual(self.api_recorder.init_calls, [self.token])
def test_token_used(self):
"""
The provided token is used to connect to GitHub.
"""
token = "my token"
gist.postGist("reactor", token=token, files=["something"])
self.assertEqual(self.getToken_call_count, 0)
self.assertEqual(self.api_recorder.init_calls, [token])
def test_stdin_gist(self):
"""
When no files are provided, the gist is read from stdin.
"""
gist.postGist("reactor", token=self.token, files=())
self.assertEqual(self.gists_recorder.create_calls, [
{
"gistfile1": {
"content": self.content,
},
}
])
self.assertEqual(self.stdin.tell(), len(self.content))
def test_files_used(self):
"""
The filenames provided are read and comprise the gist's content.
"""
filename = "some file"
gist.postGist("reactor", token=self.token, files=[filename])
self.assertEqual(self.open_calls, [filename])
self.assertTrue(self.open_returns.closed)
self.assertEqual(self.gists_recorder.create_calls, [
{
"some file": {
"content": self.content,
},
}
])
def test_response_printed(self):
"""
The URL in the API's response is printed.
"""
url = "https://something"
response = gist.postGist("reactor", token=self.token, files=[])
self.gists_recorder.create_returns.callback(
{
"html_url": url,
}
)
self.successResultOf(response)
self.assertEqual(self.print_calls, [(url,)])
_PostGistCall = namedtuple("_PostGistCall",
["reactor", "token", "files"])
class RunTests(_FakeOptionsTestCaseMixin,
_FakeSystemExitTestCaseMixin,
_FakePrintTestCaseMixin):
"""
Tests for L{txgithub.scripts.gist.run}
"""
def setUp(self):
super(RunTests, self).setUp()
self.postGist_calls = []
self.postGist_returns = "postGist return value"
self.patch(gist, "Options", lambda: self.options)
self.patch(gist, "_print", self.fake_print)
self.patch(gist, "exit", self.fake_exit)
self.patch(gist, "postGist", self.fake_postGist)
def fake_postGist(self, reactor, token, files):
"""
A fake L{gist.postGist} implementation that records its calls.
"""
self.postGist_calls.append(_PostGistCall(reactor, token, files))
return self.postGist_returns
def test_run_usage_error(self):
"""
A usage error results in a help message and an exit code of 1.
"""
errortext = "error text"
first_line = ': '.join([self.argv0, errortext])
self.options_recorder.parseOptions_raises = usage.UsageError(errortext)
self.assertRaises(_SystemExit,
gist.run, "reactor", self.argv0, "bad args")
self.assertEqual(self.options_recorder.parseOptions_calls,
[("bad args",)])
self.assertEqual(len(self.print_calls), 2)
self.assertEqual(self.print_calls[0], (first_line,))
self.assertIn("--help", self.print_calls[1][0])
self.assertEqual(len(self.exit_calls), 1)
[code] = self.exit_calls
self.assertEqual(code, 1)
self.assertNot(self.postGist_calls)
def test_run_ok(self):
"""
The post gist implementation is called with the options
specified on the command line.
"""
reactor = "reactor"
self.options["token"] = "the token"
self.options["files"] = ("file1",)
result = gist.run(reactor, self.argv0, "good args")
self.assertEqual(self.options_recorder.parseOptions_calls,
[("
|
heidi666/WorldsAtWar
|
wawmembers/apps.py
|
Python
|
mit
| 136 | 0 |
from __future__ import unicode_literals
from django.apps import AppConfig
class WawmembersConfig(AppConfig):
    name = 'wawmembers'
|
ajdawson/eofs
|
doc/conf.py
|
Python
|
gpl-3.0
| 9,234 | 0.00574 |
# -*- coding: utf-8 -*-
#
# eofs documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 5 15:47:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
              'matplotlib.sphinxext.plot_directive',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'eofs'
copyright = '2013-{} Andrew Dawson'.format(time.localtime().tm_year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import eofs
version = eofs.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- extlinks configuration ----------------------------------------------------
# Allow e.g. :issue:`42` and :pr:`42` roles:
extlinks = {'issue': ('https://github.com/ajdawson/eofs/issues/%s', '#'),
'pr': ('https://github.com/ajdawson/eofs/pull/%s', '#')}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx13'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['sidebar_toc.html',
'relations.html',
'sourcelink.html',
'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'eofsdoc'
# Options for intersphinx.
intersphinx_mapping = {
'eof2': ('http://ajdawson.github.com/eof2', None),
'iris': ('http://scitools.org.uk/iris/docs/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'xarray': ('http://xarray.pydata.org/en/stable', None),
'dask': ('https://docs.dask.org/en/latest', None),
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
'preamble': """\\usepackage{amssymb}
\\usepackage{amsmath}""",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('userguide/index', 'userguide.tex', 'eofs User Guide', 'Andrew Dawson',
'manual'),
('examples/index', 'examples.tex', 'eofs Examples', 'Andrew Dawson',
'manual'),
('api/index', 'api.tex', 'eofs API Reference', 'Andrew Dawson',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# On
|
bks/veusz
|
veusz/utils/formatting.py
|
Python
|
gpl-2.0
| 7,914 | 0.002654 |
# Copyright (C) 2010 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from __future__ import division
import re
import math
import numpy as N
from . import dates
_formaterror = 'FormatError'
# a format statement in a string
_format_re = re.compile(r'%([-#0-9 +.hlL]*?)([diouxXeEfFgGcrs%])')
def localeFormat(totfmt, args, locale=None):
"""Format using fmt statement fmt, qt QLocale object locale and
arguments to formatting args.
    * arguments are not supported in this formatting, nor is using
    a dict to supply values for the statement
"""
# substitute all format statements with string format statements
newfmt = _format_re.sub("%s", totfmt)
# do formatting separately for all statements
strings = []
i = 0
for f in _format_re.finditer(totfmt):
code = f.group(2)
if code == '%':
s = '%'
else:
try:
s = f.group() % args[i]
i += 1
except IndexError:
raise TypeError("Not enough arguments for format string")
s = s.replace('-', u'\u2212')
if locale is not None and code in 'eEfFgG':
s = s.replace('.', locale.decimalPoint())
strings.append(s)
if i != len(args):
raise TypeError("Not all arguments converted during string formatting")
return newfmt % tuple(strings)
def sciToHuman(val, cleanup=False):
"""Convert output from C formatting to human scientific notation.
if cleanup, remove zeros after decimal points
"""
# split around the exponent
leader, exponent = val.split('e')
# strip off trailing decimal point and zeros if no format args
if cleanup and leader.find('.') >= 0:
leader = leader.rstrip('0').rstrip('.')
# trim off leading 1
if leader == '1' and cleanup:
leader = ''
else:
# add multiply sign
leader += u'\u00d7'
return '%s10^{%i}' % (leader, int(exponent))
def formatSciNotation(num, formatargs, locale=None):
"""Format number into form X \times 10^{Y}.
This function trims trailing zeros and decimal point unless a formatting
argument is supplied
This is similar to the %e format string
formatargs is the standard argument in a format string to control the
number of decimal places, etc.
locale is a QLocale object
"""
# handle nan, inf, -inf
if not N.isfinite(num):
return str(num)
# create an initial formatting string
if formatargs:
formatstr = '%' + formatargs + 'e'
else:
formatstr = '%.10e'
# do formatting, catching errors
try:
text = formatstr % num
except:
return _formaterror
text = sciToHuman(text, cleanup=formatargs=='')
# do substitution of decimals
if locale is not None:
text = text.replace('.', locale.decimalPoint())
return text
def formatGeneral(num, fmtarg, locale=None):
"""General formatting which switches from normal to scientic
notation."""
if fmtarg:
# if an argument is given, we convert output
try:
retn = ('%'+fmtarg+'g') % num
except ValueError:
retn = _formaterror
if retn.find('e') >= 0:
# in scientific notation, so convert
retn = sciToHuman(retn, cleanup=False)
else:
a = abs(num)
# manually choose when to switch from normal to scientific
# as the default %g isn't very good
if a >= 1e4 or (a < 1e-2 and a > 1e-110):
retn = formatSciNotation(num, fmtarg, locale=locale)
else:
retn = '%.10g' % num
if locale is not None:
# replace decimal point with correct decimal point
retn = retn.replace('.', locale.decimalPoint())
return retn
engsuffixes = ( 'y', 'z', 'a', 'f', 'p', 'n',
u'\u03bc', 'm', '', 'k', 'M', 'G',
'T', 'P', 'E', 'Z', 'Y' )
def formatEngineering(num, fmtarg, locale=None):
"""Engineering suffix format notation using SI suffixes."""
if num != 0.:
logindex = math.log10( abs(num) ) / 3.
# for numbers < 1 round down suffix
if logindex < 0. and (int(logindex)-logindex) > 1e-6:
logindex -= 1
# make sure we don't go out of bounds
logindex = min( max(logindex, -8),
len(engsuffixes) - 9 )
suffix = engsuffixes[ int(logindex) + 8 ]
val = num / 10**( int(logindex) *3)
else:
suffix = ''
val = num
text = ('%' + fmtarg + 'g%s') % (val, suffix)
if locale is not None:
text = text.replace('.', locale.decimalPoint())
return text
# catch general veusz formatting expression
_formatRE = re.compile(r'%([-0-9.+# ]*)(VDVS|VD.|V.|[A-Za-z%])')
def formatNumber(num, formatstr, locale=None):
""" Format a number in different ways.
formatstr is a standard C format string, with some additions:
%Ve scientific notation X \times 10^{Y}
%Vg switches from normal notation to scientific outside 10^-2 to 10^4
%VE engineering suffix option
%VDx date formatting, where x is one of the arguments in
http://docs.python.org/lib/module-time.html in the function
strftime
"""
outitems = []
while formatstr:
# repeatedly try to do string format
match = _formatRE.search(formatstr)
if not match:
outitems.append(formatstr)
break
# argument and type of formatting
farg, ftype = match.groups()
# special veusz formatting
if ftype[:1] == 'V':
# special veusz formatting
if ftype == 'Ve':
out = formatSciNotation(num, farg, locale=locale)
elif ftype == 'Vg':
out = formatGeneral(num, farg, locale=locale)
elif ftype == 'VE':
out = formatEngineering(num, farg, locale=locale)
elif ftype[:2] == 'VD':
d = dates.floatToDateTime(num)
# date formatting (seconds since start of epoch)
if ftype[:4] == 'VDVS':
# special seconds operator
out = ('%'+ftype[4:]+'g') % (d.second+d.microsecond*1e-6)
else:
# use date formatting
try:
out = d.strftime(str('%'+ftype[2:]))
except ValueError:
out = _formaterror
else:
out = _formaterror
# replace hyphen with true minus sign
out = out.replace('-', u'\u2212')
elif ftype == '%':
out = '%'
else:
# standard C formatting
try:
out = localeFormat('%' + farg + ftype, (num,), locale=locale)
except:
out = _formaterror
outitems.append(formatstr[:match.start()])
outitems.append(out)
formatstr = formatstr[match.end():]
return ''.join(outitems)
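# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal sketch of the custom %V format codes handled by formatNumber above,
# assuming the module-level imports that precede this excerpt (numpy as N, math,
# re, the dates module and _formaterror) are in place. The outputs shown are the
# expected results from tracing formatSciNotation, formatGeneral and
# formatEngineering with locale=None:
#
#   formatNumber(1234.5678, '%.2Ve')   # -> u'1.23\u00d710^{3}'   (scientific notation)
#   formatNumber(0.001, '%Vg')         # -> u'10^{\u22123}'       (general form, switches to scientific)
#   formatNumber(4.7e-6, '%.3VE')      # -> u'4.7\u03bc'          (engineering suffix, micro)
#   formatNumber(42.0, '%.1f ms')      # -> '42.0 ms'             (plain C-style %f formatting)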
|
googleapis/python-area120-tables
|
samples/generated_samples/area120tables_v1alpha1_generated_tables_service_list_tables_async.py
|
Python
|
apache-2.0
| 1,496 | 0.000668 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListTables
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-area120-tables
# [START area120tables_v1alpha1_generated_TablesService_ListTables_async]
from google.area120 import tables_v1alpha1
async def sample_list_tables():
# Create a client
client = tables_v1alpha1.TablesServiceAsyncClient()
# Initialize request argument(s)
request = tables_v1alpha1.ListTablesRequest(
)
# Make the request
page_result = client.list_tables(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END area120tables_v1alpha1_generated_TablesService_ListTables_async]
|
c2nes/javalang
|
javalang/test/test_java_8_syntax.py
|
Python
|
mit
| 8,702 | 0.000115 |
import unittest
from pkg_resources import resource_string
from .. import parse, parser, tree
def setup_java_class(content_to_add):
""" returns an example java class with the
given content_to_add contained within a method.
"""
template = """
public class Lambda {
public static void main(String args[]) {
%s
}
}
"""
return template % content_to_add
def filter_type_in_method(clazz, the_type, method_name):
""" yields the result of filtering the given class for the given
type inside the given method identified by its name.
"""
for path, node in clazz.filter(the_type):
for p in reversed(path):
if isinstance(p, tree.MethodDeclaration):
if p.name == method_name:
yield path, node
class LambdaSupportTest(unittest.TestCase):
""" Contains tests for java 8 lambda syntax. """
def assert_contains_lambda_expression_in_m(
self, clazz, method_name='main'):
""" asserts that the given tree contains a method with the supplied
method name containing a lambda expression.
"""
matches = list(filter_type_in_method(
clazz, tree.LambdaExpression, method_name))
if not matches:
self.fail('No matching lambda expression found.')
return matches
def test_lambda_support_no_parameters_no_body(self):
""" tests support for lambda with no parameters and no body. """
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class("() -> {};")))
def test_lambda_support_no_parameters_expression_body(self):
""" tests support for lambda with no parameters and an
expression body.
"""
test_classes = [
setup_java_class("() -> 3;"),
setup_java_class("() -> null;"),
setup_java_class("() -> { return 21; };"),
setup_java_class("() -> { System.exit(1); };"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_lambda_support_no_parameters_complex_expression(self):
""" tests support for lambda with no parameters and a
complex expression body.
"""
code = """
() -> {
if (true) return 21;
else
{
int result = 21;
return result / 2;
}
};"""
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class(code)))
def test_parameter_no_type_expression_body(self):
""" tests support for lambda with parameters with inferred types. """
test_classes = [
setup_java_class("(bar) -> bar + 1;"),
setup_java_class("bar -> bar + 1;"),
            setup_java_class("x -> x.length();"),
            setup_java_class("y -> { y.boom(); };"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_parameter_with_type_expression_body(self):
""" tests support for lambda with parameters with formal types. """
test_classes = [
setup_java_class("(int foo) -> { return foo + 2; };"),
setup_java_class("(String s) -> s.length();"),
setup_java_class("(int foo) -> foo + 1;"),
setup_java_class("(Thread th) -> { th.start(); };"),
setup_java_class("(String foo, String bar) -> "
"foo + bar;"),
]
for test_class in test_classes:
clazz = parse.parse(test_class)
self.assert_contains_lambda_expression_in_m(clazz)
def test_parameters_with_no_type_expression_body(self):
""" tests support for multiple lambda parameters
that are specified without their types.
"""
self.assert_contains_lambda_expression_in_m(
parse.parse(setup_java_class("(x, y) -> x + y;")))
def test_parameters_with_mixed_inferred_and_declared_types(self):
""" this tests that lambda type specification mixing is considered
invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(x, int y) -> x+y;"))
def test_parameters_inferred_types_with_modifiers(self):
""" this tests that lambda inferred type parameters with modifiers are
considered invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(x, final y) -> x+y;"))
def test_invalid_parameters_are_invalid(self):
""" this tests that invalid lambda parameters are are
considered invalid as per the specifications.
"""
with self.assertRaises(parser.JavaSyntaxError):
parse.parse(setup_java_class("(a b c) -> {};"))
def test_cast_works(self):
""" this tests that a cast expression works as expected. """
parse.parse(setup_java_class("String x = (String) A.x() ;"))
class MethodReferenceSyntaxTest(unittest.TestCase):
""" Contains tests for java 8 method reference syntax. """
def assert_contains_method_reference_expression_in_m(
self, clazz, method_name='main'):
""" asserts that the given class contains a method with the supplied
method name containing a method reference.
"""
matches = list(filter_type_in_method(
clazz, tree.MethodReference, method_name))
if not matches:
self.fail('No matching method reference found.')
return matches
def test_method_reference(self):
""" tests that method references are supported. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::length;")))
def test_method_reference_to_the_new_method(self):
""" test support for method references to 'new'. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::new;")))
def test_method_reference_to_the_new_method_with_explict_type(self):
""" test support for method references to 'new' with an
explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String::<String> new;")))
def test_method_reference_from_super(self):
""" test support for method references from 'super'. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("super::toString;")))
def test_method_reference_from_super_with_identifier(self):
""" test support for method references from Identifier.super. """
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("String.super::toString;")))
@unittest.expectedFailure
def test_method_reference_explicit_type_arguments_for_generic_type(self):
""" currently there is no support for method references
for an explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("List<String>::size;")))
def test_method_reference_explicit_type_arguments(self):
""" test support for method references with an explicit type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("Arrays::<String> sort;")))
@unittest.expectedFailure
def test_method_reference_from_array_type(self):
""" currently there is no support for method references
from a primary type.
"""
self.assert_contains_method_reference_expression_in_m(
parse.parse(setup_java_class("int[]::new;")))
class InterfaceSupportTest(unittest.TestCase):
""" Contains tests for java 8 interface extensions. """
def test_interface_sup
|
ms83/pydevp2p
|
devp2p/tests/test_kademlia_protocol.py
|
Python
|
mit
| 13,421 | 0.000522 |
# -*- coding: utf-8 -*-
import random
import time
from devp2p.utils import int_to_big_endian
from devp2p import kademlia
import pytest
import gevent
random.seed(42)
class WireMock(kademlia.WireInterface):
messages = [] # global messages
def __init__(self, sender):
assert isinstance(sender, kademlia.Node)
self.sender = sender
assert not self.messages
@classmethod
def empty(cls):
while cls.messages:
cls.messages.pop()
def send_ping(self, node):
echo = hex(random.randint(0, 2**256))[-32:]
self.messages.append((node, 'ping', self.sender, echo))
return echo
def send_pong(self, node, echo):
self.messages.append((node, 'pong', self.sender, echo))
def send_find_node(self, node, nodeid):
self.messages.append((node, 'find_node', self.sender, nodeid))
def send_neighbours(self, node, neighbours):
self.messages.append((node, 'neighbours', self.sender, neighbours))
def poll(self, node):
for i, x in enumerate(self.messages):
if x[0] == node:
del self.messages[i]
return x[1:]
def process(self, kademlia_protocols, steps=0):
"""
        process messages until none are left,
        or process only `steps` messages if steps > 0
"""
i = 0
proto_by_node = dict((p.this_node, p) for p in kademlia_protocols)
while self.messages:
msg = self.messages.pop(0)
assert isinstance(msg[2], kademlia.Node)
target = proto_by_node[msg[0]]
cmd = 'recv_' + msg[1]
getattr(target, cmd)(*msg[2:])
i += 1
if steps and i == steps:
return # messages may be left
assert not self.messages
def random_pubkey():
pk = int_to_big_endian(random.getrandbits(kademlia.k_pubkey_size))
return '\x00' * (kademlia.k_pubkey_size / 8 - len(pk)) + pk
def random_node():
return kademlia.Node(random_pubkey())
def routing_table(num_nodes=1000):
node = random_node()
routing = kademlia.RoutingTable(node)
for i in range(num_nodes):
routing.add_node(random_node())
assert len(routing.buckets) <= i + 2
assert len(routing.buckets) <= 512
assert i == num_nodes - 1
return routing
def get_wired_protocol():
this_node = random_node()
return kademlia.KademliaProtocol(this_node, WireMock(this_node))
def test_bootstrap():
proto = get_wired_protocol()
wire = proto.wire
other = routing_table()
# lookup self
proto.bootstrap(nodes=[other.this_node])
msg = wire.poll(other.this_node)
assert msg == ('find_node', proto.routing.this_node, proto.routing.this_node.id)
assert wire.poll(other.this_node) is None
assert wire.messages == []
def test_setup():
"""
    nodes connect to any peer and do a lookup for themselves
"""
proto = get_wired_protocol()
wire = proto.wire
other = routing_table()
# lookup self
proto.bootstrap(nodes=[other.this_node])
msg = wire.poll(other.this_node)
assert msg == ('find_node', proto.routing.this_node, proto.routing.this_node.id)
assert wire.poll(other.this_node) is None
assert wire.messages == []
# respond with neighbours
closest = other.neighbours(msg[2])
assert len(closest) == kademlia.k_bucket_size
proto.recv_neighbours(random_node(), closest)
# expect 3 lookups
for i in range(kademlia.k_find_concurrency):
msg = wire.poll(closest[i])
assert msg == ('find_node', proto.routing.this_node, proto.routing.this_node.id)
# and pings for all nodes
for node in closest:
msg = wire.poll(node)
assert msg[0] == 'ping'
# nothing else
assert wire.messages == []
@pytest.mark.timeout(5)
@pytest.mark.xfail
def test_find_node_timeout():
proto = get_wired_protocol()
other = routing_table()
wire = proto.wire
# lookup self
proto.bootstrap(nodes=[other.this_node])
msg = wire.poll(other.this_node)
assert msg == ('find_node', proto.routing.this_node, proto.routing.this_node.id)
assert wire.poll(other.this_node) is None
assert wire.messages == []
# do timeout
gevent.sleep(kademlia.k_request_timeout)
# respond with neighbours
closest = other.neighbours(msg[2])
assert len(closest) == kademlia.k_bucket_size
proto.recv_neighbours(random_node(), closest)
# expect pings, but no other lookup
msg = wire.poll(closest[0])
assert msg[0] == 'ping'
assert wire.poll(closest[0]) is None
wire.empty()
assert wire.messages == []
def test_eviction():
proto = get_wired_protocol()
proto.routing = routing_table(1000)
wire = proto.wire
# trigger node ping
node = proto.routing.neighbours(random_node())[0]
proto.ping(node)
msg = wire.poll(node)
assert msg[0] == 'ping'
assert wire.messages == []
proto.recv_pong(node, msg[2])
# expect no message and that node is still there
assert wire.messages == []
assert node in proto.routing
# expect node to be on the tail
assert proto.routing.bucket_by_node(node).tail == node
@pytest.mark.timeout(5)
@pytest.mark.xfail
def test_eviction_timeout():
proto = get_wired_protocol()
proto.routing = routing_table(1000)
wire = proto.wire
# trigger node ping
node = proto.routing.neighbours(random_node())[0]
proto.ping(node)
msg = wire.poll(node)
assert msg[0] == 'ping'
assert wire.messages == []
gevent.sleep(kademlia.k_request_timeout)
proto.recv_pong(node, msg[2])
# expect no message and that is not there anymore
assert wire.messages == []
assert node not in proto.routing
# expect node not to be in the replacement_cache
assert node not in proto.routing.bucket_by_node(node).replacement_cache
def test_eviction_node_active():
"""
active nodes (replying in time) should not be evicted
"""
proto = get_wired_protocol()
    proto.routing = routing_table(10000)  # set high, so add won't split
wire = proto.wire
# get a full bucket
full_buckets = [b for b in proto.routing.buckets if b.is_full and not b.should_split]
assert full_buckets
bucket = full_buckets[0]
assert not bucket.should_split
assert len(bucket) == kademlia.k_bucket_size
bucket_nodes = bucket.nodes[:]
eviction_candidate = bucket.head
    # create node to insert
node = random_node()
node.id = bucket.start + 1 # should not split
assert bucket.in_range(node)
assert bucket == proto.routing.bucket_by_node(node)
# insert node
proto.update(node)
# expect bucket was not split
assert len(bucket) == kademlia.k_bucket_size
# expect bucket to be unchanged
assert bucket_nodes == bucket.nodes
assert eviction_candidate == bucket.head
# expect node not to be in bucket yet
assert node not in bucket
assert node not in proto.routing
# expect a ping to bucket.head
msg = wire.poll(eviction_candidate)
assert msg[0] == 'ping'
assert msg[1] == proto.this_node
assert len(proto._expected_pongs) == 1
expected_pingid = proto._expected_pongs.keys()[0]
assert len(expected_pingid) == 96
echo = expected_pingid[:32]
assert len(echo) == 32
assert wire.messages == []
# reply in time
# can not check w/o mcd
print 'sending pong'
proto.recv_pong(eviction_candidate, echo)
# expect no other messages
assert wire.messages == []
# expect node was not added
assert node not in proto.routing
# eviction_candidate is around and was promoted to bucket.tail
assert eviction_candidate in proto.routing
assert eviction_candidate == bucket.tail
# expect node to be in the replacement_cache
assert node in bucket.replacement_cache
@pytest.mark.timeout(5)
@pytest.mark.xfail
def test_eviction_node_inactive():
"""
    inactive nodes (not replying in time) should be evicted
"""
proto = get_wired_protocol()
proto.routing = routing_table(10000) # set high, so add won't split
wire = proto.wire
# get a full bu
|
nanfeng1101/Seq2Seq
|
pytorch_models/models/beam_search.py
|
Python
|
mit
| 4,237 | 0.001652 |
# -*- coding:utf-8 -*-
__author__ = 'chenjun'
import torch
from torch.autograd import Variable
from utils.util import *
"""Beam search module.
Beam search takes the top K results from the model, predicts the K results for
each of the previous K result, getting K*K results. Pick the top K results from
K*K results, and start over again until certain number of results are fully
decoded.
"""
class Hypothesis(object):
"""Defines a hypothesis during beam search."""
def __init__(self, tokens, log_prob, state):
"""Hypothesis constructor.
Args:
tokens: start tokens for decoding.
log_prob: log prob of the start tokens, usually 1.
state: decoder state.
"""
self.tokens = tokens
self.log_prob = log_prob
self.state = state
def extend(self, token, log_prob, new_state):
"""Extend the hypothesis with result from latest step.
Args:
token: latest token from decoding.
log_prob: log prob of the latest decoded tokens.
new_state: decoder output state. Fed to the decoder for next step.
Returns:
New Hypothesis with the results from latest step.
"""
return Hypothesis(self.tokens + [token], self.log_prob + log_prob, new_state)
@property
def latest_token(self):
return self.tokens[-1]
@property
def sequence_tokens(self):
return self.tokens
@property
def decode_state(self):
return self.state
class BeamSearch(object):
"""Beam search for generation."""
def __init__(self, vocab_size, beam_size, state=None):
"""
beam search init.
:param vocab_size: target vocab size
:param beam_size: beam size
"""
self.beam_size = beam_size
self.vocab_size = vocab_size
self.hypothesis = [Hypothesis([], 0.0, state)] * self.beam_size
self.results = []
def top_hypothesis(self, hypothesis, normalize=False):
"""
sort the hypothesis list based on log_probs and length.
:param hypothesis: list of hypothesis
:param normalize: bool, normalized by length, only for last search to output
:return:
"""
# This length normalization is only effective for the final results.
if normalize:
return sorted(hypothesis, key=lambda h: h.log_prob/len(h.tokens), reverse=True)
else:
return sorted(hypothesis, key=lambda h: h.log_prob, reverse=True)
def variable(self, token):
"""
convert token to torch variable.
:param token: int
        :return:
"""
return Variable(torch.LongTensor([[token]]))
def beam_search(self, inputs):
"""
beam search to generate sequence.
:param inputs: list of decoder outputs, (decoder_out, decode_state)
:return:
"""
all_hypothesis = []
for i, (input, state) in enumerate(inputs):
top_log_probs, top_tokens = input.data.topk(self.vocab_size)
for j in xrange(self.beam_size*2):
token = top_tokens[0][j] # value
log_prob = top_log_probs[0][j] # value
all_hypothesis.append(self.hypothesis[i].extend(token, log_prob, state))
# Filter and collect any hypotheses that have the end token.
self.hypothesis = []
for h in self.top_hypothesis(all_hypothesis):
if h.latest_token == EOS_token:
# Pull the hypothesis off the beam if the end token is reached.
self.results.append(h)
else:
# Otherwise continue to the extend the hypothesis.
self.hypothesis.append(h)
if len(self.hypothesis) == self.beam_size or len(self.results) == self.beam_size:
break
outputs = [(self.variable(hyp.latest_token), hyp.decode_state) for hyp in self.hypothesis]
return outputs
def generate(self, num):
"""
return top num of generated sequence tokens.
:return:
"""
generates = [hyp.sequence_tokens for hyp in self.top_hypothesis(self.results, normalize=True)[:num]]
return generates
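# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal sketch of how Hypothesis.extend accumulates tokens and log
# probabilities during the search. Decoder states are stubbed with None and the
# token ids / log probs below are made-up example values.
if __name__ == '__main__':
    h0 = Hypothesis(tokens=[1], log_prob=0.0, state=None)   # start token
    h1 = h0.extend(token=5, log_prob=-0.7, new_state=None)  # first beam step
    h2 = h1.extend(token=9, log_prob=-0.2, new_state=None)  # second beam step
    assert h2.sequence_tokens == [1, 5, 9]
    assert abs(h2.log_prob - (-0.9)) < 1e-9
    assert h2.latest_token == 9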
|
aehlke/epywing
|
src/epywing/utils/test.py
|
Python
|
gpl-3.0
| 527 | 0.009747 |
# -*- coding: utf-8 -*-
import subprocess
import os
cmd=['/Users/jehlke/workspace/epywing/src/epywing/utils/mecab/bin/mecab',
     '-Owakati', '--dicdir=mecab/dic/ipadic']
#cmd = ['mecab', '-Owakati', '--dicdir=../dic/ipadic']
a = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
a.stdin.write(u'何~これですか what is that HUH OK I SEE ?\n\n'.encode('utf8'))
a.stdin.flush()
b = unicode(a.stdout.readline().decode('utf8'))
print 'test'
print b.strip()#.split()
print 'test'
|
RaD/django-south
|
south/migration/migrators.py
|
Python
|
apache-2.0
| 12,278 | 0.001547 |
from __future__ import print_function
from copy import copy, deepcopy
import datetime
import inspect
import sys
import traceback
from django.core.management import call_command
from django.core.management.commands import loaddata
from django.db import models
import south.db
from south import exceptions
from south.db import DEFAULT_DB_ALIAS
from south.models import MigrationHistory
from south.signals import ran_migration
from south.utils.py3 import StringIO
class Migrator(object):
def __init__(self, verbosity=0, interactive=False):
self.verbosity = int(verbosity)
self.interactive = bool(interactive)
@staticmethod
def title(target):
raise NotImplementedError()
def print_title(self, target):
if self.verbosity:
print(self.title(target))
@staticmethod
def status(target):
raise NotImplementedError()
def print_status(self, migration):
status = self.status(migration)
if self.verbosity and status:
print(status)
@staticmethod
def orm(migration):
raise NotImplementedError()
def backwards(self, migration):
return self._wrap_direction(migration.backwards(), migration.prev_orm())
def direction(self, migration):
raise NotImplementedError()
@staticmethod
def _wrap_direction(direction, orm):
args = inspect.getargspec(direction)
if len(args[0]) == 1:
# Old migration, no ORM should be passed in
return direction
return (lambda: direction(orm))
@staticmethod
def record(migration, database):
raise NotImplementedError()
def run_migration_error(self, migration, extra_info=''):
return (
' ! Error found during real run of migration! Aborting.\n'
'\n'
' ! Since you have a database that does not support running\n'
' ! schema-altering statements in transactions, we have had \n'
' ! to leave it in an interim state between migrations.\n'
'%s\n'
' ! The South developers regret this has happened, and would\n'
' ! like to gently persuade you to consider a slightly\n'
' ! easier-to-deal-with DBMS (one that supports DDL transactions)\n'
' ! NOTE: The error which caused the migration to fail is further up.'
) % extra_info
def run_migration(self, migration, database):
migration_function = self.direction(migration)
south.db.db.start_transaction()
try:
migration_function()
south.db.db.execute_deferred_sql()
if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
# record us as having done this in the same transaction,
# since we're not in a dry run
self.record(migration, database)
except:
south.db.db.rollback_transaction()
if not south.db.db.has_ddl_transactions:
print(self.run_migration_error(migration))
print("Error in migration: %s" % migration)
raise
else:
try:
south.db.db.commit_transaction()
except:
print("Error during commit in migration: %s" % migration)
raise
def run(self, migration, database):
# Get the correct ORM.
south.db.db.current_orm = self.orm(migration)
# If we're not already in a dry run, and the database doesn't support
# running DDL inside a transaction, *cough*MySQL*cough* then do a dry
# run first.
if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
if not south.db.db.has_ddl_transactions:
dry_run = DryRunMigrator(migrator=self, ignore_fail=False)
dry_run.run_migration(migration, database)
return self.run_migration(migration, database)
def send_ran_migration(self, migration, database):
ran_migration.send(None,
app=migration.app_label(),
migration=migration,
method=self.__class__.__name__.lower(),
verbosity=self.verbosity,
interactive=self.interactive,
db=database)
def migrate(self, migration, database):
"""
Runs the specified migration forwards/backwards, in order.
"""
app = migration.migrations._migrations
migration_name = migration.name()
self.print_status(migration)
result = self.run(migration, database)
self.send_ran_migration(migration, database)
return result
def migrate_many(self, target, migrations, database):
raise NotImplementedError()
class MigratorWrapper(object):
def __init__(self, migrator, *args, **kwargs):
self._migrator = copy(migrator)
attributes = dict([(k, getattr(self, k))
for k in self.__class__.__dict__
if not k.startswith('__')])
self._migrator.__dict__.update(attributes)
self._migrator.__dict__['_wrapper'] = self
def __getattr__(self, name):
return getattr(self._migrator, name)
class DryRunMigrator(MigratorWrapper):
def __init__(self, ignore_fail=True, *args, **kwargs):
super(DryRunMigrator, self).__init__(*args, **kwargs)
self._ignore_fail = ignore_fail
def _run_migration(self, migration):
if migration.no_dry_run():
if self.verbosity:
print(" - Migration '%s' is marked for no-dry-run." % migration)
return
south.db.db.dry_run = True
# preserve the constraint cache as it can be mutated by the dry run
constraint_cache = deepcopy(south.db.db._constraint_cache)
if self._ignore_fail:
south.db.db.debug, old_debug = False, south.db.db.debug
pending_creates = south.db.db.get_pending_creates()
south.db.db.start_transaction()
migration_function = self.direction(migration)
try:
try:
migration_function()
south.db.db.execute_deferred_sql()
except:
raise exceptions.FailedDryRun(migration, sys.exc_info())
finally:
south.db.db.rollback_transactions_dry_run()
if self._ignore_fail:
south.db.db.debug = old_debug
south.db.db.clear_run_data(pending_creates)
south.db.db.dry_run = False
# restore the preserved constraint cache from before dry run was
# executed
south.db.db._constraint_cache = constraint_cache
def run_migration(self, migration, database):
try:
self._run_migration(migration)
except exceptions.FailedDryRun:
if self._ignore_fail:
return False
raise
def send_ran_migration(self, *args, **kwargs):
pass
class FakeMigrator(MigratorWrapper):
def run(self, migration, database):
# Don't actually run, just record as if ran
self.record(migration, database)
if self.verbosity:
print(' (faked)')
def send_ran_migration(self, *args, **kwargs):
pass
class LoadInitialDataMigrator(MigratorWrapper):
def load_initial_data(self, target, db='default'):
if target is None or target != target.migrations[-1]:
return
# Load initial data, if we ended up at target
if self.verbosity:
print(" - Loading initial data for %s." % target.app_label())
# Override Django's get_apps call temporarily to only load from the
# current app
old_get_apps = models.get_apps
new_get_apps = lambda: [models.get_app(target.app_label())]
models.get_apps = new_get_apps
loaddata.get_apps = new_get_apps
try:
call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=db)
finally:
models.get_apps = old_get_apps
|
vandys/nowplaying
|
reports/disptime.py
|
Python
|
unlicense
| 107 | 0.009346 |
#!/usr/bin/python
import sys, time
for ts in sys.argv[1:]:
print ts, time.ctime(float(ts))
sys.exit(0)
|
MelissaChan/Crawler_Facebook
|
Crawler/facebook_mysql.py
|
Python
|
mit
| 1,040 | 0.021236 |
# __author__ = MelissaChan
# -*- coding: utf-8 -*-
# 16-4-16 下午10:53
import MySQLdb
def connect(id,name,gender,region,status,date,inter):
try:
conn = MySQLdb.connect(host='localhost',user='root',passwd=' ',port=3306)
cur = conn.cursor()
# cur.execute('create database if not exists PythonDB')
conn.select_db('Facebook')
# cur.execute('create table Test(id int,name varchar(20),info varchar(20))')
value = [id,name,gender,region,status,date,inter]
cur.execute('insert into info values(%s,%s,%s,%s,%s,%s,%s)',value)
# values = []
# for i in range(20):
# values.append((i,'Hello World!','My number is '+str(i)))
#
# cur.executemany('insert into Test values(%s,%s,%s)',values)
# cur.execute('update Test set name="ACdreamer" where id=3')
conn.commit()
cur.close()
conn.close()
print 'insert ok~'
except MySQLdb.Error,msg:
print "MySQL Error %d: %s" %(msg.args[0],msg.args[1])
|
pyrocko/pyrocko
|
maintenance/docstring_cop.py
|
Python
|
gpl-3.0
| 543 | 0.01105 |
import sys, re
for fn in sys.argv[1:]:
with open(fn, 'r') as f:
s = f
|
.read()
xx = re.findall(r'([^\n]+)\s+\'\'\'(.*?)\'\'\'', s, re.M|re.S)
for (obj, doc) in xx:
s = re.findall('[^:`]\B(([`*])[a-zA-Z_][a-zA-Z0-9_]*\\2)\B', doc)
if s:
print '-'*50
print fn, obj
print '.'*50
            print doc
print '.'*50
print [ss[0] for ss in s]
# for vim:
# :s/\([^`:]\)\([`*]\)\([a-zA-Z0-9_]\+\)\2/\1``\3``/
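# --- Illustrative usage (editor's addition) ---
# The script reads the file names to scan from sys.argv, so a typical
# (assumed) invocation would be:
#   python docstring_cop.py path/to/module.py another_module.py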
|
apanda/modeling
|
tests/examples/AclContentCacheTest.py
|
Python
|
bsd-3-clause
| 1,990 | 0.011055 |
import components
def AclContentCacheTest ():
"""ACL content cache test"""
ctx = components.Context (['a', 'b', 'c', 'd', 'cc', 'f'],\
['ip_a', 'ip_b', 'ip_c', 'ip_d', 'ip_cc', 'ip_f
|
'])
net = components.Network (ctx)
a = components.EndHost(ctx.a, net, ctx)
b = components.EndHost(ctx.b, net, ctx)
c = components.EndHost(ctx.c, net, ctx)
    d = components.EndHost(ctx.d, net, ctx)
cc = components.AclContentCache(ctx.cc, net, ctx)
f = components.AclFirewall(ctx.f, net, ctx)
net.setAddressMappings([(a, ctx.ip_a), \
(b, ctx.ip_b), \
(c, ctx.ip_c), \
(d, ctx.ip_d), \
(f, ctx.ip_f), \
(cc, ctx.ip_cc)])
addresses = [ctx.ip_a, ctx.ip_b, ctx.ip_c, ctx.ip_d, ctx.ip_cc, ctx.ip_f]
net.RoutingTable(a, [(x, f) for x in addresses])
net.RoutingTable(b, [(x, f) for x in addresses])
net.RoutingTable(c, [(x, f) for x in addresses])
net.RoutingTable(d, [(x, f) for x in addresses])
net.RoutingTable(f, [(x, cc) for x in addresses])
net.RoutingTable(cc, [(ctx.ip_a, a), \
(ctx.ip_b, b), \
(ctx.ip_c, c), \
(ctx.ip_d, d)])
net.Attach(a, b, c, d, cc)
endhosts = [a, b, c, d]
f.AddAcls([(ctx.ip_a, ctx.ip_b), (ctx.ip_c, ctx.ip_d)])
cc.AddAcls([(ctx.ip_a, ctx.ip_b), (ctx.ip_c, ctx.ip_d)])
net.Attach(a, b, c, d, cc, f)
endhosts = [a, b, c, d]
class AclContentCacheReturn (object):
def __init__ (self, net, ctx, a, b, c, d, cc, f):
self.net = net
self.ctx = ctx
self.a = a
self.b = b
self.c = c
self.d = d
self.cc = cc
self.f = f
self.check = components.PropertyChecker (ctx, net)
return AclContentCacheReturn(net, ctx, a, b, c, d, cc, f)
|
ltowarek/budget-supervisor
|
third_party/saltedge/test/test_rates_response.py
|
Python
|
mit
| 900 | 0 |
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.rates_response import RatesResponse  # noqa: E501
from swagger_client.rest import ApiException
class TestRatesResponse(unittest.TestCase):
"""RatesResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRatesResponse(self):
"""Test RatesResponse"""
# FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.rates_response.RatesResponse()  # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
dudakp/rasPi_systemInfo
|
lib_si1145/lib_si1145.py
|
Python
|
mit
| 2,472 | 0.045307 |
#!/usr/bin/python
from ops_i2cbase import I2CBase
# ===========================================================================
# SI1145 Class
#
# Ported from github.com/adafruit/Adafruit_SI1145_Library/
# ===========================================================================
class SI1145:
i2c = None
# SI1145 Address
address = 0x60
# Commands
SI1145_PARAM_QUERY = 0x80
SI1145_PARAM_SET = 0xA0
SI1145_PSALS_AUTO = 0x0F
# Parameters
    SI1145_PARAM_I2CADDR = 0x00
SI1145_PARAM_CHLIST = 0x01
SI1145_PARAM_CHLIST_ENUV = 0x80
SI1145_PARAM_CHLIST_ENAUX = 0x40
SI1145_PARAM_CHLIST_ENALSIR = 0x20
SI1145_PARAM_CHLIST_ENALSVIS = 0x10
SI1145_PARAM_CHLIST_ENPS1 = 0x01
SI1145_PARAM_CHLIST_ENPS2 = 0x02
SI1145_PARAM_CHLIST_ENPS3 = 0x04
# Registers
SI1145_REG_PARTID = 0x00
SI1145_REG_UCOEFF0 = 0x13
SI1145_REG_UCOEFF1 = 0x14
SI1145_REG_UCOEFF2 = 0x15
SI1145_REG_UCOEFF3 = 0x16
SI1145_REG_PARAMWR = 0x17
SI1145_REG_COMMAND = 0x18
SI1145_REG_MEASRATE0 = 0x08
SI1145_REG_MEASRATE1 = 0x09
# Constructor
def __init__(self):
# I2C
self.i2c = I2CBase(self.address)
id = self.i2c.readU8(self.SI1145_REG_PARTID)
if (id != 0x45):
print "SI1145 is not found"
# to enable UV reading, set the EN_UV bit in CHLIST, and configure UCOEF [0:3] to the default values of 0x7B, 0x6B, 0x01, and 0x00.
self.i2c.write8(self.SI1145_REG_UCOEFF0, 0x7B)
self.i2c.write8(self.SI1145_REG_UCOEFF1, 0x6B)
self.i2c.write8(self.SI1145_REG_UCOEFF2, 0x01)
self.i2c.write8(self.SI1145_REG_UCOEFF3, 0x00)
# enable UV sensor
self.i2c.write8(self.SI1145_REG_PARAMWR, self.SI1145_PARAM_CHLIST_ENUV | self.SI1145_PARAM_CHLIST_ENALSIR | self.SI1145_PARAM_CHLIST_ENALSVIS | self.SI1145_PARAM_CHLIST_ENPS1)
self.i2c.write8(self.SI1145_REG_COMMAND, self.SI1145_PARAM_CHLIST | self.SI1145_PARAM_SET)
# measurement rate for auto
self.i2c.write8(self.SI1145_REG_MEASRATE0, 0xFF)
# auto run
self.i2c.write8(self.SI1145_REG_COMMAND, self.SI1145_PSALS_AUTO)
def readUVIndex(self):
"Read UV index data from sensor (UV index * 100)"
rawData = self.i2c.readU16(0x2C)
if rawData > 0x0258:
return 0x0258
else:
return rawData
def readAmbientLight(self):
"Read Ambient Light data from sensor (Visible light + IR) in lux"
rawData = self.i2c.readU16(0x22)
return rawData
def readIRLight(self):
"Read IR data from sensor in lux"
rawData = self.i2c.readU16(0x24)
return rawData
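# --- Illustrative usage (editor's addition, not part of the original module) ---
# Assumes an SI1145 breakout wired to the Raspberry Pi I2C bus and the
# ops_i2cbase helper importable; the printed readings depend on the sensor.
if __name__ == '__main__':
    sensor = SI1145()
    print "UV index (x100): %d" % sensor.readUVIndex()
    print "Visible + IR light (lux): %d" % sensor.readAmbientLight()
    print "IR light (lux): %d" % sensor.readIRLight()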
|
asterick/pytari
|
Palettes.py
|
Python
|
gpl-2.0
| 5,212 | 0.13891 |
#
# This is the container for the palettes. To change them
# simply edit this.
#
from numpy import *
NTSC = array([
[0x00,0x00,0x00],[0x40,0x40,0x40],[0x6C,0x6C,0x6C],[0x90,0x90,0x90],
[0xB0,0xB0,0xB0],[0xC8,0xC8,0xC8],[0xDC,0xDC,0xDC],[0xEC,0xEC,0xEC],
[0x44,0x44,0x00],[0x64,0x64,0x10],[0x84,0x84,0x24],[0xA0,0xA0,0x34],
    [0xB8,0xB8,0x40],[0xD0,0xD0,0x50],[0xE8,0xE8,0x5C],[0xFC,0xFC,0x68],
[0x70,0x28,0x00],[0x84,0x44,0x14],[0x98,0x5C,0x28],[0xAC,0x78,0x3C],
[0xBC,0x8C,0x4C],[0xCC,0xA0,0x5C],[0xDC,0xB4,0x68],[0xEC,0xC8,0x78],
[0x84,0x18,0x00],[0x98,0x34,0x18],[0xAC,0x50,0x30],[0xC0,0x68,0x48],
[0xD0,0x80,0x5C],[0xE0,0x94,0x70],[0xEC,0xA8,0x80],[0xFC,0xBC,0x94],
    [0x88,0x00,0x00],[0x9C,0x20,0x20],[0xB0,0x3C,0x3C],[0xC0,0x58,0x58],
[0xD0,0x70,0x70],[0xE0,0x88,0x88],[0xEC,0xA0,0xA0],[0xFC,0xB4,0xB4],
[0x78,0x00,0x5C],[0x8C,0x20,0x74],[0xA0,0x3C,0x88],[0xB0,0x58,0x9C],
[0xC0,0x70,0xB0],[0xD0,0x84,0xC0],[0xDC,0x9C,0xD0],[0xEC,0xB0,0xE0],
[0x48,0x00,0x78],[0x60,0x20,0x90],[0x78,0x3C,0xA4],[0x8C,0x58,0xB8],
[0xA0,0x70,0xCC],[0xB4,0x84,0xDC],[0xC4,0x9C,0xEC],[0xD4,0xB0,0xFC],
[0x14,0x00,0x84],[0x30,0x20,0x98],[0x4C,0x3C,0xAC],[0x68,0x58,0xC0],
[0x7C,0x70,0xD0],[0x94,0x88,0xE0],[0xA8,0xA0,0xEC],[0xBC,0xB4,0xFC],
[0x00,0x00,0x88],[0x1C,0x20,0x9C],[0x38,0x40,0xB0],[0x50,0x5C,0xC0],
[0x68,0x74,0xD0],[0x7C,0x8C,0xE0],[0x90,0xA4,0xEC],[0xA4,0xB8,0xFC],
[0x00,0x18,0x7C],[0x1C,0x38,0x90],[0x38,0x54,0xA8],[0x50,0x70,0xBC],
[0x68,0x88,0xCC],[0x7C,0x9C,0xDC],[0x90,0xB4,0xEC],[0xA4,0xC8,0xFC],
[0x00,0x2C,0x5C],[0x1C,0x4C,0x78],[0x38,0x68,0x90],[0x50,0x84,0xAC],
[0x68,0x9C,0xC0],[0x7C,0xB4,0xD4],[0x90,0xCC,0xE8],[0xA4,0xE0,0xFC],
[0x00,0x3C,0x2C],[0x1C,0x5C,0x48],[0x38,0x7C,0x64],[0x50,0x9C,0x80],
[0x68,0xB4,0x94],[0x7C,0xD0,0xAC],[0x90,0xE4,0xC0],[0xA4,0xFC,0xD4],
[0x00,0x3C,0x00],[0x20,0x5C,0x20],[0x40,0x7C,0x40],[0x5C,0x9C,0x5C],
[0x74,0xB4,0x74],[0x8C,0xD0,0x8C],[0xA4,0xE4,0xA4],[0xB8,0xFC,0xB8],
[0x14,0x38,0x00],[0x34,0x5C,0x1C],[0x50,0x7C,0x38],[0x6C,0x98,0x50],
[0x84,0xB4,0x68],[0x9C,0xCC,0x7C],[0xB4,0xE4,0x90],[0xC8,0xFC,0xA4],
[0x2C,0x30,0x00],[0x4C,0x50,0x1C],[0x68,0x70,0x34],[0x84,0x8C,0x4C],
[0x9C,0xA8,0x64],[0xB4,0xC0,0x78],[0xCC,0xD4,0x88],[0xE0,0xEC,0x9C],
[0x44,0x28,0x00],[0x64,0x48,0x18],[0x84,0x68,0x30],[0xA0,0x84,0x44],
[0xB8,0x9C,0x58],[0xD0,0xB4,0x6C],[0xE8,0xCC,0x7C],[0xFC,0xE0,0x8C]
],uint8)
PAL = array([
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC],
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC],
[0x80,0x58,0x00],[0x94,0x70,0x20],[0xA8,0x84,0x3C],[0xBC,0x9C,0x58],
[0xCC,0xAC,0x70],[0xDC,0xC0,0x84],[0xEC,0xD0,0x9C],[0xFC,0xE0,0xB0],
[0x44,0x5C,0x00],[0x5C,0x78,0x20],[0x74,0x90,0x3C],[0x8C,0xAC,0x58],
[0xA0,0xC0,0x70],[0xB0,0xD4,0x84],[0xC4,0xE8,0x9C],[0xD4,0xFC,0xB0],
[0x70,0x34,0x00],[0x88,0x50,0x20],[0xA0,0x68,0x3C],[0xB4,0x84,0x58],
[0xC8,0x98,0x70],[0xDC,0xAC,0x84],[0xEC,0xC0,0x9C],[0xFC,0xD4,0xB0],
[0x00,0x64,0x14],[0x20,0x80,0x34],[0x3C,0x98,0x50],[0x58,0xB0,0x6C],
[0x70,0xC4,0x84],[0x84,0xD8,0x9C],[0x9C,0xE8,0xB4],[0xB0,0xFC,0xC8],
[0x70,0x00,0x14],[0x88,0x20,0x34],[0xA0,0x3C,0x50],[0xB4,0x58,0x6C],
[0xC8,0x70,0x84],[0xDC,0x84,0x9C],[0xEC,0x9C,0xB4],[0xFC,0xB0,0xC8],
[0x00,0x5C,0x5C],[0x20,0x74,0x74],[0x3C,0x8C,0x8C],[0x58,0xA4,0xA4],
[0x70,0xB8,0xB8],[0x84,0xC8,0xC8],[0x9C,0xDC,0xDC],[0xB0,0xEC,0xEC],
[0x70,0x00,0x5C],[0x84,0x20,0x74],[0x94,0x3C,0x88],[0xA8,0x58,0x9C],
[0xB4,0x70,0xB0],[0xC4,0x84,0xC0],[0xD0,0x9C,0xD0],[0xE0,0xB0,0xE0],
[0x00,0x3C,0x70],[0x1C,0x58,0x88],[0x38,0x74,0xA0],[0x50,0x8C,0xB4],
[0x68,0xA4,0xC8],[0x7C,0xB8,0xDC],[0x90,0xCC,0xEC],[0xA4,0xE0,0xFC],
[0x58,0x00,0x70],[0x6C,0x20,0x88],[0x80,0x3C,0xA0],[0x94,0x58,0xB4],
[0xA4,0x70,0xC8],[0xB4,0x84,0xDC],[0xC4,0x9C,0xEC],[0xD4,0xB0,0xFC],
[0x00,0x20,0x70],[0x1C,0x3C,0x88],[0x38,0x58,0xA0],[0x50,0x74,0xB4],
[0x68,0x88,0xC8],[0x7C,0xA0,0xDC],[0x90,0xB4,0xEC],[0xA4,0xC8,0xFC],
[0x3C,0x00,0x80],[0x54,0x20,0x94],[0x6C,0x3C,0xA8],[0x80,0x58,0xBC],
[0x94,0x70,0xCC],[0xA8,0x84,0xDC],[0xB8,0x9C,0xEC],[0xC8,0xB0,0xFC],
[0x00,0x00,0x88],[0x20,0x20,0x9C],[0x3C,0x3C,0xB0],[0x58,0x58,0xC0],
[0x70,0x70,0xD0],[0x84,0x84,0xE0],[0x9C,0x9C,0xEC],[0xB0,0xB0,0xFC],
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC],
[0x00,0x00,0x00],[0x28,0x28,0x28],[0x50,0x50,0x50],[0x74,0x74,0x74],
[0x94,0x94,0x94],[0xB4,0xB4,0xB4],[0xD0,0xD0,0xD0],[0xEC,0xEC,0xEC]
],uint8)
SECAM = repeat([[0x00,0x00,0x00],
[0x21,0x21,0xFF],
[0xF0,0x3C,0x79],
[0xFF,0x50,0xFF],
[0x7F,0xFF,0x00],
[0x7F,0xFF,0xFF],
[0xFF,0xFF,0x3F],
[0xFF,0xFF,0xFF]],16).astype(uint8)
|
morissette/devopsdays-hackathon-2016
|
venv/bin/rst2pseudoxml.py
|
Python
|
gpl-3.0
| 635 | 0.001575 |
#!/home/mharris/Projects/DevOpsDays/venv/bin/python2
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates pseudo-XML from standalone reStructuredText '
'sources (for testing purposes). ' + default_description)
publish_cmdline(description=description)
|
Acebulf/HockeyPythonScripts
|
schedule.py
|
Python
|
mit
| 3,298 | 0.006064 |
#Schedule-generator for LHL use written by Acebulf (acebulf at gmail.com)
#Current version 1.0 -- Jan 16 2014
#Copyrighted under the MIT License (see License included in the github repo)
import random
import time
while 1:
print "Starting random-schedule generation process..."
starttime = time.time()
kill = False
schedule = [[]]*30
teams = ["BOS", "CHI", "COL", "DET", "NJD", "WSH"]
# Randomly Choose Team
team1 = random.choice(teams)
teams_mt1 = list(teams)
teams_mt1.remove(team1)
matchups = []
for x in teams_mt1:
for y in xrange(6):
matchups.append((team1,x))
random.shuffle(matchups)
for x in xrange(30):
schedule[x]=[matchups[x]]
team2 = random.choice(teams_mt1)
teams_2 = list(teams_mt1)
teams_2.remove(team2)
matchups=[]
for x in teams_2:
for y in xrange(6):
matchups.append((team2,x))
random.shuffle(matchups)
days = range(30)
def playing_day(team, day):
occupied = [i[0] for i in day] + [i[1] for i in day]
return (team in occupied)
for matchup in matchups:
while 1:
temp_day = random.choice(days)
if time.time()-starttime >= 4:
kill = True
break
if not playing_day(matchup[0],schedule[temp_day]) and not playing_day(matchup[1],schedule[temp_day]):
schedule[temp_day].append(matchup)
days.remove(temp_day)
break
if kill:
print "Error in stage 1; restarting"
continue
print "Stage 1/3 Successfully Completed!"
days2games = list(schedule)
days1game = []
try:
for x in xrange(30):
if len(days2games[x]) == 1:
days1game.append(days2games.pop(x))
except IndexError:
pass
    team3 = random.choice(teams_2)
teams_3 = list(teams_2)
teams_3.remove(team3)
matchups=[]
for x in teams_3:
matchups.append((team3,x))
team4 = random.choice(teams_3)
teams_4 = list(teams_3)
teams_4.remove(team4)
for x in teams_4:
matchups.append((team4,x))
    matchups.append((teams_4[0],teams_4[1]))
for x in days2games:
for y in matchups:
if not playing_day(y[0],x) and not playing_day(y[1],x):
x.append(y)
newmatchups = []
for x in matchups:
newmatchups.append(x)
newmatchups.append(x)
random.shuffle(newmatchups)
print "Stage 2/3 Successfully Completed!"
for x in days1game:
for y in newmatchups:
if not playing_day(y[0],x) and not playing_day(y[1],x):
x.append(y)
newmatchups.remove(y)
for x in schedule:
if len(x) != 3:
print "Problem encountered in stage 3; Restarting..."
kill=True
break
if kill:
continue
print "Stage 3/3 Successfully Completed"
break
print "Schedule Successfully Generated"
print "Printing to File..."
f = open("schedule.txt","w")
dayno = 0
while dayno <= 29:
f.write("Day {0}:\n".format(dayno+1))
for x in schedule[dayno]:
f.write(x[0] + " - " + x[1]+"\n")
f.write("\n")
dayno += 1
print "Result written to file. Program terminating."
|
royalharsh/grpc
|
src/python/grpcio_tests/tests/unit/_empty_message_test.py
|
Python
|
bsd-3-clause
| 5,056 | 0.000198 |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
#     contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
_REQUEST = b''
_RESPONSE = b''
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
def handle_unary_unary(request, servicer_context):
return _RESPONSE
def handle_unary_stream(request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH):
yield _RESPONSE
def handle_stream_unary(request_iterator, servicer_context):
for request in request_iterator:
pass
return _RESPONSE
def handle_stream_stream(request_iterator, servicer_context):
for request in request_iterator:
yield _RESPONSE
class _MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = None
self.response_serializer = None
self.unary_unary = None
self.unary_stream = None
self.stream_unary = None
self.stream_stream = None
if self.request_streaming and self.response_streaming:
self.stream_stream = handle_stream_stream
elif self.request_streaming:
self.stream_unary = handle_stream_unary
elif self.response_streaming:
self.unary_stream = handle_unary_stream
else:
self.unary_unary = handle_unary_unary
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return _MethodHandler(False, False)
elif handler_call_details.method == _UNARY_STREAM:
return _MethodHandler(False, True)
elif handler_call_details.method == _STREAM_UNARY:
return _MethodHandler(True, False)
elif handler_call_details.method == _STREAM_STREAM:
return _MethodHandler(True, True)
else:
return None
class EmptyMessageTest(unittest.TestCase):
def setUp(self):
self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(
self._server_pool, handlers=(_GenericHandler(),))
port = self._server.add_insecure_port('[::]:0')
self._server.start()
self._channel = grpc.insecure_channel('localhost:%d' % port)
def tearDown(self):
self._server.stop(0)
def testUnaryUnary(self):
response = self._channel.unary_unary(_UNARY_UNARY)(_REQUEST)
self.assertEqual(_RESPONSE, response)
def testUnaryStream(self):
response_iterator = self._channel.unary_stream(_UNARY_STREAM)(_REQUEST)
self.assertSequenceEqual([_RESPONSE] * test_constants.STREAM_LENGTH,
list(response_iterator))
def testStreamUnary(self):
response = self._channel.stream_unary(_STREAM_UNARY)(
iter([_REQUEST] * test_constants.STREAM_LENGTH))
self.assertEqual(_RESPONSE, response)
def testStreamStream(self):
response_iterator = self._channel.stream_stream(_STREAM_STREAM)(
iter([_REQUEST] * test_constants.STREAM_LENGTH))
self.assertSequenceEqual([_RESPONSE] * test_constants.STREAM_LENGTH,
list(response_iterator))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
beni55/sentry
|
src/sentry/plugins/sentry_useragents/models.py
|
Python
|
bsd-3-clause
| 2,383 | 0.00042 |
"""
sentry.plugins.sentry_useragents.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import httpagentparser
import sentry
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import register
from sentry.plugins.bases.tag import TagPlugin
class UserAgentPlugin(TagPlugin):
version = sentry.VERSION
author = "Sentry Team"
author_url = "https://github.com/getsentry/sentry"
project_default_enabled = True
def get_tag_values(self, event):
http = event.interfaces.get('sentry.interfaces.Http')
if not http:
return []
if not http.headers:
return []
if 'User-Agent' not in http.headers:
return []
ua = httpagentparser.detect(http.headers['User-Agent'])
if not ua:
return []
        result = self.get_tag_from_ua(ua)
if not result:
return []
return [result]
class BrowserPlugin(UserAgentPlugin):
"""
Automatically adds the 'browser' tag from events containing interface data
    from ``sentry.interfaces.Http``.
"""
slug = 'browsers'
title = _('Auto Tag: Browsers')
tag = 'browser'
tag_label = _('Browser Name')
def get_tag_from_ua(self, ua):
if 'browser' not in ua:
return
tag = ua['browser']['name']
if 'version' in ua['browser']:
tag += ' ' + ua['browser']['version']
return tag
register(BrowserPlugin)
class OsPlugin(UserAgentPlugin):
"""
Automatically adds the 'os' tag from events containing interface data
    from ``sentry.interfaces.Http``.
"""
slug = 'os'
title = _('Auto Tag: Operating Systems')
tag = 'os'
tag_label = _('Operating System')
def get_tag_from_ua(self, ua):
if 'flavor' in ua:
tag = ua['flavor']['name']
if 'version' in ua['flavor']:
tag += ' ' + ua['version']
elif 'os' in ua:
# Linux
tag = ua['os']['name']
if 'version' in ua['os']:
tag += ' ' + ua['version']
elif 'dist' in ua:
# Ubuntu
tag += ua['dist']['name']
else:
return
return tag
register(OsPlugin)
|
TribeMedia/synapse
|
tests/events/test_utils.py
|
Python
|
apache-2.0
| 8,591 | 0.000582 |
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import unittest
from synapse.events import FrozenEvent
from synapse.events.utils import prune_event, serialize_event
def MockEvent(**kwargs):
if "event_id" not in kwargs:
kwargs["event_id"] = "fake_event_id"
if "type" not in kwargs:
kwargs["type"] = "fake_type"
return FrozenEvent(kwargs)
class PruneEventTestCase(unittest.TestCase):
""" Asserts that a new event constructed with `evdict` will look like
`matchdict` when it is redacted. """
def run_test(self, evdict, matchdict):
self.assertEquals(
prune_event(FrozenEvent(evdict)).get_dict(),
matchdict
)
def test_minimal(self):
self.run_test(
{
'type': 'A',
'event_id': '$test:domain',
},
{
'type': 'A',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
def test_basic_keys(self):
self.run_test(
{
'type': 'A',
'room_id': '!1:domain',
'sender': '@2:domain',
'event_id': '$3:domain',
'origin': 'domain',
},
{
'type': 'A',
'room_id': '!1:domain',
'sender': '@2:domain',
'event_id': '$3:domain',
'origin': 'domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
def test_unsigned_age_ts(self):
self.run_test(
{
'type': 'B',
'event_id': '$test:domain',
'unsigned': {'age_ts': 20},
},
{
'type': 'B',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {'age_ts': 20},
}
)
self.run_test(
{
'type': 'B',
'event_id': '$test:domain',
'unsigned': {'other_key': 'here'},
},
{
'type': 'B',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
def test_content(self):
self.run_test(
{
'type': 'C',
'event_id': '$test:domain',
'content': {'things': 'here'},
},
{
'type': 'C',
'event_id': '$test:domain',
'content': {},
'signatures': {},
'unsigned': {},
}
)
self.run_test(
{
'type': 'm.room.create',
'event_id': '$test:domain',
'content': {'creator': '@2:domain', 'other_field': 'here'},
},
{
'type': 'm.room.create',
'event_id': '$test:domain',
'content': {'creator': '@2:domain'},
'signatures': {},
'unsigned': {},
}
)
class SerializeEventTestCase(unittest.TestCase):
def serialize(self, ev, fields):
return serialize_event(ev, 1479807801915, only_event_fields=fields)
def test_event_fields_works_with_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar"
),
["room_id"]
),
{
"room_id": "!foo:bar",
}
)
def test_event_fields_works_with_nested_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"body": "A message",
},
),
["content.body"]
),
{
"content": {
"body": "A message",
}
}
)
def test_event_fields_works_with_dot_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"key.with.dots": {},
},
),
["content.key\.with\.dots"]
),
{
"content": {
"key.with.dots": {},
}
}
)
def test_event_fields_works_with_nested_dot_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"not_me": 1,
"nested.dot.key": {
"leaf.key": 42,
"not_me_either": 1,
},
},
),
["content.nested\.dot\.key.leaf\.key"]
),
{
"content": {
"nested.dot.key": {
"leaf.key": 42,
},
}
}
)
def test_event_fields_nops_with_unknown_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"foo": "bar",
},
),
["content.foo", "content.notexists"]
),
{
"content": {
"foo": "bar",
}
}
)
def test_event_fields_nops_with_non_dict_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"foo": ["I", "am", "an", "array"],
},
),
["content.foo.am"]
),
{}
)
def test_event_fields_nops_with_array_keys(self):
self.assertEquals(
self.serialize(
MockEvent(
sender="@alice:localhost",
room_id="!foo:bar",
content={
"foo": ["I", "am", "an", "array"],
},
),
["content.foo.1"]
),
{}
)
def test_event_fields_all_fields_if_empty(self):
self.assertEquals(
self.serialize(
MockEvent(
type="foo",
event_id="test",
room_id="!foo:bar",
content={
"foo": "bar",
},
),
[]
),
{
"type": "foo",
"event_id": "test",
"room_id": "!foo:bar",
"content": {
"foo": "bar",
|
PACKED-vzw/scoremodel
|
example_config.py
|
Python
|
gpl-2.0
| 1,348 | 0.002967 |
import os
BASEDIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
##
# Database settings
##
DB_HOST = 'localhost'
DB_NAME = 'scoremodel'
DB_USER = 'scoremodel'
DB_PASS = 'scoremodel'
##
# MySQL SSL connections
##
use_ssl = False
SSL_CA = '/etc/mysql/certs/ca-cert.pem'
SSL_KEY = '/etc/mysql/keys/client-key.pem'
SSL_CERT = '/etc/mysql/certs/client-cert.pem'
##
# Flask-WTF
##
WTF_CSRF_ENABLED = True
SECRET_KEY = 'secret_key'
##
# Log-in
##
REMEMBER_COOKIE_SECURE = True
REMEMBER_COOKIE_HTTPONLY = True
SESSION_PROTECTION = "strong"
##
# Babel
##
BABEL_DEFAULT_LOCALE = 'en'
BABEL_DEFAULT_TIMEZONE = 'UTC'
LANGUAGES = ['nl', 'en']
##
# Uploads
##
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = ('txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif')
MAX_CONTENT_LENGTH = 16 * 1024 * 1024 # 16 MB
##
# Logger
##
LOG_FILENAME = 'logs/scoremodel.log'
if use_ssl is True:
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://{user}:{passw}@{host}/{db}?ssl_key={ssl_key}&ssl_cert={ssl_cert}'.format(
user=DB_USER, passw=DB_PASS,
        host=DB_HOST, db=DB_NAME, ssl_key=SSL_KEY, ssl_cert=SSL_CERT)
else:
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://{user}:{passw}@{host}/{db}'.format(user=DB_USER, passw=DB_PASS,
host=DB_HOST, db=DB_NAME)
|
MitchTalmadge/Emoji-Tools
|
src/main/resources/PythonScripts/fontTools/misc/macRes.py
|
Python
|
gpl-3.0
| 6,563 | 0.026512 |
""" Tools for reading Mac resource forks. """
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
class ResourceError(Exception):
pass
class ResourceReader(MutableMapping):
def __init__(self, fileOrPath):
self._resources = OrderedDict()
if hasattr(fileOrPath, 'read'):
self.file = fileOrPath
else:
try:
# try reading from the resource fork (only works on OS X)
self.file = self.openResourceFork(fileOrPath)
self._readFile()
return
except (ResourceError, IOError):
# if it fails, use the data fork
self.file = self.openDataFork(fileOrPath)
self._readFile()
@staticmethod
def openResourceFork(path):
with open(path + '/..namedfork/rsrc', 'rb') as resfork:
data = resfork.read()
infile = BytesIO(data)
infile.name = path
return infile
@staticmethod
def openDataFork(path):
with open(path, 'rb') as datafork:
data = datafork.read()
infile = BytesIO(data)
infile.name = path
return infile
def _readFile(self):
self._readHeaderAndMap()
self._readTypeList()
def _read(self, numBytes, offset=None):
if offset is not None:
try:
self.file.seek(offset)
except OverflowError:
raise ResourceError("Failed to
|
seek offset ('offset' is too large)")
if self.file.tell() != offset:
raise ResourceError('Failed to seek offset (reached EOF)')
try:
data = self.file.read(numBytes)
except OverflowError:
raise ResourceError("Cannot read resource ('numBytes' is too large)")
if len(data) != numBytes:
raise ResourceError('Cannot read resource (not enough data)')
return data
def _readHeaderAndMap(self):
self.file.seek(0)
headerData = self._read(ResourceForkHeaderSize)
        sstruct.unpack(ResourceForkHeader, headerData, self)
# seek to resource map, skip reserved
mapOffset = self.mapOffset + 22
resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
sstruct.unpack(ResourceMapHeader, resourceMapData, self)
self.absTypeListOffset = self.mapOffset + self.typeListOffset
self.absNameListOffset = self.mapOffset + self.nameListOffset
def _readTypeList(self):
absTypeListOffset = self.absTypeListOffset
numTypesData = self._read(2, absTypeListOffset)
self.numTypes, = struct.unpack('>H', numTypesData)
absTypeListOffset2 = absTypeListOffset + 2
for i in range(self.numTypes + 1):
resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
resType = tostr(item['type'], encoding='mac-roman')
refListOffset = absTypeListOffset + item['refListOffset']
numRes = item['numRes'] + 1
resources = self._readReferenceList(resType, refListOffset, numRes)
self._resources[resType] = resources
def _readReferenceList(self, resType, refListOffset, numRes):
resources = []
for i in range(numRes):
refOffset = refListOffset + ResourceRefItemSize * i
refData = self._read(ResourceRefItemSize, refOffset)
res = Resource(resType)
res.decompile(refData, self)
resources.append(res)
return resources
def __getitem__(self, resType):
return self._resources[resType]
def __delitem__(self, resType):
del self._resources[resType]
def __setitem__(self, resType, resources):
self._resources[resType] = resources
def __len__(self):
return len(self._resources)
def __iter__(self):
return iter(self._resources)
def keys(self):
return self._resources.keys()
@property
def types(self):
return list(self._resources.keys())
def countResources(self, resType):
"""Return the number of resources of a given type."""
try:
return len(self[resType])
except KeyError:
return 0
def getIndices(self, resType):
numRes = self.countResources(resType)
if numRes:
return list(range(1, numRes+1))
else:
return []
def getNames(self, resType):
"""Return list of names of all resources of a given type."""
return [res.name for res in self.get(resType, []) if res.name is not None]
def getIndResource(self, resType, index):
"""Return resource of given type located at an index ranging from 1
to the number of resources for that type, or None if not found.
"""
if index < 1:
return None
try:
res = self[resType][index-1]
except (KeyError, IndexError):
return None
return res
def getNamedResource(self, resType, name):
"""Return the named resource of given type, else return None."""
name = tostr(name, encoding='mac-roman')
for res in self.get(resType, []):
if res.name == name:
return res
return None
def close(self):
if not self.file.closed:
self.file.close()
class Resource(object):
def __init__(self, resType=None, resData=None, resID=None, resName=None,
resAttr=None):
self.type = resType
self.data = resData
self.id = resID
self.name = resName
self.attr = resAttr
def decompile(self, refData, reader):
sstruct.unpack(ResourceRefItem, refData, self)
# interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
absDataOffset = reader.dataOffset + self.dataOffset
dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
self.data = reader._read(dataLength)
if self.nameOffset == -1:
return
absNameOffset = reader.absNameListOffset + self.nameOffset
nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
self.name = tostr(name, encoding='mac-roman')
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
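# Usage sketch (illustrative only; the file name and the 'sfnt' resource type
# are assumptions, not taken from this module):
#
# reader = ResourceReader("Example.dfont")      # falls back to the data fork off OS X
# print(reader.types)                           # e.g. ['sfnt', 'FOND']
# if reader.countResources('sfnt'):
#     first = reader.getIndResource('sfnt', 1)  # indices are 1-based
#     raw_bytes = first.data
# reader.close()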
|
lw7360/dailyprogrammer
|
Intermediate/226/226.py
|
Python
|
mit
| 1,541 | 0.00584 |
# https://www.reddit.com/r/dailyprogrammer/comments/3fva66/20150805_challenge_226_intermediate_connect_four/
import sys, string
xmoves = open(sys.argv[1]).read().translate(None, string.ascii_lowercase + ' \n')
omoves = open(sys.argv[1]).read().translate(None, string.ascii_uppercase + ' \n')
board = [[' ' for x in range(6)] for x in range(7)]
def insert(colchar, player):
colnumber = ord(colchar.lower()) - ord('a')
col = board[colnumber]
for i in range(len(col)):
if col[i] == ' ':
col[i] = player
break
def checkwinner(player):
for x in range(6):
for y in range(6):
if board[x][y] == player:
top = board[x][y+1:y+4]
if len(top) == 3 and not ''.join(top).strip(player):
return True
try:
right = [board[x+1][y], board[x+2][y], board[x+3][y]]
if not ''.join(right).strip(player):
return True
except:
pass
try:
topright = [board[x+1][y+1], board[x+2][y+2], board[x+3][y+3]]
if not ''.join(topright).strip(player):
return True
except:
pass
for i in range(len(xmoves)):
insert(xmoves[i], 'X')
if checkwinner('X'):
print 'X won at move ' + str(i+1)
break
insert(omoves[i], 'O')
if checkwinner('O'):
print 'O won at move ' + str(i+1)
        break
|
liweitianux/atoolbox
|
python/msvst_starlet.py
|
Python
|
mit
| 23,422 | 0.003074 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# References:
# [1] Jean-Luc Starck, Fionn Murtagh & Jalal M. Fadili
# Sparse Image and Signal Processing: Wavelets, Curvelets, Morphological Diversity
# Section 3.5, 6.6
#
# Credits:
# [1] https://github.com/abrazhe/image-funcut/blob/master/imfun/atrous.py
#
# Aaron LI
# Created: 2016-03-17
# Updated: 2016-04-22
#
# ChangeLog:
# 2016-04-22:
#   * Add argument "end-scale" to specify the end denoising scale
# * Check outfile existence first
#   * Add argument "start-scale" to specify the start denoising scale
# * Fix a bug about "p_cutoff" when "comp" contains ALL False's
# * Show more verbose information/details
# 2016-04-20:
# * Add argparse and main() for scripting
#
"""
Starlet wavelet transform, i.e., isotropic undecimated wavelet transform
(IUWT), or à trous wavelet transform.
And multi-scale variance stabilizing transform (MS-VST), which can be used
to effectively remove Poisson noise.
"""
__version__ = "0.2.5"
__date__ = "2016-04-22"
import sys
import os
import argparse
from datetime import datetime
import numpy as np
import scipy as sp
from scipy import signal
from astropy.io import fits
class B3Spline: # {{{
"""
B3-spline wavelet.
"""
# scaling function (phi)
dec_lo = np.array([1.0, 4.0, 6.0, 4.0, 1.0]) / 16
dec_hi = np.array([-1.0, -4.0, 10.0, -4.0, -1.0]) / 16
rec_lo = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
rec_hi = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
# B3Spline }}}
class IUWT: # {{{
"""
Isotropic undecimated wavelet transform.
"""
## Decomposition filters list:
    # a_{scale} = convolve(a_0, filters[scale])
# Note: the zero-th scale filter (i.e., delta function) is the first
    # element, thus the array index is the same as the decomposition scale.
filters = []
phi = None # wavelet scaling function (2D)
level = 0 # number of transform level
decomposition = None # decomposed coefficients/images
reconstruction = None # reconstructed image
# convolution boundary condition
boundary = "symm"
def __init__(self, phi=B3Spline.dec_lo, level=None, boundary="symm",
data=None):
self.set_wavelet(phi=phi)
self.level = level
self.boundary = boundary
self.data = np.array(data)
def reset(self):
"""
Reset the object attributes.
"""
self.data = None
self.phi = None
self.decomposition = None
self.reconstruction = None
self.level = 0
self.filters = []
self.boundary = "symm"
def load_data(self, data):
self.reset()
self.data = np.array(data)
def set_wavelet(self, phi):
self.reset()
phi = np.array(phi)
if phi.ndim == 1:
phi_ = phi.reshape(1, -1)
self.phi = np.dot(phi_.T, phi_)
elif phi.ndim == 2:
self.phi = phi
else:
raise ValueError("Invalid phi dimension")
def calc_filters(self):
"""
Calculate the convolution filters of each scale.
Note: the zero-th scale filter (i.e., delta function) is the first
element, thus the array index is the same as the decomposition scale.
"""
self.filters = []
# scale 0: delta function
h = np.array([[1]]) # NOTE: 2D
self.filters.append(h)
# scale 1
h = self.phi[::-1, ::-1]
self.filters.append(h)
for scale in range(2, self.level+1):
h_up = self.zupsample(self.phi, order=scale-1)
h2 = signal.convolve2d(h_up[::-1, ::-1], h, mode="same",
boundary=self.boundary)
self.filters.append(h2)
def transform(self, data, scale, boundary="symm"):
"""
Perform only one scale wavelet transform for the given data.
return:
[ approx, detail ]
"""
self.decomposition = []
approx = signal.convolve2d(data, self.filters[scale],
mode="same", boundary=self.boundary)
detail = data - approx
return [approx, detail]
def decompose(self, level, boundary="symm"):
"""
Perform IUWT decomposition in the plain loop way.
The filters of each scale/level are calculated first, then the
approximations of each scale/level are calculated by convolving the
raw/finest image with these filters.
return:
[ W_1, W_2, ..., W_n, A_n ]
n = level
W: wavelet details
A: approximation
"""
self.boundary = boundary
if self.level != level or self.filters == []:
self.level = level
self.calc_filters()
self.decomposition = []
approx = self.data
for scale in range(1, level+1):
# approximation:
approx2 = signal.convolve2d(self.data, self.filters[scale],
mode="same", boundary=self.boundary)
# wavelet details:
w = approx - approx2
self.decomposition.append(w)
if scale == level:
self.decomposition.append(approx2)
approx = approx2
return self.decomposition
def decompose_recursive(self, level, boundary="symm"):
"""
Perform the IUWT decomposition in the recursive way.
return:
[ W_1, W_2, ..., W_n, A_n ]
n = level
W: wavelet details
A: approximation
"""
self.level = level
self.boundary = boundary
self.decomposition = self.__decompose(self.data, self.phi, level=level)
return self.decomposition
def __decompose(self, data, phi, level):
"""
2D IUWT decomposition (or stationary wavelet transform).
This is a convolution version, where kernel is zero-upsampled
explicitly. Not fast.
Parameters:
- level : level of decomposition
- phi : low-pass filter kernel
- boundary : boundary conditions (passed to scipy.signal.convolve2d,
'symm' by default)
Returns:
list of wavelet details + last approximation. Each element in
the list is an image of the same size as the input image.
"""
if level <= 0:
return data
shapecheck = map(lambda a,b:a>b, data.shape, phi.shape)
assert np.all(shapecheck)
# approximation:
approx = signal.convolve2d(data, phi[::-1, ::-1], mode="same",
boundary=self.boundary)
# wavelet details:
w = data - approx
phi_up = self.zupsample(phi, order=1)
shapecheck = map(lambda a,b:a>b, data.shape, phi_up.shape)
if level == 1:
return [w, approx]
elif not np.all(shapecheck):
print("Maximum allowed decomposition level reached",
file=sys.stderr)
return [w, approx]
else:
return [w] + self.__decompose(approx, phi_up, level-1)
@staticmethod
def zupsample(data, order=1):
"""
Upsample data array by interleaving it with zero's.
h{up_order: n}[l] = (1) h[l], if l % 2^n == 0;
(2) 0, otherwise
"""
shape = data.shape
new_shape = [ (2**order * (n-1) + 1) for n in shape ]
output = np.zeros(new_shape, dtype=data.dtype)
output[[ slice(None, None, 2**order) for d in shape ]] = data
return output
def reconstruct(self, decomposition=None):
if decomposition is not None:
reconstruction = np.sum(decomposition, axis=0)
return reconstruction
else:
self.reconstruction = np.sum(self.decomposition, axis=0)
def get_detail(self, scale):
"""
Get the wavelet detail coefficients of given scale.
Note: 1 <= scale <= level
"""
if scale < 1 or scale > self.level:
raise ValueError("Invalid scale")
return self.decomposition[scale-1]
def get_approx(self):
"""
|
allembedded/python_web_framework
|
WebApplication/Views/PageView.py
|
Python
|
gpl-3.0
| 781 | 0.008963 |
"""
Page view class
"""
import os
from Server.Importer import ImportFromModule
class PageView(ImportFromModule("Server.PageViewBase", "PageViewBase")):
"""
Page view class.
"""
_PAGE_TITLE = "Python Web Framework"
    def __init__(self, htmlToLoad):
"""
Constructor.
- htmlToLoad : HTML to load
"""
self.SetPageTitle(self._PAGE_TITLE)
self.AddMetaData("charset=\"UTF-8\"")
self.AddMetaData("name=\"viewport\" content=\"width=device-width, initial-scale=1\"")
self.AddStyleSheet("/css/styles.css")
self.AddJavaScript("/js/http.js")
self.LoadHtml(os.path.join(os.path.dirname(__file__), "%s.html" % htmlToLoad))
self.SetPageData({ "PageTitle" : self._PAGE_TITLE })
|
Geoportail-Luxembourg/geoportailv3
|
geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py
|
Python
|
mit
| 2,012 | 0.000994 |
"""create table for hierarchy of accounts
Revision ID: 17fb1559a5cd
Revises: 3b7de32aebed
Create Date: 2015-09-16 14:20:30.972593
"""
# revision identifiers, used by Alembic.
revision = '17fb1559a5cd'
down_revision = '3b7de32aebed'
branch_labels = None
depends_on = None
from alembic import op, context
import sqlalchemy as sa
def downgrade():
schema = context.get_context().config.get_main_option('schema')
op.drop_table('lux_user_inheritance', schema=schema)
op.execute("DROP FUNCTION IF EXISTS "
"%(schema)s.getMainAccount(VARCHAR)"
% {"schema": schema})
def upgrade():
schema = context.get_context().config.get_main_option('schema')
op.create_table(
'lux_user_inheritance',
sa.Column(
'login', sa.VARCHAR(), autoincrement=False,
nullable=False),
sa.Column(
'login_father', sa.VARCHAR(), autoincrement=False,
nullable=False),
schema=schema
)
op.create_primary_key(
"lux_user_inheritance_pkey", "lux_user_inheritance",
['login', 'login_father'],
schema=schema
)
op.execute(
"CREATE OR REPLACE FUNCTION %(schema)s.getMainAccount "
"(child_login VARCHAR)"
"RETURNS VARCHAR AS "
"$$ "
"DECLARE "
"cur_login_f
|
ather VARCHAR;"
"res_login_father VARCHAR;"
"c_father Cursor (p_login VARCHAR) FOR "
"Select login_father From %(
|
schema)s.lux_user_inheritance Where "
"login = p_login;"
"BEGIN "
"cur_login_father := child_login;"
"LOOP "
"OPEN c_father(cur_login_father);"
"FETCH FIRST FROM c_father into res_login_father;"
"IF FOUND THEN "
"cur_login_father := res_login_father;"
"END IF;"
"CLOSE c_father;"
"IF NOT FOUND THEN "
"RETURN cur_login_father;"
"END IF;"
"END LOOP;"
"END;"
"$$"
"LANGUAGE plpgsql;" % {"schema": schema})
|
miurahr/seahub
|
seahub/related_files/models.py
|
Python
|
apache-2.0
| 2,806 | 0.002495 |
# -*- coding: utf-8 -*-
import os
from django.db import models
from django.db.models import Q
from seahub.tags.models import FileUUIDMap
from seahub.utils import normalize_file_path
class RelatedFilesManager(models.Manager):
def get_related_files_uuid(self, uuid):
related_files_uuid = super(RelatedFilesManager, self).filter(
Q(o_uuid=uuid) | Q(r_uuid=uuid)).select_related('o_uuid', 'r_uuid')
return related_files_uuid
def get_related_file_uuid(self, o_repo_id, r_repo_id, o_path, r_path):
o_file_path = normalize_file_path(o_path)
o_filename = os.path.basename(o_file_path)
o_parent_path = os.path.dirname(o_file_path)
r_file_path = normalize_file_path(r_path)
r_filename = os.path.basename(r_file_path)
r_parent_path = os.path.dirname(r_file_path)
o_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(o_repo_id, o_parent_path, o_filename, is_dir=False)
r_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(r_repo_id, r_parent_path, r_filename, is_dir=False)
try:
return super(RelatedFilesManager, self).get(
Q(o_uuid=o_uuid, r_uuid=r_uuid) | Q(o_uuid=r_uuid, r_uuid=o_uuid))
except self.model.DoesNotExist:
return None
def add_related_file_uuid(self, o_repo_id, r_repo_id, o_path, r_path):
o_file_path = normalize_file_path(o_path)
o_filename = os.path.basename(o_file_path)
o_parent_path = os.path.dirname(o_file_path)
r_file_path = normalize_file_path(r_path)
r_filename = os.path.basename(r_file_path)
r_parent_path = os.path.dirname(r_file_path)
o_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(o_repo_id, o_parent_path, o_filename, is_dir=False)
r_uuid = FileUUIDMap.objects.get_or_create_fileuuidmap(r_repo_id, r_parent_path, r_filename, is_dir=False)
related_file_uuid = self.model(o_uuid=o_uuid, r_uuid=r_uuid)
related_file_uuid.save()
return related_file_uuid
    def get_related_file_uuid_by_id(self, related_id):
try:
return super(RelatedFilesManager, self).get(pk=related_id)
except self.model.DoesNotExist:
return None
def delete_related_file_uuid(self, related_id):
try:
            file_related = super(RelatedFilesManager, self).get(pk=related_id)
file_related.delete()
return True
except self.model.DoesNotExist:
return False
class RelatedFiles(models.Model):
o_uuid = models.ForeignKey(FileUUIDMap, db_index=True, on_delete=models.CASCADE, related_name='o_uuid')
r_uuid = models.ForeignKey(FileUUIDMap, db_index=True, on_delete=models.CASCADE, related_name='r_uuid')
objects = RelatedFilesManager()
|
rwl/PyCIM
|
CIM14/ENTSOE/StateVariables/StateVariables/SvShuntCompensatorSections.py
|
Python
|
mit
| 2,935 | 0.002726 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.StateVariables.Element import Element
class SvShuntCompensatorSections(Element):
"""State variable for the number of sections in service for a shunt compensator.
"""
def __init__(self, sections=0, continuousSections=0.0, ShuntCompensator=None, *args, **kw_args):
"""Initialises a new 'SvShuntCompensatorSections' instance.
@param sections: The number of sections in service.
        @param continuousSections: The number of sections in service as a continuous variable.
@param ShuntCompensator: The shunt compensator for which the state applies.
"""
#: The number of sections in service.
self.sections = sections
        #: The number of sections in service as a continuous variable.
self.continuousSections = continuousSections
self._ShuntCompensator = None
self.ShuntCompensator = ShuntCompensator
super(SvShuntCompensatorSections, self).__init__(*args, **kw_args)
_attrs = ["sections", "continuousSections"]
_attr_types = {"sections": int, "continuousSections": float}
_defaults = {"sections": 0, "continuousSections": 0.0}
_enums = {}
_refs = ["ShuntCompensator"]
_many_refs = []
def getShuntCompensator(self):
"""The shunt compensator for which the state applies.
"""
return self._ShuntCompensator
def setShuntCompensator(self, value):
if self._ShuntCompensator is not None:
self._ShuntCompensator._SvShuntCompensatorSections = None
self._ShuntCompensator = value
if self._ShuntCompensator is not None:
self._ShuntCompensator.SvShuntCompensatorSections = None
self._ShuntCompensator._SvShuntCompensatorSections = self
ShuntCompensator = property(getShuntCompensator, setShuntCompensator)
|
schoolie/bokeh
|
examples/howto/layouts/dashboard.py
|
Python
|
bsd-3-clause
| 2,816 | 0.002131 |
import numpy as np
from bokeh.layouts import layout
from bokeh.models import CustomJS, Slider, ColumnDataSource, WidgetBox
from bokeh.plotting import figure, output_file, show
output_file('dashboard.html')
tools = 'pan'
def bollinger():
# Define Bollinger Bands.
upperband = np.random.random_integers(100, 150, size=100)
lowerband = upperband - 100
x_data = np.arange(1, 101)
# Bollinger shading glyph:
band_x = np.append(x_data, x_data[::-1])
band_y = np.append(lowerband, upperband[::-1])
p = figure(x_axis_type='datetime', tools=tools)
p.patch(band_x, band_y, color='#7570B3', fill_alpha=0.2)
p.title.text = 'Bollinger Bands'
p.title_location = 'left'
p.title.align = 'left'
p.plot_height = 600
p.plot_width = 800
p.grid.grid_line_alpha = 0.4
return [p]
def slider():
x = np.linspace(0, 10, 100)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(
y_range=(-10, 10), tools='', toolbar_location=None,
title="Sliders example")
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
callback = CustomJS(args=dict(source=source), code="""
var data = source.data;
var A = amp.value;
var k = freq.value;
var phi = phase.value;
var B = offset.value;
x = data['x']
y = data['y']
for (i = 0; i < x.length; i++) {
y[i] = B + A*Math.sin(k*x[i]+phi);
}
source.trigger('change');
""")
amp_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Amplitude", callback=callback, callback_policy='mouseup')
callback.args["amp"] = amp_slider
freq_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Frequency", callback=callback)
callback.args["freq"] = freq_slider
phase_slider = Slider(start=0, end=6.4, value=0, step=.1, title="Phase", callback=callback)
callback.args["phase"] = phase_slider
offset_slider = Slider(start=-5, end=5, value=0, step=.1, title="Offset", callback=callback)
callback.args["offset"] = offset_slider
    widgets = WidgetBox(amp_slider, freq_slider, phase_slider, offset_slider)
return [widgets, plot]
def linked_panning():
N = 100
x = np.linspace(0, 4 * np.pi, N)
y1 = np.sin(x)
y2 = np.cos(x)
y3 = np.sin(x) + np.cos(x)
s1 = figure(tools=tools)
s1.circle(x, y1, color="navy", size=8, alpha=0.5)
s2 = figure(tools=tools, x_range=s1.x_range, y_range=s1.y_range)
s2.circle(x, y2, color="firebrick", size=8, alpha=0.5)
s3 = figure(tools='pan, box_select', x_range=s1.x_range)
s3.circle(x, y3, color="olive", size=8, alpha=0.5)
return [s1, s2, s3]
l = layout([
bollinger(),
slider(),
linked_panning(),
], sizing_mode='stretch_both')
show(l)
|
zenodo/invenio
|
invenio/modules/formatter/format_elements/bfe_arxiv_link.py
|
Python
|
gpl-2.0
| 1,776 | 0.002815 |
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Links to arXiv"""
from cgi import escape
from invenio.base.i18n import gettext_set_language
def format_element(bfo, tag="037__", target="_blank"):
"""
Extracts the arXiv preprint information and
presents it as a direct link towards arXiv.org
"""
_ = gettext_set_language(bfo.lang)
potential_arxiv_ids = bfo.fields(tag)
arxiv_id = ""
for potential_arxiv_id in potential_arxiv_ids:
if potential_arxiv_id.get('9') == 'arXiv' and potential_arxiv_id.get('a', '').startswith('arXiv:'):
arxiv_id = potential_arxiv_id['a'][len('arXiv:'):]
return '<a href="http://arxiv.org/abs/%s" target="%s" alt="%s">%s</a>' % (
escape(arxiv_id, True),
        escape(target, True),
escape(_("This article on arXiv.org"), True),
escape(arxiv_id))
return ""
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
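# Illustrative example (the MARC encoding below is an assumption, not taken from
# this file): for a record whose 037__ field carries subfield a = 'arXiv:1234.5678'
# and subfield 9 = 'arXiv', format_element() returns roughly:
#   <a href="http://arxiv.org/abs/1234.5678" target="_blank"
#      alt="This article on arXiv.org">1234.5678</a>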
|
CAST-projects/Extension-SDK
|
samples/analyzer_level/mainframe/mainframe.quality_rule/empty_paragraph_end.py
|
Python
|
mit
| 1,192 | 0.012584 |
from cast.analysers import log, mainframe
class EmptyParagraphEndOfSection(mainframe.Extension):
def __init__(self):
self.program = None
def start_program(self, program):
self.program = program
def end_program(self, _):
self.program = None
def start_section(self, section):
last_paragraph = section.get_children()[-1]
if 'paragraph' == last_paragraph.get_kind():
children = last_paragraph.get_children()
if len(children) > 1:
# violation test_ko2
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
elif len(children) == 1:
kind = children[0].get_kind()
if kind not in ['exit', 'stop_run', 'goback']:
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
else:
# violation test_ko1
self.program.save_violation('MyCompany_COBOL_Rules.sectionEndParagraph', section.get_position())
|
c-PRIMED/puq
|
test/CustomParameter_test.py
|
Python
|
mit
| 7,647 | 0.024062 |
#! /usr/bin/env python
'''
Testsuite for the CustomParameter class
'''
from __future__ import absolute_import, division, print_function
import numpy as np
from puq import *
def _hisplot(y, nbins):
n, bins = np.histogram(y, nbins, normed=True)
mids = bins[:-1] + np.diff(bins) / 2.0
return mids, n
def compare_curves(x1, y1, x2, y2, **args):
ay = np.interp(x2, x1, y1)
print("maximum difference is", np.max(np.abs(ay - y2)))
assert np.allclose(ay, y2, **args)
n = NormalParameter('x','x',mean=10,dev=1)
norm80 = n.pdf.lhs(80)
# test mean and deviation
def test_custom_pdf_meandev():
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(norm80))
assert np.allclose(c.pdf.mean, 10.0, rtol=.05), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 1.0, rtol=.05), "dev=%s" % c.pdf.dev
# test lhs()
def test_custom_pdf_lhs():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
print("LHS: mean=%s dev=%s" % (c.pdf.mean, c.pdf.dev))
assert(np.allclose(c.pdf.mean, 5.04, atol=.1))
assert(np.allclose(c.pdf.dev, 1.9, atol=.1))
# test the lhs() function to see if the curve it generates is
# close enough
data = c.pdf.lhs(1000)
dx, dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.01)
# test lhs1()
def test_custom_pdf_lhs1():
a = np.array([12,12,13,13,13,14,14,14,14,15,15,15,15,15,16,16,16,16,16,17,17,17,18,18])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
# test the lhs1() function to see if the curve it generates is
# close enough
xs = c.pdf.ds1(1000)
assert len(xs) == 1000
# scale [-1,1] back to original size
min, max = c.pdf.range
mean = (min + max)/2.0
xs *= max - mean
xs += mean
# bin it
mids, n = _hisplot(xs, 40)
compare_curves(c.pdf.x, c.pdf.y, mids, n, atol=.004)
'''
import matplotlib.pyplot as plt
plt.plot(mids, n, color='green')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_random():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
data = c.pdf.random(100000)
dx,dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.03)
'''
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
# test lhs()
def test_custom_pdf_lhs_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
print("LHS: mean=%s dev=%s" % (c.pdf.mean, c.pdf.dev))
assert(np.allclose(c.pdf.mean, 5.04, atol=.1))
assert(np.allclose(c.pdf.dev, 1.7, atol=.1))
# test the lhs() function to see if the curve it generates is
# close enough
data = c.pdf.ds(1000)
dx,dy = _hisplot(data, 40)
"""
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
"""
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.4)
# test lhs1()
def test_custom_pdf_lhs1_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
# test the lhs1() function to see if the curve it generates is
# close enough
xs = c.pdf.ds1(1000)
assert len(xs) == 1000
# scale [-1,1] back to original size
min, max = c.pdf.range
mean = (min + max)/2.0
xs *= max - mean
xs += mean
# bin it
mids, n = _hisplot(xs, 40)
compare_curves(c.pdf.x, c.pdf.y, mids, n, atol=.4)
'''
import matplotlib.pyplot as plt
plt.plot(mids, n, color='green')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_random_nofit():
a = np.array([2,2,3,3,3,4,4,4,4,5,5,5,5,5,6,6,6,6,6,7,7,7,8,8])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, nbins=40))
data = c.pdf.random(100000)
dx,dy = _hisplot(data, 40)
compare_curves(c.pdf.x, c.pdf.y, dx, dy, atol=.4)
'''
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(c.pdf.x, c.pdf.y, color='blue')
plt.show()
'''
def test_custom_pdf_small():
a = np.array([2,3,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert np.allclose(c.pdf.mean, 7.0/3, atol=.3), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 0.4, atol=.2), "dev=%s" % c.pdf.dev
def test_custom_pdf_small_fit():
a = np.array([2,3,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert np.allclose(c.pdf.mean, 7.0/3, atol=.3), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, 0.4, atol=.4), "dev=%s" % c.pdf.dev
# single data point. Must use Bayesian fit.
def test_custom_pdf_single_fit():
a = np.array([42])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, error=NormalPDF(0,.1)))
assert np.allclose(c.pdf.mean, 42), "mean=%s" % c.pdf.mean
assert np.allclose(c.pdf.dev, .1, atol=.01), "dev=%s" % c.pdf.dev
def test_custom_pdf_single():
a = np.array([42])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 42
assert c.pdf.dev == 0
assert c.pdf.mode == 42
def test_custom_pdf_zero():
a = np.array([0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozero():
a = np.array([0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozerozero():
a = np.array([0, 0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
    assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_zerozerozero_fit():
a = np.array([0, 0, 0])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert c.pdf.mean == 0
assert c.pdf.dev == 0
assert c.pdf.mode == 0
def test_custom_pdf_const():
a = np.array([2,2,2,2,2,2,2,2,2,2,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a))
assert c.pdf.mean == 2
assert c.pdf.dev == 0
assert c.pdf.mode == 2
def test_custom_pdf_const_fit():
a = np.array([2,2,2,2,2,2,2,2,2,2,2])
c = CustomParameter('x', 'unknown', pdf=ExperimentalPDF(a, fit=True))
assert c.pdf.mean == 2
assert c.pdf.dev == 0
assert c.pdf.mode == 2
#### EXCEPTION TESTING
# forget to include pdf
def test_custom_pdf_exception():
ok = False
try:
c = CustomParameter('x', 'X, the unknown')
except ValueError:
ok = True
except:
assert False, 'Wrong Exception'
if not ok:
assert False, 'No Exception when one was expected'
if __name__ == "__main__":
test_custom_pdf_meandev()
test_custom_pdf_lhs()
test_custom_pdf_lhs1()
test_custom_pdf_random()
test_custom_pdf_lhs_nofit()
test_custom_pdf_lhs1_nofit()
test_custom_pdf_random_nofit()
test_custom_pdf_exception()
test_custom_pdf_small()
test_custom_pdf_small_fit()
test_custom_pdf_single()
test_custom_pdf_single_fit()
test_custom_pdf_const()
test_custom_pdf_const_fit()
test_custom_pdf_zero()
test_custom_pdf_zerozero()
test_custom_pdf_zerozerozero()
test_custom_pdf_zerozerozero_fit()
|
PandaWei/tp-libvirt
|
libvirt/tests/src/guest_kernel_debugging/nmi_test.py
|
Python
|
gpl-2.0
| 3,444 | 0.000871 |
import logging
from virttest import virsh
from provider import libvirt_version
from autotest.client.shared import error
def run_cmd_in_guest(vm, cmd):
"""
Run command in the guest
:params vm: vm object
:params cmd: a command needs to be ran
"""
session = vm.wait_for_login()
status, output = session.cmd_status_output(cmd)
logging.debug("The '%s' output: %s", cmd, output)
if status:
session.close()
raise error.TestError("Can not run '%s' in guest: %s", cmd, output)
else:
session.close()
return output
def run(test, params, env):
"""
1. Configure kernel cmdline to support kdump
2. Start kdump service
3. Inject NMI to the guest
4. Check NMI times
"""
for cmd in 'inject-nmi', 'qemu-monitor-command':
if not virsh.has_help_command(cmd):
raise error.TestNAError("This version of libvirt does not "
" support the %s test", cmd)
vm_name = params.get("main_vm", "virt-tests-vm1")
vm = env.get_vm(vm_name)
start_vm = params.get("start_vm")
expected_nmi_times = params.get("expected_nmi_times", '0')
unprivileged_user = params.get('unprivileged_user')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
raise error.TestNAError("API acl test not supported in current"
+ " libvirt version.")
if start_vm == "yes":
# start kdump service in the guest
cmd = "which kdump"
try:
run_cmd_in_guest(vm, cmd)
except:
try:
# try to install kexec-tools on fedoraX/rhelx.y guest
run_cmd_in_guest(vm, "yum install -y kexec-tools")
except:
raise error.TestNAError("Requires kexec-tools(or the "
"equivalent for your distro)")
# enable kdump service in the guest
cmd = "service kdump start"
run_cmd_in_guest(vm, cmd)
# filter original 'NMI' information from the /proc/interrupts
cmd = "grep NMI /proc/interrupts"
nmi_str = run_cmd_in_guest(vm, cmd)
# filter CPU from the /proc/cpuinfo and count number
cmd = "grep -E '^process' /proc/cpuinfo | wc -l"
vcpu_num = run_cmd_in_guest(vm, cmd).strip()
logging.info("Inject NMI to the guest via virsh inject_nmi")
virsh.inject_nmi(vm_name, debug=True, ignore_status=False)
logging.info("Inject NMI to the guest via virsh qemu_monitor_command")
virsh.qemu_monitor_command(vm_name, '{"execute":"inject-nmi"}')
# injects a Non-Maskable Interrupt into the default CPU (x86/s390)
# or all CPUs (ppc64), as usual, the default CPU index is 0
cmd = "grep NMI /proc/interrupts | awk '{print $2}'"
nmi_from_default_vcpu = run_cmd_in_guest(vm, cmd)
real_nmi_times = nmi_from_default_vcpu.splitlines()[0]
logging.debug("The current Non-Maskable Interrupts: %s", real_nmi_times)
    # check Non-maskable interrupts times
if real_nmi_times != expected_nmi_times:
raise error.TestFail("NMI times aren't expected %s:%s",
real_nmi_times, expec
|
ted_nmi_times)
|
LMescheder/AdversarialVariationalBayes
|
avb/inputs.py
|
Python
|
mit
| 4,913 | 0.002239 |
import numpy as np
import tensorflow as tf
import os
def get_inputs(split, config):
split_dir = config['split_dir']
data_dir = config['data_dir']
dataset = config['dataset']
split_file = os.path.join(split_dir, dataset, split + '.lst')
filename_queue = get_filename_queue(split_file, os.path.join(data_dir, dataset))
if dataset == 'mnist':
image = get_inputs_mnist(filename_queue, config)
config['output_size'] = 28
config['c_dim'] = 1
elif dataset == "cifar-10":
image = get_inputs_cifar10(filename_queue, config)
config['output_size'] = 32
config['c_dim'] = 3
else:
image = get_inputs_image(filename_queue, config)
image_batch = create_batch([image], config['batch_size'])
return image_batch
def get_inputs_image(filename_queue, config):
output_size = config['output_size']
image_size = config['image_size']
c_dim = config['c_dim']
# Read a record, getting filenames from the filename_queue.
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
image = tf.image.decode_image(value, channels=c_dim)
image = tf.cast(image, tf.float32)/255.
image_shape = tf.shape(image)
image_height, image_width = image_shape[0], image_shape[1]
offset_height = tf.cast((image_height - image_size)/2, tf.int32)
offset_width = tf.cast((image_width - image_size)/2, tf.int32)
image = tf.image.crop_to_bounding_box(image, offset_height, offset_width, image_size, image_size)
image = tf.image.resize_images(image, [output_size, output_size])
image.set_shape([output_size, output_size, c_dim])
return image
def get_inputs_mnist(filename_queue, config):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since all keys are required.
features={
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'image_raw': tf.FixedLenFeature([], tf.string),
})
image = tf.decode_raw(features['image_raw'], tf.uint8)
image.set_shape([784])
image = tf.reshape(image, [28, 28, 1])
image = tf.cast(image, tf.float32) / 255.
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(features['label'], tf.int32)
binary_image = (tf.random_uniform(image.get_shape()) <= image)
binary_image = tf.cast(binary_image, tf.float32)
return binary_image
def get_inputs_cifar10(filename_queue, config):
output_size = config['output_size']
image_size = config['image_size']
c_dim = config['c_dim']
# Dimensions of the images in the CIFAR-10 dataset.
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
label_bytes = 1 # 2 for CIFAR-100
image_bytes = 32 * 32 * 3
# Every record consists of a label followed by the image, with a
# fixed number of bytes for each.
record_bytes = label_bytes + image_bytes
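    # With the values above each record is 1 + 32 * 32 * 3 = 3073 bytes long.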
# Read a record, getting filenames from the filename_queue.
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
key, value = reader.read(filename_queue)
    record = tf.decode_raw(value, tf.uint8)
# The first bytes represent the label, which we convert from uint8->int32.
label = tf.cast(record[0], tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
#tf.strided_slice(record, [label_bytes], [label_bytes + image_bytes])
image = tf.reshape(record[label_bytes:label_bytes+image_bytes], [3, 32, 32])
    image = tf.cast(image, tf.float32)/255.
# Convert from [depth, height, width] to [height, width, depth].
image = tf.transpose(image, [1, 2, 0])
return image
def get_filename_queue(split_file, data_dir):
with open(split_file, 'r') as f:
filenames = f.readlines()
filenames = [os.path.join(data_dir, f.strip()) for f in filenames]
for f in filenames:
if not os.path.exists(f):
raise ValueError('Failed to find file: ' + f)
filename_queue = tf.train.string_input_producer(filenames)
return filename_queue
def create_batch(inputs, batch_size=64, min_queue_examples=1000, num_preprocess_threads=12, enqueue_many=False):
# Generate a batch of images and labels by building up a queue of examples.
batch = tf.train.shuffle_batch(
inputs,
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples,
enqueue_many=enqueue_many,
)
return batch
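# Usage sketch (illustrative; the directory layout, file names and sizes are
# assumptions, and the TF1-style input queues must be started before the batch
# tensor yields data):
#
# config = {'split_dir': 'splits', 'data_dir': 'data', 'dataset': 'mnist',
#           'batch_size': 64, 'image_size': 28, 'output_size': 28, 'c_dim': 1}
# image_batch = get_inputs('train', config)   # float tensor of shape [64, 28, 28, 1]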
|
mikehankey/fireball_camera
|
scan-stills2.py
|
Python
|
gpl-3.0
| 42,716 | 0.043426 |
#!/usr/bin/python3
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
from sklearn import datasets
from PIL import Image, ImageChops
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from random import randint
import time
import ephem
from PIL import Image
import cv2
import glob
import sys
import os
import numpy as np
import datetime
from pathlib import Path
import subprocess
from amscommon import read_config
import math
import time
from sklearn.cluster import Birch
from collections import deque
video_dir = "/mnt/ams2/SD/"
def stack_stack(pic1, pic2):
frame_pil = Image.fromarray(pic1)
stacked_image = pic2
if stacked_image is None:
stacked_image = frame_pil
else:
stacked_image=ImageChops.lighter(stacked_image,frame_pil)
return(stacked_image)
def compute_straight_line(x1,y1,x2,y2,x3,y3):
print ("COMP STRAIGHT", x1,y1,x2,y2,x3,y3)
if x2 - x1 != 0:
a = (y2 - y1) / (x2 - x1)
else:
a = 0
if x3 - x1 != 0:
b = (y3 - y1) / (x3 - x1)
else:
b = 0
straight_line = a - b
if (straight_line < 1):
straight = "Y"
else:
straight = "N"
return(straight_line)
def crop_center(img,cropx,cropy):
y,x = img.shape
startx = x//2-(cropx//2) +12
starty = y//2-(cropy//2) + 4
return img[starty:starty+cropy,startx:startx+cropx]
def fig2data ( fig ):
"""
@brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw ( )
# Get the RGBA buffer from the figure
w,h = fig.canvas.get_width_height()
buf = np.fromstring ( fig.canvas.tostring_argb(), dtype=np.uint8 )
buf.shape = ( w, h,4 )
# canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
buf = np.roll ( buf, 3, axis = 2 )
return buf
def kmeans_cluster(points, num_clusters):
points = np.array(points)
print(points)
clusters = []
cluster_points = []
colors = ('r', 'g', 'b')
est = KMeans(n_clusters=num_clusters)
est.fit(points)
print (est.labels_)
print (len(points))
({i: np.where(est.labels_ == i)[0] for i in range(est.n_clusters)})
for i in set(est.labels_):
index = est.labels_ == i
cluster_idx = np.where(est.labels_ == i)
for idxg in cluster_idx:
for idx in idxg:
idx = int(idx)
point = points[idx]
#print ("IDX:",i, idx, point)
cluster_points.append(point)
clusters.append(cluster_points)
cluster_points = []
#print(points[:,0])
#print(points[:,1])
int_lb = est.labels_.astype(float)
#fig = gcf()
fig = Figure()
canvas = FigureCanvas(fig)
plot = fig.add_subplot(1,1,1)
plot.scatter(points[:,0], points[:,1], c=[plt.cm.Spectral(float(i) / 10) for i in est.labels_])
for cluster in clusters:
cxs = []
cys = []
for cp in cluster:
x,y,w,h = cp
cxs.append(x)
cys.append(y)
if len(cxs) > 3:
plot.plot(np.unique(cxs), np.poly1d(np.polyfit(cxs, cys, 1))(np.unique(cxs)))
plt.xlim(0,640)
plt.ylim(0,480)
plot.invert_yaxis()
fig.canvas.draw()
fig.savefig("/tmp/plot.png", dpi=fig.dpi)
#plt.show()
return(clusters)
def calc_dist(x1,y1,x2,y2):
dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
return dist
def find_angle(x1,x2,y1,y2):
if x2 - x1 != 0:
a1 = (y2 - y1) / (x2 - x1)
else:
a1 = 0
angle = math.atan(a1)
angle = math.degrees(angle)
return(angle)
def closest_node(node, nodes):
return nodes[cdist([node], nodes).argmin()]
def find_objects(index, points):
apoints = []
unused_points = []
cl_sort = []
sorted_points = []
last_angle = None
objects = []
group_pts = []
line_segments = []
stars = []
obj_points = []
big_cnts = []
count = 0
x1,y1,w1,h1 = points[index]
print ("Total Points found in image: ", len(points))
used_pts = {}
for i in range(0,len(points)-1):
x1,y1,w1,h1 = points[i]
for i in range(0,len(points)-1):
x2,y2,w2,h2 = points[i]
key = str(x1)+"."+str(y1)+"."+str(x2)+"."+str(y2)
used_pts[key] = 0
key2 = str(x2)+"."+str(y2)+"."+str(x1)+"."+str(y1)
used_pts[key2] = 0
possible_stars = []
for i in range(0,len(points)-1):
closest = []
x1,y1,w1,h1 = points[i]
for j in range(0,len(points)-1):
x2,y2,w2,h2 = points[j]
key = str(x1)+"."+str(y1)+"."+str(x2)+"."+str(y2)
key2 = str(x2)+"."+str(y2)+"."+str(x1)+"."+str(y1)
dist = calc_dist(x1,y1,x2,y2)
angle = find_angle(x1,y1,x2,y2)
if x1 != x2 and y1 != y2:
if used_pts[key] == 0 and used_pts[key2] == 0 :
#print("Closest Point:", (int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
closest.append((int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
used_pts[key] = 1
used_pts[key2] = 1
#print("Key has been used:", key, key2)
#else:
# print("Key already used try another one:", key, key2)
#else:
# print ("this point has already been used")
count = count + 1
# of all the close points, make sure that at least 2 points < 25 px dist exist.
conf_closest = []
for cls in closest:
if cls[0] < 100:
conf_closest.append(cls)
if len(closest) > 0:
distsort = np.unique(closest, axis=0)
dist,angle,x1,y1,x2,y2 = distsort[0]
if dist < 50 and len(conf_closest) > 1:
line_segments.append((int(dist),int(angle),int(x1),int(y1),int(x2),int(y2)))
obj_points.append((int(x1),int(y1), int(w1), int(h1)))
else:
possible_stars.append((int(x1),int(y1),int(w1),int(h1)))
#print("CLOSEST LINE SEGMENT FOR PT: ", distsort[0])
#else:
#print("ERROR! no close points to this one!", x1,y1)
if w1 > 15 or h1 > 15:
# print ("BIG!!! We have a big object here likely containing many line segments.")
big_cnts.append((int(x1),int(y1),int(w1),int(h1)))
for star in possible_stars:
close = 0
for line in line_segments:
dist,angle,x1,y1,x2,y2 = line
star_dist = calc_dist(star[0], star[1], x1,y1)
#print ("STARDIST: ", star_dist, star[0], star[1], x1,y1)
if star_dist < 60:
close = 1
if close == 1:
obj_points.append(star)
else:
stars.append(star)
#print ("OBJECT POINTS")
if len(line_segments) > 0:
sorted_lines = sorted(line_segments, key=lambda x: x[2])
else:
sorted_lines = []
#print ("LINE SEGMENTS:")
#for line in sorted_lines:
# print (line)
last_ang = 0
last_dist = 0
line_groups = []
line_group = []
orphan_lines = []
if len(sorted_lines) > 0:
for segment in sorted_lines:
dist,angle,x1,y1,x2,y2 = segment
if last_ang != 0 and (angle -5 < last_ang < angle + 5) and dist < 100:
#print ("Line Segment Part of Existing Group: ", segment)
line_group.append((dist,angle,x1,y1,x2,y2))
else:
#print ("New Group Started!", last_ang, angle )
# print ("Line Segment Part of New Group: ", segment)
if len(line_group) >= 3:
line_groups.append(line_group)
else:
#print("Last line segment was too small to be part of a group! These are random points or stars. Skip for now.")
for line in line_group:
orphan_lines.append(line)
line_group = []
line_group.append((dist,angle,x1,y1,x2,y2))
last_ang = angle
if len(line_group) >= 2:
line_groups.append(line_group)
else:
for line in line_group:
orphan_lines.
|
anomen-s/programming-challenges
|
coderbyte.com/easy/Multiplicative Persistence/solve.py
|
Python
|
gpl-2.0
| 820 | 0.02439 |
'''
Using the Python language, have the function MultiplicativePersistence(num)
take the num parameter being passed which will always be a positive integer
and return its multiplicative persistence which is the number of times
you must multiply the digits in num until you reach a single digit.
For example: if num is 39 then your program should return 3
because 3 * 9 = 27 then 2 * 7 = 14 and finally 1 * 4 = 4 and you stop at 4.
'''
def MultiplicativePersistence(num):
steps = 0
while num > 9:
snum = str(num)
sdigits = list(snum)
num = 1
for snum in sdigits:
n = int(snum)
num = num * n
steps = steps + 1
return steps
# keep this function call here
# to see how to enter arguments in Python scroll down
print MultiplicativePersistence(raw_input())
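# Additional worked example (illustrative): MultiplicativePersistence(77) returns 4,
# since 7*7 = 49, 4*9 = 36, 3*6 = 18 and finally 1*8 = 8 -- four multiplications.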
|
OCA/OpenUpgrade
|
openupgrade_scripts/scripts/sale/14.0.1.1/pre-migration.py
|
Python
|
agpl-3.0
| 1,334 | 0.001499 |
# Copyright 2021 ForgeFlow S.L. <https://www.forgeflow.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openupgradelib import openupgrade
_xmlid_renames = [
(
"sale.access_product_product_attribute_cu
|
stom_value",
"sale.access_product_product_attribute_custom_value_sale_manager",
),
("sale.account_move_see_all", "sale.account_invoice_rule_see_all"),
("sale.account_move_personal_rule", "sale.account_invoice_rule_see_personal"),
("sale.account_move_line_see_all", "sale.
|
account_invoice_line_rule_see_all"),
(
"sale.account_move_line_personal_rule",
"sale.account_invoice_line_rule_see_personal",
),
]
def fast_fill_sale_order_currency_id(env):
if not openupgrade.column_exists(env.cr, "sale_order", "currency_id"):
openupgrade.logged_query(
env.cr,
"""
ALTER TABLE sale_order
ADD COLUMN currency_id integer""",
)
openupgrade.logged_query(
env.cr,
"""
UPDATE sale_order so
SET currency_id = pp.currency_id
FROM product_pricelist pp
WHERE so.pricelist_id = pp.id""",
)
@openupgrade.migrate()
def migrate(env, version):
openupgrade.rename_xmlids(env.cr, _xmlid_renames)
fast_fill_sale_order_currency_id(env)
|
MarcoDalFarra/semseg
|
DataGenerators.py
|
Python
|
mit
| 48,218 | 0.001307 |
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import IPython
import os
import threading
import warnings
import scipy.ndimage as ndi
import cv2
import random
try:
from PIL import Image as pil_image
except ImportError:
pil_image = None
from keras import backend as K
from sklearn.preprocessing import OneHotEncoder
from itertools import islice, chain
from sklearn.model_selection import train_test_split
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def augment_brightness_camera_images(image):
image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
image1 = np.array(image1)
random_bright = 0.5+np.random.uniform()
image1[:, :, 2] = image1[:, :, 2]*random_bright
image1[:, :, 2][image1[:, :, 2] > 255] = 255
image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)
return image1
def add_random_shadow(image):
top_y = 320*np.random.uniform()
top_x = 0
bot_x = 160
bot_y = 320*np.random.uniform()
image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
shadow_mask = 0*image_hls[:, :, 1]
X_m = np.mgrid[0:image.shape[0], 0:image.shape[1]][0]
Y_m = np.mgrid[0:image.shape[0], 0:image.shape[1]][1]
shadow_mask[((X_m-top_x)*(bot_y-top_y) - (bot_x - top_x)*(Y_m-top_y) >= 0)] = 1
if np.random.randint(2) == 1:
random_bright = .5
cond1 = shadow_mask == 1
cond0 = shadow_mask == 0
if np.random.randint(2) == 1:
image_hls[:, :, 1][cond1] = image_hls[:, :, 1][cond1]*random_bright
else:
image_hls[:, :, 1][cond0] = image_hls[:, :, 1][cond0]*random_bright
image = cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)
return image
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Perform
|
s a random sp
|
atial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('zoom_range should be a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Apply the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channel
|
davelab6/pyfontaine
|
fontaine/charsets/internals/google_extras.py
|
Python
|
gpl-3.0
| 372 | 0.016129 |
# -*- coding: utf-8 -*-
class Charset:
common_name = u'Google Fonts: Extras'
native_name = u''
def glyphs(self):
glyphs = [0xe0ff] # PUA: Font logo
glyphs += [0xeffd] # PUA: Font version number
        glyphs += [0xf000] # PUA: font ppem size indicator: run `ftview -f 1255 10 Ubuntu-Regular.ttf` to see it in action!
return glyphs
|
DiUS/Physiognomy
|
python/align_faces.py
|
Python
|
mit
| 4,189 | 0.020769 |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Philipp Wagner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys, math
from PIL import Image
import facedetect
def Distance(p1,p2):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
return math.sqrt(dx*dx+dy*dy)
def ScaleRotateTranslate(image, angle, center = None, new_center = None, scale = None, resample=Image.BICUBIC):
if (scale is None) and (center is None):
return image.rotate(angle=angle, resample=resample)
nx,ny = x,y = center
sx=sy=1.0
if new_center:
(nx,ny) = new_center
if scale:
(sx,sy) = (scale, scale)
cosine = math.cos(angle)
sine = math.sin(angle)
a = cosine/sx
b = sine/sx
c = x-nx*a-ny*b
d = -sine/sy
e = cosine/sy
f = y-nx*d-ny*e
return image.transform(image.size, Image.AFFINE, (a,b,c,d,e,f), resample=resample)
def CropFace(image, eye_left=(0,0), eye_right=(0,0), offset_pct=(0.2,0.2), dest_sz = (70,70)):
# calculate offsets in original image
offset_h = math.floor(float(offset_pct[0])*dest_sz[0])
offset_v = math.floor(float(offset_pct[1])*dest_sz[1])
# get the direction
eye_direction = (eye_right[0] - eye_left[0], eye_right[1] - eye_left[1])
# calc rotation angle in radians
rotation = -math.atan2(float(eye_direction[1]),float(eye_direction[0]))
# distance between them
dist = Distance(eye_left, eye_right)
# calculate the reference eye-width
reference = dest_sz[0] - 2.0*offset_h
# scale factor
scale = float(dist)/float(reference)
# rotate original around the left eye
image = ScaleRotateTranslate(image, center=eye_left, angle=rotation)
# crop the rotated image
crop_xy = (eye_left[0] - scale*offset_h, eye_left[1] - scale*offset_v)
crop_size = (dest_sz[0]*scale, dest_sz[1]*scale)
image = image.crop((int(crop_xy[0]), int(crop_xy[1]), int(crop_xy[0]+crop_size[0]), int(crop_xy[1]+crop_size[1])))
# resize it
image = image.resize(dest_sz, Image.ANTIALIAS)
return image
if __name__ == "__main__":
f = open(sys.argv[1], 'r')
csv = open(sys.argv[2], "w")
for line in f:
lineArray = line.split(";")
fileName = lineArray[0]
label = lineArray[1]
print "aligning %s to aligned" % (fileName)
aligned_file_name = "aligned/%s" % fileName
face = facedetect.detect_faces(fileName)['face'][0]
print(face)
CropFace(Image.open(fileName), eye_left=(face[0],face[1]), eye_right=(face[2],face[1]), offset_pct=(0.08,0.08), dest_sz=(200,200)).save(aligned_file_name)
# CropFace(Image.open(fileName), eye_left=(252,364), eye_right=(420,366), offset_pct=(0.1,0.1), dest_sz=(200,200)).save(aligned_file_name)
csv.write("%s;%s" % (aligned_file_name, label))
f.close()
csv.close()
|
claashk/python-config
|
tests/context/attribute.py
|
Python
|
gpl-3.0
| 3,198 | 0.014071 |
# -*- coding: utf-8 -*-
import unittest
from config.context import Attribute,
|
attr
class Data(object):
pass
class AttributeTestCase(unittest.TestCase):
def setUp(self):
self.data= Data()
self.data.int2= 1
self.integer= 3
self.int1= Attribute("int1", destObj= self.data, valueType=int)
self.int2= Attribute("int2",
|
destObj= self.data)
self.int3= Attribute("integer", destObj= self)
self.flt1= Attribute("flt",
destObj= self.data,
destName="float",
valueType=float )
self.flt2= Attribute("value", valueType= float)
self.str = Attribute("string", destObj=self.data)
def test_construction(self):
self.assertEqual(self.int1.name, "int1")
self.assertEqual(self.int2.name, "int2")
self.assertEqual(self.int3.name, "integer")
self.assertEqual(self.flt1.name, "flt")
self.assertEqual(self.flt2.name, "value")
self.assertEqual(self.int1.data, self.data.int1)
self.assertEqual(self.int2.data, self.data.int2)
self.assertEqual(self.int3.data, self.integer)
self.assertEqual(self.flt1.data, self.data.float)
self.assertEqual(self.flt2.data, self.flt2.value)
self.assertEqual(self.str.data, self.data.string)
def test_fromString(self):
value="123"
x=int(value)
self.int1.fromString(value)
self.int2.fromString(value)
self.int3.fromString(value)
self.str.fromString(value)
self.assertEqual(self.int1.data, x)
self.assertEqual(self.int2.data, x)
self.assertEqual(self.int3.data, x)
self.assertEqual(self.data.int1, x)
self.assertEqual(self.data.int2, x)
self.assertEqual(self.integer, x)
self.assertEqual(self.data.string, value)
value="1.23"
self.flt1.fromString(value)
self.flt2.fromString(value)
x= float(value)
self.assertEqual(self.flt1.data, x)
self.assertEqual(self.data.float, x)
self.assertEqual(self.flt2.data, x)
self.assertEqual(self.flt2.value, x)
value="a1.23"
self.assertRaises(ValueError, self.int1.fromString, value)
def test_contextInterface(self):
self.assertIsNone(self.int1.parent)
self.assertIs(self.int1, self.int1.decorator)
self.assertEqual(self.flt1.about, "")
self.assertIsNone(self.flt2.default)
self.int1.open()
self.int1.close()
self.int1.clear()
self.assertRaises(NotImplementedError, self.int2.getContext, "xx")
self.assertRaises(NotImplementedError, self.int2.insert, self.int3)
def test_attr(self):
dmc= attr("int1", destObj= self.data, valueType=int)
self.assertIs(dmc._ctx.data, self.data.int1)
def suite():
"""Get Test suite object
"""
return unittest.TestLoader().loadTestsFromTestCase(AttributeTestCase)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run( suite() )
|
luistorresm/sale-workflow
|
sale_exceptions/__openerp__.py
|
Python
|
agpl-3.0
| 2,062 | 0 |
# -*- coding: u
|
tf-8 -*-
#
#
# OpenERP, Open Source Management Solution
# Authors: Raphaël Valyi, Renato Lima
# Copyright (C) 2011 Akretion LTDA.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
|
.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{'name': 'Sale Exceptions',
'summary': 'Custom exceptions on sale order',
'version': '1.0',
'category': 'Generic Modules/Sale',
'description': """
Sale Exceptions
===============
This module allows you to attach several customizable exceptions to your
sale orders so that you can filter orders by exception type and fix them.
This is especially useful in an order importation scenario such as with
the base_sale_multi_channels module, because it is likely that a few orders
have errors when you import them (like a product not found in Odoo, a wrong
line format, etc.).
Contributors
------------
* Raphaël Valyi <raphael.valyi@akretion.com>
* Renato Lima <renato.lima@akretion.com>
* Sébastien BEAU <sebastien.beau@akretion.com>
* Guewen Baconnier <guewen.baconnier@camptocamp.com>
* Yannick Vaucher <yannick.vaucher@camptocamp.com>
""",
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com',
'depends': ['sale'],
'data': ['sale_workflow.xml',
'sale_view.xml',
'sale_exceptions_data.xml',
'wizard/sale_exception_confirm_view.xml',
'security/ir.model.access.csv',
'settings/sale.exception.csv'],
'installable': True,
}
|
bhermanmit/openmc
|
examples/python/boxes/build-xml.py
|
Python
|
mit
| 4,480 | 0.000446 |
import numpy as np
import openmc
###############################################################################
# Simulation Input File Parameters
###############################################################################
# OpenMC simulation parameters
batches = 15
inactive = 5
particles = 10000
###############################################################################
# Exporting to OpenMC materials.xml File
###############################################################################
# Instantiate some Materials and register the appropriate Nuclides
fuel1 = openmc.Material(material_id=1, name='fuel')
fuel1.set_density('g/cc', 4.5)
fuel1.add_nuclide('U235', 1.)
fuel2 = openmc.Material(material_id=2, name='depleted fuel')
fuel2.set_density('g/cc', 4.5)
fuel2.add_nuclide('U238', 1.)
moderator = openmc.Material(material_id=3, name='moderator')
moderator.set_density('g/cc', 1.0)
moderator.ad
|
d_element('H', 2.)
moderator.add_element('O', 1.)
moderator.add_s_alpha_beta('c_H_in_H2O')
# Instant
|
iate a Materials collection and export to XML
materials_file = openmc.Materials([fuel1, fuel2, moderator])
materials_file.export_to_xml()
###############################################################################
# Exporting to OpenMC geometry.xml file
###############################################################################
# Instantiate planar surfaces
x1 = openmc.XPlane(surface_id=1, x0=-10)
x2 = openmc.XPlane(surface_id=2, x0=-7)
x3 = openmc.XPlane(surface_id=3, x0=-4)
x4 = openmc.XPlane(surface_id=4, x0=4)
x5 = openmc.XPlane(surface_id=5, x0=7)
x6 = openmc.XPlane(surface_id=6, x0=10)
y1 = openmc.YPlane(surface_id=11, y0=-10)
y2 = openmc.YPlane(surface_id=12, y0=-7)
y3 = openmc.YPlane(surface_id=13, y0=-4)
y4 = openmc.YPlane(surface_id=14, y0=4)
y5 = openmc.YPlane(surface_id=15, y0=7)
y6 = openmc.YPlane(surface_id=16, y0=10)
z1 = openmc.ZPlane(surface_id=21, z0=-10)
z2 = openmc.ZPlane(surface_id=22, z0=-7)
z3 = openmc.ZPlane(surface_id=23, z0=-4)
z4 = openmc.ZPlane(surface_id=24, z0=4)
z5 = openmc.ZPlane(surface_id=25, z0=7)
z6 = openmc.ZPlane(surface_id=26, z0=10)
# Set vacuum boundary conditions on outside
for surface in [x1, x6, y1, y6, z1, z6]:
surface.boundary_type = 'vacuum'
# Instantiate Cells
inner_box = openmc.Cell(cell_id=1, name='inner box')
middle_box = openmc.Cell(cell_id=2, name='middle box')
outer_box = openmc.Cell(cell_id=3, name='outer box')
# Use each set of six planes to create solid cube regions. We can then use these
# to create cubic shells.
inner_cube = +x3 & -x4 & +y3 & -y4 & +z3 & -z4
middle_cube = +x2 & -x5 & +y2 & -y5 & +z2 & -z5
outer_cube = +x1 & -x6 & +y1 & -y6 & +z1 & -z6
outside_inner_cube = -x3 | +x4 | -y3 | +y4 | -z3 | +z4
# Use surface half-spaces to define regions
inner_box.region = inner_cube
middle_box.region = middle_cube & outside_inner_cube
outer_box.region = outer_cube & ~middle_cube
# Register Materials with Cells
inner_box.fill = fuel1
middle_box.fill = fuel2
outer_box.fill = moderator
# Instantiate root universe
root = openmc.Universe(universe_id=0, name='root universe')
root.add_cells([inner_box, middle_box, outer_box])
# Instantiate a Geometry, register the root Universe, and export to XML
geometry = openmc.Geometry(root)
geometry.export_to_xml()
###############################################################################
# Exporting to OpenMC settings.xml File
###############################################################################
# Instantiate a Settings object, set all runtime parameters, and export to XML
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
# Create an initial uniform spatial source distribution over fissionable zones
uniform_dist = openmc.stats.Box(*outer_cube.bounding_box, only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
settings_file.export_to_xml()
###############################################################################
# Exporting to OpenMC plots.xml File
###############################################################################
plot = openmc.Plot(plot_id=1)
plot.origin = [0, 0, 0]
plot.width = [20, 20]
plot.pixels = [200, 200]
plot.color_by = 'cell'
# Instantiate a Plots collection and export to XML
plot_file = openmc.Plots([plot])
plot_file.export_to_xml()
|
ez-p/madness
|
tournament/engine/region.py
|
Python
|
gpl-3.0
| 2,088 | 0.005747 |
"""
Copyright 2016, Paul Powell, All rights reserved.
"""
import team
import round
class Region:
def __init__(self, name, teams, algorithm):
self.initialize(name, teams)
self.name = name
self.rounds = []
self.algorithm = algorithm
self.final = None
def __call__(self, madness):
round1 = round.Round(s
|
elf.name, 1, madness, self.algorithm, self.matchups)
round2 = round1.go()
round3 = round2.go()
round4 = round3.go()
self.rounds = [round1, round2, round3, round4]
# Special hacks for final round
self.final = self.algorithm(round4.games[0], madness)
round4.winner = self.final.winner
round4.results.append(self.final)
return self.final()[0]
def initiali
|
ze(self, name, teams):
# Looks like [((1,16), (8,9)), ((5,12), (4,13)), ((6,11), (3,14)), ((7,10), (2,15))]
sregion = name
game1 = (team.Team(teams[1], sregion, 1), team.Team(teams[16], sregion, 16))
game2 = (team.Team(teams[8], sregion, 8), team.Team(teams[9], sregion, 9))
game3 = (team.Team(teams[5], sregion, 5), team.Team(teams[12], sregion, 12))
game4 = (team.Team(teams[4], sregion, 4), team.Team(teams[13], sregion, 13))
game5 = (team.Team(teams[6], sregion, 6), team.Team(teams[11], sregion, 11))
game6 = (team.Team(teams[3], sregion, 3), team.Team(teams[14], sregion, 14))
game7 = (team.Team(teams[7], sregion, 7), team.Team(teams[10], sregion, 10))
game8 = (team.Team(teams[2], sregion, 2), team.Team(teams[15], sregion, 15))
self.matchups = [(game1, game2), (game3, game4), (game5, game6), (game7, game8)]
def set_sf(self, winner, second):
for matchup in self.matchups:
for game in matchup:
for team in game:
if team.name == winner:
print "found winner"
team.sf = 3
if team.name == second:
print "found second"
team.sf = 2
|
autosportlabs/RaceCapture_App
|
autosportlabs/racecapture/views/status/statusview.py
|
Python
|
gpl-3.0
| 14,474 | 0.002418 |
#
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
import kivy
kivy.require('1.10.0')
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.app import Builder
from kivy.uix.screenmanager import Screen
from kivy.uix.treeview import TreeView, TreeViewLabel
from kivy.uix.label import Label
from kivy.properties import ObjectProperty, StringProperty, NumericProperty
from datetime import timedelta
from utils import *
from fieldlabel import AutoShrinkFieldLabel
from kivy.logger import LoggerHistory, Logger
from autosportlabs.racecapture.theme.color import ColorScheme
from autosportlabs.uix.toast.kivytoast import toast
from main import RaceCaptureApp
from autosportlabs.widgets.scrollcontainer import ScrollContainer
STATUS_KV_FILE = 'autosportlabs/racecapture/views/status/statusview.kv'
RAW_STATUS_BGCOLOR_1 = ColorScheme.get_background()
RAW_STATUS_BGCOLOR_2 = ColorScheme.get_dark_background()
class StatusLabel(AutoShrinkFieldLabel):
backgroundColor = ObjectProperty(RAW_STATUS_BGCOLOR_1)
class StatusTitle(StatusLabel):
pass
class StatusValue(StatusLabel):
def __init__(self, **kwargs):
super(StatusLabel, self).__init__(**kwargs)
self.shorten = False
# Simple extension of Kivy's TreeViewLabel so we can add on our own properties
# to it for easier view tracking
class LinkedTreeViewLabel(TreeViewLabel):
id = None
# Shows RCP's entire status, getting the values by polling RCP for its status
class StatusView(Screen):
_bg_current = RAW_STATUS_BGCOLOR_1
# Dict object that contains the status of RCP
status = ObjectProperty(None)
# Currently selected menu item
_selected_item = None
_menu_built = False
# Track manager for getting track name
track_manager = None
# Connection to status pump
_status_pump = None
# Used for building the left side menu
_menu_keys = {
"app": "Application",
"system": "Device",
"GPS": "GPS",
"cell": "Cellular",
"bt": "Bluetooth",
"logging": "Logging",
"track": "Track",
"telemetry": "Telemetry",
"wifi": "WiFi",
"imu": "Accel/Gyro",
}
# Dict for getting English text for status enums
_enum_keys = {
'GPS': {
'init': [
'Not initialized',
'Initialized',
'Error initializing'
],
'qual': [
'No fix',
'Weak',
'Acceptable',
'Strong'
]
},
'cell': {
'init': [
'Not initialized',
'Initialized',
'Searching',
'Denied',
'Registered'
],
'sig_str': [
'Unknown',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'Marginal',
'OK',
'OK',
'OK',
'OK',
|
'OK',
'Good',
'Good',
'Good',
'Good',
'Good',
'Excellent',
'Excel
|
lent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent',
'Excellent'
]
},
'bt': {
'init': [
'Not initialized',
'Initialized',
'Error initializing'
]
},
'wifi': {
'init': [
'Not initialized',
'Initialized'
]
},
'logging': {
'status': [
'Not logging',
'Logging',
'Error logging'
]
},
'track': {
'status': [
'Searching',
'Fixed start/finish',
'Detected',
'Manually Set'
]
},
'telemetry': {
'status': [
'Idle',
'Connected',
'Connection terminated',
'Device ID rejected',
'Data connection failed. SIM card is valid, either no data plan is associated or the plan has expired.',
'Failed to connect to server',
'Data connection failed. APN settings possibly wrong.',
'Unable to join network'
]
}
}
_menu_node = None
menu_select_color = ColorScheme.get_primary()
def __init__(self, track_manager, status_pump, **kwargs):
Builder.load_file(STATUS_KV_FILE)
super(StatusView, self).__init__(**kwargs)
self.track_manager = track_manager
self.register_event_type('on_tracks_updated')
self._menu_node = self.ids.menu
self._menu_node.bind(selected_node=self._on_menu_select)
status_pump.add_listener(self.status_updated)
self._build_core_menu()
def _build_core_menu(self):
# build application status node
self._append_menu_node('Application', 'app')
# select the first node in the tree.
self._menu_node.select_node(self._menu_node.root.nodes[0])
def _build_menu(self):
if self._menu_built:
return
for item in self.status.iterkeys():
text = self._menu_keys[item] if item in self._menu_keys else item
self._append_menu_node(text, item)
self._menu_built = True
def _append_menu_node(self, text, item):
label = LinkedTreeViewLabel(text=text)
label.id = item
label.color_selected = self.menu_select_color
return self._menu_node.add_node(label)
def _on_menu_select(self, instance, value):
self._selected_item = value.id
self.update()
def status_updated(self, status):
self.status = status['status']
def update(self):
_bg_current = RAW_STATUS_BGCOLOR_1
if self._selected_item in self._menu_keys:
text = self._menu_keys[self._selected_item]
else:
text = self._selected_item
self.ids.name.text = text
self.ids.status_grid.clear_widgets()
function_name = ('render_' + self._selected_item).lower()
# Generic way of not having to create a long switch or if/else block
# to call each render function
if function_name in dir(self):
getattr(self, function_name)()
else:
self.render_generic(self._selected_item)
def render_generic(self, section):
status = self.status[section]
for item, value in status.iteritems():
self._add_item(item, value)
def render_app(self):
label_widget = StatusTitle(text='Application Log')
self.ids.status_grid.add_widget(label_widget)
self.ids.status_grid.add_widget(ApplicationLogView())
self._add_item('Application Version', RaceCaptureApp.get_app_version())
def render_system(self):
if 'git_info' in self.status['system']:
version = self.status['system']['g
|
jhprinz/openpathsampling
|
openpathsampling/visualize.py
|
Python
|
lgpl-2.1
| 92,760 | 0.000571 |
import openpathsampling.pathmover_inout
import svgwrite as svg
from svgwrite.container import Group
import openpathsampling as paths
import os
import ujson
from collections import namedtuple, OrderedDict, Counter
# TODO: Move TreeRenderer and Builder to a different file ???
class TreeRenderer(svg.Drawing):
"""
    Helper class to render SVG drawings.
    Its main use is that it is difficult to scale coordinates in SVG
    without distorting the content. What we want is to move objects further
    apart or closer together while maintaining their size.
"""
def __init__(self):
super(TreeRenderer, self).__init__()
self.scale_x = 20.0
self.scale_y = 20.0
self.horizontal_gap = 0.05
def add_css_file(self, css_file='vis'):
css_file_name = os.path.join(
paths.resources_directory, css_file + '.css')
with open(css_file_name) as content_file:
vis_css = content_file.read()
# Add the CSS Stylesheet
self.defs.add(self.style(
vis_css
))
def add_css(self, css_style):
self.defs.add(self.style(
css_style
))
@staticmethod
def css_class(css_class):
"""
Generate a string that can be passed to the SVG class attribute
Parameters
----------
css_class : list of str
the class names as a list
Returns
-------
str
the actual string
"""
return ' '.join(css_class)
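    # Editor's note (illustrative): e.g. TreeRenderer.css_class(['block',
    # 'selected']) returns the string 'block selected'.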
def x(self, x):
return self.w(x)
def y(self, y):
return self.h(y)
def w(self, y):
return self.scale_x * y
def h(self, y):
return self.scale_y * y
def xy(self, x, y):
return self.x(x), self.y(y)
def wh(self, w, h):
return self.w(w), self.h(h)
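    # Editor's note (illustrative, values are examples only): the scale
    # factors spread elements apart without changing their drawn size.
    # With scale_x = 40.0 and the default scale_y = 20.0, self.xy(2, 3)
    # maps the logical position (2, 3) to pixel coordinates (80.0, 60.0).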
def connector(self, x, y, text="", css_class=None):
if css_class is None:
css_class = list()
css_class += ['connector']
return self.block(
x, y, text, False, False, css_class=css_class)
def block(self, x, y, text="",
extend_right=True, extend_left=True,
extend_top=False, extend_bottom=False,
w=1.0, color=None, css_class=None, data=None):
if css_class is None:
css_class = list()
css_class += ['block']
padding = self.horizontal_gap
group = self.g(
class_=self.css_class(css_class)
)
if color is not None:
adds = {'fill': color}
else:
adds = {}
if data is not None:
group.set_desc(desc=ujson.dumps(data))
group.add(self.rect(
insert=self.xy(x - 0.5 + padding, y - 0.3),
size=self.wh(1.0 * w - 2 * padding, 0.6),
**adds
))
if ex
|
tend_left:
group.add(self.circle(
center=self.xy(x - 0.5, y),
r=self.w(padding)
|
))
if extend_right:
group.add(self.circle(
center=(self.xy(x + w - 0.5, y)),
r=self.w(padding)
))
if extend_top:
group.add(self.circle(
center=self.xy(x, y - 0.3),
r=self.w(padding)
))
if extend_bottom:
group.add(self.circle(
center=(self.xy(x + w - 1.0, y + 0.3)),
r=self.w(padding)
))
group.add(self.text(
text=str(text),
insert=self.xy(x + (w - 1.0) / 2.0, y)
))
return group
def horizontal_region(
self, x, y, w=1.0, text="",
extend_right=False, extend_left=False, css_class=None):
if css_class is None:
css_class = list()
css_class += ['h-region']
if w == 0:
return []
padding = self.horizontal_gap
group = Group(
class_=self.css_class(css_class)
)
group.add(self.line(
start=self.xy(x - 0.5 + padding, y),
end=self.xy(x - 0.5 + w - padding, y)
))
if extend_left:
group.add(self.circle(
center=self.xy(x - 0.5, y),
r=self.w(padding)
))
group.add(self.line(
start=self.xy(x - 0.5, y - 0.3),
end=self.xy(x - 0.5, y + 0.3)
))
if extend_right:
group.add(self.circle(
center=(self.xy(x + w - 0.5, y)),
r=self.w(padding)
))
group.add(self.line(
start=self.xy(x + w - 0.5, y - 0.3),
end=self.xy(x + w - 0.5, y + 0.3)
))
text = str(text)
if self.w(w) < len(text) * 5:
text = text[0]
if self.w(w) < 10:
text = ''
group.add(self.text(
text=str(text),
insert=self.xy(x + (w - 1.0) / 2.0, y),
class_='shadow'
))
group.add(self.text(
text=str(text),
insert=self.xy(x + (w - 1.0) / 2.0, y)
))
return group
def vertical_region(
self, x, y, w=1.0, text="",
extend_top=True, extend_bottom=True, css_class=None):
if css_class is None:
css_class = list()
css_class += ['v-region']
# padding = self.horizontal_gap
width = 0.2
gap = 0.0
radius = 0.07
group = Group(
class_=self.css_class(css_class)
)
group.add(self.line(
start=self.xy(x, y - 0.5 + gap),
end=self.xy(x, y + w - 1 + 0.5 - gap)
))
if extend_top:
group.add(self.circle(
center=self.xy(x, y - 0.5 + gap),
r=self.w(radius)
))
group.add(self.line(
start=self.xy(x - 1.0 * width, y - 0.5 + gap),
end=self.xy(x + width, y - 0.5 + gap)
))
if extend_bottom:
group.add(self.circle(
center=(self.xy(x, y + (w - 1.0) + 0.5 - gap)),
r=self.w(radius)
))
group.add(self.line(
start=self.xy(x - 1.0 * width, y + w - 1.0 + 0.5 - gap),
end=self.xy(x + width, y + w - 1.0 + 0.5 - gap)
))
group.add(self.text(
text=str(text),
insert=self.xy(x - width, y + (w - 1.0) / 2.0)
))
return group
def shade(self, x, y, w, css_class=None, color=None):
if css_class is None:
css_class = list()
css_class += ['shade']
adds = {}
if color is not None:
adds = {'fill': color}
group = self.g(
class_=self.css_class(css_class)
)
group.add(self.rect(
insert=self.xy(x - 0.6, y + 0.10),
size=self.wh(w + 0.2, 0.25),
fill='white'
))
group.add(self.rect(
insert=self.xy(x - 0.6, y - 0.35),
size=self.wh(w + 0.2, 0.25),
fill='white'
))
group.add(self.rect(
insert=self.xy(x - 0.5, y + 0.15),
size=self.wh(w, 0.15),
**adds
))
group.add(self.rect(
insert=self.xy(x - 0.5, y - 0.30),
size=self.wh(w, 0.15),
**adds
))
return group
def vertical_connector(self, x, y1, y2, css_class=None):
if css_class is None:
css_class = list()
css_class += ['v-connector']
padding = self.horizontal_gap
return self.line(
class_=self.css_class(css_class),
start=self.xy(x - 0.5, y1 + padding),
end=self.xy(x - 0.5, y2 - padding)
)
def vertical_hook(self, x1, y1, x2, y2, css_class=None):
if css_class is None:
css_class = list()
css_class += ['v-hook']
padding = self.horizontal_gap
return self.line(
class_=self.css_class(css_class),
start=self.xy(x1, y1 + pad
|
akaszynski/vtkInterface
|
examples/01-filter/boolean-operations.py
|
Python
|
mit
| 3,395 | 0.000884 |
"""
Boolean Operations
~~~~~~~~~~~~~~~~~~
Perform boolean operations with closed surfaces (intersect, cut, etc.).
Boolean/topological operation methods (intersect, cut, etc.) are implemented
for :class:`pyvista.PolyData` mesh types only and are accessible directly from
any :class:`pyvista.PolyData` mesh. Check out :class:`pyvista.PolyDataFilters`
and take a look at the following filters:
* :func:`pyvista.PolyDataFilters.boolean_add`
* :func:`pyvista.PolyDataFilters.
|
boolean_cut`
* :func:`pyvista.PolyDataFilters.boolean_difference`
* :func:`pyvista.PolyDataFilters.boolean_union`
For merging, the ``+`` operator can be used between any two meshes in PyVista;
it simply calls the ``.merge()`` filter to combine them.
Similarly, the ``-`` operator can be used between any two :class:`pyvista.PolyData`
meshes in PyVista to cut the first mesh by the second.
"""
# sphinx_gallery_thumbnail_number = 6
import pyvista as pv
import nu
|
mpy as np
def make_cube():
x = np.linspace(-0.5, 0.5, 25)
grid = pv.StructuredGrid(*np.meshgrid(x, x, x))
return grid.extract_surface().triangulate()
# Create two example PolyData meshes for boolean operations
sphere = pv.Sphere(radius=0.65, center=(0, 0, 0))
cube = make_cube()
p = pv.Plotter()
p.add_mesh(sphere, color="yellow", opacity=0.5, show_edges=True)
p.add_mesh(cube, color="royalblue", opacity=0.5, show_edges=True)
p.show()
###############################################################################
# Boolean Add
# +++++++++++
#
# Add all of the two meshes together using the
# :func:`pyvista.PolyDataFilters.boolean_add` filter or the ``+`` operator.
#
# Order of operations does not matter for boolean add as the entirety of both
# meshes are appended together.
add = sphere + cube
add.plot(opacity=0.5, color=True, show_edges=True)
###############################################################################
# Boolean Cut
# +++++++++++
#
# Perform a boolean cut of ``a`` using ``b`` with the
# :func:`pyvista.PolyDataFilters.boolean_cut` filter or the ``-`` operator
# since both meshes are :class:`pyvista.PolyData`.
#
# Order of operations does not matter for boolean cut.
cut = cube - sphere
p = pv.Plotter()
p.add_mesh(cut, opacity=0.5, show_edges=True, color=True)
p.show()
###############################################################################
# Boolean Difference
# ++++++++++++++++++
#
# Combine two meshes and retain only the volume in common between them
# using the :func:`pyvista.PolyDataFilters.boolean_difference` method.
#
# Note that the order of operations for a boolean difference will affect the
# results.
diff = sphere.boolean_difference(cube)
p = pv.Plotter()
p.add_mesh(diff, opacity=0.5, show_edges=True, color=True)
p.show()
###############################################################################
diff = cube.boolean_difference(sphere)
p = pv.Plotter()
p.add_mesh(diff, opacity=0.5, show_edges=True, color=True)
p.show()
###############################################################################
# Boolean Union
# +++++++++++++
#
# Combine two meshes and attempt to create a manifold mesh using the
# :func:`pyvista.PolyDataFilters.boolean_union` method.
#
# Order of operations does not matter for boolean union.
union = sphere.boolean_union(cube)
p = pv.Plotter()
p.add_mesh(union, opacity=0.5, show_edges=True, color=True)
p.show()
|
conda-forge/conda-forge.github.io
|
scripts/update_teams.py
|
Python
|
bsd-3-clause
| 4,482 | 0.00357 |
#!/usr/bin/env conda-execute
# conda execute
# env:
# - python 2.7.*
# - conda-smithy
# - pygithub 1.*
# - six
# - conda-build
# channels:
# - conda-forge
# run_with: python
from __future__ import print_function
import argparse
import collections
import os
import six
from github import Github
import github
import yaml
from conda_build.metadata import MetaData
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('feedstocks_clone', help="The location of the feedstocks directory within the conda-forge/feedstocks clone.")
args = parser.parse_args()
from conda_smithy.github import gh_token
token = gh_token()
gh = Github(token)
conda_forge = gh.get_organization('conda-forge')
teams = {team.name: team for team in conda_forge.get_teams()}
feedstocks_path = args.feedstocks_clone
packages_visited = set()
all_members = set()
from random import choice
superlative = ['awesome', 'slick', 'formidable', 'awe-inspiring', 'breathtaking',
'magnificent', 'wonderous', 'stunning', 'astonishing', 'superb',
'splendid', 'impressive', 'unbeatable', 'excellent', 'top', 'outstanding',
'exalted', 'standout', 'smashing']
# Go through each of the feedstocks and ensure that the team is up to date and that
# there is nobody in the team who doesn't belong (i.e. isn't in the maintainers list).
for package_name in os.listdir(feedstocks_path):
print("Checking {}".format(package_name))
packages_visited.add(package_name)
feedstock = os.path.join(feedstocks_path, package_name)
recipe = os.path.join(feedstock, 'recipe', 'meta.yaml')
if not os.path.exists(recipe):
print("The {} feedstock is recipe less".format(package_name))
continue
meta = MetaData(os.path.dirname(recipe))
contributors = meta.meta.get('extra', {}).ge
|
t('recipe-maintainers', [])
if not isinstance(contributors, list):
# Deal with a contribution list which has dashes but no spaces
|
# (e.g. https://github.com/conda-forge/pandoc-feedstock/issues/1)
contributors = [contributors.lstrip('-')]
contributors = set(handle.lower() for handle in contributors)
all_members.update(contributors)
# If the team already exists, get hold of it.
team = teams.get(package_name)
if not team:
print("Team {} does not exist in conda-forge organization".format(package_name))
continue
current_members = team.get_members()
member_handles = set([member.login.lower() for member in current_members])
for new_member in contributors - member_handles:
headers, data = team._requester.requestJsonAndCheck(
"PUT",
team.url + "/memberships/" + new_member)
for old_member in member_handles - contributors:
print("AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}".format(old_member, package_name))
# The following works, it is just a bit scary!
# headers, data = team._requester.requestJsonAndCheck(
# "DELETE",
# team.url + "/memberships/" + old_member)
# Create and administer the all-members team.
team = teams.get('all-members')
if not team:
raise RuntimeError("Team all-members does not exist in conda-forge organization")
current_members = team.get_members()
member_handles = set([member.login.lower() for member in current_members])
for new_member in all_members - member_handles:
headers, data = team._requester.requestJsonAndCheck(
"PUT",
team.url + "/memberships/" + new_member)
for old_member in member_handles - all_members:
print("AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM all-members".format(old_member))
# Remove any teams which don't belong any more (because there is no longer a feedstock).
for team_to_remove in set(teams.keys()) - set(packages_visited):
if team_to_remove in ['Core',
'conda-forge.github.io',
'all-members',
'conda-forge-anvil',
'conda-forge-webservices',
'staged-recipes']:
print('Keeping ', team_to_remove)
continue
print("THE {} TEAM NEEDS TO BE REMOVED.".format(team_to_remove))
# The following works, it is just a bit scary!
# teams[team_to_remove].delete()
|
rsalmaso/django-cms
|
cms/admin/forms.py
|
Python
|
bsd-3-clause
| 49,683 | 0.001751 |
from django import forms
from django.apps import apps
from django.contrib.auth import get_permission_codename, get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.template.defaultfilters import slugify
from django.utils.encoding import force_str
from django.utils.translation import gettext, gettext_lazy as _
from cms import api
from cms.apphook_pool import apphook_pool
from cms.cache.permissions import clear_permission_cache
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_DIRTY, ROOT_USER_LEVEL
from cms.exceptions import PluginLimitReached
from cms.extensions import extension_pool
from cms.forms.validators import (
validate_overwrite_url, validate_relative_url, validate_url_uniqueness,
)
from cms.forms.widgets import (
AppHookSelect, ApplicationConfigSelect, UserSelectAdminWidget,
)
from cms.models import (
CMSPlugin, GlobalPagePermission, Page, PagePermission, PageType, PageUser,
PageUserGroup, Placeholder, Title, TreeNode,
)
from cms.models.permissionmodels import User
from cms.plugin_pool import plugin_pool
from cms.signals.apphook import set_restart_trigger
from cms.utils.compat.forms import UserChangeForm
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import get_language_list, get_language_object
from cms.utils.permissions import (
get_current_user, get_subordinate_groups, get_subordinate_users,
get_user_permission_level,
)
from menus.menu_pool import menu_pool
def get_permission_accessor(obj):
User = get_user_model()
if isinstance(obj, (PageUser, User,)):
rel_name = 'user_permissions'
else:
rel_name = 'permissions'
return getattr(obj, rel_name)
def get_page_changed_by_filter_choices():
# This is not site-aware
# Been like this forever
# Would be nice for it to filter out by site
values = (
Page
.objects
.filter(publisher_is_draft=True)
.distinct()
.order_by('changed_by')
.values_list('changed_by', flat=True)
)
yield ('', _('All'))
for value in values:
yield (value, value)
def get_page_template_filter_choices():
yield ('', _('All'))
for value, name in get_cms_setting('TEMPLATES'):
yield (value, name)
def save_permissions(data, obj):
models = (
(Page, 'page'),
(PageUser, 'pageuser'),
(PageUserGroup, 'pageuser'),
(PagePermission, 'pagepermission'),
)
if not obj.pk:
# save obj, otherwise we can't assign permissions to him
obj.save()
permission_accessor = get_permission_accessor(obj)
for model, name in models:
content_type = ContentType.objects.get_for_model(model)
for key in ('add', 'change', 'delete'):
# add permission `key` for model `model`
codename = get_permission_codename(key, model._meta)
permission = Permission.objects.get(content_type=content_type, codename=codename)
field = 'can_%s_%s' % (key, name)
if data.get(field):
permission_accessor.add(permission)
elif field in data:
permission_accessor.remove(permission)
class CopyPermissionForm(forms.Form):
"""
Holds the specific field for permissions
"""
copy_permissions = forms.BooleanField(
label=_('Copy permissions'),
required=False,
initial=True,
)
class BasePageForm(forms.ModelForm):
_user = None
_site = None
_language = None
title = forms.CharField(label=_("Title"), max_length=255, widget=forms.TextInput(),
help_text=_('The default title'))
slug = forms.CharField(label=_("Slug"), max_length=255, widget=forms.TextInput(),
help_text=_('The part of the title that is used in the URL'))
menu_title = forms.CharField(label=_("Menu Title"), widget=forms.TextInput(),
help_text=_('Overwrite what is displayed in the menu'), required=False)
page_title = forms.CharField(label=_("Page Title"), widget=forms.TextInput(),
help_text=_('Overwrites what is displayed at the top of your browser or in bookmarks'),
required=False)
meta_description = forms.CharField(label=_('Description meta tag'), required=False,
widget=forms.Textarea(attrs={'maxlength': '320', 'rows': '4'}),
help_text=_('A description of the page used by search engines.'),
max_length=320)
class Meta:
model = Page
fields = []
def clean_slug(self):
slug = slugify(self.cleaned_data['slug'])
if not slug:
raise ValidationError(_("Slug must not be empty."))
return slug
class AddPageForm(BasePageForm):
source = forms.ModelChoiceField(
label=_(u'Page type'),
queryset=Page.objects.filter(
is_page_type=True,
publisher_is_draft=True,
),
required=False,
)
parent_node = forms.ModelChoiceField(
queryset=TreeNode.objects.all(),
required=False,
widget=forms.HiddenInput(),
)
class Meta:
model = Page
fields = ['source']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
source_field = self.fields.get('source')
if not source_field or source_field.widget.is_hidden:
return
root_page = PageType.get_root_page(site=self._site)
if root_page:
# Set the choicefield's choices to the va
|
rious page_types
descendants = root_page.get_descendant_pages().filter(is_page_type=True)
titles = Title.objects.filter(page__in=descendants, language=self._language)
choices = [('', '---------')]
choices.extend((title.page_id, title.title) for title in titles)
source_field.choices = choices
else:
|
choices = []
if len(choices) < 2:
source_field.widget = forms.HiddenInput()
def clean(self):
data = self.cleaned_data
if self._errors:
# Form already has errors, best to let those be
# addressed first.
return data
parent_node = data.get('parent_node')
if parent_node:
slug = data['slug']
parent_path = parent_node.item.get_path(self._language)
path = u'%s/%s' % (parent_path, slug) if parent_path else slug
else:
path = data['slug']
try:
# Validate the url
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
)
except ValidationError as error:
self.add_error('slug', error)
else:
data['path'] = path
return data
def clean_parent_node(self):
parent_node = self.cleaned_data.get('parent_node')
if parent_node and parent_node.site_id != self._site.pk:
raise ValidationError("Site doesn't match the parent's page site")
return parent_node
def create_translation(self, page):
data = self.cleaned_data
title_kwargs = {
'page': page,
'language': self._language,
'slug': data['slug'],
'path': data['path'],
'title': data['title'],
}
if 'menu_title' in data:
title_kwargs['menu_title'] = data['menu_title']
if 'page_title' in data:
title_kwargs['page_title'] = data['page_title']
if 'meta_description' in data:
title_kwargs['meta_description'] = data['meta_description']
return api.create_title(**title_kwargs)
def from_source(self, source,
|
digifant/eMonitor
|
emonitor/signals.py
|
Python
|
bsd-3-clause
| 1,692 | 0.002364 |
from blinker import Namespace
import logging
import json
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MySignal:
def __init__(self):
self.signals = {}
self.signal = Namespace()
def init_app(self, app):
|
pass
def addSignal(self, classname, option):
logger.debug('add signal {}.{}'.format(classname, opti
|
on))
if '{}.{}'.format(classname, option) not in self.signals.keys():
self.signals['{}.{}'.format(classname, option)] = self.signal.signal('{}.{}'.format(classname, option))
def send(self, classname, option, **extra):
logger.debug('send signal {}.{} with: {}'.format(classname, option, extra))
logger.info('send signal {}.{}'.format(classname, option))
if '{}.{}'.format(classname, option) in self.signals.keys():
payload = '{}.{}'.format(classname, option)
if extra:
extra['sender'] = payload
payload = json.dumps(extra)
self.signals['{}.{}'.format(classname, option)].send(str(payload))
def connect(self, classname, option, func):
logger.debug('connect signal {}.{} with func: {}()'.format(classname, option, func.__name__))
if not '{}.{}'.format(classname, option) in self.signals.keys():
self.signals['{}.{}'.format(classname, option)] = self.signal.signal('{}.{}'.format(classname, option))
self.signals['{}.{}'.format(classname, option)].connect(func)
def disconnect(self, classname, option, func):
if '{}.{}'.format(classname, option) in self.signals.keys():
self.signals['{}.{}'.format(classname, option)].disconnect(func)
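# --- Illustrative usage sketch (editor's addition; the event names and the
# handler are assumptions, not part of the original module) ---
def _example_signal_wiring():
    sig = MySignal()
    def on_alarm_update(sender):
        logger.info('handled %s', sender)
    sig.connect('alarm', 'update', on_alarm_update)
    # keyword arguments are JSON-encoded and delivered as the sender payload
    sig.send('alarm', 'update', id=1, state='active')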
|
takearest118/coconut
|
common/hashers.py
|
Python
|
gpl-3.0
| 1,030 | 0.000971 |
# -*- coding: utf-8 -*-
import re
import string
import random
import pbkdf2
HASHING_ITERATIONS = 400
ALLOWED_IN_SALT = string.ascii_letters + string.digits + './'
ALLOWD_PASSWORD_PATTERN = r'[A-Za-z0-9@#$%^&+=]{8,}'
def generate_random_string(len=12, allowed_chars=string.ascii_letters+string.digits):
return ''.join(random.choice(allowed_chars) for i in range(len))
def make_password(password=None):
if password is None:
raise ValueError('password is required')
sa
|
lt = generate_random_string(len=32, allowed_chars=ALLOWED_IN_SALT)
return pbkdf2.crypt(password, salt=salt, iterations=HASHING_ITERATIONS)
def check_password(password, hashed_password):
return hashed_password == pbkdf2.crypt(password, hashed_password)
def validate_password(password=None):
"""
ALLOWED_PASSWORD_PATTERN = r'[A-Za-z0-9@#$%^&+=]{8,}'
|
"""
if password is None:
raise ValueError('password is required')
if re.match(ALLOWD_PASSWORD_PATTERN, password):
return True
return False
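# --- Illustrative usage sketch (editor's addition; assumes the pbkdf2
# package is installed) ---
def _example_password_flow():
    hashed = make_password('S3curePass')           # random salt, 400 iterations
    assert check_password('S3curePass', hashed)    # correct password verifies
    assert not check_password('WrongPass1', hashed)
    assert validate_password('S3curePass')         # 8+ allowed characters
    assert not validate_password('short')          # too short for the pattern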
|
AutorestCI/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/models/partition_health.py
|
Python
|
mit
| 2,612 | 0.002297 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .entity_health import EntityHealth
class PartitionHealth(EntityHealth):
"""Information about the health of a Service Fabric partition.
:param aggregated_health_state: Possible values include: 'Invalid', 'Ok',
'Warning', 'Error', 'Unknown'
:type aggregated_health_state: str or :class:`enum
<azure.servicefabric.models.enum>`
:param health_events: The list of health events reported on the entity.
:type health_events: list of :class:`HealthEvent
<azure.servicefabric.models.HealthEvent>`
:param unhealthy_evaluations:
:type unhealthy_evaluations: list of :class:`HealthEvaluationWrapper
<azure.servicefabric.models.HealthEvaluationWrapper>`
:param health_statistics:
:type health_statistics: :class:`HealthStatistics
<azure.servicefabric.models.HealthStatistics>`
:param partition_id:
:type partition_id: str
:param replica_health_states: The list of replica health states associated
with the partition.
:type replica_health_states: list of :class:`ReplicaHealthState
<azure.servicefabric.models.ReplicaHealthState>`
"""
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'health_events': {'key'
|
: 'HealthEvents', 'type': '[HealthEvent]'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[Health
|
EvaluationWrapper]'},
'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'},
'partition_id': {'key': 'PartitionId', 'type': 'str'},
'replica_health_states': {'key': 'ReplicaHealthStates', 'type': '[ReplicaHealthState]'},
}
def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id=None, replica_health_states=None):
super(PartitionHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics)
self.partition_id = partition_id
self.replica_health_states = replica_health_states
|
joegomes/deepchem
|
examples/chembl/chembl_graph_conv.py
|
Python
|
mit
| 1,974 | 0.00152 |
"""
Script that trains graph-conv models on the ChEMBL dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
from chembl_datasets import load_chembl
# Load ChEMBL dataset
chembl_tasks, datasets, transformers = load_chembl(
shard_size=2000, featurizer="GraphConv", set="5thresh", split="random")
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
# Do setup required for tf/keras models
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 128
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(128, n_feat, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.GraphConv(128, 128, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=
|
1e-5, mode=1)
|
)
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(256, 128, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphRegressor(
graph_model,
len(chembl_tasks),
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(train_dataset, nb_epoch=20)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
print("Test scores")
print(test_scores)
|
pcmoritz/ray-1
|
rllib/agents/dreamer/dreamer_model.py
|
Python
|
apache-2.0
| 19,097 | 0.000052 |
import numpy as np
from typing import Any, List, Tuple
from ray.rllib.models.torch.misc import Reshape
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.framework import TensorType
torch, nn = try_import_torch()
if torch:
from torch import distributions as td
from ray.rllib.agents.dreamer.utils import Linear, Conv2d, \
ConvTranspose2d, GRUCell, TanhBijector
ActFunc = Any
# Encoder, part of PlaNET
class ConvEncoder(nn.Module):
"""Standard Convolutional Encoder for Dreamer. This encoder is used
    to encode images from an environment into a latent state for the
RSSM model in PlaNET.
"""
def __init__(self,
depth: int = 32,
act: ActFunc = None,
shape: Tuple[int] = (3, 64, 64)):
"""Initializes Conv Encoder
Args:
depth (int): Number of channels in the first conv layer
act (Any): Activation for Encoder, default ReLU
shape (List): Shape of observation input
"""
super().__init__()
self.act = act
if not act:
self.act = nn.ReLU
self.depth = depth
self.shape = shape
init_channels = self.shape[0]
self.layers = [
Conv2d(init_channels, self.depth, 4, stride=2),
self.act(),
Conv2d(self.depth, 2 * self.depth, 4, stride=2),
self.act(),
Conv2d(2 * self.depth, 4 * self.depth, 4, stride=2),
|
self.act(),
Conv2d(4 * self.depth, 8 * self.depth, 4, stride=2),
self.act(),
]
self.model = nn.Sequential(*self.layers)
def forward(self, x):
# Flatten to [batch*horizon, 3, 64, 64] in loss f
|
unction
orig_shape = list(x.size())
x = x.view(-1, *(orig_shape[-3:]))
x = self.model(x)
new_shape = orig_shape[:-3] + [32 * self.depth]
x = x.view(*new_shape)
return x
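# --- Illustrative usage sketch (editor's addition; assumes torch is
# available and uses the default depth=32 and (3, 64, 64) observation
# shape, for which the flattened latent size is 32 * depth = 1024) ---
def _example_conv_encoder():
    encoder = ConvEncoder()
    obs = torch.zeros(8, 3, 64, 64)  # batch of 8 dummy observations
    return encoder(obs)              # -> tensor of shape [8, 1024]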
# Decoder, part of PlaNET
class ConvDecoder(nn.Module):
"""Standard Convolutional Decoder for Dreamer.
This decoder is used to decode images from the latent state generated
by the transition dynamics model. This is used in calculating loss and
logging gifs for imagined trajectories.
"""
def __init__(self,
input_size: int,
depth: int = 32,
act: ActFunc = None,
shape: Tuple[int] = (3, 64, 64)):
"""Initializes a ConvDecoder instance.
Args:
input_size (int): Input size, usually feature size output from
RSSM.
depth (int): Number of channels in the first conv layer
act (Any): Activation for Encoder, default ReLU
shape (List): Shape of observation input
"""
super().__init__()
self.act = act
if not act:
self.act = nn.ReLU
self.depth = depth
self.shape = shape
self.layers = [
Linear(input_size, 32 * self.depth),
Reshape([-1, 32 * self.depth, 1, 1]),
ConvTranspose2d(32 * self.depth, 4 * self.depth, 5, stride=2),
self.act(),
ConvTranspose2d(4 * self.depth, 2 * self.depth, 5, stride=2),
self.act(),
ConvTranspose2d(2 * self.depth, self.depth, 6, stride=2),
self.act(),
ConvTranspose2d(self.depth, self.shape[0], 6, stride=2),
]
self.model = nn.Sequential(*self.layers)
def forward(self, x):
# x is [batch, hor_length, input_size]
orig_shape = list(x.size())
x = self.model(x)
reshape_size = orig_shape[:-1] + self.shape
mean = x.view(*reshape_size)
# Equivalent to making a multivariate diag
return td.Independent(td.Normal(mean, 1), len(self.shape))
# Reward Model (PlaNET), and Value Function
class DenseDecoder(nn.Module):
"""FC network that outputs a distribution for calculating log_prob.
Used later in DreamerLoss.
"""
def __init__(self,
input_size: int,
output_size: int,
layers: int,
units: int,
dist: str = "normal",
act: ActFunc = None):
"""Initializes FC network
Args:
input_size (int): Input size to network
output_size (int): Output size to network
layers (int): Number of layers in network
units (int): Size of the hidden layers
dist (str): Output distribution, parameterized by FC output
logits.
act (Any): Activation function
"""
super().__init__()
self.layrs = layers
self.units = units
self.act = act
if not act:
self.act = nn.ELU
self.dist = dist
self.input_size = input_size
self.output_size = output_size
self.layers = []
cur_size = input_size
for _ in range(self.layrs):
self.layers.extend([Linear(cur_size, self.units), self.act()])
cur_size = units
self.layers.append(Linear(cur_size, output_size))
self.model = nn.Sequential(*self.layers)
def forward(self, x):
x = self.model(x)
if self.output_size == 1:
x = torch.squeeze(x)
if self.dist == "normal":
output_dist = td.Normal(x, 1)
elif self.dist == "binary":
output_dist = td.Bernoulli(logits=x)
else:
raise NotImplementedError("Distribution type not implemented!")
return td.Independent(output_dist, 0)
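# --- Illustrative usage sketch (editor's addition; the feature and layer
# sizes below are assumptions) ---
def _example_reward_head():
    reward_head = DenseDecoder(input_size=230, output_size=1,
                               layers=2, units=400, dist="normal")
    feat = torch.zeros(16, 230)  # batch of 16 latent feature vectors
    dist = reward_head(feat)     # Independent Normal over scalar rewards
    return dist.log_prob(torch.zeros(16))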
# Represents dreamer policy
class ActionDecoder(nn.Module):
"""ActionDecoder is the policy module in Dreamer.
It outputs a distribution parameterized by mean and std, later to be
transformed by a custom TanhBijector in utils.py for Dreamer.
"""
def __init__(self,
input_size: int,
action_size: int,
layers: int,
units: int,
dist: str = "tanh_normal",
act: ActFunc = None,
min_std: float = 1e-4,
init_std: float = 5.0,
mean_scale: float = 5.0):
"""Initializes Policy
Args:
input_size (int): Input size to network
action_size (int): Action space size
layers (int): Number of layers in network
units (int): Size of the hidden layers
dist (str): Output distribution, with tanh_normal implemented
act (Any): Activation function
min_std (float): Minimum std for output distribution
            init_std (float): Initial std
            mean_scale (float): Soft-clips the mean output of the FC network
                to the range [-mean_scale, mean_scale]
"""
super().__init__()
self.layrs = layers
self.units = units
self.dist = dist
self.act = act
if not act:
self.act = nn.ReLU
self.min_std = min_std
self.init_std = init_std
self.mean_scale = mean_scale
self.action_size = action_size
self.layers = []
self.softplus = nn.Softplus()
# MLP Construction
cur_size = input_size
for _ in range(self.layrs):
self.layers.extend([Linear(cur_size, self.units), self.act()])
cur_size = self.units
if self.dist == "tanh_normal":
self.layers.append(Linear(cur_size, 2 * action_size))
elif self.dist == "onehot":
self.layers.append(Linear(cur_size, action_size))
self.model = nn.Sequential(*self.layers)
# Returns distribution
def forward(self, x):
raw_init_std = np.log(np.exp(self.init_std) - 1)
x = self.model(x)
if self.dist == "tanh_normal":
mean, std = torch.chunk(x, 2, dim=-1)
mean = self.mean_scale * torch.tanh(mean / self.mean_scale)
std = self.softplus(std + raw_init_std) + self.min_std
dist = td.Normal(mean, std)
transforms = [TanhBije
|
aykol/mean-square-displacement
|
xdatcar2xyz.1.04.py
|
Python
|
mit
| 2,939 | 0.008166 |
# The MIT License (MIT)
#
# Copyright (c) 2014 Muratahan Aykol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FO
|
R ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
import numpy as np
xdatcar = open('XDATCAR', 'r')
xyz = open('XDATCAR.xyz', 'w')
xyz_fract = open('XDATCAR_fract.xyz', 'w')
|
system = xdatcar.readline()
scale = float(xdatcar.readline().rstrip('\n'))
print scale
#get lattice vectors
a1 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
a2 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
a3 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
print a1
print a2
print a3
#Save scaled lattice vectors
lat_rec = open('lattice.vectors', 'w')
lat_rec.write(str(a1[0])+' '+str(a1[1])+' '+str(a1[2])+'\n')
lat_rec.write(str(a2[0])+' '+str(a2[1])+' '+str(a2[2])+'\n')
lat_rec.write(str(a3[0])+' '+str(a3[1])+' '+str(a3[2]))
lat_rec.close()
#Read xdatcar
element_names = xdatcar.readline().rstrip('\n').split()
element_dict = {}
element_numbers = xdatcar.readline().rstrip('\n').split()
i = 0
N = 0
for t in range(len(element_names)):
element_dict[element_names[t]] = int(element_numbers[i])
N += int(element_numbers[i])
i += 1
print element_dict
while True:
line = xdatcar.readline()
if len(line) == 0:
break
xyz.write(str(N) + "\ncomment\n")
xyz_fract.write(str(N)+"\ncomment\n")
for el in element_names:
for i in range(element_dict[el]):
p = xdatcar.readline().rstrip('\n').split()
coords = np.array([ float(s) for s in p ])
# print coords
cartesian_coords = coords[0]*a1+coords[1]*a2+coords[2]*a3
xyz.write(el+ " " + str(cartesian_coords[0])+ " " + str(cartesian_coords[1]) + " " + str(cartesian_coords[2]) +"\n")
xyz_fract.write(el+ " " + str(coords[0])+ " " + str(coords[1]) + " " + str(coords[2]) +"\n")
xdatcar.close()
xyz.close()
xyz_fract.close()
|
joewashear007/jazzy
|
jazzy/functions/OutputFunc.py
|
Python
|
mit
| 462 | 0.008658 |
__all__ = ['jazPrint', 'jazShow']
class jazPrint:
def __init__(self):
|
self.command = "print";
def call(self,
|
interpreter, arg):
return interpreter.GetScope().GetStackTop()
class jazShow:
def __init__(self):
self.command = "show";
def call(self, interpreter, arg):
return arg;
# A dictionary of the classes in this file
# used to autoload the functions
Functions = {'jazShow': jazShow, 'jazPrint': jazPrint}
|
pyfisch/servo
|
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/webdriver_server.py
|
Python
|
mpl-2.0
| 8,378 | 0.001432 |
import abc
import errno
import os
import platform
import socket
import time
import traceback
import mozprocess
__all__ = ["SeleniumServer", "ChromeDriverServer", "EdgeChromiumDriverServer", "OperaDriverServer",
"GeckoDriverServer", "InternetExplorerDriverServer", "EdgeDriverServer",
"ServoDriverServer", "WebKitDriverServer", "WebDriverServer"]
class WebDriverServer(object):
__metaclass__ = abc.ABCMeta
default_base_path = "/"
def __init__(self, logger, binary, host="127.0.0.1", port=None,
base_path="", env=None, args=None):
if binary is None:
raise ValueError("WebDriver server binary must be given "
|
"to --webdriver-binary argument")
self.logger = logger
self.binary = binary
self.host = host
if base_path == "":
self.base_path = self.default_base_path
else:
self.base_path = base_path
self.env = os.environ.copy() if env is None else env
self._port = port
self._cmd = None
self._args = args i
|
f args is not None else []
self._proc = None
@abc.abstractmethod
def make_command(self):
"""Returns the full command for starting the server process as a list."""
def start(self, block=False):
try:
self._run(block)
except KeyboardInterrupt:
self.stop()
def _run(self, block):
self._cmd = self.make_command()
self._proc = mozprocess.ProcessHandler(
self._cmd,
processOutputLine=self.on_output,
env=self.env,
storeOutput=False)
self.logger.debug("Starting WebDriver: %s" % ' '.join(self._cmd))
try:
self._proc.run()
except OSError as e:
if e.errno == errno.ENOENT:
raise IOError(
"WebDriver executable not found: %s" % self.binary)
raise
self.logger.debug(
"Waiting for WebDriver to become accessible: %s" % self.url)
try:
wait_for_service((self.host, self.port))
except Exception:
self.logger.error(
"WebDriver was not accessible "
"within the timeout:\n%s" % traceback.format_exc())
raise
if block:
self._proc.wait()
def stop(self, force=False):
if self.is_alive:
return self._proc.kill()
return not self.is_alive
@property
def is_alive(self):
return hasattr(self._proc, "proc") and self._proc.poll() is None
def on_output(self, line):
self.logger.process_output(self.pid,
line.decode("utf8", "replace"),
command=" ".join(self._cmd))
@property
def pid(self):
if self._proc is not None:
return self._proc.pid
@property
def url(self):
return "http://%s:%i%s" % (self.host, self.port, self.base_path)
@property
def port(self):
if self._port is None:
self._port = get_free_port()
return self._port
class SeleniumServer(WebDriverServer):
default_base_path = "/wd/hub"
def make_command(self):
return ["java", "-jar", self.binary, "-port", str(self.port)] + self._args
class ChromeDriverServer(WebDriverServer):
def __init__(self, logger, binary="chromedriver", port=None,
base_path="", args=None):
WebDriverServer.__init__(
self, logger, binary, port=port, base_path=base_path, args=args)
def make_command(self):
return [self.binary,
cmd_arg("port", str(self.port)),
cmd_arg("url-base", self.base_path) if self.base_path else ""] + self._args
class EdgeChromiumDriverServer(WebDriverServer):
def __init__(self, logger, binary="msedgedriver", port=None,
base_path="", args=None):
WebDriverServer.__init__(
self, logger, binary, port=port, base_path=base_path, args=args)
def make_command(self):
return [self.binary,
cmd_arg("port", str(self.port)),
cmd_arg("url-base", self.base_path) if self.base_path else ""] + self._args
class EdgeDriverServer(WebDriverServer):
def __init__(self, logger, binary="microsoftwebdriver.exe", port=None,
base_path="", host="localhost", args=None):
WebDriverServer.__init__(
self, logger, binary, host=host, port=port, args=args)
def make_command(self):
return [self.binary,
"--port=%s" % str(self.port)] + self._args
class OperaDriverServer(ChromeDriverServer):
def __init__(self, logger, binary="operadriver", port=None,
base_path="", args=None):
ChromeDriverServer.__init__(
self, logger, binary, port=port, base_path=base_path, args=args)
class InternetExplorerDriverServer(WebDriverServer):
def __init__(self, logger, binary="IEDriverServer.exe", port=None,
base_path="", host="localhost", args=None):
WebDriverServer.__init__(
self, logger, binary, host=host, port=port, args=args)
def make_command(self):
return [self.binary,
"--port=%s" % str(self.port)] + self._args
class GeckoDriverServer(WebDriverServer):
def __init__(self, logger, marionette_port=2828, binary="geckodriver",
host="127.0.0.1", port=None, args=None):
env = os.environ.copy()
env["RUST_BACKTRACE"] = "1"
WebDriverServer.__init__(self, logger, binary, host=host, port=port, env=env, args=args)
self.marionette_port = marionette_port
def make_command(self):
return [self.binary,
"--marionette-port", str(self.marionette_port),
"--host", self.host,
"--port", str(self.port)] + self._args
class SafariDriverServer(WebDriverServer):
def __init__(self, logger, binary="safaridriver", port=None, args=None):
WebDriverServer.__init__(
self, logger, binary, port=port, args=args)
def make_command(self):
return [self.binary,
"--port=%s" % str(self.port)] + self._args
class ServoDriverServer(WebDriverServer):
def __init__(self, logger, binary="servo", binary_args=None, host="127.0.0.1",
port=None, args=None):
env = os.environ.copy()
env["RUST_BACKTRACE"] = "1"
WebDriverServer.__init__(self, logger, binary, host=host, port=port, env=env, args=args)
self.binary_args = binary_args
def make_command(self):
command = [self.binary,
"--webdriver=%s" % self.port,
"--hard-fail",
"--headless"] + self._args
if self.binary_args:
command += self.binary_args
return command
class WebKitDriverServer(WebDriverServer):
def __init__(self, logger, binary=None, port=None, args=None):
WebDriverServer.__init__(self, logger, binary, port=port, args=args)
def make_command(self):
return [self.binary, "--port=%s" % str(self.port)] + self._args
def cmd_arg(name, value=None):
prefix = "-" if platform.system() == "Windows" else "--"
rv = prefix + name
if value is not None:
rv += "=" + value
return rv
def get_free_port():
"""Get a random unbound port"""
while True:
s = socket.socket()
try:
s.bind(("127.0.0.1", 0))
except socket.error:
continue
else:
return s.getsockname()[1]
finally:
s.close()
def wait_for_service(addr, timeout=15):
"""Waits until network service given as a tuple of (host, port) becomes
available or the `timeout` duration is reached, at which point
``socket.error`` is raised."""
end = time.time() + timeout
while end > time.time():
so = socket.socket()
try:
so.connect(addr)
except socket.timeout:
pass
except socket.error as e:
if e[0] != errno.ECO
|
oneandoneis2/dd-agent
|
checks.d/sqlserver.py
|
Python
|
bsd-3-clause
| 17,085 | 0.002341 |
'''
Check the performance counters from SQL Server
See http://blogs.msdn.com/b/psssql/archive/2013/09/23/interpreting-the-counter-values-from-sys-dm-os-performance-counters.aspx
for information on how to report the metrics available in the sys.dm_os_performance_counters table
'''
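# A possible custom metric entry, as consumed from init_config['custom_metrics']
# in _make_metric_list_to_collect below. Only the key names are taken from this
# module; the YAML layout and the sample counter are illustrative assumptions.
#
#   init_config:
#     custom_metrics:
#       - name: sqlserver.clr.execution
#         counter_name: CLR Execution
#         type: gauge          # optional, must be one of VALID_METRIC_TYPES
#         instance_name: ''    # optional
#         tag_by: db           # optional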
# stdlib
import traceback
# 3rd party
import adodbapi
# project
from checks import AgentCheck
ALL_INSTANCES = 'ALL'
VALID_METRIC_TYPES = ('gauge', 'rate', 'histogram')
# Constant for SQLServer cntr_type
PERF_LARGE_RAW_BASE = 1073939712
PERF_RAW_LARGE_FRACTION = 537003264
PERF_AVERAGE_BULK = 1073874176
PERF_COUNTER_BULK_COUNT = 272696576
PERF_COUNTER_LARGE_RAWCOUNT = 65792
# Queries
COUNTER_TYPE_QUERY = '''select distinct cntr_type
from sys.dm_os_performance_counters
where counter_name = ?;'''
BASE_NAME_QUERY = '''select distinct counter_name
from sys.dm_os_performance_counters
where (counter_name=? or counter_name=?
or counter_name=?) and cntr_type=%s;''' % PERF_LARGE_RAW_BASE
INSTANCES_QUERY = '''select instance_name
from sys.dm_os_performance_counters
where counter_name=? and instance_name!='_Total';'''
VALUE_AND_BASE_QUERY = '''select cntr_value
from sys.dm_os_performance_counters
where (counter_name=? or counter_name=?)
and instance_name=?
order by cntr_type;'''
class SQLConnectionError(Exception):
"""
Exception raised for SQL instance connection issues
"""
pass
class SQLServer(AgentCheck):
SOURCE_TYPE_NAME = 'sql server'
SERVICE_CHECK_NAME = 'sqlserver.can_connect'
# FIXME: 6.x, set default to 5s (like every check)
DEFAULT_COMMAND_TIMEOUT = 30
METRICS = [
('sqlserver.buffer.cache_hit_ratio', 'Buffer cache hit ratio', ''), # RAW_LARGE_FRACTION
('sqlserver.buffer.page_life_expectancy', 'Page life expectancy', ''), # LARGE_RAWCOUNT
('sqlserver.stats.batch_requests', 'Batch Requests/sec', ''), # BULK_COUNT
('sqlserver.stats.sql_compilations', 'SQL Compilations/sec', ''), # BULK_COUNT
('sqlserver.stats.sql_recompilations', 'SQL Re-Compilations/sec', ''), # BULK_COUNT
('sqlserver.stats.connections', 'User Connections', ''), # LARGE_RAWCOUNT
('sqlserver.stats.lock_waits', 'Lock Waits/sec', '_Total'), # BULK_COUNT
('sqlserver.access.page_splits', 'Page Splits/sec', ''), # BULK_COUNT
('sqlserver.stats.procs_blocked', 'Processes blocked', ''), # LARGE_RAWCOUNT
('sqlserver.buffer.checkpoint_pages', 'Checkpoint pages/sec', '') # BULK_COUNT
]
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Cache connections
self.connections = {}
self.failed_connections = {}
self.instances_metrics = {}
# Pre-process the list of metrics to collect
custom_metrics = init_config.get('custom_metrics', [])
for instance in instances:
try:
self._make_metric_list_to_collect(instance, custom_metrics)
except SQLConnectionError:
self.log.exception("Skipping SQL Server instance")
continue
def _make_metric_list_to_
|
collect(self, instance, custom_metrics):
"""
Store the list of metrics to collect by instance_key.
Will also create and cache cursors to query the db.
|
"""
metrics_to_collect = []
for name, counter_name, instance_name in self.METRICS:
try:
sql_type, base_name = self.get_sql_type(instance, counter_name)
metrics_to_collect.append(self.typed_metric(name,
counter_name,
base_name,
None,
sql_type,
instance_name,
None))
except SQLConnectionError:
raise
except Exception:
self.log.warning("Can't load the metric %s, ignoring", name, exc_info=True)
continue
# Load any custom metrics from conf.d/sqlserver.yaml
for row in custom_metrics:
user_type = row.get('type')
if user_type is not None and user_type not in VALID_METRIC_TYPES:
self.log.error('%s has an invalid metric type: %s', row['name'], user_type)
sql_type = None
try:
if user_type is None:
sql_type, base_name = self.get_sql_type(instance, row['counter_name'])
except Exception:
self.log.warning("Can't load the metric %s, ignoring", row['name'], exc_info=True)
continue
metrics_to_collect.append(self.typed_metric(row['name'],
row['counter_name'],
base_name,
user_type,
sql_type,
row.get('instance_name', ''),
row.get('tag_by', None)))
instance_key = self._conn_key(instance)
self.instances_metrics[instance_key] = metrics_to_collect
def typed_metric(self, dd_name, sql_name, base_name, user_type, sql_type, instance_name, tag_by):
'''
Create the appropriate SqlServerMetric object, each implementing its method to
fetch the metrics properly.
If a `type` was specified in the config, it is used to report the value
directly fetched from SQLServer. Otherwise, it is decided based on the
sql_type, according to microsoft's documentation.
'''
metric_type_mapping = {
PERF_COUNTER_BULK_COUNT: (self.rate, SqlSimpleMetric),
PERF_COUNTER_LARGE_RAWCOUNT: (self.gauge, SqlSimpleMetric),
PERF_LARGE_RAW_BASE: (self.gauge, SqlSimpleMetric),
PERF_RAW_LARGE_FRACTION: (self.gauge, SqlFractionMetric),
PERF_AVERAGE_BULK: (self.gauge, SqlIncrFractionMetric)
}
if user_type is not None:
# user type overrides any other value
metric_type = getattr(self, user_type)
cls = SqlSimpleMetric
else:
metric_type, cls = metric_type_mapping[sql_type]
return cls(dd_name, sql_name, base_name,
metric_type, instance_name, tag_by, self.log)
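    # Worked example (an inference from this module, not authoritative): the
    # 'Buffer cache hit ratio' counter has cntr_type PERF_RAW_LARGE_FRACTION
    # (see the METRICS list above), so with no user-supplied type it maps to
    # (self.gauge, SqlFractionMetric); BASE_NAME_QUERY and VALUE_AND_BASE_QUERY
    # above suggest that value is the raw counter divided by its companion
    # base counter.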
def _get_access_info(self, instance):
''' Convenience method to extract info from instance
'''
host = instance.get('host', '127.0.0.1,1433')
username = instance.get('username')
password = instance.get('password')
database = instance.get('database', 'master')
return host, username, password, database
def _conn_key(self, instance):
''' Return a key to use for the connection cache
'''
host, username, password, database = self._get_access_info(instance)
return '%s:%s:%s:%s' % (host, username, password, database)
def _conn_string(self, instance):
''' Return a connection string to use with adodbapi
'''
host, username, password, database = self._get_access_info(instance)
conn_str = 'Provider=SQLOLEDB;Data Source=%s;Initial Catalog=%s;' \
% (host, database)
if username:
conn_str += 'User ID=%s;' % (username)
if password:
conn_str += 'Password=%s;' % (password)
if not username and not password:
conn_str += 'Integrated Security=SSPI;'
return conn_str
def get_curs
|
JshWright/home-assistant
|
homeassistant/components/media_player/webostv.py
|
Python
|
apache-2.0
| 12,114 | 0 |
"""
Support for interface with an LG webOS Smart TV.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.webostv/
"""
import logging
import asyncio
from datetime import timedelta
from urllib.parse import urlparse
import voluptuous as vol
import homeassistant.util as util
from homeassistant.components.media_player import (
SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_PLAY,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA, MEDIA_TYPE_CHANNEL,
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_MAC, CONF_CUSTOMIZE, STATE_OFF,
STATE_PLAYING, STATE_PAUSED,
STATE_UNKNOWN, CONF_NAME, CONF_FILENAME)
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pylgtv==0.1.7',
'websockets==3.2',
'wakeonlan==0.2.2']
_CONFIGURING = {} # type: Dict[str, str]
_LOGGER = logging.getLogger(__name__)
CONF_SOURCES = 'sources'
DEFAULT_NAME = 'LG webOS Smart TV'
WEBOSTV_CONFIG_FILE = 'webostv.conf'
SUPPORT_WEBOSTV = SUPPORT_TURN_OFF | \
SUPPORT_NEXT_TRACK | SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_STEP | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY_MEDIA | SUPPORT_PLAY
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
CUSTOMIZE_SCHEMA = vol.Schema({
vol.Optional(CONF_SOURCES):
vol.All(cv.ensure_list, [cv.string]),
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_MAC): cv.string,
vol.Optional(CONF_CUSTOMIZE, default={}): CUSTOMIZE_SCHEMA,
vol.Optional(CONF_FILENAME, default=WEBOSTV_CONFIG_FILE): cv.string
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the LG WebOS TV platform."""
if discovery_info is not None:
host = urlparse(discovery_info[1]).hostname
else:
host = config.get(CONF_HOST)
if host is None:
_LOGGER.error("No TV found in configuration file or with discovery")
return False
# Only act if we are not already configuring this host
if host in _CONFIGURING:
return
mac = config.get(CONF_MAC)
name = config.get(CONF_NAME)
customize = config.get(CONF_CUSTOMIZE)
config = hass.config.path(config.get(CONF_FILENAME))
setup_tv(host, mac, name, customize, config, hass, add_devices)
def setup_tv(host, mac, name, customize, config, hass, add_devices):
"""Set up a LG WebOS TV based on host parameter."""
from pylgtv import WebOsClient
from pylgtv import PyLGTVPairException
from websockets.exceptions import ConnectionClosed
client = WebOsClient(host, config)
if not client.is_registered():
if host in _CONFIGURING:
# Try to pair.
try:
client.register()
except PyLGTVPairException:
_LOGGER.warning(
"Connected to LG webOS TV %s but not paired", host)
return
except (OSError, ConnectionClosed, TypeError,
asyncio.TimeoutError):
_LOGGER.error("Unable to connect to host %s", host)
return
else:
# Not registered, request configuration.
_LOGGER.warning("LG webOS TV %s needs to be paired", host)
request_configuration(
host, mac, name, customize, config, hass, add_devices)
return
# If we came here and configuring this host, mark as done.
if client.is_registered() and host in _CONFIGURING:
request_id = _CONFIGURING.pop(host)
configurator = get_component('configurator')
configurator.request_done(request_id)
add_devices([LgWebOSDevice(host, mac, name, customize, config)], True)
def request_configuration(
host, mac, name, customize, config, hass, add_devices):
"""Request configuration steps from the user."""
configurator = get_component('configurator')
# We got an error if this method is called while we are configuring
if ho
|
st in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING[host], 'Failed to pair, please try again.')
return
# pylint: disable=unused-argument
def lgtv_configuration_callback(data):
"""Handle configuration changes."""
setup_tv(host, mac, name, customize, config, hass, add_devices)
_CONFIGURING[host] = configurator.request_config(
hass, name, lgtv_configuration_callback,
description='Click start and accept the pairing request
|
on your TV.',
description_image='/static/images/config_webos.png',
submit_caption='Start pairing request'
)
class LgWebOSDevice(MediaPlayerDevice):
"""Representation of a LG WebOS TV."""
def __init__(self, host, mac, name, customize, config):
"""Initialize the webos device."""
from pylgtv import WebOsClient
from wakeonlan import wol
self._client = WebOsClient(host, config)
self._wol = wol
self._mac = mac
self._customize = customize
self._name = name
# Assume that the TV is not muted
self._muted = False
# Assume that the TV is in Play mode
self._playing = True
self._volume = 0
self._current_source = None
self._current_source_id = None
self._state = STATE_UNKNOWN
self._source_list = {}
self._app_list = {}
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update(self):
"""Retrieve the latest data."""
from websockets.exceptions import ConnectionClosed
try:
current_input = self._client.get_input()
if current_input is not None:
self._current_source_id = current_input
if self._state in (STATE_UNKNOWN, STATE_OFF):
self._state = STATE_PLAYING
else:
self._state = STATE_OFF
self._current_source = None
self._current_source_id = None
            if self._state != STATE_OFF:
self._muted = self._client.get_muted()
self._volume = self._client.get_volume()
self._source_list = {}
self._app_list = {}
conf_sources = self._customize.get(CONF_SOURCES, [])
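            # When sources are configured, keep only those, but always include
            # the currently active app/input in the source list.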
for app in self._client.get_apps():
self._app_list[app['id']] = app
if conf_sources:
if app['id'] == self._current_source_id:
self._current_source = app['title']
self._source_list[app['title']] = app
elif (app['id'] in conf_sources or
any(word in app['title']
for word in conf_sources) or
any(word in app['id']
for word in conf_sources)):
self._source_list[app['title']] = app
else:
self._current_source = app['title']
self._source_list[app['title']] = app
for source in self._client.get_inputs():
if conf_sources:
if source['id'] == self._current_source_id:
self._source_list[source['label']] = source
elif (source['label'] in conf_sources or
any(source['label'].find(word) != -1
for word in conf_sources)):
self._source_list[source['label']] = source
else:
self._source_list[source['label']] = source
except (OSError,
|
makiwara/onemoretime
|
settings/db_settings_sample.py
|
Python
|
mit
| 267 | 0.003745 |
# -*- coding: utf-8 -*-
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'django',
'USER': 'django',
'PASSWORD': 'PUTPASSWORDHER
|
E',
'HOST': '127.0.0.1',
'PORT': '
|
5432',
}
}
|
JohnCremona/bianchi-progs
|
FD/H3.py
|
Python
|
gpl-3.0
| 72,028 | 0.008872 |
# Functions for working with H3 and hemispheres etc.
from itertools import chain
from sage.all import (Infinity, Matrix, ZZ, QQ, RR, CC, NumberField,
Graph, srange, Set, sign, var, implicit_plot3d, NFCusp, Integer, oo,
infinity, polygen, point, line, circle)
from utils import (nf, to_k, cusp, cusp_label, Imat, apply,
translate_cusp, negate_cusp, conj_cusp,
smallest_ideal_class_representatives,
alpha_index_with_translation)
from alphas import precomputed_alphas
def make_k(dk):
"""Given a negative fundamental discriminant, or positive square-free
d, constructs the associated imaginary quadratic field and returns
    a dict containing this and other useful data
"""
x = polygen(QQ)
if dk>0:
assert dk.is_squarefree()
dk = -dk if dk%4==3 else -4*dk
if dk%4==1:
k = NumberField(x**2-x+(1-dk)//4, 'w')
else:
k = NumberField(x**2-dk//4, 'w')
assert k.discriminant() == dk
w = k.gen()
emb = next(e for e in k.embeddings(CC) if e(w).imag()>0)
return {'k': k, 'dk': dk, 'w': w, 'wbar': w.trace()-w, 'Ok': k.ring_of_integers(),
'emb': emb, 'Ymax': emb(w).imag()/2,
'Ireps': [c.ideal() for c in k.class_group()]}
# Points of H_3 are represented as pairs [z,t2] where z is in k and t2
# in QQ is the square of the height (so the actual point coordinates
# are (z,sqrt(t2))).
# Each principal cusp alpha=r/s with (r,s)=(1) determines the
# hemisphere S_alpha with equation |z-alpha|^2+t^2=1/|s|^2, or
# N(s*z-r)+N(s)*t^2=1.
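# Illustrative sketch, assuming the helpers behave as they are used elsewhere in
# this file; it only calls functions defined in this module and is never executed
# at import time. Over k = Q(sqrt(-1)) (make_k(ZZ(1)), so w = i) the integral
# cusp alpha = w is principal with denominator ideal (1), so S_alpha is the unit
# hemisphere |z - w|^2 + t^2 = 1 and the point P = [w, 1/4] lies strictly under it.
def _hemisphere_example():
    kdata = make_k(ZZ(1))
    k, w = kdata['k'], kdata['w']
    alpha = cusp(w, k)                # integral principal cusp w/1
    P = [w, QQ(1)/4]                  # point of H_3 at height 1/2 above z = w
    return radius_squared(alpha), is_under(P, alpha)   # expected: (1, +1)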
def radius_squared(alpha):
"""
For a principal cusp alpha, return the square radius of S_alpha.
"""
return 1/alpha.denominator().norm()
def cusp_to_point(alpha):
"""
For a principal cusp alpha = a in k, return the point [a,
radius_squared(alpha)].
"""
return [to_k(alpha), radius_squared(alpha)]
def tri_inter(a0, a1, a2):
"""Returns the triple intersection point of the hemispheres S_a_i,
where a0, a1, a2 are principal cusps, if there is one, as a pair
[z,t2] where z is in k and t2 in QQ is the square of the vertical
coordinate.
"""
alist = [a0,a1,a2]
# Check the cusps are principal, not infinity, and with unit ideal
assert all((not a.is_infinity()) and (a.ideal()==1) for a in alist)
# Define the square radii and centres
rho0, rho1, rho2 = [radius_squared(a) for a in alist]
al0, al1, al2 = [to_k(a) for a in alist]
n0, n1, n2 = [a.norm() for a in [al0, al1, al2]]
#
delta = al1*(al0-al2).conjugate() + al2*(al1-al0).conjugate() + al0*(al2-al1).conjugate()
if delta==0:
return None
z = (al1*(n0-n2+rho2-rho0) + al2*(n1-n0+rho0-rho1) + al0*(n2-n1+rho1-rho2)) / delta
t2 = rho0 - n0 - z.norm() + 2*(al0*z.conjugate()).real()
assert t2 == rho1 - n1 - z.norm() + 2*(al1*z.conjugate()).real()
assert t2 == rho2 - n2 - z.norm() + 2*(al2*z.conjugate()).real()
return None if t2<0 else [z,t2]
def bi_inter(a1, a2):
"""Returns the point on the intersection of the hemispheres S_a_i
(where a1, a2 are principal cusps) which is on the line from a1 to
a2, as a pair [z,t2] where z is in k and t2 in QQ is the square of
the vertical coordinate.
Use: when both S_a_i pass through a singular point.
"""
alist = [a1,a2]
# Check the cusps are principal, not infinity, and with unit ideal
assert all((not a.is_infinity()) and (a.ideal()==1) for a in alist)
# Define the square radii and centres
rho1, rho2 = [radius_squared(a) for a in alist]
al1, al2 = [to_k(a) for a in alist]
n1, n2 = [a.norm() for a in [al1, al2]]
#
delta = al2-al1
z = ((al1+al2) + (rho1-rho2)*delta/delta.norm())/2
t2 = rho1 - n1 - z.norm() + 2*(al1*z.conjugate()).real()
assert t2 == rho2 - n2 - z.norm() + 2*(al2*z.conjugate()).real()
return None if t2<0 else [z,t2]
def is_under(P, a):
"""
Returns -1,0,+1 according as P is over, on, under S_a (a principal)
"""
z, t2 = P
ad = a.denominator()
al = a.numerator()/ad
return sign(1/ad.norm() - (z-al).norm() - t2)
def is_inside(a, b, strict=False):
"""Returns True iff a is inside (or strictly inside) the circle
centred on b, where a,b are cusps with b principal.
"""
k = nf(a)
d2 = (to_k(a,k)-to_k(b,k)).norm()
r2 = radius_squared(b)
if strict:
return d2 < r2
else:
return d2 <= r2
def covering_hemispheres1(P, option=None):
"""For P=[z,t2] in H_3, returns a list of cusps alpha such that P lies
on or under S_alpha.
If option is 'exact' only returns alpha for which P is on S_alpha exactly.
If option is 'strict' only returns alpha for which P is strictly under S_alpha.
Otherwise (default), returns alpha for which P is under or on S_alpha.
"""
alphas = []
z, t2 = P
k = z.parent()
a = z.numerator() # in O_K
b = z.denominator() # in Z
sbound = (1/t2).floor()
for snorm in range(1,1+sbound):
umax = b*b*(1-snorm*t2)
for s in k.elements_of_norm(snorm):
#print("s = {}".format(s))
if option=='exact':
urange = [umax] if umax in ZZ else []
else:
urange = srange(umax.floor()+1)
sa = s*a
#print("umax={}, urange={}".format(umax,list(urange)))
for unorm in urange:
if unorm<umax or option != 'strict':
for u in k.elements_of_norm(unorm):
#print(" u = {}".format(u))
for rb in [sa+u, sa-u] if u else [sa]:
r = rb/b
#print(" r = {}".format(r))
if r.is_integral() and k.ideal(r,s)==1:
alphas.append(cusp(r/s, k))
return alphas
def covering_hemispheres2(P, option=None, debug=False):
"""For P=[z,t2] in H_3, returns a list of cusps alpha such that P lies
on or under S_alpha.
If option is 'exact' only returns alpha for which P is on S_alpha exactly.
If option is 'strict' only returns alpha for which P is strictly under S_alpha.
Otherwise (default), returns alpha for which P is under or on S_alpha.
"""
alphas = []
z, t2 = P
k = z.parent()
a = z.numerator() # in O_K
sbound = (1/t2).floor()
if debug:
print("t2={} so bound on N(s) = {}".format(t2, sbound))
for snorm in srange(1,1+sbound):
for s in k.elements_of_norm(snorm):
sz = s*z
d1 = 1/snorm - t2
assert d1>=0
if debug:
print("s = {}, norm {}: d1 = {}".format(s, snorm, d1))
rbound = ((RR(sz.norm()).sqrt()+1)**2).floor()
if debug:
print("Bound on N(r) = {}".format(rbound))
for rnorm in srange(1+rbound):
for r in k.elements_of_norm(rnorm):
if k.ideal(r,s)!=1:
continue
for pm in [-1,1] if r else [1]:
a = pm*r/s
d = d1 - (a-z).norm()
if debug and d>=0:
print("a = {}, d = {}".format(a, d))
# we need d==0 for exact, d>0 for strict, else d>=0
ok = (d>0) if option=='strict' else (d==0) if option=='exact' else (d>=0)
if ok:
a = cusp(a,k)
if debug:
print(" OK {}".format(a))
alphas.append(a)
return alphas
def covering_hemispheres_test(P, option=None):
res1 = covering_hemispheres1(P, option)
res2 = covering_hemispheres2(P, option)
if sorted(res1) != sorted(res2):
print("old and new disagree for P={}".format(P))
return res1
covering_hemispheres = covering_hemispheres2
def hemispheres_through(P):
return covering_hemis
|
pheres(P, 'exact')
def properly_covering_hemispheres(P):
return covering_hemispheres(P,
|
'strict')
d
|
dsaldana/roomba_sensor_network
|
localization_artrack/camera_controller/scripts/frame_broadcaster.py
|
Python
|
gpl-3.0
| 678 | 0.042773 |
#!/usr/bin/env python
import roslib
roslib.load_manifest('camera_controller')
import rospy
import tf
if __name__ == '__main__':
rospy.init_node('frame_broadcaster')
|
br = tf.TransformBroadcaster()
rate = rospy.Rate(10.0)
target_frame = rospy.get_param("~target_frame")
# Camera position
# Translation
x = rospy.get_param("~x",0)
y = rospy.get_param("~y",0)
z = rospy.get_param("~z",0)
# Pose quaternion
qm = rospy.get_param("~qm",0)
qx = rospy.get_param("~qx",0)
qy = rospy.get_param("~qy",0)
qz
|
= rospy.get_param("~qz",1)
while not rospy.is_shutdown():
br.sendTransform((x,y,z), (qm, qx, qy, qz), rospy.Time.now(), target_frame, "world")
rate.sleep()
|
arielalmendral/ert
|
python/python/ert/test/test_run.py
|
Python
|
gpl-3.0
| 5,315 | 0.023518 |
# Copyright (C) 2013 Statoil ASA, Norway.
#
# The file 'test_run.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
import random
import os.path
import subprocess
import argparse
from .test_area import TestAreaContext
def path_exists( path ):
if os.path.exists( path ):
return (True , "Path:%s exists" % path)
else:
return (False , "ERROR: Path:%s does not exist" % path)
class TestRun(object):
default_ert_cmd = "ert"
default_ert_version = "stable"
default_path_prefix = None
def __init__(self , config_file , args = [] , name = None):
if os.path.exists( config_file ) and os.path.isfile( config_file ):
self.parseArgs(args)
self.__ert_cmd = TestRun.default_ert_cmd
self.path_prefix = TestRun.default_path_prefix
self.config_file = config_file
self.check_list = []
|
self.workflows = []
if name:
|
self.name = name
else:
self.name = config_file.replace("/" , ".")
                self.name = self.name.lstrip(".")
self.name += "/%08d" % random.randint(0,100000000)
else:
raise IOError("No such config file: %s" % config_file)
def parseArgs(self , args):
parser = argparse.ArgumentParser()
parser.add_argument("-v" , "--version" , default = self.default_ert_version)
parser.add_argument("args" , nargs="*")
result = parser.parse_args(args)
self.ert_version = result.version
self.args = result.args
def get_config_file(self):
return self.__config_file
def set_config_file(self , input_config_file):
self.__config_file = os.path.basename( input_config_file )
self.abs_config_file = os.path.abspath( input_config_file )
config_file = property( get_config_file , set_config_file )
#-----------------------------------------------------------------
def set_path_prefix(self , path_prefix):
self.__path_prefix = path_prefix
def get_path_prefix(self):
return self.__path_prefix
path_prefix = property( get_path_prefix , set_path_prefix )
#-----------------------------------------------------------------
def get_ert_cmd(self):
return self.__ert_cmd
def set_ert_cmd(self , cmd):
self.__ert_cmd = cmd
ert_cmd = property( get_ert_cmd , set_ert_cmd)
#-----------------------------------------------------------------
def get_workflows(self):
return self.workflows
def add_workflow(self , workflow):
self.workflows.append( workflow )
#-----------------------------------------------------------------
def get_args(self):
return self.args
#-----------------------------------------------------------------
def add_check( self , check_func , arg):
if callable(check_func):
self.check_list.append( (check_func , arg) )
else:
raise Exception("The checker:%s is not callable" % check_func )
#-----------------------------------------------------------------
def __run(self , work_area ):
argList = [ self.ert_cmd , "-v" , self.ert_version ]
for arg in self.args:
argList.append( arg )
argList.append( self.config_file )
for wf in self.workflows:
argList.append( wf )
status = subprocess.call( argList )
if status == 0:
return (True , "ert has run successfully")
else:
return (False , "ERROR:: ert exited with status code:%s" % status)
def run(self):
if len(self.workflows):
with TestAreaContext(self.name , prefix = self.path_prefix , store_area = False) as work_area:
test_cwd = work_area.get_cwd()
work_area.copy_parent_content( self.abs_config_file )
status = self.__run( work_area )
global_status = status[0]
status_list = [ status ]
if status[0]:
for (check_func , arg) in self.check_list:
status = check_func( arg )
status_list.append( status )
if not status[0]:
global_status = False
if not global_status:
work_area.set_store( True )
return (global_status , test_cwd , status_list)
else:
raise Exception("Must have added workflows before invoking start()")
|
lsbardel/flow
|
flow/db/instdata/pricers/future.py
|
Python
|
bsd-3-clause
| 158 | 0.031646 |
from equity import EquityP
|
ricer
class FuturePricer
|
(EquityPricer):
def __init__(self):
super(FuturePricer,self).__init__()
|
raonyguimaraes/mendelmd
|
individuals/views.py
|
Python
|
bsd-3-clause
| 23,385 | 0.008766 |
import gzip
import json
import os
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.paginator import Paginator
from django.db.models import Avg
from django.db.models import Count
from django.db.models import Max
from django.db.models import Min
from django.db.models import Q
from django.db.models import Sum
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
from django.template import RequestContext
from django.utils.text import slugify
from django.views.generic import DeleteView
from individuals.forms import IndividualForm, ComparisonForm, GroupForm, BrowserForm
from individuals.models import Individual, Group
from individuals.tasks import VerifyVCF, AnnotateVariants, PopulateVariants
from variants.models import Variant
def response_mimetype(request):
if "application/json" in request.META['HTTP_ACCEPT']:
return "application/json"
else:
return "text/plain"
class JSONResponse(HttpResponse):
"""JSON response class."""
def __init__(self,obj='',json_opts={},mimetype="application/json",*args,**kwargs):
content = json.dumps(obj,**json_opts)
super(JSONResponse,self).__init__(content,mimetype,*args,**kwargs)
def create(request):
if request.method == 'POST':
form = IndividualForm(request.POST, request.FILES)
if form.is_valid():
if request.user.is_authenticated:
individual = Individual.objects.create(user=request.user, status='new')
else:
individual = Individual.objects.create(user=None, status='new')
individual.vcf_file= request.FILES.get('file')
print('file')
print(request.FILES.get('file'))
filename = individual.vcf_file.name.split('.')
new_filename = []
for tag in filename:
new_filename.append(slugify(tag))
individual.vcf_file.name = ".".join(new_filename)
print('filename ', filename)
#get name from inside vcf file
individual.name= str(os.path.splitext(individual.vcf_file.name)[0]).replace('.vcf','').replace('.gz','').replace('.rar','').replace('.zip','').replace('._',' ').replace('.',' ')
# individual.shared_with_groups = form.cleaned_data['shared_with_groups']
individual.shared_with_groups.set(form.cleaned_data['shared_with_groups'])
individual.save()
f = individual.vcf_file
#fix permissions
#os.chmod("%s/genomes/%s/" % (settings.BASE_DIR, individual.user), 0777)
#if request.user.is_authenticated:
# os.chmod("%s/genomes/%s/%s" % (settings.BASE_DIR, slugify(individual.user), individual.id), 0o777)
#else:
# os.chmod("%s/genomes/public/%s" % (settings.BASE_DIR, individual.id), 0o777)
# AnnotateVariants.delay(individual.id)
# VerifyVCF.delay(individual.id)
data = {'files': [{'deleteType': 'DELETE', 'name': individual.name, 'url': '', 'thumbnailUrl': '', 'type': 'image/png', 'deleteUrl': '', 'size': f.size}]}
response = JSONResponse(data, mimetype=response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
else:
print(form.errors)
else:
form = IndividualForm()
return render(request, 'individuals/create.html', {'form':form})
# Create your views here.
@login_required
def edit(request, individual_id):
individual = get_object_or_404(Individual, pk=individual_id)
if request.method == 'POST':
form = IndividualForm(request.POST, instance=individual)
if form.is_valid():
form.save()
return redirect('dashboard')
# form = IndividualForm(request.POST, request.FILES)
# if form.is_valid():
# individual = form.save(commit=False)
# individual.user = request.user
# individual.save()
# return redirect('dashboard')
else:
form = IndividualForm(instance=individual)
return render(request, 'individuals/individual_form.html', {'form':form})
class IndividualDelete
|
View(DeleteView):
model = Individual
def delete(self, request, *args, **kwargs):
"""
        Delete the database record and remove the associated uploaded files
        from disk.
"""
self.objec
|
t = self.get_object()
individual_id = self.object.id
if self.object.user:
username = self.object.user.username
else:
username = 'public'
#delete files
if self.object.vcf_file:
self.object.vcf_file.delete()
# if self.object.strs_file:
# self.object.strs_file.delete()
# if self.object.cnvs_file:
# self.object.cnvs_file.delete()
os.system('rm -rf %s/genomes/%s/%s' % (settings.BASE_DIR, username, individual_id))
self.object.delete()
# response = JSONResponse(True, {}, response_mimetype(self.request))
# response['Content-Disposition'] = 'inline; filename=files.json'
# return response
messages.add_message(request, messages.INFO, "Individual deleted with success!")
#return redirect('individuals_list')
return redirect('individuals_list')
def view(request, individual_id):
individual = get_object_or_404(Individual, pk=individual_id)
variant_list = Variant.objects.filter(individual=individual)
# snpeff = SnpeffAnnotation.objects.filter(individual=individual)
individual.n_variants = variant_list.count()
individual.novel_variants = variant_list.filter(variant_id = '.').count()
individual.summary = []
#get calculated values from database
summary_item = {
'type': 'Total SNVs',
'total': variant_list.values('genotype').count(),
'discrete': variant_list.values('genotype').annotate(total=Count('genotype'))
}
individual.summary.append(summary_item)
summary_item = {
'type': 'Total Gene-associated SNVs',
'total': variant_list.values('gene').exclude(gene="").count(),
'discrete': variant_list.exclude(gene="").values('genotype').annotate(total=Count('genotype'))
}
individual.summary.append(summary_item)
individual.snp_eff = variant_list.values('snpeff_effect').annotate(Count('snpeff_effect')).order_by('snpeff_effect')
# print 'individual.snp_eff', individual.snp_eff
# variant_list.values('snpeff__effect').annotate(Count('snpeff__effect')).order_by('snpeff__effect')
#
individual.functional_class = variant_list.values('snpeff_func_class').annotate(Count('snpeff_func_class')).order_by('snpeff_func_class')
individual.impact_variants = variant_list.values('snpeff_impact').annotate(Count('snpeff_impact')).order_by('snpeff_impact')
individual.filter_variants = variant_list.values('filter').annotate(Count('filter')).order_by('filter')
individual.quality = variant_list.aggregate(Avg('qual'), Max('qual'), Min('qual'))
individual.read_depth = variant_list.aggregate(Avg('read_depth'), Max('read_depth'), Min('read_depth'))
individual.clinvar_clnsig = variant_list.values('clinvar_clnsig').annotate(total=Count('clinvar_clnsig'))
individual.chromossome = variant_list.values('chr').annotate(total=Count('chr')).order_by('chr')
# variants_with_snpid = variant_list.values('variant_id').exclude(variant_id=".")
#print variants_with_snpid
# fields = Variant._meta.get_all_field_names()
paginator = Paginator(variant_list, 25) # Show 25 contacts per page
try:
page = int(request.GE
|
dymkowsk/mantid
|
Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCreateWavelengthAndPixelAdjustmentTest.py
|
Python
|
gpl-3.0
| 7,906 | 0.004048 |
from __future__ import (absolute_import, division, print_function)
import unittest
import mantid
import os
import numpy as np
from sans.test_helper.test_director import TestDirector
from sans.state.wavelength_and_pixel_adjustment import get_wavelength_and_pixel_adjustment_builder
from sans.common.enums import (RebinType, RangeStepType, DetectorType)
from sans.common.general_functions import (create_unmanaged_algorithm)
from sans.common.constants import EMPTY_NAME
class SANSCalculateTransmissionTest(unittest.TestCase):
@staticmethod
def _create_test_wavelength_adjustment_file(file_name):
test_file = (" Tue 24-MAR-2015 00:02 Workspace: directbeam_new_hist\n"
"\n"
" 6 0 0 0 1 6 0\n"
" 0 0 0 0\n"
" 3 (F12.5,2E16.6)\n"
" 1.00000 5.000000e-01 5.000000e-01\n"
" 3.00000 5.000000e-01 5.000000e-01\n"
" 5.00000 5.000000e-01 5.000000e-01\n"
|
" 7.00000 5.000000e-01 5.000000e-01\n"
" 9.00000 5.000000e-01 5.000000e-01\n"
"
|
11.00000 5.000000e-01 5.000000e-01\n")
full_file_path = os.path.join(mantid.config.getString('defaultsave.directory'), file_name)
if os.path.exists(full_file_path):
os.remove(full_file_path)
with open(full_file_path, 'w') as f:
f.write(test_file)
return full_file_path
@staticmethod
def _remove_test_file(file_name):
if os.path.exists(file_name):
os.remove(file_name)
@staticmethod
def _get_state(lab_pixel_file=None, hab_pixel_file=None, lab_wavelength_file=None, hab_wavelength_file=None,
wavelength_low=None, wavelength_high=None, wavelength_step=None,
wavelength_step_type=None):
test_director = TestDirector()
state = test_director.construct()
data_state = state.data
wavelength_and_pixel_builder = get_wavelength_and_pixel_adjustment_builder(data_state)
if lab_pixel_file:
wavelength_and_pixel_builder.set_LAB_pixel_adjustment_file(lab_pixel_file)
if hab_pixel_file:
wavelength_and_pixel_builder.set_HAB_pixel_adjustment_file(hab_pixel_file)
if lab_wavelength_file:
wavelength_and_pixel_builder.set_LAB_wavelength_adjustment_file(lab_wavelength_file)
if hab_wavelength_file:
wavelength_and_pixel_builder.set_HAB_wavelength_adjustment_file(hab_wavelength_file)
if wavelength_step_type:
wavelength_and_pixel_builder.set_wavelength_step_type(wavelength_step_type)
if wavelength_low:
wavelength_and_pixel_builder.set_wavelength_low(wavelength_low)
if wavelength_high:
wavelength_and_pixel_builder.set_wavelength_high(wavelength_high)
if wavelength_step:
wavelength_and_pixel_builder.set_wavelength_step(wavelength_step)
wavelength_and_pixel_state = wavelength_and_pixel_builder.build()
state.adjustment.wavelength_and_pixel_adjustment = wavelength_and_pixel_state
return state.property_manager
@staticmethod
def _get_workspace(data):
create_name = "CreateSampleWorkspace"
create_options = {"NumBanks": 1,
"BankPixelWidth": 1,
"XMin": 1,
"XMax": 11,
"BinWidth": 2,
"XUnit": "Wavelength",
"OutputWorkspace": EMPTY_NAME}
create_alg = create_unmanaged_algorithm(create_name, **create_options)
create_alg.execute()
workspace = create_alg.getProperty("OutputWorkspace").value
data_y = workspace.dataY(0)
for index in range(len(data_y)):
data_y[index] = data[index]
return workspace
@staticmethod
def _run_test(transmission_workspace, norm_workspace, state, is_lab=True):
adjust_name = "SANSCreateWavelengthAndPixelAdjustment"
adjust_options = {"TransmissionWorkspace": transmission_workspace,
"NormalizeToMonitorWorkspace": norm_workspace,
"SANSState": state,
"OutputWorkspaceWavelengthAdjustment": "out_wavelength",
"OutputWorkspacePixelAdjustment": "out_pixels"}
if is_lab:
adjust_options.update({"Component": DetectorType.to_string(DetectorType.LAB)})
else:
adjust_options.update({"Component": DetectorType.to_string(DetectorType.HAB)})
adjust_alg = create_unmanaged_algorithm(adjust_name, **adjust_options)
adjust_alg.execute()
wavelength_adjustment = adjust_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
pixel_adjustment = adjust_alg.getProperty("OutputWorkspacePixelAdjustment").value
return wavelength_adjustment, pixel_adjustment
def test_that_gets_wavelength_workspace_when_no_files_are_specified(self):
# Arrange
data_trans = [3., 4., 5., 7., 3.]
data_norm = [9., 3., 8., 3., 1.]
transmission_workspace = SANSCalculateTransmissionTest._get_workspace(data_trans)
norm_workspace = SANSCalculateTransmissionTest._get_workspace(data_norm)
state = SANSCalculateTransmissionTest._get_state(wavelength_low=1., wavelength_high=11., wavelength_step=2.,
wavelength_step_type=RangeStepType.Lin)
# Act
wavelength_adjustment, pixel_adjustment = SANSCalculateTransmissionTest._run_test(transmission_workspace,
norm_workspace, state, True)
# Assert
self.assertTrue(pixel_adjustment is None)
self.assertTrue(wavelength_adjustment.getNumberHistograms() == 1)
expected = np.array(data_trans)*np.array(data_norm)
data_y = wavelength_adjustment.dataY(0)
for e1, e2, in zip(expected, data_y):
self.assertTrue(e1 == e2)
def test_that_gets_adjustment_workspace_if_files_are_specified(self):
# Arrange
data_trans = [3., 4., 5., 7., 3.]
data_norm = [9., 3., 8., 3., 1.]
expected_direct_file_workspace = [0.5, 0.5, 0.5, 0.5, 0.5]
transmission_workspace = SANSCalculateTransmissionTest._get_workspace(data_trans)
norm_workspace = SANSCalculateTransmissionTest._get_workspace(data_norm)
direct_file_name = "DIRECT_test.txt"
direct_file_name = SANSCalculateTransmissionTest._create_test_wavelength_adjustment_file(direct_file_name)
state = SANSCalculateTransmissionTest._get_state(hab_wavelength_file=direct_file_name,
wavelength_low=1., wavelength_high=11., wavelength_step=2.,
wavelength_step_type=RangeStepType.Lin)
# Act
wavelength_adjustment, pixel_adjustment = SANSCalculateTransmissionTest._run_test(transmission_workspace,
norm_workspace, state, False)
# Assert
self.assertTrue(pixel_adjustment is None)
self.assertTrue(wavelength_adjustment.getNumberHistograms() == 1)
expected = np.array(data_trans)*np.array(data_norm)*np.array(expected_direct_file_workspace)
data_y = wavelength_adjustment.dataY(0)
for e1, e2, in zip(expected, data_y):
self.assertTrue(e1 == e2)
# Clean up
SANSCalculateTransmissionTest._remove_test_file(direct_file_name)
if __name__ == '__main__':
unittest.main()
|
OrlyMar/gasistafelice
|
gasistafelice/rest/views/blocks/order.py
|
Python
|
agpl-3.0
| 7,205 | 0.010548 |
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from django.core import urlresolvers
from gasistafelice.rest.views.blocks.base import BlockSSDataTables, ResourceBlockAction
from gasistafelice.consts import CREATE, EDIT, EDIT_MULTIPLE, VIEW
from gasistafelice.lib.shortcuts import render_to_xml_response, render_to_context_response
from gasistafelice.supplier.models import Supplier
from gasistafelice.gas.models import GASMemberOrder
from gasistafelice.gas.forms.order.gmo import SingleGASMemberOrderForm
from gasistafelice.lib.formsets import BaseFormSetW
|
ithRequest
from django.forms.formsets import formset_factory
import logging
log = logging.getLogger(__name__)
#-----------------------------------
|
-------------------------------------------#
# #
#------------------------------------------------------------------------------#
class Block(BlockSSDataTables):
# COMMENT fero: name of this block should be
    # something different from "order" (e.g. "make_order")
# because usually we refer to "order" for GASSupplierOrder
BLOCK_NAME = "order"
BLOCK_DESCRIPTION = _("Order")
BLOCK_VALID_RESOURCE_TYPES = ["gasmember"]
COLUMN_INDEX_NAME_MAP = {
0: 'pk',
1: 'gasstock__stock__supplier__name',
2: 'gasstock__stock__product__name',
3: 'order_price',
4: 'tot_amount',
5: 'tot_price',
6: '',
7: 'gasstock__stock__product__category__name',
}
# 3: 'gasstock__stock__product__description',
def _get_resource_list(self, request):
selected_orders = request.GET.getlist('gfCP_order')
rv = request.resource.orderable_products
if (selected_orders):
rv = rv.filter(order__pk__in=map(int, selected_orders))
return rv
def options_response(self, request, resource_type, resource_id):
"""Get options for orders block. Check GAS configuration.
WARNING: call to this method doesn't pass through get_response
so you have to reset self.request and self.resource attribute if you want
"""
log.debug("order options_response")
self.request = request
self.resource = request.resource
gas = self.resource.gas
orders = gas.orders.open()
field_type = "checkbox"
if gas.config.order_show_only_next_delivery:
orders = orders.order_by('-delivery__date')
if orders[0].delivery:
                orders = orders.filter(delivery__date=orders[0].delivery.date)
else:
                orders = orders.filter(delivery__date__isnull=True)
if gas.config.order_show_only_one_at_a_time:
field_type = "radio"
fields = []
for i,open_order in enumerate(orders):
if field_type == "radio":
selected = i == 0
else:
selected = True
fields.append({
'field_type' : field_type,
'field_label' : open_order,
'field_name' : 'order',
'field_values' : [{ 'value' : open_order.pk, 'selected' : selected}]
})
ctx = {
'block_name' : self.description,
'fields': fields,
}
return render_to_xml_response('options.xml', ctx)
def _get_edit_multiple_form_class(self):
qs = self._get_resource_list(self.request)
return formset_factory(
form=SingleGASMemberOrderForm,
formset=BaseFormSetWithRequest,
extra=qs.count() - self.__get_gmos(qs).count()
)
def __get_gmos(self, gsop):
log.debug("order block __get_gmos (%s)" % (self.request.resource.gasmember))
return GASMemberOrder.objects.filter(
ordered_product__in=gsop,
purchaser=self.request.resource.gasmember
)
def _get_records(self, request, querySet):
"""Return records of rendered table fields."""
# [:] forces evaluation of the querySet
#FIXME: filtering by purchaser not ok --> return all orders for all gasmembers
gmos = self.__get_gmos(querySet)[:]
data = {}
i = 0
c = querySet.count()
        # Store mapping between GSSOP-id and needed info: formset_index and ordered_total
gmo_info = { }
gmo_lint = GASMemberOrder()
for i,el in enumerate(querySet):
try:
#TODO: to be improved in performance
gmo = el.gasmember_order_set.get(
purchaser=self.request.resource.gasmember
)
except GASMemberOrder.DoesNotExist:
gmo=gmo_lint
key_prefix = 'form-%d' % i
data.update({
'%s-id' % key_prefix : gmo.pk,
'%s-ordered_amount' % key_prefix : gmo.ordered_amount or 0,
                '%s-ordered_price' % key_prefix : el.gasstock.price, # displayed as hidden field
                '%s-gsop_id' % key_prefix : el.pk, # displayed as hidden field
'%s-note' % key_prefix : gmo.note,
})
gmo_info[el.pk] = {
'formset_index' : i,
'ordered_total' : (el.gasstock.price or 0)*(gmo.ordered_amount or 0), # This is the total computed NOW (with ordered_product.price)
}
data['form-TOTAL_FORMS'] = c
data['form-INITIAL_FORMS'] = gmos.count()
data['form-MAX_NUM_FORMS'] = 0
formset = self._get_edit_multiple_form_class()(request, data)
records = []
for i,el in enumerate(querySet):
#log.debug("order ordered_amount (%s)" % (i))
try:
form = formset[gmo_info[el.pk]['formset_index']]
total = gmo_info[el.pk]['ordered_total']
except KeyError:
# GASMember has not ordered this product: build an empty form
form = SingleGASMemberOrderForm(self.request)
total = 0
#try:
form.fields['ordered_amount'].widget.attrs = {
'class' : 'amount',
'step' : el.gasstock.step or 1,
'minimum_amount' : el.gasstock.minimum_amount or 1,
's_url' : el.supplier.urn,
'p_url' : el.gasstock.stock.urn,
}
#'p_url' : el.product.urn,
records.append({
'id' : "%s %s %s %s" % (el.pk, form['id'], form['gsop_id'], form['ordered_price']),
'supplier' : el.supplier,
'product' : el.gasstock,
'price' : el.gasstock.price,
                'ordered_amount' : form['ordered_amount'], # field initialised with the minimum amount and carrying the step attribute
'ordered_total' : total,
'note' : form['note'],
'category' : el.product.category
})
#'description' : el.product.description,
#except KeyError:
# log.debug("order ordered_amount (%s %s)" % (el.pk, i))
return formset, records, {}
|
randyhook/knynet
|
simulator/sensors/SimAudioSensor.py
|
Python
|
mit
| 325 | 0.006154 |
from simulator.sensors.SimSensor import SimSensor
from environment.SensoryData import SensoryData
class
|
SimAudioSensor(SimSensor):
def __init__(self, parentBot, name):
super().__init__('Audio', parentBot, name)
def receiveAudio(self, audio):
return Sensor
|
yData(self.name, 'Audio', audio)
|
dedeco/cnddh-denuncias
|
cnddh/decorators.py
|
Python
|
apache-2.0
| 593 | 0.005059 |
# coding=latin-1
from flask import request, g
from flask import abort, flash
from functools import wra
|
ps
def checa_permissao(permissao):
def decorator(f):
@wraps(f)
def inner(*args, **kwargs):
if g.user and g.user.checa_permissao(permissao):
return f(*args, **kwargs)
else:
flash(u'Atenção você não possui a permissão: %s. Se isto não estiver correto, entre em contato solicit
|
ando esta permissão.' % permissao.upper(),u'notice')
abort(401)
return inner
return decorator
|
JeffRoy/mi-dataset
|
mi/dataset/driver/velpt_ab/dcl/velpt_ab_dcl_recovered_driver.py
|
Python
|
bsd-2-clause
| 2,466 | 0.003244 |
#!/usr/bin/env python
"""
@package mi.dataset.driver.velpt_ab.dcl
@file mi-dataset/mi/dataset/driver/velpt_ab/dcl/velpt_ab_dcl_recovered_driver.py
@author Joe Padula
@brief Recovered driver for the velpt_ab_dcl instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.velpt_ab_dcl import VelptAbDclParser, \
VelptAbDclParticleClassKey
from mi.dataset.parser.velpt_ab_dcl_particles import VelptAbDclDiagnosticsDataParticleRecovere
|
d, \
VelptAbDclInstrumentDataParticleRecovered, \
VelptAbDclDiagnosticsHeaderParticleRecovered
from mi.core.versioning im
|
port version
@version("15.7.0")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
"""
This is the method called by Uframe
    :param basePythonCodePath: This is the file system location of mi-dataset
    :param sourceFilePath: This is the full path and filename of the file to be parsed
    :param particleDataHdlrObj: Java Object to consume the output of the parser
    :return: particleDataHdlrObj
"""
with open(sourceFilePath, 'rb') as stream_handle:
# create and instance of the concrete driver class defined below
driver = VelptAbDclRecoveredDriver(basePythonCodePath, stream_handle, particleDataHdlrObj)
driver.processFileStream()
return particleDataHdlrObj
class VelptAbDclRecoveredDriver(SimpleDatasetDriver):
"""
The velpt_ab_dcl driver class extends the SimpleDatasetDriver.
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_dcl_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
VelptAbDclParticleClassKey.METADATA_PARTICLE_CLASS: VelptAbDclDiagnosticsHeaderParticleRecovered,
VelptAbDclParticleClassKey.DIAGNOSTICS_PARTICLE_CLASS: VelptAbDclDiagnosticsDataParticleRecovered,
VelptAbDclParticleClassKey.INSTRUMENT_PARTICLE_CLASS: VelptAbDclInstrumentDataParticleRecovered
}
}
parser = VelptAbDclParser(parser_config,
stream_handle,
self._exception_callback)
return parser
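# Minimal usage sketch: in production uFrame supplies the particle data handler;
# the handler class and the paths below are placeholders/assumptions, so this
# stays commented out.
#
#   from mi.dataset.dataset_driver import ParticleDataHandler
#   particle_handler = ParticleDataHandler()
#   parse('/path/to/mi-dataset', '/path/to/velpt_ab_dcl_recovered.log', particle_handler)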
|
Architektor/PySnip
|
venv/lib/python2.7/site-packages/twisted/test/test_stdio.py
|
Python
|
gpl-3.0
| 13,157 | 0.001216 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.stdio}.
@var properEnv: A copy of L{os.environ} which has L{bytes} keys/values on POSIX
platforms and native L{str} keys/values on Windows.
"""
from __future__ import absolute_import, division
import os
import sys
import itertools
from twisted.trial import unittest
from twisted.python import filepath, log
from twisted.python.reflect import requireModule
from twisted.python.runtime import platform
from twisted.python.compat import xrange, intToBytes, bytesEnviron
from twisted.internet import error, defer, protocol, stdio, reactor
from twisted.test.test_tcp import ConnectionLostNotifyingProtocol
# A short string which is intended to appear here and nowhere else,
# particularly not in any random garbage output CPython unavoidably
# generates (such as in warning text and so forth). This is searched
# for in the output from stdio_test_lastwrite and if it is found at
# the end, the functionality works.
UNIQUE_LAST_WRITE_STRING = b'xyz123abc Twisted is great!'
skipWindowsNopywin32 = None
if platform.isWindows():
if requireModule('win32process') is None:
skipWindowsNopywin32 = ("On windows, spawnProcess is not available "
"in the absence of win32process.")
properEnv = dict(os.environ)
properEnv["PYTHONPATH"] = os.pathsep.join(sys.path)
else:
properEnv = bytesEnviron()
properEnv[b"PYTHONPATH"] = os.pathsep.join(sys.path).encode(
sys.getfilesystemencoding())
class StandardIOTestProcessProtocol(protocol.ProcessProtocol):
"""
Test helper for collecting output from a child process and notifying
something when it exits.
@ivar onConnection: A L{defer.Deferred} which will be called back with
C{None} when the connection to the child process is established.
@ivar onCompletion: A L{defer.Deferred} which will be errbacked with the
failure associated with the child process exiting when it exits.
@ivar onDataReceived: A L{defer.Deferred} which will be called back with
this instance whenever C{childDataReceived} is called, or C{None} to
suppress these callbacks.
@ivar data: A C{dict} mapping file descriptors to strings containing all
bytes received from the child process on each file descriptor.
"""
onDataReceived = None
|
def __init__(self):
self.onConnection = defer.Deferred()
self.onCompletion = defer.Deferred()
self.data = {}
def conn
|
ectionMade(self):
self.onConnection.callback(None)
def childDataReceived(self, name, bytes):
"""
Record all bytes received from the child process in the C{data}
dictionary. Fire C{onDataReceived} if it is not C{None}.
"""
self.data[name] = self.data.get(name, b'') + bytes
if self.onDataReceived is not None:
d, self.onDataReceived = self.onDataReceived, None
d.callback(self)
def processEnded(self, reason):
self.onCompletion.callback(reason)
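# A minimal usage sketch for the helper above (illustrative only; the real
# tests below drive it through _spawnProcess, and the child module name here
# is an assumption for the example):
#
#   proto = StandardIOTestProcessProtocol()
#   reactor.spawnProcess(
#       proto, sys.executable,
#       [sys.executable, b"-m", b"twisted.test.stdio_test_write",
#        reactor.__class__.__module__],
#       env=properEnv)
#   # onCompletion fires its errback chain with the ProcessDone/ProcessTerminated
#   # failure when the child exits; proto.data then holds the collected output.
#   proto.onCompletion.addErrback(lambda reason: proto.data)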
class StandardInputOutputTests(unittest.TestCase):
skip = skipWindowsNopywin32
def _spawnProcess(self, proto, sibling, *args, **kw):
"""
Launch a child Python process and communicate with it using the
given ProcessProtocol.
@param proto: A L{ProcessProtocol} instance which will be connected
to the child process.
@param sibling: The basename of a file containing the Python program
to run in the child process.
@param *args: strings which will be passed to the child process on
the command line as C{argv[2:]}.
@param **kw: additional arguments to pass to L{reactor.spawnProcess}.
@return: The L{IProcessTransport} provider for the spawned process.
"""
args = [sys.executable,
b"-m", b"twisted.test." + sibling,
reactor.__class__.__module__] + list(args)
return reactor.spawnProcess(
proto,
sys.executable,
args,
env=properEnv,
**kw)
def _requireFailure(self, d, callback):
def cb(result):
self.fail("Process terminated with non-Failure: %r" % (result,))
def eb(err):
return callback(err)
return d.addCallbacks(cb, eb)
def test_loseConnection(self):
"""
Verify that a protocol connected to L{StandardIO} can disconnect
itself using C{transport.loseConnection}.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_loseconn', errorLogFile)
def processEnded(reason):
# Copy the child's log to ours so it's more visible.
with open(errorLogFile, 'r') as f:
for line in f:
log.msg("Child logged: " + line.rstrip())
self.failIfIn(1, p.data)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_readConnectionLost(self):
"""
When stdin is closed and the protocol connected to it implements
L{IHalfCloseableProtocol}, the protocol's C{readConnectionLost} method
is called.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
p.onDataReceived = defer.Deferred()
def cbBytes(ignored):
d = p.onCompletion
p.transport.closeStdin()
return d
p.onDataReceived.addCallback(cbBytes)
def processEnded(reason):
reason.trap(error.ProcessDone)
d = self._requireFailure(p.onDataReceived, processEnded)
self._spawnProcess(
p, b'stdio_test_halfclose', errorLogFile)
return d
def test_lastWriteReceived(self):
"""
Verify that a write made directly to stdout using L{os.write}
after StandardIO has finished is reliably received by the
process reading that stdout.
"""
p = StandardIOTestProcessProtocol()
# Note: the OS X bug which prompted the addition of this test
# is an apparent race condition involving non-blocking PTYs.
# Delaying the parent process significantly increases the
# likelihood of the race going the wrong way. If you need to
# fiddle with this code at all, uncommenting the next line
# will likely make your life much easier. It is commented out
# because it makes the test quite slow.
# p.onConnection.addCallback(lambda ign: __import__('time').sleep(5))
try:
self._spawnProcess(
p, b'stdio_test_lastwrite', UNIQUE_LAST_WRITE_STRING,
usePTY=True)
except ValueError as e:
# Some platforms don't work with usePTY=True
raise unittest.SkipTest(str(e))
def processEnded(reason):
"""
Asserts that the parent received the bytes written by the child
immediately after the child starts.
"""
self.assertTrue(
p.data[1].endswith(UNIQUE_LAST_WRITE_STRING),
"Received %r from child, did not find expected bytes." % (
p.data,))
reason.trap(error.ProcessDone)
return self._requireFailure(p.onCompletion, processEnded)
def test_hostAndPeer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
has C{getHost} and C{getPeer} methods.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b'stdio_test_hostpeer')
def processEnded(reason):
host, peer = p.data[1].splitlines()
self.assertTrue(host)
self.assertTrue(peer)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_write(self):
|
andaviaco/tronido
|
src/syntax/types/integer.py
|
Python
|
mit
| 370 | 0.002703 |
from lexer import lang
from ..tree import Node
class Integer(Node):
    """Syntax tree node for an integer literal."""
    datatype = lang.SEMANTIC_INT_TYPE
def __init__(self, symbol, token):
|
        super().__init__(symbol, token)
def generate_code(self, **cond):
array, line = Node.assignated_array()
Node.array_append(array, f'{line} LIT {self.symbol}, 0')
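        # Illustrative only: if the current target line were 12 and this node's
        # symbol were 42, the call above would append the string '12 LIT 42, 0'
        # to the array returned by Node.assignated_array().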
| |
ambitioninc/django-tour
|
setup.py
|
Python
|
mit
| 1,710 | 0.003509 |
# import multiprocessing to avoid this bug (http://bugs.python.org/issue15881#msg170215)
import multiprocessing
assert multiprocessing
import re
from setuptools import setup, find_packages
def get_version():
"""
Extracts the version number from the version.py file.
"""
VERSION_FILE = 'tour/version.py'
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', open(VERSION_FILE, 'rt').read(), re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
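# For reference, the regex in get_version() matches a module-level assignment
# in tour/version.py of the form (the version value below is illustrative):
#
#   __version__ = '1.2.3'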
setup(
name='django-tour',
version=get_version(),
description='Require the django user to complete a series of steps with custom logic',
long_description=open('README.md').read(),
|
    url='https://github.com/ambitioninc/django-tour',
author='Wes Okes',
author_email='wes.okes@gmail.com',
keywords='',
|
    packages=find_packages(),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Framework :: Django',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
],
license='MIT',
install_requires=[
'Django>=1.7',
'djangorestframework>=2.3.13',
'django-manager-utils>=0.8.2',
'django_filter>=0.7',
],
tests_require=[
'psycopg2',
'django-nose>=1.4',
'mock==1.0.1',
'django_dynamic_fixture',
],
test_suite='run_tests.run_tests',
include_package_data=True,
)
|