repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars)
---|---|---|---|---|---|---|---|---|
chepazzo/ansible-modules-extras | cloud/amazon/ec2_eni.py | Python | gpl-3.0 | 13,881 | 0.004322 |
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
- Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance.
version_added: "2.0"
author: Rob White, wimnat [at] gmail.com, @wimnat
options:
eni_id:
description:
- The ID of the ENI
required: false
default: null
instance_id:
description:
- Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'.
required: false
default: null
private_ip_address:
description:
- Private IP address.
required: false
default: null
subnet_id:
description:
- ID of subnet in which to create the ENI. Only required when state=present.
required: true
description:
description:
- Optional description of the ENI.
required: false
default: null
security_groups:
description:
- List of security groups associated with the interface. Only used when state=present.
required: false
default: null
state:
description:
- Create or delete ENI.
required: false
default: present
choices: [ 'present', 'absent' ]
device_index:
description:
- The index of the device for the network interface attachment on the instance.
required: false
default: 0
force_detach:
description:
- Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
required: false
default: no
delete_on_termination:
description:
- Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation.
required: false
source_dest_check:
description:
- By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI and attach it to an instance
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
eni_id: eni-xxxxxxx
force_detach: yes
state: absent
# Update an ENI
- ec2_eni:
eni_id: eni-xxxxxxx
description: "My new description"
state: present
# Detach an ENI from an instance
- ec2_eni:
eni_id: eni-xxxxxxx
instance_id: None
state: present
### Delete an interface on termination
# First create the interface
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
register: eni
# Modify the interface to enable the delete_on_termination flag
- ec2_eni:
eni_id: "{{ eni.interface.id }}"
delete_on_termination: true
'''
import time
import xml.etree.ElementTree as ET
import re
try:
import boto.ec2
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_error_message(xml_string):
root = ET.fromstring(xml_string)
for message in root.findall('.//Message'):
return message.text
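# Illustrative usage, not part of the original module: get_error_message() pulls the
# human-readable <Message> text out of a boto error body, for example:
#   >>> get_error_message('<Response><Errors><Error><Message>eni not found</Message></Error></Errors></Response>')
#   'eni not found'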
def get_eni_info(interface):
interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
'vpc_id': interface.vpc_id,
'description': interface.description,
'owner_id': interface.owner_id,
'status': interface.status,
'mac_address': interface.mac_address,
'private_ip_address': interface.private_ip_address,
'source_dest_check': interface.source_dest_check,
'groups': dict((group.id, group.name) for group in interface.groups),
}
if interface.attachment is not None:
interface_info['attachment'] = {'attachment_id': interface.attachment.id,
'instance_id': interface.attachment.instance_id,
'device_index': interface.attachment.device_index,
'status': interface.attachment.status,
'attach_time': interface.attachment.attach_time,
'delete_on_termination': interface.attachment.delete_on_termination,
}
return interface_info
def wait_for_eni(eni, status):
while True:
time.sleep(3)
eni.update()
# If the status is detached we just need attachment to disappear
if eni.attachment is None:
if status == "detached":
break
else:
if status == "attached" and eni.attachment.status == "attached":
break
def create_eni(connection, module):
instance_id = module.params.get("instance_id")
if instance_id == 'None':
instance_id = None
device_index = module.params.get("device_index")
subnet_id = module.params.get('subnet_id')
private_ip_address = module.params.get('private_ip_address')
description = module.params.get('description')
security_groups = module.params.get('security_groups')
changed = False
try:
eni = compare_eni(connection, module)
if eni is None:
eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
if instance_id is not None:
try:
eni.attach(instance_id, device_index)
except BotoServerError:
eni.delete()
raise
# Wait to allow creation / attachment to finish
wait_for_eni(eni, "attached")
eni.update()
changed = True
except BotoServerError as e:
module.fail_json(msg=get_error_message(e.args[2]))
module.exit_json(changed=changed, interface=get_eni_info(eni))
def modify_eni(connection, module):
eni_id = module.params.get("eni_id")
instance_id = module.params.get("instance_id")
if instance_id == 'None':
instance_id = None
do_detach = True
else:
do_detach = False
device_index = module.params.get("device_index")
description = module.params.get('description')
security_groups = module.params.get('security_groups')
force_detach = module.params.get("force_detach")
source_dest_check = module.params.get("source_dest_check")
delete_on_termination = module.params.get("delete_on_termination")
changed = False
try:
# Get the eni with the eni_id specified
eni_result_set = connection.get_all_network_interfaces(eni_id)
|
liminspace/dju-privateurl | tests/urls.py | Python | mit | 188 | 0 |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'', include('dju_privateurl.urls')),
]
|
ianjuma/usiu-app-dir | benchcare/patients/forms.py | Python | gpl-2.0 | 775 | 0.00129 |
from patients.models import Patient, Next_of_Kin, Vitals, Visits, Diagnosis, Medication, History, Documents
from django import forms
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
class Next_of_KinForm(forms.ModelForm):
class Meta:
model = Next_of_Kin
class VitalsForm(forms.ModelForm):
class Meta:
model = Vitals
class VisitsForm(forms.ModelForm):
class Meta:
model = Visits
class DiagnosisForm(forms.ModelForm):
class Meta:
model = Diagnosis
class MedicationForm(forms.ModelForm):
class Meta:
model = Medication
class HistoryForm(forms.ModelForm):
class Meta:
model = History
class DocumentsForm(forms.ModelForm):
class Meta:
model = Documents
|
AntoineAugusti/katas | rosalind/long.py | Python | mit | 1,108 | 0.001805 |
# http://rosalind.info/problems/long/
def superstring(arr, accumulator=''):
# We now have all strings
|
if len(arr) == 0:
return accumulator
# Initial call
elif len(accumulator) == 0:
accumulator = arr.pop(0)
return superstring(arr, accumulator)
# Recursive call
else:
for i in range(len(arr)):
sample = arr[i]
l = len(sample)
for p in range(l / 2):
q = l - p
if accumulator.startswith(sample[p:]):
arr.pop(i)
return superstring(arr, sample[:p] + accumulator)
if accumulator.endswith(sample[:q]):
arr.pop(i)
return superstring(arr, accumulator + sample[q:])
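# Worked example (illustrative, not in the original file). Rosalind's LONG problem
# guarantees that consecutive reads overlap by more than half their length, which is
# why p only has to run up to len(sample) / 2 above. Under that guarantee the greedy
# merge reconstructs the superstring, e.g.:
#   >>> superstring(['ATTAGACCTG', 'AGACCTGCCG'])
#   'ATTAGACCTGCCG'    # the 7-character overlap 'AGACCTG' is collapsed once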
f = open("rosalind_long.txt", "r")
dnas = {}
currentKey = ''
for content in f:
# Beginning of a new sample
if '>' in content:
key = content.rstrip().replace('>', '')
currentKey = key
dnas[currentKey] = ''
else:
dnas[currentKey] += content.rstrip()
print superstring(dnas.values())
|
JohanComparat/pySU | spm/bin/combine_model_spectra_write_scripts.py | Python | cc0-1.0 | 1,246 | 0.028892 |
import glob
import os
from os.path import join
import numpy as n
def writeScript(rootName, plate, env):
f=open(rootName+".sh",'w')
f.write("#!/bin/bash \n")
f.write("#PBS -l walltime=40:00:00 \n")
f.write("#PBS -o "+plate+".o.$PBS_JOBID \n")
f.write("#PBS -e "+plate+".e$PBS_JOBID \n")
f.write("#PBS -M comparat@mpe.mpg.de \n")
f.write("module load apps/anaconda/2.4.1 \n")
f.write("module load apps/python/2.7.8/gcc-4.4.7 \n")
f.write("export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/galaxy/python/ \n")
f.write("export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/spm/python/ \n")
f.write(" \n")
f.write("cd /users/comparat/pySU/spm/bin \n")
specList = n.array(glob.glob(os.path.join(os.environ[env], 'stellarpop-m11-chabrier', 'stellarpop', plate, 'spFly*.fits')))
data = n.array([os.path.basename(specName).split('-') for specName in specList])
for el in data :
f.write("python combine_model_spectra.py "+el[1]+" "+el[2]+" "+el[3]+" "+env+" \n")
f.write(" \n")
f.close()
env="SDSSDR12_DIR"
plates = n.loadtxt( join(os.environ[env], "catalogs", "plateNumberList"), unpack=True, dtype='str')
for plate in plates:
rootName = join(os.environ['HOME'], "batch_combine_sdss", plate)
writeScript(rootName, plate, env)
|
ibarria0/Cactus | cactus/plugin/manager.py | Python | bsd-3-clause | 2,122 | 0.001885 |
#coding:utf-8
import functools
from cactus.utils.internal import getargspec
from cactus.plugin import defaults
class PluginManager(object):
def __init__(self, site, loaders):
self.site = site
self.loaders = loaders
self.reload()
for plugin_method in defaults.DEFAULTS:
if not hasattr(self, plugin_method):
setattr(self, plugin_method, functools.partial(self.call, plugin_method))
def reload(self):
plugins = []
for loader in self.loaders:
plugins.extend(loader.load())
self.plugins = sorted(plugins, key=lambda plugin: plugin.ORDER)
def call(self, method, *args, **kwargs):
"""
Call each plugin
"""
for plugin in self.plugins:
_meth = getattr(plugin, method)
|
_meth(*args, **kwargs)
def preBuildPage(self, site, page, context, data):
"""
Special call as we have changed the API for this.
We have two calling conventions:
- The new one, which passes page, context, data
- The deprecated one, which also passes the site (Now accessible via the page)
"""
for plugin in self.plugins:
# Find the correct calling convention
new = [page, context, data]
deprecated = [site, page, context, data]
arg_lists = dict((len(l), l) for l in [deprecated, new])
try:
# Try to find the best calling convention
n_args = len(getargspec(plugin.preBuildPage).args)
# Just use the new calling convention if there's fancy usage of
# *args, **kwargs that we can't control.
arg_list = arg_lists.get(n_args, new)
except NotImplementedError:
# If we can't get the number of args, use the new one.
arg_list = new
# Call with the best calling convention we have.
# If that doesn't work, then we'll let the error escalate.
context, data = plugin.preBuildPage(*arg_list)
return context, data
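# Illustrative sketch, not part of the original module: preBuildPage() above counts the
# arguments of each plugin's hook (via getargspec) and picks the matching calling
# convention. Hypothetically, a plugin on the new convention would define
#
#     def preBuildPage(page, context, data):
#         return context, data
#
# while one on the deprecated convention would define
#
#     def preBuildPage(site, page, context, data):
#         return context, data
#
# Both must return (context, data), which the manager threads into the next plugin.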
|
predakanga/plugin.video.catchuptv.au.ninemsn | resources/lib/ninemsnvideo/objects.py | Python | mit | 3,248 | 0.011084 |
#
# NineMSN CatchUp TV Video API Library
#
# This code is forked from Network Ten CatchUp TV Video API Library
# Copyright (c) 2013 Adam Malcontenti-Wilson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from brightcove.core import APIObject, Field, DateTimeField, ListField, EnumField
from brightcove.objects import ItemCollection, enum
ChannelNameEnum = enum('ten', 'eleven', 'one')
PlaylistTypeEnum = enum('full_episodes', 'web_extras', 'news', 'season', 'week', 'category', 'special', 'preview')
MediaDeliveryEnum = enum('default', 'http', 'http_ios')
class EnumNumField(Field):
def __init__(self, enum_cls, help=None):
self.help = help
self.enum_cls = enum_cls
def to_python(self, value):
for i, field in enumerate(self.enum_cls._fields):
if i == value:
return field
raise Exception('Invalid Enum: %s' % value)
def from_python(self, value):
return self.enum_cls._fields[value]
class Playlist(APIObject):
_fields = ['name', 'type', 'season', 'week', 'query']
type = EnumField(PlaylistTypeEnum)
def __repr__(self):
return '<Playlist name=\'{0}\'>'.format(self.name)
class Show(APIObject):
_fields = ['showName', 'channelName', 'videoLink', 'mobileLink', 'logo', 'fanart', 'playlists']
channelName = EnumField(ChannelNameEnum)
playlists = ListField(Playlist)
def __repr__(self):
return '<Show name=\'{0}\'>'.format(self.showName)
class AMFRendition(APIObject):
_fields = ['defaultURL', 'audioOnly', 'mediaDeliveryType', 'encodingRate',
'frameHeight', 'frameWidth', 'size',
'videoCodec', 'videoContainer']
mediaDeliveryType = EnumNumField(MediaDeliveryEnum)
def __repr__(self):
return '<Rendition bitrate=\'{0}\' type=\'{1}\' frameSize=\'{2}x{3}\'>'.format(self.encodingRate, self.mediaDeliveryType, self.frameWidth, self.frameHeight)
class ShowItemCollection(ItemCollection):
_item_class = Show
items = ListField(Show)
class PlaylistItemCollection(ItemCollection):
_item_class = Playlist
items = ListField(Playlist)
class MediaRenditionItemCollection(ItemCollection):
_item_class = AMFRendition
items = ListField(AMFRendition)
|
USStateDept/FPA_Core | openspending/model/dataorg.py | Python | agpl-3.0 | 5,452 | 0.004585 |
from datetime import datetime
from sqlalchemy.orm import reconstructor, relationship, backref
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, Unicode, Boolean, DateTime
from sqlalchemy import BigInteger
from sqlalchemy.sql.expression import false, or_
from sqlalchemy.ext.associationproxy import association_proxy
from openspending.core import db
from openspending.model.common import (MutableDict, JSONType,
DatasetFacetMixin)
class DataOrg(db.Model):
""" The dataset is the core entity of any access to data. All
requests to the actual data store are routed through it, as well
as data loading and model generation.
The dataset keeps an in-memory representation of the data model
(including all dimensions and measures) which can be used to
generate necessary queries.
"""
__tablename__ = 'dataorg'
__searchable__ = ['label', 'description']
id = Column(Integer, primary_key=True)
label = Column(Unicode(2000))
description = Column(Unicode())
ORTemplate = Column(MutableDict.as_mutable(JSONType), default=dict)
mappingTemplate = Column(MutableDict.as_mutable(JSONType), default=dict)
prefuncs = Column(MutableDict.as_mutable(JSONType), default=dict)
lastUpdated = Column(DateTime, onupdate=datetime.utcnow)
#metadataorg_id = Column(Integer, ForeignKey('metadataorg.id'))
# metadataorg = relationship(MetadataOrg,
# backref=backref('dataorgs', lazy='dynamic'))
def __init__(self, dataorg=None):
if not dataorg:
return
self.label = dataorg.get('label')
self.description = dataorg.get('description')
self.ORTemplate = dataorg.get('ORTemplate', {})
self.mappingTemplate = dataorg.get('mappingTemplate', {})
self.prefuncs = dataorg.get('prefuncs', {})
self.lastUpdated = datetime.utcnow()
def touch(self):
""" Update the dataset timestamp. This is used for cache
invalidation. """
self.updated_at = datetime.utcnow()
db.session.add(self)
def to_json_dump(self):
""" Returns a JSON representation of an SQLAlchemy-backed object.
"""
json = {}
json['fields'] = {}
json['pk'] = getattr(self, 'id')
json['model'] = "DataOrg"
fields = ['label','description','ORTemplate','mappingTemplate','prefuncs']
for field in fields:
json['fields'][field] = getattr(self, field)
return json
@classmethod
def import_json_dump(cls, theobj):
fields = ['label','description','ORTemplate','mappingTemplate','prefuncs']
classobj = cls()
for field in fields:
setattr(classobj, field, theobj['fields'][field])
#classobj.set(field, theobj['fields'][field])
db.session.add(classobj)
db.session.commit()
return classobj.id
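# Illustrative round trip (assumption, not in the original file):
#   dump = some_dataorg.to_json_dump()        # {'fields': {...}, 'pk': <id>, 'model': 'DataOrg'}
#   new_id = DataOrg.import_json_dump(dump)   # creates a new row from dump['fields'] and returns its id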
def __repr__(self):
return "<DataOrg(%r,%r)>" % (self.id, self.label)
def update(self, dataorg):
self.label = dataorg.get('label')
self.description = dataorg.get('description')
self.ORTemplate = dataorg.get('ORTemplate', {})
self.mappingTemplate = dataorg.get('mappingTemplate', {})
self.prefuncs = dataorg.get('prefuncs', {})
self.lastUpdated = datetime.utcnow()
def as_dict(self):
return {
'id' : self.id,
'label': self.label,
'description': self.description,
'lastUpdated': self.lastUpdated
}
@classmethod
def get_all_admin(cls, order=True):
""" Query available datasets based on dataset visibility. """
q = db.session.query(cls)
if order:
q = q.order_by(cls.label.asc())
return q
@classmethod
def get_all(cls, order=True):
""" Query available datasets based on dataset visibility. """
q = db.session.query(cls)
if order:
q = q.order_by(cls.label.asc())
return q
@classmethod
def all(cls, order=True):
""" Query available datasets based on dataset visibility. """
q = db.session.query(cls)
if order:
q = q.order_by(cls.label.asc())
return q
@classmethod
def by_name(cls, label):
return db.session.query(cls).filter_by(label=label).first()
@classmethod
def by_id(cls, id):
return db.session.query(cls).filter_by(id=id).first()
#TODO
# class MetadataOrgSettings(colander.MappingSchema):
# fullname = colander.SchemaNode(colander.String())
# email = colander.SchemaNode(colander.String(),
# validator=colander.Email())
# public_email = colander.SchemaNode(colander.Boolean(), missing=False)
# twitter = colander.SchemaNode(colander.String(), missing=None,
# validator=colander.Length(max=140))
# public_twitter = colander.SchemaNode(colander.Boolean(), missing=False)
# password1 = colander.SchemaNode(colander.String(),
# missing=None, default=None)
# password2 = colander.SchemaNode(colander.String(),
# missing=None, default=None)
# script_root = colander.SchemaNode(colander.String(),
# missing=None, default=None)
|
wolverineav/neutron | neutron/tests/unit/agent/l3/test_router_info.py | Python | apache-2.0 | 16,514 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from neutron.agent.common import config as agent_config
from neutron.agent.l3 import router_info
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.tests import base
_uuid = uuidutils.generate_uuid
class TestRouterInfo(base.BaseTestCase):
def setUp(self):
super(TestRouterInfo, self).setUp()
conf = agent_config.setup_conf()
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
self.ri_kwargs = {'agent_conf': conf,
'interface_driver': mock.sentinel.interface_driver}
def _check_agent_method_called(self, calls):
self.mock_ip.netns.execute.assert_has_calls(
[mock.call(call, check_exit_code=False) for call in calls],
any_order=True)
def test_routing_table_update(self):
ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs)
ri.router = {}
fake_route1 = {'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}
fake_route2 = {'destination': '135.207.111.111/32',
'nexthop': '1.2.3.4'}
ri.update_routing_table('replace', fake_route1)
expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16',
'via', '1.2.3.4']]
self._check_agent_method_called(expected)
ri.update_routing_table('delete', fake_route1)
expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16',
'via', '1.2.3.4']]
self._check_agent_method_called(expected)
ri.update_routing_table('replace', fake_route2)
expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32',
'via', '1.2.3.4']]
self._check_agent_method_called(expected)
ri.update_routing_table('delete', fake_route2)
expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32',
'via', '1.2.3.4']]
self._check_agent_method_called(expected)
def test_update_routing_table(self):
# Just verify the correct namespace was used in the call
uuid = _uuid()
netns = 'qrouter-' + uuid
fake_route1 = {'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}
ri = router_info.RouterInfo(uuid, {'id': uuid}, **self.ri_kwargs)
ri._update_routing_table = mock.Mock()
ri.update_routing_table('replace', fake_route1)
ri._update_routing_table.assert_called_once_with('replace',
fake_route1,
netns)
def test_routes_updated(self):
ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs)
ri.router = {}
fake_old_routes = []
fake_new_routes = [{'destination': "110.100.31.0/24",
'nexthop': "10.100.10.30"},
{'destination': "110.100.30.0/24",
'nexthop': "10.100.10.30"}]
ri.routes = fake_old_routes
ri.router['routes'] = fake_new_routes
ri.routes_updated(fake_old_routes, fake_new_routes)
expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24',
'via', '10.100.10.30'],
['ip', 'route', 'replace', 'to', '110.100.31.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(expected)
ri.routes = fake_new_routes
fake_new_routes = [{'destination': "110.100.30.0/24",
|
'nexthop': "10.100.10.30"}]
ri.router['routes'] = fake_new_routes
ri.routes_updated(ri.routes, fake_new_routes)
expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(expected)
fake_new_routes = []
ri.router['routes'] = fake_new_routes
ri.routes_updated(ri.routes, fake_new_routes)
expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(expected)
def test_add_ports_address_scope_iptables(self):
ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs)
port = {
'id': _uuid(),
'fixed_ips': [{'ip_address': '172.9.9.9'}],
'address_scopes': {l3_constants.IP_VERSION_4: '1234'}
}
ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock()
ri.get_address_scope_mark_mask = mock.Mock(return_value='fake_mark')
ri.get_internal_device_name = mock.Mock(return_value='fake_device')
ri.rt_tables_manager = mock.MagicMock()
ri.process_external_port_address_scope_routing = mock.Mock()
ri.process_floating_ip_address_scope_rules = mock.Mock()
ri.iptables_manager._apply = mock.Mock()
ri.router[l3_constants.INTERFACE_KEY] = [port]
ri.process_address_scope()
ipv4_mangle.add_rule.assert_called_once_with(
'scope', ri.address_scope_mangle_rule('fake_device', 'fake_mark'))
class BasicRouterTestCaseFramework(base.BaseTestCase):
def _create_router(self, router=None, **kwargs):
if not router:
router = mock.MagicMock()
self.agent_conf = mock.Mock()
self.router_id = _uuid()
return router_info.RouterInfo(self.router_id,
router,
self.agent_conf,
mock.sentinel.interface_driver,
**kwargs)
class TestBasicRouterOperations(BasicRouterTestCaseFramework):
def test_get_floating_ips(self):
router = mock.MagicMock()
router.get.return_value = [mock.sentinel.floating_ip]
ri = self._create_router(router)
fips = ri.get_floating_ips()
self.assertEqual([mock.sentinel.floating_ip], fips)
def test_process_floating_ip_nat_rules(self):
ri = self._create_router()
fips = [{'fixed_ip_address': mock.sentinel.ip,
'floating_ip_address': mock.sentinel.fip}]
ri.get_floating_ips = mock.Mock(return_value=fips)
ri.iptables_manager = mock.MagicMock()
ipv4_nat = ri.iptables_manager.ipv4['nat']
ri.floating_forward_rules = mock.Mock(
return_value=[(mock.sentinel.chain, mock.sentinel.rule)])
ri.process_floating_ip_nat_rules()
# Be sure that the rules are cleared first and apply is called last
self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
ipv4_nat.mock_calls[0])
self.assertEqual(mock.call.apply(), ri.iptables_manager.mock_calls[-1])
# Be sure that add_rule is called somewhere in the middle
ipv4_nat.add_rule.assert_called_once_with(mock.sentinel.chain,
mock.sentinel.rule,
tag='floating_ip')
def test_process_floating_ip_nat_rules_removed(self):
ri = self._create_router()
ri.get_floating_ips = mock.Mock(return_value=[])
ri.iptables_manager = mock.MagicMock()
ipv4_nat = ri.iptables_manager.ipv4['nat']
ri.process_floating_ip_n
|
devyn/unholy | decompyle/decompyle/dis_files.py | Python | mit | 1,163 | 0.006019 |
import magics
__all__ = ['by_version', 'by_magic']
_fallback = {
'EXTENDED_ARG': None,
'hasfree': [],
}
class dis(object):
def __init__(self, version, module):
self._version = version
from __builtin__ import __import__
self._module = __import__('decompyle.%s' % module, globals(),
locals(), 'decompyle')
def __getattr__(self, attr):
try:
val = self._module.__dict__[attr]
except KeyError, e:
if _fallback.has_key(attr):
val = _fallback[attr]
else:
raise e
return val
by_version = {
'1.5': dis('1.5', 'dis_15'),
'1.6': dis('1.6', 'dis_16'),
'2.0': dis('2.0', 'dis_20'),
'2.1': dis('2.1', 'dis_21'),
|
'2.2': dis('2.2', 'dis_22'),
'2.3': dis('2.3', 'dis_23'),
'2.4': dis('2.4', 'dis_24'),
'2.5': dis('2.5', 'dis_25'),
}
by_magic = dict( [ (mag, by_version[ver])
for mag, ver in magics.versions.iteritems() ] )
if __name__ == '__main__':
for m, ver in by_magic.items():
magics.__show(ver, m)
print by_version['2.2'].hasjrel
|
plotly/plotly.py | packages/python/plotly/plotly/validators/layout/xaxis/tickfont/_color.py | Python | mit | 418 | 0.002392 |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="layout.xaxis.tickfont", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks"),
**kwargs
)
|
insiderr/insiderr-app | app/modules/requests/packages/chardet/big5prober.py | Python | gpl-3.0 | 1,685 | 0.000593 |
# ####################### BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
|
StrellaGroup/frappe | frappe/website/doctype/website_settings/website_settings.py | Python | mit | 4,709 | 0.026545 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import get_request_site_address, encode
from frappe.model.document import Document
from six.moves.urllib.parse import quote
from frappe.website.router import resolve_route
from frappe.website.doctype.website_theme.website_theme import add_website_theme
class WebsiteSettings(Document):
def validate(self):
self.validate_top_bar_items()
self.validate_footer_items()
self.validate_home_page()
def validate_home_page(self):
if frappe.flags.in_install:
return
if self.home_page and not resolve_route(self.home_page):
frappe.msgprint(_("Invalid Home Page") + " (Standard pages - index, login, products, blog, about, contact)")
self.home_page = ''
def validate_top_bar_items(self):
"""validate url in top bar items"""
for top_bar_item in self.get("top_bar_items"):
if top_bar_item.parent_label:
parent_label_item = self.get("top_bar_items", {"label": top_bar_item.parent_label})
if not parent_label_item:
# invalid item
frappe.throw(_("{0} does not exist in row {1}").format(top_bar_item.parent_label, top_bar_item.idx))
elif not parent_label_item[0] or parent_label_item[0].url:
# parent cannot have url
frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(top_bar_item.parent_label,
top_bar_item.idx))
def validate_footer_items(self):
"""validate url in top bar items"""
for footer_item in self.get("footer_items"):
if footer_item.parent_label:
parent_label_item = self.get("footer_items", {"label": footer_item.parent_label})
if not parent_label_item:
# invalid item
frappe.throw(_("{0} does not exist in row {1}").format(footer_item.parent_label, footer_item.idx))
elif not parent_label_item[0] or parent_label_item[0].url:
# parent cannot have url
frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(footer_item.parent_label,
footer_item.idx))
def on_update(self):
self.clear_cache()
def clear_cache(self):
# make js and css
# clear web cache (for menus!)
frappe.clear_cache(user = 'Guest')
from frappe.website.render import clear_cache
clear_cache()
# clears role based home pages
frappe.clear_cache()
def get_website_settings():
hooks = frappe.get_hooks()
context = frappe._dict({
'top_bar_items': get_items('top_bar_items'),
'footer_items': get_items('footer_items'),
"post_login": [
{"label": _("My Account"), "url": "/me"},
# {"class": "divider"},
{"label": _("Logout"), "url": "/?cmd=web_logout"}
]
})
settings = frappe.get_single("Website Settings")
for k in ["banner_html", "brand_html", "copyright", "twitter_share_via",
"facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup", "hide_footer_signup", "head_html", "title_prefix",
"navbar_search"]:
if hasattr(settings, k):
context[k] = settings.get(k)
if settings.address:
context["footer_address"] = settings.address
for k in ["facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup"]:
context[k] = int(context.get(k) or 0)
if frappe.request:
context.url = quote(str(get_request_site_address(full_address=True)), safe="/:")
context.encoded_title = quote(encode(context.title or ""), str(""))
for update_website_context in hooks.update_website_context or []:
frappe.get_attr(update_website_context)(context)
context.web_include_js = hooks.web_include_js or []
context.web_include_css = hooks.web_include_css or []
|
via_hooks = frappe.get_hooks("website_context")
for key in via_hooks:
context[key] = via_hooks[key]
if key not in ("top_bar_items", "footer_items", "post_login") \
and isinstance(context[key], (list, tuple)):
context[key] = context[key][-1]
add_website_theme(context)
if not context.get("favicon"):
context["favicon"] = "/assets/frappe/images/favicon.png"
if settings.favicon and settings.favicon != "attach_files:":
context["favicon"] = settings.favicon
return context
def get_items(parentfield):
all_top_items = frappe.db.sql("""\
select * from `tabTop Bar Item`
where parent='Website Settings' and parentfield= %s
order by idx asc""", parentfield, as_dict=1)
top_items = [d for d in all_top_items if not d['parent_label']]
# attach child items to top bar
for d in all_top_items:
if d['parent_label']:
for t in top_items:
if t['label']==d['parent_label']:
if not 'child_items' in t:
t['child_items'] = []
t['child_items'].append(d)
break
return top_items
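# Illustrative shape (assumption, not from the original file): given rows such as
#   [{'label': 'About', 'parent_label': None, ...}, {'label': 'Team', 'parent_label': 'About', ...}]
# get_items() returns only the top-level rows, with children nested under 'child_items':
#   [{'label': 'About', ..., 'child_items': [{'label': 'Team', ...}]}]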
|
bsmr-eve/Pyfa | gui/builtinStatsViews/resourcesViewFull.py | Python | gpl-3.0 | 15,284 | 0.002748 |
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
from gui.statsView import StatsView
from gui.bitmap_loader import BitmapLoader
from gui.pyfa_gauge import PyGauge
import gui.mainFrame
from gui.chrome_tabs import EVT_NOTEBOOK_PAGE_CHANGED
from gui.utils import fonts
from eos.saveddata.module import Hardpoint
from gui.utils.numberFormatter import formatAmount
class ResourcesViewFull(StatsView):
name = "resourcesViewFull"
contexts = ["drone", "fighter", "cargo"]
def __init__(self, parent):
StatsView.__init__(self)
self.parent = parent
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.mainFrame.additionsPane.notebook.Bind(EVT_NOTEBOOK_PAGE_CHANGED, self.pageChanged)
def pageChanged(self, event):
page = self.mainFrame.additionsPane.getName(event.GetSelection())
if page == "Cargo":
self.toggleContext("cargo")
elif page == "Fighters":
self.toggleContext("fighter")
else:
self.toggleContext("drone")
def toggleContext(self, context):
# Apparently you cannot .Hide(True) on a Window, otherwise I would just .Hide(context !== x).
# This is a gimpy way to toggle this shit
for x in self.contexts:
bitmap = getattr(self, "bitmapFull{}Bay".format(x.capitalize()))
base = getattr(self, "baseFull{}Bay".format(x.capitalize()))
if context == x:
bitmap.Show()
base.Show(True)
else:
bitmap.Hide()
base.Hide(True)
fighter_sizer = getattr(self, "boxSizerFighter")
drone_sizer = getattr(self, "boxSizerDrones")
if context != "fighter":
fighter_sizer.ShowItems(False)
drone_sizer.ShowItems(True)
else:
fighter_sizer.ShowItems(True)
drone_sizer.ShowItems(False)
self.panel.Layout()
self.headerPanel.Layout()
def getHeaderText(self, fit):
return "Resources"
def getTextExtentW(self, text):
width, height = self.parent.GetTextExtent(text)
return width
def populatePanel(self, contentPanel, headerPanel):
contentSizer = contentPanel.GetSizer()
root = wx.BoxSizer(wx.VERTICAL)
contentSizer.Add(root, 0, wx.EXPAND, 0)
sizer = wx.BoxSizer(wx.HORIZONTAL)
root.Add(sizer, 0, wx.EXPAND)
root.Add(wx.StaticLine(contentPanel, wx.ID_ANY, style=wx.HORIZONTAL), 0, wx.EXPAND)
sizerResources = wx.BoxSizer(wx.HORIZONTAL)
root.Add(sizerResources, 1, wx.EXPAND, 0)
parent = self.panel = contentPanel
self.headerPanel = headerPanel
panel = "full"
base = sizerResources
sizer.AddStretchSpacer()
# Turrets & launcher hardslots display
tooltipText = {"turret": "Turret hardpoints", "launcher": "Launcher hardpoints", "drones": "Drones active",
"fighter": "Fighter squadrons active", "calibration": "Calibration"}
for type_ in ("turret", "launcher", "drones", "fighter", "calibration"):
box = wx.BoxSizer(wx.HORIZONTAL)
bitmap = BitmapLoader.getStaticBitmap("%s_big" % type_, parent, "gui")
tooltip = wx.ToolTip(tooltipText[type_])
bitmap.SetToolTip(tooltip)
box.Add(bitmap, 0, wx.ALIGN_CENTER)
sizer.Add(box, 0, wx.ALIGN_CENTER)
suffix = {'turret': 'Hardpoints', 'launcher': 'Hardpoints', 'drones': 'Active', 'fighter': 'Tubes',
'calibration': 'Points'}
lbl = wx.StaticText(parent, wx.ID_ANY, "0")
setattr(self, "label%s
|
Used%s%s" % (panel.capitalize(), type_.capitalize(), suffix[type_].capitalize()), lbl)
box.Add(lbl, 0, wx.ALIGN_CENTER | wx.LEFT, 5)
box.Add(wx.StaticText(parent, wx.ID_ANY, "/"), 0, wx.ALIGN_CENTER)
lbl = wx.StaticText(parent, wx.ID_ANY, "0")
setattr(self, "label%sTotal%s%s" % (panel.capitalize(), type_.capitalize(), suffix[type_].capitalize()),
lbl)
box.Add(lbl, 0, wx.ALIGN_CENTER)
setattr(self, "boxSizer{}".format(type_.capitalize()), box)
# Hack - We add a spacer after each thing, but we are always hiding something. The spacer is still there.
# This way, we only have one space after the drones/fighters
if type_ != "drones":
sizer.AddStretchSpacer()
gauge_font = wx.Font(fonts.NORMAL, wx.SWISS, wx.NORMAL, wx.NORMAL, False)
# PG, Cpu & drone stuff
tooltipText = {"cpu": "CPU", "pg": "PowerGrid", "droneBay": "Drone bay", "fighterBay": "Fighter bay",
"droneBandwidth": "Drone bandwidth", "cargoBay": "Cargo bay"}
for i, group in enumerate((("cpu", "pg"), ("cargoBay", "droneBay", "fighterBay", "droneBandwidth"))):
main = wx.BoxSizer(wx.VERTICAL)
base.Add(main, 1, wx.ALIGN_CENTER)
for type_ in group:
capitalizedType = type_[0].capitalize() + type_[1:]
bitmap = BitmapLoader.getStaticBitmap(type_ + "_big", parent, "gui")
tooltip = wx.ToolTip(tooltipText[type_])
bitmap.SetToolTip(tooltip)
stats = wx.BoxSizer(wx.VERTICAL)
absolute = wx.BoxSizer(wx.HORIZONTAL)
stats.Add(absolute, 0, wx.EXPAND)
b = wx.BoxSizer(wx.HORIZONTAL)
main.Add(b, 1, wx.ALIGN_CENTER)
b.Add(bitmap, 0, wx.ALIGN_BOTTOM)
b.Add(stats, 1, wx.EXPAND)
lbl = wx.StaticText(parent, wx.ID_ANY, "0")
setattr(self, "label%sUsed%s" % (panel.capitalize(), capitalizedType), lbl)
absolute.Add(lbl, 0, wx.ALIGN_LEFT | wx.LEFT, 3)
absolute.Add(wx.StaticText(parent, wx.ID_ANY, "/"), 0, wx.ALIGN_LEFT)
lbl = wx.StaticText(parent, wx.ID_ANY, "0")
setattr(self, "label%sTotal%s" % (panel.capitalize(), capitalizedType), lbl)
absolute.Add(lbl, 0, wx.ALIGN_LEFT)
units = {"cpu": " tf", "pg": " MW", "droneBandwidth": " mbit/s", "droneBay": " m\u00B3",
"fighterBay": " m\u00B3", "cargoBay": " m\u00B3"}
lbl = wx.StaticText(parent, wx.ID_ANY, "%s" % units[type_])
absolute.Add(lbl, 0, wx.ALIGN_LEFT)
# Gauges modif. - Darriele
gauge = PyGauge(parent, gauge_font, 1)
gauge.SetValueRange(0, 0)
gauge.SetMinSize((self.getTextExtentW("1.999M/1.99M MW"), 23))
gauge.SetFractionDigits(2)
setattr(self, "gauge%s%s" % (panel.capitalize(), capitalizedType), gauge)
stats.Add(gauge, 0, wx.ALIGN_CENTER)
setattr(self, "base%s%s" % (panel.capitalize(), capitalizedType), b)
setattr(self, "bitmap%s%s" % (panel.capitalize(), capitalizedType), bitmap)
self.toggleContext("drone")
def refreshPanel(self, fit):
# If we did anything interesting, we'd update our labels to reflect the new fit's stats here
stats = (
("label%sUsedTurretHardpoints", lambda: fit.getHardpointsUsed(Hardpoint.TURRET), 0, 0, 0),
|
raprasad/worldcup | worldcup/worldcup/predictions/views.py | Python | mit | 6,415 | 0.018083 |
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.forms.models import modelformset_factory
from worldcup.common.msg_util import *
from worldcup.common.user_util import *
from worldcup.matches.models import *
from worldcup.predictions.models import *
from worldcup.predictions.forms import *
from worldcup.teams.models import get_team_not_determined
from worldcup.predictions.standings import get_current_standings
from datetime import datetime
def view_prediction_list(request):
"""
"Landing Page" with links to forms and some stats
"""
if not request.user.is_authenticated():
return view_auth_page(request)
lu = get_username(request)
lu.update({'match_types' :MatchType.objects.all()
, 'number_predictions' : Prediction.objects.all().count()
, 'number_users' : Prediction.objects.values('user').distinct().count()
, 'standings' : get_current_standings()
, 'num_matches_played' : Match.objects.filter(score_recorded=True).count()
})
return render_to_response('predictions/prediction_home.html', lu, context_instance=RequestContext(request) )
def get_new_prediction(user, match_type, match):
if user is None or match_type is None:
return None
if match_type.name == MATCH_TYPE_KNOCKOUT_STAGE:
p = PredictionStage2(user=user
, match=match
, team1=get_team_not_determined()
, team2=get_team_not_determined()
)
else:
p = Prediction(user=user, match=match)
p.save()
return p
def get_users_predictions(request, user, match_type):
"""For a given user and match type, return Prediction objects.
If they don't exist, create them."""
if user is None or match_type is None:
return None
#msgt('get_users_predictions')
#msg('match_type: [%s]' % match_type)
# minimal check, either user has no predictions or all of them
num_matches = Match.objects.filter(match_type=match_type).count()
if match_type.name == MATCH_TYPE_KNOCKOUT_STAGE:
PredictionObj = eval('PredictionStage2')
else:
PredictionObj = eval('Prediction')
# get user's predictions for this match type
qset = PredictionObj.objects.filter(user=user, match__match_type=match_type)
#msg(qset)
if qset.count() == 0:
#
# need to create Predictions for this user
#msg('zero qset')
for m in Match.objects.filter(match_type=match_type):
get_new_prediction(user, match_type, m)
#p = PredictionObj(user=user, match=m)
#p.save()
return PredictionObj.objects.filter(user=user, match__match_type=match_type)
elif qset.count() == num_matches:
#
# correct number of Predictions
#msg('matched: %s' % num_matches)
return qset
else:
#
# wrong number of predictions, create new ones
#msg('wrong number of Predictions [%s]'% qset)
for m in Match.objects.filter(match_type=match_type):
#msg('match: %s' % m)
if PredictionObj.objects.filter(user=user, match=m).count() > 0:
pass
else:
get_new_prediction(user, match_type, m)
return PredictionObj.objects.filter(user=request.user, match__match_type=match_type)
#msg('wrong number of Predictions: %s' % qset.count())
#assert(False, "wrong number of Predictions")
#return None
def view_prediction_saved_success(request, match_type_slug):
if not request.user.is_authenticated():
return view_auth_page(request)
lu = get_username(request)
try:
match_type = MatchType.objects.get(slug=match_type_slug)
lu.update({ 'match_type' : match_type })
except MatchType.DoesNotExist:
lu.update({ 'Err_found':True
, 'MatchType_not_Found': True})
return render_to_response('predictions/add_prediction_success.html', lu, context_instance=RequestContext(request))
qset = get_users_predictions(request, request.user, match_type)
lu.update({ 'predictions':qset })
return render_to_response('predictions/add_prediction_success.html', lu, context_instance=RequestContext(request) )
def view_prediction_form2(request, match_type_slug):
"""
Prediction form for the group stage
"""
if not request.user.is_authenticated():
return view_auth_page(request)
lu = get_username(request)
try:
match_type = MatchType.objects.get(slug=match_type_slug)
lu.update({ 'match_type' : match_type })
except MatchType.DoesNotExist:
lu.update({ 'Err_found':True
, 'MatchType_not_Found': True})
return render_to_response('predictions/add_prediction.html', lu, context_instance=RequestContext(request) )
# Is it too late to make a prediction?
#
if datetime.now() > match_type.last_day_to_predict:
lu.update({ 'Err_found':True
, 'Too_late_to_predict': True})
return render_to_response('predictions/add_prediction.html', lu, context_instance=RequestContext(request) )
lu.update({ 'user' : request.user })
PredictionFormSet = modelformset_factory(Prediction, form=PredictionForm, extra=0)
qset = get_users_predictions(request, request.user, match_type)
if request.method == 'POST':
#deal with posting the data
formset = PredictionFormSet(request.POST, queryset=qset)
if formset.is_valid():
formset.save()
redirect_url = reverse('view_prediction_saved_success'
, kwargs={ 'match_type_slug':match_type.slug })
return HttpResponseRedirect(redirect_url)
#else:
# msg(formset.errors)
else:
formset = PredictionFormSet(queryset=qset)
lu.update({ 'formset':formset })
return render_to_response('predictions/add_prediction.html', lu, context_instance=RequestContext(request) )
|
recurly/recurly-client-python | recurly/base_errors.py | Python | mit | 353 | 0 |
import recurly
class RecurlyError(Exception):
@classmethod
def error_from_status(cls, status):
return recurly.errors.ERROR_MAP.get(status, "")
class ApiError(RecurlyError):
def __init__(self, message, error):
super(ApiError, self).__init__(message)
self.error = error
class NetworkError(RecurlyError):
pass
| |
kmunve/pysenorge | pysenorge/constants.py | Python | gpl-3.0 | 1,844 | 0.005965 |
'''
Contains physical constants used in snow modeling.
@var a_gravity: Gravitational acceleration [m s-2]
@var eta0: Viscosity of snow at T=0C and density=0 [N s m-2 = kg m-1 s-1]
@var rho_air: Density of air [kg m-3], dry air at 0 C and 100 kPa
@var rho_water: Density of water [kg m-3]
@var rho_ice: Density of ice [kg m-3]
@var k_ice0: Thermal conductivity of ice [W m-1 K-1] at 0 C
|
@var k_ice10: Thermal conductivity of ice [W m-1 K-1] at -10 C
@var secperday: Seconds per day [s]
@var boltzmann: Boltzmann constant [J K-1].
The Boltzmann constant (k or kB) is the physical constant relating energy
at the particle level with temperature observed at the bulk level.
It is the gas constant R divided by the Avogadro constant NA: k = \frac{R}{N_{\rm A}}\,
It has the same units as entropy.
@var boltzmann_eV: Boltzmann constant [eV K-1]
@author: kmu
|
@since: 25. mai 2010
'''
# gravitational acceleration [m s-2]
a_gravity = 9.81
# viscosity of snow at T=0C and density=0 [N s m-2 = kg m-1 s-1]
eta0 = 3.6e6
# Density of air [kg m-3], dry air at 0 C and 100 kPa
rho_air = 1.2754
# Density of water [kg m-3]
rho_water = 1000.0
# Density of ice [kg m-3]
rho_ice = 916.0
# Thermal conductivity of ice [W m-1 K-1]
k_ice0 = 2.22 # at 0 C
k_ice10 = 2.30 # at -10 C
# Seconds per day [s]
secperday = 86400.0
# Boltzmann constant [J K-1]
# The Boltzmann constant (k or kB) is the physical constant relating energy
# at the particle level with temperature observed at the bulk level.
# It is the gas constant R divided by the Avogadro constant NA:
# k = \frac{R}{N_{\rm A}}\,
# It has the same units as entropy.
boltzmann = 1.380650424e-23
boltzmann_eV = 8.61734315e-5 # [eV K-1]
# Stefan-Boltzmann constant [W m-2 K-4]
stefan_boltzmann = 5.67040004e-8
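# Quick consistency check (illustrative addition, not part of the original module):
# k_B = R / N_A, i.e. the gas constant divided by the Avogadro constant.
if __name__ == '__main__':
    R = 8.314462        # gas constant [J mol-1 K-1]
    N_A = 6.0221415e23  # Avogadro constant [mol-1]
    print(R / N_A)      # ~1.3806e-23 J K-1, matching boltzmann above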
|
imron/scalyr-agent-2 | benchmarks/micro/test_json_serialization.py | Python | apache-2.0 | 5,318 | 0.00188 |
# Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Benchmarks which test JSON serialization and deserialization with various json libraries.
"""
from __future__ import absolute_import
import json
import six
import pytest
from scalyr_agent.util import set_json_lib
from scalyr_agent.util import get_json_lib
from scalyr_agent.util import json_encode
from scalyr_agent.util import json_decode
import scalyr_agent.util
from .utils import generate_random_dict
from .utils import read_bytes_from_log_fixture_file
# We cache some data to avoid loading it for each test. Keep in mind that actual "setup" / loading
# phase is not included in the actual benchmarking timing data.
CACHED_TEST_DATA = {
"encode": {},
"decode": {},
} # type: dict
@pytest.mark.parametrize("sort_keys", [False, True], ids=["no_sort_keys", "sort_keys"])
@pytest.mark.parametrize("keys_count", [10, 100, 1000])
@pytest.mark.parametrize("json_lib", ["json", "ujson", "orjson"])
@pytest.mark.benchmark(group="json_encode")
def test_json_encode_with_custom_options(benchmark, json_lib, keys_count, sort_keys):
# NOTE: orjson doesn't support sort_keys=True
if json_lib == "orjson":
if not six.PY3:
pytest.skip(
"Skipping under Python 2, orjson is only available for Python 3"
)
elif sort_keys is True:
pytest.skip("orjson doesn't support sort_keys=True")
set_json_lib(json_lib)
scalyr_agent.util.SORT_KEYS = sort_keys
data = generate_random_dict(keys_count=keys_count)
def run_benchmark():
return json_encode(data)
result = benchmark.pedantic(run_benchmark, iterations=50, rounds=100)
assert get_json_lib() == json_lib
assert scalyr_agent.util.SORT_KEYS == sort_keys
assert isinstance(result, six.text_type)
assert json_decode(result) == data
# fmt: off
@pytest.mark.parametrize("log_tuple",
[
("agent_debug_5_mb.log", 3 * 1024),
("agent_debug_5_mb.log", 500 * 1024),
],
ids=[
"agent_debug_log_3k",
"agent_debug_log_500k",
],
)
# fmt: on
@pytest.mark.parametrize("j
|
son_lib", ["json", "ujson", "orjson"])
@pytest.mark.benchmark(group="json_encode")
def test_json_encode(benchmark, json_lib, log_tuple):
if not six.PY3 and json_lib == "orjson":
pytest.skip("Skipping under Python 2, orjson is only available for Python 3")
return
_test_json_encode(benchmark, json_lib, log_tuple)
# fmt: off
@pytest.mark.parametrize("log_tuple",
[
("json_log_5_mb.log", 3 * 1024),
("json_log_5_mb.log", 500 * 1024),
],
ids=[
"json_log_3k",
"json_log_500k",
],
)
# fmt: on
@pytest.mark.parametrize("json_lib", ["json", "ujson", "orjson"])
@pytest.mark.benchmark(group="json_decode")
def test_json_decode(benchmark, json_lib, log_tuple):
if not six.PY3 and json_lib == "orjson":
pytest.skip("Skipping under Python 2, orjson is only available for Python 3")
return
_test_json_decode(benchmark, json_lib, log_tuple)
def _test_json_encode(benchmark, json_lib, log_tuple):
"""
:param json_lib: JSON library to use.
:param log_tuple: Tuple with (log_filename, log_bytes_to_use).
"""
set_json_lib(json_lib)
file_name, bytes_to_read = log_tuple
if log_tuple not in CACHED_TEST_DATA["encode"]:
data = read_bytes_from_log_fixture_file(file_name, bytes_to_read)
data = six.ensure_text(data)
CACHED_TEST_DATA["encode"][log_tuple] = data
data = CACHED_TEST_DATA["encode"][log_tuple]
def run_benchmark():
return json_encode(data)
result = benchmark.pedantic(run_benchmark, iterations=20, rounds=50)
assert get_json_lib() == json_lib
assert isinstance(result, six.text_type)
# assert json.dumps(data) == result
def _test_json_decode(benchmark, json_lib, log_tuple):
"""
:param json_lib: JSON library to use.
:param log_tuple: Tuple with (log_filename, log_bytes_to_use).
"""
set_json_lib(json_lib)
file_name, bytes_to_read = log_tuple
if log_tuple not in CACHED_TEST_DATA["decode"]:
data = read_bytes_from_log_fixture_file(file_name, bytes_to_read).strip()
obj = {"lines": []}
for line in data.split(b"\n"):
line_decoded = json.loads(six.ensure_text(line))
obj["lines"].append(line_decoded)
data = json.dumps(obj)
CACHED_TEST_DATA["decode"][log_tuple] = six.ensure_text(data)
data = CACHED_TEST_DATA["decode"][log_tuple]
def run_benchmark():
return json_decode(data)
result = benchmark.pedantic(run_benchmark, iterations=20, rounds=50)
assert get_json_lib() == json_lib
assert isinstance(result, dict)
# assert json.loads(result) == data
|
genialis/resolwe | resolwe/storage/management/__init__.py | Python | apache-2.0 | 180 | 0 |
""".. Ignore pydocsty
|
le D400.
==================
Storage Management
==================
.. automodule:: resolwe.storage.management.commands.run_storage_manager
:members:
"""
|
krafczyk/spack | var/spack/repos/builtin/packages/py-lrudict/package.py | Python | lgpl-2.1 | 1,591 | 0.001257 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyLrudict(PythonPackage):
""" A fast LRU cache"""
homepage = "https://github.com/amitdev/lru-dict"
url = "https://pypi.io/packages/source/l/lru-dict/lru-dict-1.1.6.tar.gz"
version('1.1.6', 'b33f54f1257ab541f4df4bacc7509f5a')
depends_on('python@2.7:')
depends_on('py-setuptools', type=('build'))
|
tanghaibao/goatools | goatools/test_data/sections/gjoneska_pfenning.py | Python | bsd-2-clause | 9,848 | 0.017465 |
"""Sections variable used for grouping Gjoneska 2015 GO IDs."""
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
SECTIONS = [ # 18 sections
("immune", [ # 15 GO-headers
"GO:0002376", # BP 564 L01 D01 M immune system process
"GO:0002682", # BP 1,183 L02 D02 AB regulation of immune system process
"GO:0030155", # BP 246 L02 D02 AB regulation of cell adhesion
"GO:0006955", # BP 100 L02 D02 GM immune response
"GO:0001817", # BP 476 L03 D03 AB regulation of cytokine production
"GO:0001775", # BP 162 L03 D03 CD cell activation
"GO:0001816", # BP 110 L03 D03 DK cytokine production
"GO:1903037", # BP 155 L04 D04 AB regulation of leukocyte cell-cell adhesion
"GO:0034097", # BP 59 L04 D04 G response to cytokine
"GO:0006954", # BP 25 L04 D04 G inflammatory response
"GO:0045087", # BP 25 L03 D04 GM innate immune response
"GO:0002521", # BP 72 L05 D05 CDF leukocyte differentiation
"GO:0007229", # BP 0 L05 D05 AB integrin-mediated signaling pathway
"GO:0050900", # BP 57 L02 D06 CDMN leukocyte migration
"GO:0042130", # BP 9 L07 D08 AB negative regulation of T cell proliferation
#"GO:0002252", # BP 138 L02 D02 L immune effector process
]),
("viral/bacteria", [ # 4 GO-headers
"GO:0016032", # BP 301 L03 D04 CJ viral process
"GO:0050792", # BP 119 L03 D04 AB regulation of viral process
"GO:0098542", # BP 37 L03 D05 GJ defense response to other organism
"GO:0009617", # BP 12 L03 D05 GJ response to bacterium
]),
("neuro", [ # 25 GO-headers
"GO:0099531", # BP 32 L01 D01 U presynaptic process in chemical synaptic Xmission
"GO:0042391", # BP 117 L03 D03 A regulation of membrane potential
"GO:0050877", # BP 96 L03 D03 K neurological system process
"GO:0050808", # BP 20 L03 D03 CDI synapse organization
"GO:0007272", # BP 13 L03 D03 CD ensheathment of neurons
"GO:0051960", # BP 236 L04 D04 AB regulation of nervous system development
"GO:0050804", # BP 120 L03 D04 AB modulation of synaptic transmission
"GO:0097485", # BP 34 L04 D04 CD neuron projection guidance
"GO:0031644", # BP 30 L04 D04 AB regulation of neurological system process
"GO:0031175", # BP 14 L04 D04 CDI neuron projection development
"GO:0035418", # BP 14 L04 D04 H protein localization to synapse
"GO:0007399", # BP 0 L04 D04 F nervous system development
"GO:0050767", # BP 192 L05 D05 AB regulation of neurogenesis
"GO:0030182", # BP 71 L05 D05 CDF neuron differentiation
"GO:0099536", # BP 40 L04 D05 CDR synaptic signaling
"GO:0048666", # BP 29 L04 D05 CDF neuron development
"GO:0010001", # BP 17 L05 D05 CDF glial cell differentiation
"GO:0051969", # BP 5 L03 D05 AB regulation of transmission of nerve impulse
"GO:0022008", # BP 3 L05 D05 CDF neurogenesis
"GO:0007158", # BP 0 L04 D05 DP neuron cell-cell adhesion
"GO:0014002", # BP 1 L05 D06 CDF astrocyte development
"GO:0048812", # BP 27 L05 D07 CDFI neuron projection morphogenesis
"GO:0048667", # BP 6 L06 D07 CDFI cell morphogenesis involved in neuron differen.
"GO:0072578", # BP 5 L05 D07 CDHI neurotransmitter-gated ion channel clustering
"GO:0007409", # BP 23 L06 D08 CDFI axonogenesis
]),
("cell death", [ # 6 GO-headers
"GO:0010941", # BP 316 L03 D03 AB regulation of cell death
"GO:0008219", # BP 104 L03 D03 CD cell death
"GO:0060548", # BP 103 L04 D04 AB negative regulation of cell death
"GO:0097190", # BP 22 L04 D04 AB apoptotic signaling pathway
"GO:0097527", # BP 0 L04 D04 AB necroptotic signaling pathway
"GO:0008637", # BP 7 L05 D05 CI apoptotic mitochondrial changes
]),
("lipid", [ # 7 GO-headers
"GO:0006629", # BP 623 L03 D03 DE lipid metabolic process
"GO:0019216", # BP 243 L04 D04 AB regulation of lipid metabolic process
"GO:0032368", # BP 130 L04 D04 AB regulation of lipid transport
"GO:0033993", # BP 112 L04 D04 G response to lipid
"GO:0006869", # BP 93 L04 D05 DH lipid transport
"GO:0055088", # BP 10 L05 D05 A lipid homeostasis
"GO:0042158", # BP 3 L05 D06 CE lipoprotein bi
|
osynthetic process
]),
("adhesion", [ # 3 GO-headers
"GO:0022610", # BP 194 L01 D01 P biological adhesion
"GO:0030155", # BP 246 L02 D02 AB regulation of cell adhesion
"GO:0007155", # BP 165 L02 D02 P cell adhesion
]),
("cell cycle", [ # 9 GO-headers
"GO:0022402", # BP 463 L02 D02 C cell cycle process
"GO:0022403", # BP 46 L02 D02 S cell cycle phase
"GO:0051726", # BP 411 L03 D03 AB regulation of cell cycle
"GO:0051301", # BP 54 L03 D03 CD cell division
"GO:0007049", # BP 12 L03 D03 CD cell cycle
"GO:0070192", # BP 17 L03 D05 CIL chromosome organization in meiotic cell cycle
"GO:0007051", # BP 19 L03 D06 CDI spindle organization
"GO:0007067", # BP 1 L04 D06 CI mitotic nuclear division
"GO:0030071", # BP 11 L06 D09 AB regulation of mitotic metaphase/anaphase transition
]),
("chromosome", [ # 9 GO-headers
"GO:0032259", # BP 119 L02 D02 E methylation
"GO:0051983", # BP 108 L03 D03 AB regulation of chromosome segregation
"GO:0007059", # BP 11 L03 D03 CD chromosome segregation
"GO:0006325", # BP 184 L04 D04 CI chromatin organization
"GO:0051276", # BP 107 L04 D04 CI chromosome organization
"GO:0032204", # BP 29 L03 D06 AB regulation of telomere maintenance
"GO:0034502", # BP 21 L06 D06 H protein localization to chromosome
"GO:0031497", # BP 11 L05 D06 CI chromatin assembly
"GO:0006334", # BP 3 L06 D07 CI nucleosome assembly
]),
("development", [ # 10 GO-headers
"GO:0032502", # BP 3,173 L01 D01 F developmental process
"GO:0022414", # BP 847 L01 D01 L reproductive process
"GO:0050793", # BP 1,881 L02 D02 AB regulation of developmental process
"GO:0048856", # BP 1,016 L02 D02 F anatomical structure development
"GO:0048646", # BP 331 L02 D02 F anatomical structure formation in morphogenesis
"GO:0007568", # BP 18 L03 D03 DF aging
"GO:0022604", # BP 129 L04 D04 AB regulation of cell morphogenesis
"GO:0000902", # BP 65 L04 D05 CDFI cell morphogenesis
"GO:0045765", # BP 14 L04 D05 AB regulation of angiogenesis
]),
("extracellular matrix", [ # 1 GO-headers
"GO:0030198", # BP 27 L04 D04 CDI extracellular matrix organization
]),
("ion", [ # 3 GO-headers
"GO:0006811", # BP 422 L04 D04 H ion transport
"GO:0055085", # BP 330 L04 D04 H transmembrane transport
"GO:0006874", # BP 33 L08 D09 ACD cellular calcium ion homeostasis
]),
("localization", [ # 3 GO-headers
"GO:0051179", # BP 2,142 L01 D01 H localization
"GO:0040011", # BP 394 L01 D01 N locomotion
"GO:0032879", # BP 1,682 L02 D02 AB regulation of localization
]),
("membrane", [ # 1 GO-headers
"GO:0061024", # BP 273 L03 D03 CI membrane organization
]),
("metabolic", [ # 7 GO-headers
"GO:0008152", # BP 6,418 L01 D01 E metabolic process
"GO:0019222", # BP 3,243 L02 D02 AB regulation of metabolic process
"GO:0009056", # BP 1,369 L02 D02 E c
|
huyphan/pyyawhois
|
test/record/parser/test_response_whois_biz_status_available.py
|
Python
|
mit
| 1,928 | 0.002593 |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.biz/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisBizStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.biz/
|
status_available.txt"
host = "whois.biz"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
        eq_(self.record.status, None)
def test_available(self):
eq_(self.record.available, True)
def test_domain(self):
eq_(self.record.domain, "u34jedzcq.biz")
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(self.record.admin_contacts, [])
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_registrar(self):
eq_(self.record.registrar, None)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(self.record.registrant_contacts, [])
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(self.record.technical_contacts, [])
def test_updated_on(self):
eq_(self.record.updated_on, None)
def test_domain_id(self):
eq_(self.record.domain_id, None)
def test_expires_on(self):
eq_(self.record.expires_on, None)
|
eljost/pysisyphus
|
deprecated/optimizers/RSRFOptimizer.py
|
Python
|
gpl-3.0
| 5,961 | 0.001342 |
#!/usr/bin/env python3
# See [1] https://pubs.acs.org/doi/pdf/10.1021/j100247a015
# Banerjee, 1985
# [2] https://aip.scitation.org/doi/abs/10.1063/1.2104507
# Heyden, 2005
# [3] https://onlinelibrary.wiley.com/doi/abs/10.1002/jcc.540070402
# Baker, 1985
# [4] 10.1007/s002140050387
# Bofill, 1998, Restricted-Step-RFO
# [5] https://link.springer.com/article/10.1007/s00214-016-1847-3
# Birkholz, 2016
import numpy as np
from pysisyphus.optimizers.HessianOptimizer import HessianOptimizer
class RSRFOptimizer(HessianOptimizer):
"""Optimizer to find first-order saddle points."""
rfo_dict = {
"min": (0, "min"),
"max": (-1, "max"),
}
def __init__(self, geometry, max_micro_cycles=50, **kwargs):
super().__init__(geometry, **kwargs)
self.max_micro_cycles = int(max_micro_cycles)
assert max_micro_cycles >= 1
self.alpha0 = 1
self.alpha_max = 1e8
def solve_rfo(self, rfo_mat, kind="min"):
# So if I use eig instead of eigh here it even works ...
# my bad, ahhh! The unscaled RFO matrix may be symmetric,
# but the scaled ones aren't anymore.
eigenvalues, eigenvectors = np.linalg.eig(rfo_mat)
eigenvalues = eigenvalues.real
eigenvectors = eigenvectors.real
sorted_inds = np.argsort(eigenvalues)
        # Depending on whether we want to minimize (maximize) along
# the mode(s) in the rfo mat we have to select the smallest
# (biggest) eigenvalue and corresponding eigenvector.
first_or_last, verbose = self.rfo_dict[kind]
ind = sorted_inds[first_or_last]
# Given sorted eigenvalue-indices (sorted_inds) use the first
        # (smallest eigenvalue) or the last (largest eigenvalue) index.
step_nu = eigenvectors.T[ind]
nu = step_nu[-1]
self.log(f"nu_{verbose}={nu:.4e}")
# Scale eigenvector so that its last element equals 1. The
        # final step is the scaled eigenvector without the last element.
step = step_nu[:-1] / nu
eigval = eigenvalues[ind]
self.log(f"eigenvalue_{verbose}={eigval:.4e}")
return step, eigval, nu
def optimize(self):
forces = self.geometry.forces
self.forces.append(forces)
self.energies.append(self.geometry.energy)
if self.cur_cycle > 0:
self.update_trust_radius()
self.update_hessian()
H = self.H
if self.geometry.internal:
H = self.geometry.internal.project_hessian(self.H)
eigvals, eigvecs = np.linalg.eigh(H)
# Transform to eigensystem of hessian
forces_trans = eigvecs.T.dot(forces)
# Minimize energy along all modes
min_mat = np.asarray(np.bmat((
(np.diag(eigvals), -forces_trans[:,None]),
(-forces_trans[None,:], [[0]])
)))
alpha = self.alpha0
min_diag_indices = np.diag_indices(eigvals.size)
for mu in range(self.max_micro_cycles):
assert alpha > 0, "alpha should not be negative"
self.log(f"RS-RFO micro cycle {mu:02d}, alpha={alpha:.6f}")
# We only have to update one eigenvalue
min_mat_scaled = min_mat.copy()
min_mat_scaled[min_diag_indices] /= alpha
min_mat_scaled[:-1,-1] /= alpha
rfo_step, eigval_min, nu_min = self.solve_rfo(min_mat_scaled, "min")
# As of Eq. (8a) of [4] max_eigval and min_eigval also
# correspond to:
# eigval_min_ = -forces_trans.dot(rfo_step)
# np.testing.assert_allclose(eigval_min, eigval_min_)
# Create the full PRFO step
rfo_norm = np.linalg.norm(rfo_step)
self.log(f"rfo_norm={rfo_norm:.6f}")
inside_trust = rfo_norm < self.trust_radius + 1e-3
if inside_trust:
self.log("step is inside trust radius. breaking.")
break
elif alpha > self.alpha_max:
print("alpha > alpha_max. breaking.")
break
# Derivative of the squared step w.r.t. alpha
tval = 2*eigval_min/(1+rfo_norm**2 * alpha)
numer = forces_trans**2
denom = (eigvals - eigval_min * alpha)**3
quot = np.sum(numer / denom)
self.log(f"quot={quot:.6f}")
dstep2_dalpha = (2*eigval_min/(1+rfo_norm**2 * alpha)
* np.sum(forces_trans**2
/ ((eigvals - eigval_min * alpha)**3)
)
)
self.log(f"analytic deriv.={dstep2_dalpha:.6f}")
# Update alpha
alpha_step = (2*(self.trust_radius*rfo_norm - rfo_norm**2)
/ dstep2_dalpha
)
self.log(f"alpha_step={alpha_step:.4f}")
alpha += alpha_step
self.log("")
# Right now the step is still given in the Hessians eigensystem. We
# transform it back now.
step = eigvecs.dot(rfo_step)
step_norm = np.linalg.norm(step)
# This would correspond to "pure" RFO without the iterative
# step-restriction. Here we will just scale down the step, if it
# is too big.
if self.max_micro_cycles == 1 and step_norm > self.trust_radius:
self.log("Scaled down step")
step = step / step_norm * self.trust_radius
step_norm = np.linalg.norm(step)
self.log(f"norm(step)={np.linalg.norm(step):.6f}")
# Calculating the energy change from eigval_min and nu_min seems to give
# big problems.
# predicted_energy_change = 1/2 * eigval_min / nu_min**2
predicted_change = step.dot(-forces) + 0.5 * step.dot(self.H).dot(step)
self.predicted_energy_changes.append(predicted_change)
self.log("")
return step
|
paplorinc/intellij-community
|
python/testData/testRunner/env/pytest/test1.py
|
Python
|
apache-2.0
| 209 | 0.014354 |
from time import sleep
class TestPyTest:
def testOne(self):
sleep(1.5) # To check duration
assert 4 == 2*2
def testTwo(self):
assert True
def testThree():
assert 4 == 2*2
|
fbradyirl/home-assistant
|
homeassistant/components/facebook/notify.py
|
Python
|
apache-2.0
| 3,953 | 0.000253 |
"""Facebook platform for notify component."""
import json
import logging
from aiohttp.hdrs import CONTENT_TYPE
import requests
import voluptuous as vol
from homeassistant.const import CONTENT_TYPE_JSON
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
_LOGGER = logging.getLogger(__name__)
CONF_PAGE_ACCESS_TOKEN = "page_access_token"
BASE_URL = "https://graph.facebook.com/v2.6/me/messages"
CREATE_BROADCAST_URL = "https://graph.facebook.com/v2.11/me/message_creatives"
SEND_BROADCAST_URL = "https://graph.facebook.com/v2.11/me/broadcast_messages"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_PAGE_ACCESS_TOKEN): cv.string}
)
def get_service(hass, config, discovery_info=None):
"""Get the Facebook notification service."""
return FacebookNotificationService(config[CONF_PAGE_ACCESS_TOKEN])
class FacebookNotificationService(BaseNotificationService):
"""Implementation of a notification service for the Facebook service."""
def __init__(self, access_token):
"""Initialize the service."""
self.page_access_token = access_token
def send_message(self, message="", **kwargs):
"""Send some message."""
payload = {"access_token": self.page_access_token}
targets = kwargs.get(ATTR_TARGET)
data = kwargs.get(ATTR_DATA)
body_message = {"text": message}
if data is not None:
body_message.update(data)
# Only one of text or attachment can be specified
if "attachment" in body_message:
body_message.pop("text")
if not targets:
_LOGGER.error("At least 1 target is required")
return
# broadcast message
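        # Broadcasting is a two-step Graph API exchange: first POST the
        # message to the message_creatives endpoint to obtain a
        # message_creative_id, then POST that id to the broadcast_messages
        # endpoint to actually send it.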
if targets[0].lower() == "broadcast":
            broadcast_create_body = {"messages": [body_message]}
_LOGGER.debug("Broadcast body %s : ", broadcast_create_body)
resp = requests.post(
CREATE_BROADCAST_URL,
data=json.dumps(broadcast_create_body),
params=payload,
headers={CONTENT_TYPE: CONTENT_TYPE_JSON},
timeout=10,
)
_LOGGER.debug("FB Messager broadcast id %s : ", resp.json())
# at this point we get broadcast id
            broadcast_body = {
"message_creative_id": resp.json().get("message_creative_id"),
"notification_type": "REGULAR",
}
resp = requests.post(
SEND_BROADCAST_URL,
data=json.dumps(broadcast_body),
params=payload,
headers={CONTENT_TYPE: CONTENT_TYPE_JSON},
timeout=10,
)
if resp.status_code != 200:
log_error(resp)
# non-broadcast message
else:
for target in targets:
# If the target starts with a "+", it's a phone number,
# otherwise it's a user id.
if target.startswith("+"):
recipient = {"phone_number": target}
else:
recipient = {"id": target}
body = {"recipient": recipient, "message": body_message}
resp = requests.post(
BASE_URL,
data=json.dumps(body),
params=payload,
headers={CONTENT_TYPE: CONTENT_TYPE_JSON},
timeout=10,
)
if resp.status_code != 200:
log_error(resp)
def log_error(response):
"""Log error message."""
obj = response.json()
error_message = obj["error"]["message"]
error_code = obj["error"]["code"]
_LOGGER.error(
"Error %s : %s (Code %s)", response.status_code, error_message, error_code
)
|
custode/reviewboard
|
reviewboard/webapi/errors.py
|
Python
|
mit
| 4,753 | 0 |
from __future__ import unicode_literals
from djblets.webapi.errors import WebAPIError
class WebAPITokenGenerationError(Exception):
"""An error generating a Web API token."""
pass
#
# Standard error messages
#
UNSPECIFIED_DIFF_REVISION = WebAPIError(
200,
'Diff revision not specified.',
http_status=400) # 400 Bad Request
INVALID_DIFF_REVISION = WebAPIError(
201,
'Invalid diff revision.',
http_status=404) # 404 Not Found
INVALID_ACTION = WebAPIError(
202,
'Invalid action specified.',
http_status=400) # 400 Bad Request
INVALID_CHANGE_NUMBER = WebAPIError(
203,
'The commit ID specified could not be found.',
http_status=404) # 404 Not Found
CHANGE_NUMBER_IN_USE = WebAPIError(
204,
    'The commit ID specified has already been used.',
http_status=409) # 409 Conflict
MISSING_REPOSITORY = WebAPIError(
205,
'There was no repository found at the specified path.',
http_status=400) # 400 Bad Request
INVALID_REPOSITORY = WebAPIError(
206,
'The repository path specified is not in the list of known repositories.',
http_status=400) # 400 Bad Request
REPO_FILE_NOT_FOUND = WebAPIError(
207,
'The file was not found in the repository.',
    http_status=400) # 400 Bad Request
INVALID_USER = WebAPIError(
208,
'User does not exist.',
http_status=400) # 400 Bad Request
REPO_NOT_IMPLEMENTED = WebAPIError(
209,
'The specified repository is not able to perform this action.',
http_status=501) # 501 Not Implemented
REPO_INFO_ERROR = WebAPIError(
210,
'There was an error fetching extended information for this repository.',
http_status=500) # 500 Internal Server Error
NOTHING_TO_PUBLISH = WebAPIError(
211,
'You attempted to publish a review request without any modifications.',
http_status=400) # 400 Bad Request
EMPTY_CHANGESET = WebAPIError(
212,
'The commit ID specified represents an empty changeset.',
http_status=400) # 400 Bad Request
SERVER_CONFIG_ERROR = WebAPIError(
213,
'There was an error storing configuration on the server.',
http_status=500) # 500 Internal Server Error
BAD_HOST_KEY = WebAPIError(
214,
    'The SSH key on the host does not match the stored key.',
http_status=403) # 403 Forbidden
UNVERIFIED_HOST_KEY = WebAPIError(
215,
'The SSH key on the host is unverified.',
http_status=403) # 403 Forbidden
UNVERIFIED_HOST_CERT = WebAPIError(
216,
'The HTTPS certificate on the host is unverified.',
http_status=403) # 403 Forbidden
MISSING_USER_KEY = WebAPIError(
217,
'A public SSH key was requested, but no SSH key was available to send.',
http_status=403) # 403 Forbidden
REPO_AUTHENTICATION_ERROR = WebAPIError(
218,
'Unable to authenticate with the repository using the provided '
'credentials.',
http_status=403) # 403 Forbidden
DIFF_EMPTY = WebAPIError(
219,
'The specified diff file is empty.',
http_status=400) # 400 Bad Request
DIFF_TOO_BIG = WebAPIError(
220,
'The specified diff file is too large.',
http_status=400) # 400 Bad Request
FILE_RETRIEVAL_ERROR = WebAPIError(
221,
'There was an error fetching a source file.',
http_status=500) # 500 Internal Server Error
HOSTINGSVC_AUTH_ERROR = WebAPIError(
222,
'There was an error authorizing with a service.',
http_status=403) # 403 Forbidden
GROUP_ALREADY_EXISTS = WebAPIError(
223,
'A group with this name already exists.',
http_status=409) # 409 Conflict
DIFF_PARSE_ERROR = WebAPIError(
224,
'The specified diff file could not be parsed.',
http_status=400) # 400 Bad Request
PUBLISH_ERROR = WebAPIError(
225,
'An error occurred during publishing.',
http_status=500) # 500 Internal Server Error
USER_QUERY_ERROR = WebAPIError(
226,
'An error occurred querying the user list.',
http_status=500) # 500 Internal Server Error
COMMIT_ID_ALREADY_EXISTS = WebAPIError(
227,
'Review request with this commit ID already exists in the repository.',
http_status=409) # 409 Conflict
TOKEN_GENERATION_FAILED = WebAPIError(
228,
'There was an error generating the API token. Please try again.',
http_status=500) # 500 Internal Server Error.
REPOSITORY_ALREADY_EXISTS = WebAPIError(
229,
'A repository with this name already exists.',
http_status=409) # 409 Conflict
CLOSE_ERROR = WebAPIError(
230,
'An error occurred while closing the review request.',
http_status=500) # 500 Internal Server Error
REOPEN_ERROR = WebAPIError(
231,
'An error occurred while reopening the review request.',
http_status=500) # 500 Internal Server Error
|
levilucio/SyVOLT
|
t_core/tc_python/arule.py
|
Python
|
mit
| 2,673 | 0.001871 |
from t_core.composer import Composer
from t_core.matcher import Matcher
from t_core.iterator import Iterator
from t_core.rewriter import Rewriter
from t_core.resolver import Resolver
class ARule(Composer):
'''
Applies the transformation on one match.
'''
def __init__(self, LHS, RHS):
'''
Applies the transformation on one match.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
'''
super(ARule, self).__init__()
self.M = Matcher(condition=LHS, max=1)
self.I = Iterator(max_iterations=1)
self.W = Rewriter(condition=RHS)
def packet_in(self, packet):
self.exception = None
self.is_success = False
# Match
packet = self.M.packet_in(packet)
if not self.M.is_success:
self.exception = self.M.exception
return packet
# Choose the only match
packet = self.I.packet_in(packet)
if not self.I.is_success:
self.exception = self.I.exception
return packet
# Rewrite
packet = self.W.packet_in(packet)
if not self.W.is_success:
self.exception = self.W.exception
return packet
# Output success packet
self.is_success = True
return packet
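# A rough usage sketch (the LHS/RHS patterns and the packet come from the
# surrounding t_core framework; the names below are hypothetical):
#
#   rule = ARule(my_precondition, my_postcondition)
#   packet = rule.packet_in(packet)
#   if not rule.is_success:
#       handle_failure(rule.exception)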
class ARule_r(ARule):
'''
Applies the transformation on one match.
'''
def __init__(self, LHS, RHS, external_matches_only=False, custom_resolution=lambda packet: False):
'''
Applies the transformation on one match.
@param LHS: The pre-condition pattern (LHS + NACs).
@param RHS: The post-condition pattern (RHS).
@param external_matches_only: Resolve conflicts ignoring the matches found in this ARule.
@param custom_resolution: Override the default resolution function.
'''
        super(ARule_r, self).__init__(LHS, RHS)
self.R = Resolver(external_matches_only=external_matches_only,
custom_resolution=custom_resolution)
def packet_in(self, packet):
packet = super(ARule_r, self).packet_in(packet)
# is_success is True
if self.exception is None:
# Resolve any conflicts if necessary
packet = self.R.packet_in(packet)
if not self.R.is_success:
self.exception = self.R.exception
return packet
# Output success packet
else:
self.is_success = False
return packet
|
MSEMJEJME/Get-Dumped
|
renpy/display/imagelike.py
|
Python
|
gpl-2.0
| 10,715 | 0.004666 |
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy.display
from renpy.display.render import render, Render, Matrix2D
# This file contains displayables that are image-like, because they take
# up a rectangular area of the screen, and do not respond to input.
class Solid(renpy.display.core.Displayable):
"""
:doc: disp_imagelike
A displayable that fills the area its assigned with `color`.
::
image white = Solid("#fff")
"""
def __init__(self, color, **properties):
super(Solid, self).__init__(**properties)
if color is not None:
self.color = renpy.easy.color(color)
else:
self.color = None
def visit(self):
return [ ]
def render(self, width, height, st, at):
color = self.color or self.style.color
rv = Render(width, height)
if color is None or width <= 0 or height <= 0:
return rv
SIZE = 10
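        # For areas of at least SIZE x SIZE only a small SIZE x SIZE texture
        # is created; the render's forward/reverse matrices below then map it
        # onto the full width x height rectangle (presumably to avoid
        # allocating a full-size texture for a single flat color). Smaller
        # areas get an exact-size texture instead.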
if width < SIZE or height < SIZE:
tex = renpy.display.draw.solid_texture(width, height, color)
else:
tex = renpy.display.draw.solid_texture(SIZE, SIZE, color)
rv.forward = Matrix2D(1.0 * SIZE / width, 0, 0, 1.0 * SIZE / height)
rv.reverse = Matrix2D(1.0 * width / SIZE, 0, 0, 1.0 * height / SIZE)
rv.blit(tex, (0, 0))
return rv
class Frame(renpy.display.core.Displayable):
"""
:doc: disp_imagelike
:args: (image, xborder, yborder, tile=False, **properties)
A displayable that resizes an image to fill the available area,
    while preserving the width and height of its borders. It is often
used as the background of a window or button.
.. figure:: frame_example.png
Using a frame to resize an image to double its size.
`image`
An image manipulator that will be resized by this frame.
`left`
The size of the border on the left side.
`top`
The size of the border on the top.
`right`
The size of the border on the right side. If None, defaults
to `left`.
`bottom`
        The size of the border on the bottom. If None, defaults to `top`.
`tile`
If true, tiling is used to resize sections of the image,
rather than scaling.
::
# Resize the background of the text window if it's too small.
init python:
style.window.background = Frame("frame.png", 10, 10)
"""
__version__ = 1
def after_upgrade(self, version):
if version < 2:
self.left = self.xborder
self.right = self.xborder
self.top = self.yborder
self.bottom = self.yborder
def __init__(self, image, left, top, right=None, bottom=None, bilinear=True, tile=False, **properties):
super(Frame, self).__init__(**properties)
self.image = renpy.easy.displayable(image)
self.tile = tile
if right is None:
right = left
if bottom is None:
bottom = top
self.left = left
self.top = top
self.right = right
self.bottom = bottom
def render(self, width, height, st, at):
crend = render(self.image, width, height, st, at)
sw, sh = crend.get_size()
sw = int(sw)
sh = int(sh)
dw = int(width)
dh = int(height)
bw = self.left + self.right
bh = self.top + self.bottom
xborder = min(bw, sw - 2, dw)
if xborder:
left = self.left * xborder / bw
right = self.right * xborder / bw
else:
left = 0
right = 0
yborder = min(bh, sh - 2, dh)
if yborder:
top = self.top * yborder / bh
bottom = self.bottom * yborder / bh
else:
top = 0
bottom = 0
if renpy.display.draw.info["renderer"] == "sw":
return self.sw_render(crend, dw, dh, left, top, right, bottom)
def draw(x0, x1, y0, y1):
# Compute the coordinates of the left, right, top, and
# bottom sides of the region, for both the source and
# destination surfaces.
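            # Sign convention: coordinates >= 0 are offsets from the left/top
            # edge, while values <= 0 are offsets from the right/bottom edge
            # (0 as x1/y1 means the far edge). E.g. draw(0, left, 0, top)
            # copies the fixed top-left corner, and draw(left, -right, 0, top)
            # fills the stretchable strip between the two top corners.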
# left side.
if x0 >= 0:
dx0 = x0
sx0 = x0
else:
dx0 = dw + x0
sx0 = sw + x0
# right side.
if x1 > 0:
dx1 = x1
sx1 = x1
else:
dx1 = dw + x1
sx1 = sw + x1
# top side.
if y0 >= 0:
dy0 = y0
sy0 = y0
else:
dy0 = dh + y0
sy0 = sh + y0
# bottom side
if y1 > 0:
dy1 = y1
sy1 = y1
else:
dy1 = dh + y1
sy1 = sh + y1
# Quick exit.
if sx0 == sx1 or sy0 == sy1:
return
# Compute sizes.
csw = sx1 - sx0
csh = sy1 - sy0
cdw = dx1 - dx0
cdh = dy1 - dy0
if csw <= 0 or csh <= 0 or cdh <= 0 or cdw <= 0:
return
# Get a subsurface.
cr = crend.subsurface((sx0, sy0, csw, csh))
# Scale or tile if we have to.
if csw != cdw or csh != cdh:
if self.tile:
newcr = Render(cdw, cdh)
newcr.clipping = True
for x in xrange(0, cdw, csw):
for y in xrange(0, cdh, csh):
                            newcr.blit(cr, (x, y))
cr = newcr
else:
newcr = Render(cdw, cdh)
newcr.forward = Matrix2D(1.0 * csw / cdw, 0, 0, 1.0 * csh / cdh)
newcr.reverse = Matrix2D(1.0 * cdw / csw, 0, 0, 1.0 * cdh / csh)
newcr.blit(cr, (0, 0))
cr = newcr
# Blit.
            rv.blit(cr, (dx0, dy0))
return
rv = Render(dw, dh)
self.draw_pattern(draw, left, top, right, bottom)
return rv
def draw_pattern(self, draw, left, top, right, bottom):
# Top row.
if top:
if left:
draw(0, left, 0, top)
draw(left, -right, 0, top)
if right:
draw(-right, 0, 0, top)
# Middle row.
if left:
draw(0, left, top, -bottom)
draw(left, -right, top, -bottom)
if right:
draw(-right, 0, top, -bottom)
# Bottom row.
if bottom:
if left:
draw(0, left, -bottom, 0)
draw(left, -right, -bottom, 0)
if right:
draw(-right, 0, -bottom, 0)
|
jaap-karssenberg/zim-desktop-wiki
|
zim/main/__init__.py
|
Python
|
gpl-2.0
| 28,210 | 0.026728 |
# Copyright 2013-2016 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''This module defines the L{main()} function for executing the zim
application. It also defines a number of command classes that implement
specific commandline commands and a singleton application object that
takes care of the process life cycle.
'''
# TODO:
# - implement weakvalue dict to ensure uniqueness of notebook objects
import os
import sys
import logging
import signal
logger = logging.getLogger('zim')
import zim
import zim.fs
import zim.errors
import zim.config
import zim.config.basedirs
from zim import __version__
from zim.utils import get_module, lookup_subclass
from zim.errors import Error
from zim.notebook import Notebook, Path, \
get_notebook_list, resolve_notebook, build_notebook
from zim.formats import get_format
from zim.config import ConfigManager
from zim.plugins import PluginManager
from .command import Command, GtkCommand, UsageError, GetoptError
from .ipc import dispatch as _ipc_dispatch
from .ipc import start_listening as _ipc_start_listening
class HelpCommand(Command):
'''Class implementing the C{--help} command'''
usagehelp = '''\
usage: zim [OPTIONS] [NOTEBOOK [PAGE]]
or: zim --server [OPTIONS] [NOTEBOOK]
or: zim --export [OPTIONS] NOTEBOOK [PAGE]
or: zim --search NOTEBOOK QUERY
or: zim --index NOTEBOOK
or: zim --plugin PLUGIN [ARGUMENTS]
or: zim --manual [OPTIONS] [PAGE]
or: zim --help
'''
optionhelp = '''\
General Options:
--gui run the editor (this is the default)
--server run the web server
--export export to a different format
--search run a search query on a notebook
--index build an index for a notebook
--plugin call a specific plugin function
--manual open the user manual
-V, --verbose print information to terminal
-D, --debug print debug messages
-v, --version print version and exit
-h, --help print this text
GUI Options:
--list show the list with notebooks instead of
opening the default notebook
--geometry window size and position as WxH+X+Y
--fullscreen start in fullscreen mode
--standalone start a single instance, no background process
Server Options:
--port port to use (defaults to 8080)
--template name of the template to use
--private serve only to localhost
--gui run the gui wrapper for the server
Export Options:
-o, --output output directory (mandatory option)
--format format to use (defaults to 'html')
--template name of the template to use
--root-url url to use for the document root
--index-page index page name
-r, --recursive when exporting a page, also export sub-pages
-s, --singlefile export all pages to a single output file
-O, --overwrite force overwriting existing file(s)
Search Options:
None
Index Options:
-f, --flush flush the index first and force re-building
Try 'zim --manual' for more help.
'''
def run(self):
print(self.usagehelp)
print(self.optionhelp) # TODO - generate from commands
class VersionCommand(Command):
'''Class implementing the C{--version} command'''
def run(self):
print('zim %s\n' % zim.__version__)
print(zim.__copyright__, '\n')
print(zim.__license__)
class NotebookLookupError(Error):
'''Error when failing to locate a notebook'''
description = _('Could not find the file or folder for this notebook')
# T: Error verbose description
class NotebookCommand(Command):
'''Base class for commands that act on a notebook'''
def get_default_or_only_notebook(self):
'''Helper to get a default notebook'''
notebooks = get_notebook_list()
if notebooks.default:
uri = notebooks.default.uri
elif len(notebooks) == 1:
uri = notebooks[0].uri
else:
return None
return resolve_notebook(uri, pwd=self.pwd) # None if not found
def get_notebook_argument(self):
'''Get the notebook and page arguments for this command
@returns: a 2-tuple of an L{NotebookInfo} object and an
optional L{Path} or C{(None, None)} if the notebook
argument is optional and not given
@raises NotebookLookupError: if the notebook is mandatory and
not given, or if it is given but could not be resolved
'''
assert self.arguments[0] in ('NOTEBOOK', '[NOTEBOOK]')
args = self.get_arguments()
notebook = args[0]
if notebook is None:
if self.arguments[0] == 'NOTEBOOK': # not optional
raise NotebookLookupError(_('Please specify a notebook'))
# T: Error when looking up a notebook
else:
return None, None
notebookinfo = resolve_notebook(notebook, pwd=self.pwd)
if not notebookinfo:
raise NotebookLookupError(_('Could not find notebook: %s') % notebook)
# T: error message
if len(self.arguments) > 1 \
and self.arguments[1] in ('PAGE', '[PAGE]') \
and args[1] is not None:
pagename = Path.makeValidPageName(args[1])
return notebookinfo, Path(pagename)
else:
return notebookinfo, None
def build_notebook(self, ensure_uptodate=True):
'''Get the L{Notebook} object for this command
Tries to automount the file location if needed.
@param ensure_uptodate: if C{True} index is updated when needed.
Only set to C{False} when index update is handled explicitly
(e.g. in the main gui).
@returns: a L{Notebook} object and a L{Path} object or C{None}
@raises NotebookLookupError: if the notebook could not be
resolved or is not given
@raises FileNotFoundError: if the notebook location does not
exist and could not be mounted.
'''
# Explicit page argument has priority over implicit from uri
# mounting is attempted by zim.notebook.build_notebook()
notebookinfo, page = self.get_notebook_argument() # can raise NotebookLookupError
if not notebookinfo:
raise NotebookLookupError(_('Please specify a notebook'))
notebook, uripage = build_notebook(notebookinfo) # can raise FileNotFound
if ensure_uptodate and not notebook.index.is_uptodate:
for info in notebook.index.update_iter():
#logger.info('Indexing %s', info)
pass # TODO meaningful info for above message
return notebook, page or uripage
class GuiCommand(NotebookCommand, GtkCommand):
'''Class implementing the C{--gui} command and run the gtk interface'''
arguments = ('[NOTEBOOK]', '[PAGE]')
options = (
('list', '', 'show the list with notebooks instead of\nopening the default notebook'),
('geometry=', '', 'window size and position as WxH+X+Y'),
('fullscreen', '', 'start in fullscreen mode'),
('standalone', '', 'start a single instance, no background process'),
)
def build_notebook(self, ensure_uptodate=False):
# Bit more complicated here due to options to use default and
# allow using notebookdialog to prompt
# Explicit page argument has priority over implicit from uri
# mounting is attempted by zim.notebook.build_notebook()
from zim.notebook import FileNotFoundError
def prompt_notebook_list():
import zim.gui.notebookdialog
return zim.gui.notebookdialog.prompt_notebook()
# Can return None if dialog is cancelled
used_default = False
page = None
if self.opts.get('list'):
notebookinfo = prompt_notebook_list()
else:
notebookinfo, page = self.get_notebook_argument()
if notebookinfo is None:
notebookinfo = self.get_default_or_only_notebook()
used_default = notebookinfo is not None
if notebookinfo is None:
notebookinfo = prompt_notebook_list()
		if notebookinfo is None:
return None, None # Cancelled prompt
try:
notebook, uripage = build_notebook(notebookinfo) # can raise FileNotFound
except FileNotFoundError:
if used_default:
				# Default notebook went missing? Fallback to dialog to allow changing it
notebookinfo = prompt_notebook_list()
if notebookinfo is None:
return None, None # Cancelled prompt
notebook, uripage = build_notebook(notebookinfo) # can raise FileNotFound
else:
raise
if ensure_uptodate and not notebook.index.is_uptodate:
for info in notebook.index.update_iter():
#logger.info('Indexing %s', info)
pass # TODO meaningful info for above message
|
dbk138/ImageRegionRecognition-FrontEnd
|
PythonScripts/LocationLookup.py
|
Python
|
mit
| 2,163 | 0.017568 |
__author__ = 'jhala'
import Helpers
import json
import os
''' creates a location lookup, and an associated image lookup '''
''' main '''
if __name__ == "__main__":
fil = r"c:\capstone\featureInfo.csv"
outLoc = r"c:\capstone\locationLookup.json"
imageBaseDir="C:\\Users\\jhala\\angular-seed\\app\\images\\"
fileArr = Helpers.fileInfo(fil)
headerArr = Helpers.getHeader(fileArr)
locationsDict = {}
locations=[]
locationsFinal=[]
locationId = 0
rowId = 0
existingLoc={}
for row in fileArr[1:]:
colCounter = 0
thisImageCount=0
imagesForThisLocation=[]
imagesForThisLocationTmp=[]
for col in row:
if headerArr[colCounter] == 'imgName':
imgName = col.replace("E:/GeoImages/", "")
locationArr = imgName.split("/")
locationName = locationArr[0]
if not os.path.exists(imageBaseDir + locationName ):
break
if len(locationArr[0:len(locationArr) - 1]) > 1:
print "Nested loc alert"
print locationArr[0]
try:
                    locIndex=locations.index(locationName)
imagesForThisLocationTmp = locationsFinal[locIndex]['images']
imagesForThisLocationTmp.append( { 'name' : imgName})
                    locationsFinal[locIndex] = { 'name' : locationsFinal[locIndex]['name'] , 'id' : locationsFinal[locIndex]['id'] , 'numImages' : locationsFinal[locIndex]['numImages']+1 , 'images' : imagesForThisLocationTmp }
except ValueError:
locationId += 1
locations.append(locationName)
thisImageCount += 1
imagesForThisLocation = { 'name': imgName}
locationsFinal.append({ 'name' : locationName , 'id' : locationId, 'numImages' : thisImageCount, 'images': [ imagesForThisLocation ]})
break
colCounter += 1
rowId += 1
ol = open(outLoc,'w')
json.dump(locationsFinal,ol,indent=4, separators=(',', ': '))
ol.close()
|
GbalsaC/bitnamiP
|
venv/src/oauth2-provider/oauth2_provider/tests/base.py
|
Python
|
agpl-3.0
| 5,353 | 0.000374 |
# pylint: disable=missing-docstring
import json
from urlparse import urlparse
from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.test import TestCase
import jwt
import provider.scope
from oauth2_provider.models import TrustedClient
from oauth2_provider.tests.util import normpath
from oauth2_provider.tests.factories import (
UserFactory,
ClientFactory,
AccessTokenFactory,
TrustedClientFactory
)
class BaseTestCase(TestCase):
def setUp(self):
self.client_secret = 'some_secret'
self.auth_client = ClientFactory(client_secret=self.client_secret)
self.password = 'some_password'
self.user_factory = UserFactory
self.user = None
self.access_token = None
self.set_user(self.make_user())
def make_user(self):
return self.user_factory(password=self.password)
def set_user(self, user):
self.user = user
def set_trusted(self, client, trusted=True):
if trusted:
TrustedClientFactory.create(client=client)
else:
TrustedClient.objects.filter(client=client).delete()
class OAuth2TestCase(BaseTestCase):
def setUp(self):
super(OAuth2TestCase, self).setUp()
    def login_and_authorize(self, scope=None, claims=None, trusted=False):
""" Login into client using OAuth2 authorization flow. """
self.set_trusted(self.auth_client, trusted)
self.client.login(username=self.user.username, password=self.password)
payload = {
'client_id': self.auth_client.client_id,
            'redirect_uri': self.auth_client.redirect_uri,
'response_type': 'code',
'state': 'some_state',
}
_add_values(payload, 'id_token', scope, claims)
response = self.client.get(reverse('oauth2:capture'), payload)
self.assertEqual(302, response.status_code)
response = self.client.get(reverse('oauth2:authorize'), payload)
return response
def get_access_token_response(self, scope=None, claims=None):
""" Get a new access token using the OAuth2 authorization flow. """
response = self.login_and_authorize(scope, claims, trusted=True)
self.assertEqual(302, response.status_code)
self.assertEqual(reverse('oauth2:redirect'), normpath(response['Location']))
response = self.client.get(reverse('oauth2:redirect'))
self.assertEqual(302, response.status_code)
query = QueryDict(urlparse(response['Location']).query)
payload = {
'grant_type': 'authorization_code',
'client_id': self.auth_client.client_id,
'client_secret': self.client_secret,
'code': query['code'],
}
_add_values(payload, 'id_token', scope, claims)
response = self.client.post(reverse('oauth2:access_token'), payload)
return response
class IDTokenTestCase(OAuth2TestCase):
def get_id_token_values(self, scope=None, claims=None):
""" Get a new id_token using the OIDC authorization flow. """
self.assertIn('openid', scope.split())
response = self.get_access_token_response(scope, claims)
self.assertEqual(response.status_code, 200)
values = json.loads(response.content)
self.assertIn('access_token', values)
id_token = values['id_token']
secret = self.auth_client.client_secret
audience = self.auth_client.client_id
self.assertValidIDToken(id_token, secret, audience)
scopes = values['scope'].split()
claims = self.parse_id_token(id_token)
# Should always be included
self.assertIn('iss', claims)
self.assertIn('sub', claims)
return scopes, claims
def parse_id_token(self, id_token):
claims = jwt.decode(id_token, verify=False)
return claims
def assertValidIDToken(self, id_token, secret, audience):
try:
jwt.decode(id_token, secret, audience=audience)
except jwt.DecodeError:
assert False
class UserInfoTestCase(BaseTestCase):
def setUp(self):
super(UserInfoTestCase, self).setUp()
self.path = reverse('oauth2:user_info')
self.set_user(self.user)
def set_user(self, user):
super(UserInfoTestCase, self).set_user(user)
self.access_token = AccessTokenFactory(user=self.user, client=self.auth_client)
def set_access_token_scope(self, scope):
self.access_token.scope = provider.scope.to_int(*scope.split())
self.access_token.save() # pylint: disable=no-member
def get_with_authorization(self, path, access_token=None, payload=None):
kwargs = {}
if access_token:
kwargs['HTTP_AUTHORIZATION'] = 'Bearer %s' % access_token
return self.client.get(path, payload, **kwargs)
def get_userinfo(self, token=None, scope=None, claims=None):
payload = _add_values({}, 'userinfo', scope, claims)
response = self.get_with_authorization(self.path, token, payload)
values = json.loads(response.content)
return response, values
def _add_values(data, endpoint, scope=None, claims=None):
if scope:
data['scope'] = scope
if claims:
data['claims'] = json.dumps({endpoint: claims})
return data
|
jimklo/LearningRegistry
|
LR/lr/lib/oauth.py
|
Python
|
apache-2.0
| 6,907 | 0.007963 |
import logging, couchdb, oauth2, json, sys
from decorator import decorator
from pylons import config, request as r, response as res, session
from pylons.controllers.util import abort
from functools import wraps
log = logging.getLogger(__name__)
appConfig = config['app_conf']
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class BadOAuthSignature(Error):
pass
class OAuthJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, (oauth2.Consumer, oauth2.Token)):
return { "key": o.key, "secret": o.secret }
elif isinstance(o, Exception):
return {
"type": type(o).__name__,
"message": o.message }
try:
return json.JSONEncoder.default(self, o)
except Exception as e:
log.exception("Encoded Type: {0}\nrepr: {1}".format
|
(type(o), repr(o)))
raise e
class CouchDBOAuthUtil():
    def __init__(self, couchdb_dba_url=appConfig['couchdb.url.dbadmin'], users_db=appConfig['couchdb.db.users'], oauth_view=appConfig['couchdb.db.users.oauthview']):
self.server = couchdb.Server(couchdb_dba_url)
self.users = self.server[users_db]
self.oauth_view = oauth_view
def find_possible(self, consumer, token, mapper=None):
def wrap_row(row):
# log.error("wrap_row: "+json.dumps(row))
row_result = {}
if "doc" in row:
row_result["name"] = row["doc"]["name"]
row_result["consumer"] = oauth2.Consumer(key=consumer, secret=row["doc"]["oauth"]["consumer_keys"][consumer])
row_result["token"] = oauth2.Token(key=token, secret=row["doc"]["oauth"]["tokens"][token])
row_result["id"] = row["doc"]["_id"]
row_result["roles"] = row["doc"]["roles"]
if mapper:
mapper(row_result, row)
return row_result
view_opts = {
"key":[consumer, token],
"include_docs":True
}
view_results = self.users.view(self.oauth_view, wrapper=wrap_row, **view_opts)
return view_results.rows
def check_request(self, request, mapper=None):
http_method = request.method
http_url = request.host_url + request.path_info
headers = request.headers
query_string = request.query_string
info = None
parameters = None
# log.error("*** CHECK_REQUEST *** "+json.dumps({
# "query_string": query_string,
# "headers": {}.update(headers),
# "http_method": http_method,
# "http_url": http_url
# }))
oa_request = oauth2.Request.from_request(http_method, http_url, headers, query_string=query_string)
if oa_request and all([ x in oa_request for x in ['oauth_consumer_key', 'oauth_token']]):
server = oauth2.Server()
server.add_signature_method(oauth2.SignatureMethod_HMAC_SHA1())
last_exc = None
for row in self.find_possible(oa_request['oauth_consumer_key'], oa_request['oauth_token'], mapper):
try:
parameters = server.verify_request(oa_request, row["consumer"], row["token"])
except oauth2.Error as e:
last_exc = BadOAuthSignature("OAuth2 Error: %s" % e.message)
except:
import sys
log.exception("Caught Exception in CouchDBOAuthUtil")
last_exc = BadOAuthSignature(sys.exc_info()[1])
if parameters != None:
info = row
break
if parameters == None and last_exc != None:
raise last_exc
return (parameters, info)
_authobj = CouchDBOAuthUtil()
DEFAULT_SESSION_KEY = "oauth"
class status(object):
Okay = "Okay"
NoSignature = "No Signature"
BadSignature = "Bad Signature"
Error = "Error"
Unknown = "Unknown"
def authorize(session_key=DEFAULT_SESSION_KEY, service_doc=None, roles=None, mapper=None, realm=None, pre_cond=None, post_cond=None):
_roles = roles
_mapper = mapper
_session_key=session_key
_realm = realm or ""
_pre_cond = pre_cond
_post_cond = post_cond
_service_doc = service_doc
def wrapper(fn, self, *args, **kwargs):
if _service_doc:
sdoc = _service_doc()
try:
if "oauth" not in sdoc["service_auth"]["service_authz"]:
return fn(self, *args, **kwargs)
except:
raise ValueError("Missing service_document for checking if OAUTH access is enabled.")
if _pre_cond:
precond = cont = _pre_cond()
else:
precond = cont = True
if precond:
success = { "status": status.Unknown, "user": None, "parameters": None }
try:
success["parameters"], success["user"] = _authobj.check_request(r._current_obj(), _mapper)
if success["parameters"] is None:
success["status"] = status.NoSignature
else:
success["status"] = status.Okay
except BadOAuthSignature as e:
success["status"] = status.BadSignature
success["detail"] = e.message
cont = False
except:
success["status"] = status.Error
success["detail"] = repr(sys.exc_info())
log.exception("Caught Exception in authorize")
cont = False
sess = session._current_obj()
sess[_session_key] = success
# log.error("in wrap:"+repr(sess[_session_key]))
if cont and _roles:
cont = UserHasRoles(_session_key, _roles)
if _post_cond:
cont = _post_cond(cont)
if cont:
try:
return fn(self, *args, **kwargs)
finally:
pass
else:
h = {"WWW-Authenticate": "OAuth realm=\"{0}\"".format(_realm)}
log.error("Authorization Required")
res.headers.update(h)
abort(401, "OAuth Authorization Required", headers=h)
return decorator(wrapper)
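# A rough usage sketch for a Pylons controller action (controller, role and
# realm names below are hypothetical):
#
#   class PublishController(BaseController):
#       @authorize(roles=["publish"], realm="Learning Registry")
#       def create(self):
#           oauth_info = session["oauth"]["user"]
#           ...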
def UserHasRoles(session_key, roles=[] ):
hasRoles = False
try:
s = session._current_obj()
hasRoles = all([role in s[session_key]["user"]["roles"] for role in roles])
except:
pass
return hasRoles
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyKDE4/kio/KIconDialog.py
|
Python
|
gpl-2.0
| 1,319 | 0.010614 |
# encoding: utf-8
# module PyKDE4.kio
# from /usr/lib/python3/dist-packages/PyKDE4/kio.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdeui as __PyKDE4_kdeui
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
class KIconDialog(__PyKDE4_kdeui.KDialog):
# no doc
def getIcon(self, *args, **kwargs): # real signature unknown
pass
    def iconSize(self, *args, **kwargs): # real signature unknown
pass
    def newIconName(self, *args, **kwargs): # real signature unknown
pass
def openDialog(self, *args, **kwargs): # real signature unknown
pass
def setCustomLocation(self, *args, **kwargs): # real signature unknown
pass
def setIconSize(self, *args, **kwargs): # real signature unknown
pass
def setStrictIconSize(self, *args, **kwargs): # real signature unknown
pass
def setup(self, *args, **kwargs): # real signature unknown
pass
def showDialog(self, *args, **kwargs): # real signature unknown
pass
def slotOk(self, *args, **kwargs): # real signature unknown
pass
def strictIconSize(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
|
steve-ord/daliuge
|
daliuge-engine/dlg/runtime/tool_commands.py
|
Python
|
lgpl-2.1
| 1,948 | 0.00462 |
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2020
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
from ..common import tool
def include_dir(_parser, _args):
from . import get_include_dir
print(get_include_dir())
def register_commands():
tool.cmdwrap('nm', 'Starts a Node Manager', 'dlg.manager.cmdline:dlgNM')
tool.cmdwrap('dim', 'Starts a Drop Island Manager', 'dlg.manager.cmdline:dlgDIM')
tool.cmdwrap('mm', 'Starts a Master Manager', 'dlg.manager.cmdline:dlgMM')
tool.cmdwrap('replay', 'Starts a Replay Manager', 'dlg.manager.cmdline:dlgReplay')
tool.cmdwrap('daemon', 'Starts a DALiuGE Daemon process', 'dlg.manager.proc_daemon:run_with_cmdline')
tool.cmdwrap('proxy', 'A reverse proxy to be used in restricted environments to contact the Drop Managers', 'dlg.deploy.pawsey.dfms_proxy:run')
tool.cmdwrap('monitor', 'A proxy to be used in conjunction with the dlg proxy in restricted environments', 'dlg.deploy.pawsey.dfms_monitor:run')
tool.cmdwrap('include_dir', 'Print the directory where C header files can be found', include_dir)
|
sqlalchemy/sqlalchemy
|
test/ext/mypy/plugin_files/issue_7321.py
|
Python
|
mit
| 427 | 0 |
from typing import Any
from typing import Dict
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import declared_attr
Base = declarative_base()
class Foo(Base):
@declared_attr
def __tablename__(cls) -> str:
return "name"
@declared_attr
def __mapper_args__(cls) -> Dict[Any, Any]:
return {}
@declared_attr
def __table_args__(cls) -> Dict[Any, Any]:
return {}
|
yongwen/makahiki
|
makahiki/apps/widgets/smartgrid_play_tester/migrations/__init__.py
|
Python
|
mit
| 25 | 0 |
""
|
"Schema migrations."""
| |
wooyek/django-tasker
|
django_tasker/apps.py
|
Python
|
mit
| 197 | 0 |
from django.apps import AppConfig
from django.utils.translation import ugettext as __, ugettext_lazy as _
class TaskerConfig(AppConfig):
name = 'django_tasker'
verbose_name = _('tasker')
|
shagunsodhani/powerlaw
|
powerlaw/regression.py
|
Python
|
mit
| 7,543 | 0.012727 |
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt
from scipy.special import zeta
from .distribution import frequency_distribution, powerlaw_series, random_series
from .utils import unique
from math import pow, e, log, sqrt
import sys
import random
def least_square_regression(x, y, xlabel = "x", ylabel = "y", prefix="", suffix=""):
"""
Perform least square regression to find the best fit line and returns the slope of the line.
**Parameters**
x : List of values along x axis.
y : List of values along y axis.
"""
X = np.asarray(x).reshape((len(x), 1))
Y = np.asarray(y).reshape((len(y), 1))
regr = linear_model.LinearRegression()
    regr.fit(X, Y)
label_string = "Best fit line, y = "+str(regr.coef_[0][0])+" * x + "+str(regr.intercept_[0])
print(label_string)
print("Residual sum of squares: %.2f" % np.mean((regr.predict(X) - Y) ** 2))
print("Variance score: %.2f" % regr.score(X, Y))
# Plot outputs
original_data, = plt.plot(X, Y,'go', label="original data")
# best_fit_line, = plt.plot(X, map(lambda x: pow(e, -x), X), 'bo', label=label_string)
best_fit_line, = plt.plot(X, regr.predict(X), color='blue', linewidth=3, label=label_string)
plt.title("Least Square Regression"+suffix)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
curves = [original_data, best_fit_line]
labels = [curve.get_label() for curve in curves]
plt.legend(curves, labels)
plt.savefig(prefix+"least_square_regression_fit"+suffix+".png")
plt.show()
return regr.coef_[0][0]
def estimate_scaling_parameter(series, xmin = 1, discrete = False):
"""
Perform Method of Maximum Liklihood (MLE) to find the best fit value of Alpha.
**Parameters**
series : series of data to be fit.
xmin : Float/Integer, xmin for the distribution - assumed to be known before-hand. Default value is 1.0
discrete : Boolean, whether to treat series as discrete or continous. Default value is False.
**Returns**
Estimated Alpha value.
"""
normalizing_constant = 0.0
if(discrete):
normalizing_constant = 0.5
partial_sum = 0.0
count = 0.0
# print series
for x in series:
partial_sum += log (x/(xmin - normalizing_constant))
count+=1
Alpha = 1.0 + count*(1/partial_sum)
return Alpha
def estimate_parameters(series, min_size_series = 50, discrete = False):
"""
Apply Clauset et al.'s method to find the best fit value of xmin and Alpha.
**Parameters**
series : series of data to be fit.
min_size_series : Minimum possible size of the distribution to which power-law fit will be attempted. Fitting power-law to a very small series would give biased results where power-law may appear to be a good fit even when data is not drawn from power-law distribution. The default value is taken to be 50 as suggested in the paper.
discrete : Boolean, whether to treat series as discrete or continous. Default value is False
**Returns**
Tuple of (Estimated xmin, Estimated Alpha value, minimum KS statistics score).
"""
sorted_series = sorted(series)
xmin_candidates = []
x_prev = sorted_series[0]
xmin_candidates.append(x_prev)
for x in sorted_series:
if(x>x_prev):
x_prev = x
xmin_candidates.append(x_prev)
ks_statistics_min = sys.maxsize;
xmin_result = 0
Alpha_result = 2
for xmin in xmin_candidates[:-1*(min_size_series-1)]:
data = [x for x in sorted_series if x>=xmin]
estimated_Alpha = estimate_scaling_parameter(data, xmin)
if(discrete):
Px = [zeta(estimated_Alpha, x)/zeta(estimated_Alpha, xmin) for x in unique(data)]
else:
Px = [pow(float(x)/xmin, 1 - estimated_Alpha ) for x in unique(data)]
n = len(Px)
Sx = [i[1]/n for i in frequency_distribution(data, pdf=False)]
ks_statistics = max( [abs(Sx[counter] - Px[counter]) for counter in range(0, n)] )
if(ks_statistics<ks_statistics_min):
ks_statistics_min = ks_statistics
xmin_result = xmin
Alpha_result = estimated_Alpha
return (xmin_result, Alpha_result, ks_statistics_min)
def generate_dataset(series, xmin, alpha, epsilon = 0.01):
"""
Generator to generate datasets for goodness_of_fit test.
**Parameters**
series : series of data on which the power-law model was fitted.
xmin : xmin for the fitted power-law model.
alpha : alpha for the fitted power-law model.
epsilon : desired accuracy in p-value. Default is set to 0.01
**Returns**
A generator to generate list of numbers (datasets).
"""
number_of_datasets = int(round(0.25/(epsilon**2)) +1)
print(number_of_datasets)
n = len(series)
non_powerlaw_series = [x for x in series if x<xmin]
ntail = n - len(non_powerlaw_series)
p = float(ntail)/n
# print p
# print ntail
# print n
for i in range(0, number_of_datasets):
dataset = []
count_powerlaw_series = 0
# how many numbers are to be picked from powerlaw distribution
for random_number in random_series(n):
if(random_number<=p):
count_powerlaw_series+=1
# generate number from power-law distribution
else:
# pick number from non_powerlaw_series
dataset.append(random.choice(non_powerlaw_series))
dataset = dataset + [i for i in powerlaw_series(Alpha = alpha, xmin = xmin, n = count_powerlaw_series)]
yield dataset
def goodness_of_fit(series, xmin, alpha, ks_statistics, epsilon = 0.01, min_size_series = 50):
"""
Function to calculate the p-value as a measure of goodness_of_fit for the fitted model.
**Parameters**
series : series of data on which the power-law model was fitted.
xmin : xmin for the fitted power-law model.
alpha : alpha for the fitted power-law model.
ks_statistics : KS statistics for the fitted power-law model.
epsilon : desired accuracy in p-value. Default is set to 0.01.
min_size_series : Minimum possible size of the distribution to which power-law fit will be attempted. This value is used when fitting power-law to the generated datasets. The default value is taken to be 50. For further details, see `estimate_parameters()`.
**Returns**
p-value for the fitted model.
"""
count_dataset = 0.0
# number of synthetic datasets tested
n1 = 0.0
# number of synthetic datasets where ks value is greater than ks value for given data
for dataset in generate_dataset(series=series, xmin=xmin, alpha=alpha, epsilon=epsilon):
count_dataset+=1.0
(xmin_dataset, alpha_dataset, ks_statistics_dataset) = estimate_parameters(series=dataset, min_size_series = min_size_series)
if(ks_statistics_dataset>ks_statistics):
n1+=1.0
return n1/count_dataset
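# Interpretation note (added commentary, not in the original): following Clauset,
# Shalizi & Newman (2009), the power-law hypothesis is usually ruled out when the
# returned p-value is <= 0.1; a larger p-value means the power law is a plausible,
# though not proven, fit. For example:
#
#     p = goodness_of_fit(series=data, xmin=xmin, alpha=alpha, ks_statistics=ks_statistics)
#     power_law_plausible = p > 0.1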
if __name__ == "__main__":
n = 10
data = [i for i in powerlaw_series(n=n, xmin = 20, Alpha = 2.6)]
# print data
(xmin, alpha, ks_statistics) = estimate_parameters(series=data, min_size_series = 5)
print("xmin = "+str(xmin))
print("alpha = "+str(alpha))
print(goodness_of_fit(series=data, xmin=xmin, alpha=alpha, ks_statistics=ks_statistics, epsilon = 0.01, min_size_series = 50))
|
chickenzord/dotenvy
|
setup.py
|
Python
|
mit
| 1,627 | 0 |
#!/usr/bin/env python
from setuptools import setup, find_packages
REPO_NAME = 'chickenzord/dotenvy'
VERSION = '0.2.0'
ARCHIVE_URL = 'https://github.com/%s/archive/v%s.tar.gz' % (REPO_NAME, VERSION)
setup(
# packaging
packages=find_packages('src'),
package_dir={'': 'src'},
package_data={},
install_requires=[
'future',
],
setup_requires=[
'pytest-runner',
'flake8',
],
tests_require=[
'pytest',
'pytest-cov',
'pytest-travis-fold',
        'mock',
'backports.tempfile',
],
entry_points={
"console_scripts": ['dotenvy = dotenvy.cli:main']
},
zip_safe=False,
# metadata
name='dotenvy',
version=VERSION,
author='Akhyar Amarullah',
author_email='akhyrul@gmail.com',
description='Dotenv handler for Python',
long_description=open('README.rst').read(),
download_url=ARCHIVE_URL,
license='MIT',
keywords=['dotenv', 'configuration', 'environment'],
url='https://github.com/%s' % (REPO_NAME),
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
],
)
|
aeroaks/httpProfiler
|
methods/webencodings/tests.py
|
Python
|
mit
| 6,184 | 0 |
# coding: utf8
"""
webencodings.tests
~~~~~~~~~~~~~~~~~~
A basic test suite for Encoding.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode,
IncrementalDecoder, IncrementalEncoder, UTF8)
def assert_raises(exception, function, *args, **kwargs):
try:
function(*args, **kwargs)
except exception:
return
else: # pragma: no cover
raise AssertionError('Did not raise %s.' % exception)
def test_labels():
assert lookup('utf-8').name == 'utf-8'
assert lookup('Utf-8').name == 'utf-8'
assert lookup('UTF-8').name == 'utf-8'
assert lookup('utf8').name == 'utf-8'
assert lookup('utf8').name == 'utf-8'
assert lookup('utf8 ').name == 'utf-8'
assert lookup(' \r\nutf8\t').name == 'utf-8'
assert lookup('u8') is None # Python label.
assert lookup('utf-8 ') is None # Non-ASCII white space.
assert lookup('US-ASCII').name == 'windows-1252'
assert lookup('iso-8859-1').name == 'windows-1252'
assert lookup('latin1').name == 'windows-1252'
assert lookup('LATIN1').name == 'windows-1252'
assert lookup('latin-1') is None
assert lookup('LATİN1') is None # ASCII-only case insensitivity.
def test_all_labels():
for label in LABELS:
assert decode(b'', label) == ''
assert encode('', label) == b''
for repeat in [0, 1, 12]:
output, _ = iter_decode([b''] * repeat, label)
assert list(output) == []
assert list(iter_encode([''] * repeat, label)) == []
decoder = IncrementalDecoder(label)
assert decoder.decode(b'') == ''
assert decoder.decode(b'', final=True) == ''
encoder = IncrementalEncoder(label)
assert encoder.encode('') == b''
assert encoder.encode('', final=True) == b''
# All encoding names are valid labels too:
for name in set(LABELS.values()):
assert lookup(name).name == name
def test_invalid_label():
assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid')
assert_raises(LookupError, encode, 'é', 'invalid')
assert_raises(LookupError, iter_decode, [], 'invalid')
assert_raises(LookupError, iter_encode, [], 'invalid')
assert_raises(LookupError, IncrementalDecoder, 'invalid')
assert_raises(LookupError, IncrementalEncoder, 'invalid')
def test_decode():
assert decode(b'\x80', 'latin1') == '€'
assert decode(b'\x80', lookup('latin1')) == '€'
assert decode(b'\xc3\xa9', 'utf8') == 'é'
assert decode(b'\xc3\xa9', UTF8) == 'é'
assert decode(b'\xc3\xa9', 'ascii') == 'é'
assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == 'é' # UTF-8 with BOM
assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == 'é' # UTF-16-BE with BOM
assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == 'é' # UTF-16-LE with BOM
assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == '\ue900'
    assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == '\ue900'
assert decode(b'\x00\xe9', 'UTF-16BE') == 'é'
assert decode(b'\xe9\x00', 'UTF-16LE') == 'é'
assert decode(b'\xe9\x00', 'UTF-16') == 'é'
assert decode(b'\xe9\x00', 'UTF-16BE') == '\ue900'
    assert decode(b'\x00\xe9', 'UTF-16LE') == '\ue900'
assert decode(b'\x00\xe9', 'UTF-16') == '\ue900'
def test_encode():
assert encode('é', 'latin1') == b'\xe9'
assert encode('é', 'utf8') == b'\xc3\xa9'
assert encode('é', 'utf8') == b'\xc3\xa9'
assert encode('é', 'utf-16') == b'\xe9\x00'
assert encode('é', 'utf-16le') == b'\xe9\x00'
assert encode('é', 'utf-16be') == b'\x00\xe9'
def test_iter_decode():
def iter_decode_to_string(input, fallback_encoding):
output, _encoding = iter_decode(input, fallback_encoding)
return ''.join(output)
assert iter_decode_to_string([], 'latin1') == ''
assert iter_decode_to_string([b''], 'latin1') == ''
assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é'
assert iter_decode_to_string([b'hello'], 'latin1') == 'hello'
assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello'
assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello'
assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é'
assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é'
assert iter_decode_to_string([
b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é'
assert iter_decode_to_string([
b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD'
assert iter_decode_to_string([
b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é'
assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == ''
assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»'
assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é'
assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é'
assert iter_decode_to_string([
b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é'
assert iter_decode_to_string([
b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo'
def test_iter_encode():
assert b''.join(iter_encode([], 'latin1')) == b''
assert b''.join(iter_encode([''], 'latin1')) == b''
assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9'
assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9'
assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00'
assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00'
assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9'
assert b''.join(iter_encode([
'', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo'
def test_x_user_defined():
encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca'
decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca'
encoded = b'aa'
decoded = 'aa'
assert decode(encoded, 'x-user-defined') == decoded
assert encode(decoded, 'x-user-defined') == encoded
|
imajes/Sick-Beard
|
sickbeard/notifiers/boxcar2.py
|
Python
|
gpl-3.0
| 5,011 | 0.002993 |
# Author: Marvin Pinto <me@marvinp.ca>
# Author: Dennis Lutter <lad1337@gmail.com>
# Author: Shawn Conroyd <mongo527@gmail.com>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
import sickbeard
from sickbeard import logger
from sickbeard.common import notifyStrings, NOTIFY_SNATCH, NOTIFY_DOWNLOAD
from sickbeard.exceptions import ex
API_URL = "https://new.boxcar.io/api/notifications"
class Boxcar2Notifier:
def _sendBoxcar2(self, title, msg, accessToken, sound):
"""
Sends a boxcar2 notification to the address provided
msg: The message to send (unicode)
title: The title of the message
accessToken: The access token to send notification to
returns: True if the message succeeded, False otherwise
"""
# build up the URL and parameters
msg = msg.strip().encode('utf-8')
data = urllib.urlencode({
'user_credentials': accessToken,
'notification[title]': title + " - " + msg,
'notification[long_message]': msg,
'notification[sound]': sound,
'notification[source_name]': "SickBeard"
})
# send the request to boxcar2
try:
req = urllib2.Request(API_URL)
handle = urllib2.urlopen(req, data)
handle.close()
except urllib2.URLError, e:
# FIXME: Python 2.5 hack, it wrongly reports 201 as an error
if hasattr(e, 'code') and e.code == 201:
logger.log(u"BOXCAR2: Notification successful.", logger.MESSAGE)
return True
# if we get an error back that doesn't have an error code then who knows what's really happening
if not hasattr(e, 'code'):
logger.log(u"BOXCAR2: Notification failed." + ex(e), logger.ERROR)
else:
logger.log(u"BOXCAR2: Notification failed. Error code: " + str(e.code), logger.ERROR)
if e.code == 404:
logger.log(u"BOXCAR2: Access token is wrong/not associated to a device.", logger.ERROR)
elif e.code == 401:
logger.log(u"BOXCAR2: Access token not recognized.", logger.ERROR)
elif e.code == 400:
logger.log(u"BOXCAR2: Wrong data sent to boxcar.", logger.ERROR)
elif e.code == 503:
logger.log(u"BOXCAR2: Boxcar server to busy to handle the request at this time.", logger.WARNING)
return False
logger.log(u"BOXCAR2: Notification successful.", logger.MESSAGE)
return True
def _notify(self, title, message, accessToken=None, sound=None, force=False):
"""
Sends a boxcar2 notification based on the provided info or SB config
title: The title of the notification to send
message: The message string to send
accessToken: The access token to send the notification to (optional, defaults to the access token in the config)
force: If True then the notification will be sent even if Boxcar is disabled in the config
"""
# suppress notifications if the notifier is disabled but the notify options are checked
if not sickbeard.USE_BOXCAR2 and not force:
return False
# fill in omitted parameters
if not accessToken:
accessToken = sickbeard.BOXCAR2_ACCESS_TOKEN
if not sound:
sound = sickbeard.BOXCAR2_SOUND
logger.log(u"BOXCAR2: Sending notification for " + message, logger.DEBUG)
return self._sendBoxcar2(title, message, accessToken, sound)
##############################################################################
# Public functions
##############################################################################
def notify_snatch(self, ep_name):
if sickbeard.BOXCAR2_NOTIFY_ONSNATCH:
self._notify(notifyStrings[NOTIFY_SNATCH], ep_name)
def notify_download(self, ep_name):
if sickbeard.BOXCAR2_NOTIFY_ONDOWNLOAD:
self._notify(notifyStrings[NOTIFY_DOWNLOAD], ep_name)
def test_notify(self, accessToken, sound):
return self._notify("Test", "This is a test notification from Sick Beard", accessToken, sound, force=True)
def update_library(self, ep_obj=None):
pass
notifier = Boxcar2Notifier
|
garrettcap/Bulletproof-Backup
|
wx/tools/Editra/src/syntax/_smalltalk.py
|
Python
|
gpl-2.0
| 3,406 | 0.00411 |
###############################################################################
# Name: smalltalk.py #
# Purpose: Define Smalltalk syntax for highlighting and other features #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: smalltalk.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for Smalltalk
@todo: more keywords, styling fixes
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _smalltalk.py 68798 2011-08-20 17:17:05Z CJP $"
__revision__ = "$Revision: 68798 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
# Special Selectors
ST_KEYWORDS = (0, "ifTrue: ifFalse: whileTrue: whileFalse: ifNil: ifNotNil: "
"whileTrue repeat isNil put to at notNil super self "
"true false new not isNil inspect out nil do add for "
"methods methodsFor instanceVariableNames classVariableNames "
"poolDictionaries subclass")
#---- End Keyword Definitions ----#
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [(stc.STC_ST_ASSIGN, 'operator_style'),
(stc.STC_ST_BINARY, 'operator_style'),
(stc.STC_ST_BOOL, 'keyword_style'),
(stc.STC_ST_CHARACTER, 'char_style'),
(stc.STC_ST_COMMENT, 'comment_style'),
(stc.STC_ST_DEFAULT, 'default_style'),
(stc.STC_ST_GLOBAL, 'global_style'),
(stc.STC_ST_KWSEND, 'keyword_style'),
(stc.STC_ST_NIL, 'keyword_style'),
(stc.STC_ST_NUMBER, 'number_style'),
(stc.STC_ST_RETURN, 'keyword_style'),
(stc.STC_ST_SELF, 'keyword_style'),
(stc.STC_ST_SPECIAL, 'pre_style'),
(stc.STC_ST_SPEC_SEL, 'keyword_style'), # Words in keyword list
(stc.STC_ST_STRING, 'string_style'),
(stc.STC_ST_SUPER, 'class_style'),
(stc.STC_ST_SYMBOL, 'scalar_style')]
#---- Extra Properties ----#
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for Smalltalk"""
def __init__(self, langid):
super(SyntaxData, self).__init__(langid)
# Setup
self.SetLexer(stc.STC_LEX_SMALLTALK)
def GetKeywords(self):
"""Returns Specified Keywords List """
        return [ST_KEYWORDS]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
    def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
return [u'\"', u'\"']
#---- Syntax Modules Internal Functions ----#
def KeywordString():
"""Returns the specified Keyword String
@note: not used by most modules
"""
return ST_KEYWORDS[1]
#---- End Syntax Modules Internal Functions ----#
|
maniteja123/numdifftools
|
conda_recipe/run_test.py
|
Python
|
bsd-3-clause
| 40 | 0.025 |
import numdifftools
numdifftools.test()
|
Hattivat/hypergolic-django
|
hypergolic/catalog/urls/power_cycle_urls.py
|
Python
|
agpl-3.0
| 826 | 0.001211 |
from django.conf.urls import url
from ..views import (PowerCycleListView, PowerCycleCreateView, PowerCycleDetailView,
PowerCycleUpdateView, PowerCycleDeleteView)
from django.contrib.auth.decorators import login_required
urlpatterns = [
url(r'^create/$', # NOQA
login_required(PowerCycleCreateView.as_view()),
name="power_cycle_create"),
url(r'^(?P<pk>.+)/update/$',
login_required(PowerCycleUpdateView.as_view()),
name="power_cycle_update"),
url(r'^(?P<pk>.+)/delete/$',
        login_required(PowerCycleDeleteView.as_view()),
name="power_cycle_delete"),
url(r'^(?P<pk>.+)/$',
PowerCycleDetailView.as_view(),
name="pow
|
er_cycle_detail"),
url(r'^$',
PowerCycleListView.as_view(),
name="power_cycle_list"),
]
|
AdrianGaudebert/configman
|
configman/converters.py
|
Python
|
bsd-3-clause
| 15,400 | 0.002013 |
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# K Lars Lohn, lars@mozilla.com
# Peter Bengtsson, peterbe@mozilla.com
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import sys
import re
import datetime
import types
import inspect
import collections
import json
from required_config import RequiredConfig
from namespace import Namespace
from .datetime_util import datetime_from_ISO_string as datetime_converter
from .datetime_util import date_from_ISO_string as date_converter
import datetime_util
#------------------------------------------------------------------------------
def option_value_str(an_option):
"""return an instance of Option's value as a string.
The option instance doesn't actually have to be from the Option class. All
it requires is that the passed option instance has a ``value`` attribute.
"""
if an_option.value is None:
return ''
try:
converter = to_string_converters[type(an_option.value)]
s = converter(an_option.value)
except KeyError:
if not isinstance(an_option.value, basestring):
s = unicode(an_option.value)
else:
s = an_option.value
if an_option.from_string_converter in converters_requiring_quotes:
s = "'''%s'''" % s
return s
#------------------------------------------------------------------------------
def str_dict_keys(a_dict):
"""return a modified dict where all the keys that are anything but str get
converted to str.
E.g.
>>> result = str_dict_keys({u'name': u'Peter', u'age': 99, 1: 2})
>>> # can't compare whole dicts in doctests
>>> result['name']
u'Peter'
>>> result['age']
99
>>> result[1]
2
The reason for this is that in Python <= 2.6.4 doing
``MyClass(**{u'name': u'Peter'})`` would raise a TypeError
Note that only unicode types are converted to str types.
The reason for that is you might have a class that looks like this::
class Option(object):
def __init__(self, foo=None, bar=None, **kwargs):
...
And it's being used like this::
Option(**{u'foo':1, u'bar':2, 3:4})
Then you don't want to change that {3:4} part which becomes part of
`**kwargs` inside the __init__ method.
Using integers as parameter keys is a silly example but the point is that
due to the python 2.6.4 bug only unicode keys are converted to str.
"""
new_dict = {}
for key in a_dict:
if isinstance(key, unicode):
new_dict[str(key)] = a_dict[key]
else:
new_dict[key] = a_dict[key]
return new_dict
#------------------------------------------------------------------------------
def io_converter(input_str):
""" a conversion function for to select stdout, stderr or open a file for
writing"""
if type(input_str) is str:
input_str_lower = input_str.lower()
if input_str_lower == 'stdout':
return sys.stdout
if input_str_lower == 'stderr':
return sys.stderr
return open(input_str, "w")
return input_str
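# Hypothetical examples (not in the original source): 'stdout'/'stderr' map to the
# corresponding streams, anything else is treated as a path opened for writing.
#
#     io_converter('stdout')          # -> sys.stdout
#     io_converter('STDERR')          # -> sys.stderr (lookup is case-insensitive)
#     io_converter('/tmp/app.log')    # -> open('/tmp/app.log', 'w')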
#------------------------------------------------------------------------------
def timedelta_converter(input_str):
"""a conversion function for time deltas"""
if isinstance(input_str, basestring):
days, hours, minutes, seconds = 0, 0, 0, 0
details = input_str.split(':')
if len(details) >= 4:
days = int(details[-4])
if len(details) >= 3:
hours = int(details[-3])
if len(details) >= 2:
minutes = int(details[-2])
if len(details) >= 1:
seconds = int(details[-1])
return datetime.timedelta(days=days,
hours=hours,
minutes=minutes,
seconds=seconds)
raise ValueError(input_str)
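# Hypothetical examples (not in the original source) of the "days:hours:minutes:seconds"
# format accepted above; missing leading fields default to zero:
#
#     timedelta_converter('1:2:30:15') == datetime.timedelta(days=1, hours=2, minutes=30, seconds=15)
#     timedelta_converter('90') == datetime.timedelta(seconds=90)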
#------------------------------------------------------------------------------
def boolean_converter(input_str):
""" a conversion function for boolean
"""
return input_str.lower() in ("true", "t", "1", "y", "yes")
#------------------------------------------------------------------------------
import __builtin__
_all_named_builtins = dir(__builtin__)
def class_converter(input_str):
""" a conversion that will import a module and class name
"""
if not input_str:
return None
if '.' not in input_str and input_str in _all_named_builtins:
return eval(input_str)
parts = [x.strip() for x in input_str.split('.') if x.strip()]
try:
# first try as a complete module
package = __import__(input_str)
except ImportError:
# it must be a class from a module
if len(parts) == 1:
# since it has only one part, it must be a class from __main__
parts = ('__main__', input_str)
package = __import__('.'.join(parts[:-1]), globals(), locals(), [])
obj = package
for name in parts[1:]:
obj = getattr(obj, name)
return obj
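# Hypothetical examples (not from the original source) of what class_converter accepts:
# a builtin name, a dotted module.Class path, or a plain module path.
#
#     class_converter('int') is int                   # builtin lookup
#     class_converter('collections.OrderedDict')      # class from a module
#     class_converter('json')                         # a whole module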
#------------------------------------------------------------------------------
def classes_in_namespaces_converter(template_for_namespace="cls%d",
name_of_class_option='cls',
instantiate_classes=False):
"""take a comma delimited list of class names, convert each class name
into an actual class as an option within a numbered namespace. This
function creates a closure over a new function. That new function,
    in turn creates a class derived from RequiredConfig. The inner function,
'class_list_converter', populates the InnerClassList with a Namespace for
each of the classes in the class list. In addition, it puts the each class
itself into the subordinate Namespace. The requirement discovery mechanism
of configman then reads the InnerClassList's requried config, pulling in
the namespaces and associated classes within.
For example, if we have a class list like this: "Alpha, Beta", then this
converter will add the following Namespaces and options to the
configuration:
"cls0" - the subordinate Namespace for Alpha
"cls0.cls" - the option containing the class Alpha itself
"cls1" - the subordinate Namespace for Beta
"cls1.cls" - the option containing the class Beta itself
Optionally, the 'class_list_converter' inner function can embue the
InnerClassList's subordinate namespaces with aggregates that will
instantiate classes from the class list. This is a convenience to
|
emijrp/pywikibot-core
|
pywikibot/i18n.py
|
Python
|
mit
| 21,745 | 0.000368 |
# -*- coding: utf-8 -*-
"""
Various i18n functions.
Helper functions for both the internal translation system
and for TranslateWiki-based translations.
By default messages are assumed to reside in a package called
'scripts.i18n'. In pywikibot 2.0, that package is not packaged
with pywikibot, and pywikibot 2.0 does not have a hard dependency
on any i18n messages. However, there are three user input questions
in pagegenerators which will use i18 messages if they can be loaded.
The default message location may be changed by calling
L{set_message_package} with a package name. The package must contain
an __init__.py, and a message bundle called 'pywikibot' containing
messages. See L{twntranslate} for more information on the messages.
"""
#
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import sys
import re
import locale
import json
import os
import pkgutil
from collections import defaultdict
from pywikibot import Error
from .plural import plural_rules
import pywikibot
from . import config2 as config
if sys.version_info[0] > 2:
basestring = (str, )
PLURAL_PATTERN = r'{{PLURAL:(?:%\()?([^\)]*?)(?:\)d)?\|(.*?)}}'
# Package name for the translation messages. The messages data must loaded
# relative to that package name. In the top of this package should be
# directories named after for each script/message bundle, and each directory
# should contain JSON files called <lang>.json
_messages_package_name = 'scripts.i18n'
# Flag to indicate whether translation messages are available
_messages_available = None
# Cache of translated messages
_cache = defaultdict(dict)
def set_messages_package(package_name):
"""Set the package name where i18n messages are located."""
global _messages_package_name
global _messages_available
_messages_package_name = package_name
_messages_available = None
def messages_available():
"""
Return False if there are no i18n messages available.
To determine if messages are available, it looks for the package name
set using L{set_messages_package} for a message bundle called 'pywikibot'
containing messages.
@rtype: bool
"""
global _messages_available
if _messages_available is not None:
return _messages_available
try:
__import__(_messages_package_name)
except ImportError:
_messages_available = False
return False
_messages_available = True
return True
def _altlang(code):
"""Define fallback languages for particular languages.
If no translation is available to a specified language, translate() will
try each of the specified fallback languages, in order, until it finds
one with a translation, with 'en' and '_default' as a last resort.
For example, if for language 'xx', you want the preference of languages
to be: xx > fr > ru > en, you let this method return ['fr', 'ru'].
This code is used by other translating methods below.
@param code: The language code
@type code: string
@return: language codes
@rtype: list of str
"""
# Akan
if code in ['ak', 'tw']:
return ['ak', 'tw']
# Amharic
if code in ['aa', 'ti']:
return ['am']
# Arab
if code in ['arc', 'arz', 'so']:
return ['ar']
if code == 'kab':
return ['ar', 'fr']
# Bulgarian
if code in ['cu', 'mk']:
return ['bg', 'sr', 'sh']
# Czech
if code in ['cs', 'sk']:
return ['cs', 'sk']
# German
if code in ['bar', 'frr',
'ksh', 'pdc', 'pfl']:
return ['de']
if code == 'lb':
return ['de', 'fr']
if code in ['als', 'gsw']:
return ['als', 'gsw', 'de']
if code == 'nds':
return ['nds-nl', 'de']
if code in ['dsb', 'hsb']:
return ['hsb', 'dsb', 'de']
if code == 'sli':
return ['de', 'pl']
    if code == 'rm':
return ['de', 'it']
if code == 'stq':
return ['nds', 'de']
# Greek
if code in ['grc', 'pnt']:
return ['el']
# Esperanto
if code in ['io', 'nov']:
return ['eo']
# Spanish
if code in ['an', 'arn', 'ast', 'ay', 'ca', 'ext', 'lad', 'nah', 'nv', 'qu',
'yua']:
return ['es']
if code in ['gl', 'gn']:
return ['es', 'pt']
if code == 'eu':
return ['es', 'fr']
if code == 'cbk-zam':
return ['es', 'tl']
# Estonian
if code in ['fiu-vro', 'vro']:
return ['fiu-vro', 'vro', 'et']
if code == 'liv':
return ['et', 'lv']
# Persian (Farsi)
if code == 'ps':
return ['fa']
if code in ['glk', 'mzn']:
return ['glk', 'mzn', 'fa', 'ar']
# Finnish
if code == 'vep':
return ['fi', 'ru']
if code == 'fit':
return ['fi', 'sv']
# French
if code in ['bm', 'br', 'ht', 'kg', 'ln', 'mg', 'nrm', 'pcd',
'rw', 'sg', 'ty', 'wa']:
return ['fr']
if code == 'oc':
return ['fr', 'ca', 'es']
if code in ['co', 'frp']:
return ['fr', 'it']
# Hindi
if code in ['sa']:
return ['hi']
if code in ['ne', 'new']:
return ['ne', 'new', 'hi']
if code in ['bh', 'bho']:
return ['bh', 'bho']
# Indonesian and Malay
if code in ['ace', 'bug', 'bjn', 'id', 'jv', 'ms', 'su']:
return ['id', 'ms', 'jv']
if code == 'map-bms':
return ['jv', 'id', 'ms']
# Inuit languages
if code in ['ik', 'iu']:
return ['iu', 'kl']
if code == 'kl':
return ['da', 'iu', 'no', 'nb']
# Italian
if code in ['eml', 'fur', 'lij', 'lmo', 'nap', 'pms', 'roa-tara', 'sc',
'scn', 'vec']:
return ['it']
# Lithuanian
if code in ['bat-smg', 'sgs']:
return ['bat-smg', 'sgs', 'lt']
# Latvian
if code == 'ltg':
return ['lv']
# Dutch
if code in ['af', 'fy', 'li', 'pap', 'srn', 'vls', 'zea']:
return ['nl']
    if code == 'nds-nl':
return ['nds', 'nl']
# Polish
if code in ['csb', 'szl']:
return ['pl']
# Portuguese
if code in ['fab', 'mwl', 'tet']:
return ['pt']
# Romanian
if code in ['roa-rup', 'rup']:
return ['roa-rup', 'rup', 'ro']
if code == 'mo':
return ['ro']
# Russian and Belarusian
if code in ['ab', 'av', 'ba', 'bxr', 'ce', 'cv', 'inh', 'kk', 'koi', 'krc',
'kv', 'ky', 'lbe', 'lez', 'mdf', 'mhr', 'mn', 'mrj', 'myv',
'os', 'sah', 'tg', 'udm', 'uk', 'xal']:
return ['ru']
if code in ['kbd', 'ady']:
return ['kbd', 'ady', 'ru']
if code == 'tt':
return ['tt-cyrl', 'ru']
if code in ['be', 'be-x-old', 'be-tarask']:
return ['be', 'be-x-old', 'be-tarask', 'ru']
if code == 'kaa':
return ['uz', 'ru']
# Serbocroatian
if code in ['bs', 'hr', 'sh']:
return ['sh', 'hr', 'bs', 'sr', 'sr-el']
if code == 'sr':
return ['sr-el', 'sh', 'hr', 'bs']
# Tagalog
if code in ['bcl', 'ceb', 'ilo', 'pag', 'pam', 'war']:
return ['tl']
# Turkish and Kurdish
if code in ['diq', 'ku']:
return ['ku', 'ku-latn', 'tr']
if code == 'gag':
return ['tr']
if code == 'ckb':
return ['ku']
# Ukrainian
if code in ['crh', 'crh-latn']:
return ['crh', 'crh-latn', 'uk', 'ru']
if code in ['rue']:
return ['uk', 'ru']
# Chinese
if code in ['zh-classical', 'lzh', 'minnan', 'zh-min-nan', 'nan', 'zh-tw',
'zh', 'zh-hans']:
return ['zh', 'zh-hans', 'zh-tw', 'zh-cn', 'zh-classical', 'lzh']
if code in ['cdo', 'gan', 'hak', 'ii', 'wuu', 'za', 'zh-classical', 'lzh',
'zh-cn', 'zh-yue', 'yue']:
        return ['zh', 'zh-hans', 'zh-cn', 'zh-tw', 'zh-classical', 'lzh']
# Scandinavian languages
if code in ['da', 'sv']:
return ['da', 'no', 'nb', 'sv', 'nn']
if code in ['fo', 'is']:
return ['da', 'no', 'nb', 'nn', 'sv']
if code == 'nn':
return ['no', 'nb', 'sv', 'da']
if code in ['no', 'nb']:
return ['no', 'nb', 'da', 'n
|
gchinellato/Self-Balance-Robot
|
nfs-server/modules/Motion/Motor/motor.py
|
Python
|
gpl-3.0
| 3,971 | 0.007555 |
#!/usr/bin/python
"""
*************************************************
* @Project: Self Balance
* @Platform: Raspberry PI 2 B+
* @Description: Motor module
* DC Motor with gearbox/encoder
* Motor driver VNH2SP30
* @Owner: Guilherme Chinellato
* @Email: guilhermechinellato@gmail.com
*************************************************
"""
import RPi.GPIO as GPIO
import time
from Motion.constants import *
from Utils.gpio_mapping import *
from Utils.traces.trace import *
class Motor():
def __init__(self, name, pinPWM, pinCW, pinCCW, debug=0):
self.debug = debug
self.name = name
self.pinPWM = pinPWM
self.pinCW = pinCW
self.pinCCW = pinCCW
        #Set up BCM GPIO numbering
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
#Set GPIO as output
GPIO.setup(pinPWM, GPIO.OUT)
GPIO.setup(pinCW, GPIO.OUT)
GPIO.setup(pinCCW, GPIO.OUT)
GPIO.output(pinCW, False)
GPIO.output(pinCCW, False)
#Set GPIO as PWM output
self._motorPWM = GPIO.PWM(pinPWM, PWM_FREQ)
logging.info(("Motor " + str(name) + " module initialized"))
def start(self):
'''Start PWM (stopped)'''
self._motorPWM.start(0)
if (self.debug & MODULE_MOTION_MOTOR):
logging.debug(("Motor %s started" % (self.name)))
def stop(self):
'''Stop motor (speed to zero), it is not necessary to restart the motor'''
self._motorPWM.ChangeDutyCycle(0)
if (self.debug & MODULE_MOTION_MOTOR):
logging.debug(("Motor %s stopepd" % (self.name)))
def shutdown(self):
'''Disable motor, it is not necessary to restart the motor'''
self._motorPWM.stop()
GPIO.cleanup()
if (self.debug & MODULE_MOTION_MOTOR):
logging.debug(("Motor %s is down" % (self.name)))
def setSpeed(self, direction="", pwm=0):
'''Set motor speed'''
if direction == "CW":
GPIO.output(self.pinCW, True)
GPIO.output(self.pinCCW, False)
elif direction == "CCW":
GPIO.output(self.pinCW, False)
GPIO.output(self.pinCCW, True)
else:
GPIO.output(self.pinCW, False)
GPIO.output(self.pinCCW, False)
self._motorPWM.ChangeDutyCycle(pwm)
if (self.debug & MODULE_MOTION_MOTOR):
logging.debug(("Motor %s: Direction %s and Speed %d" % (self.name, direction, pwm)))
def TestMotor():
try:
setVerbosity("debug")
motorA = Motor("Left", MA_PWM_GPIO, MA_CLOCKWISE_GPIO, MA_ANTICLOCKWISE_GPIO, MODULE_MOTION_MOTOR)
motorB = Motor("Right", MB_PWM_GPIO, MB_CLOCKWISE_GPIO, MB_ANTICLOCKWISE_GPIO, MODULE_MOTION_MOTOR)
LP = 0.1
print "Start motor"
motorA.start()
motorB.start()
while True:
v = float((input("Inser PWM duty cycle: ")))
motorA.setSpeed(direction="CCW", pwm=v)
motorB.setSpeed(direction="CCW", pwm=v)
#motorA.setSpeed(direction="CW", pwm=10)
#motorB.setSpeed(direction="CW", pwm=10)
#time.sleep(1000)
'''for i in range(100):
print "Set speed CW: " + str(i)
motorA.setSpeed(direction="CW", pwm=i)
motorB.setSpeed(direction="CW", pwm=i)
time.sleep(LP)
for i in range(100):
print "Set speed CCW: " + str(i)
motorA.setSpeed(direction="CCW", pwm=i)
motorB.setSpeed(direction="CCW", pwm=i)
time.sleep(LP) '''
print "Stop motor"
motorA.setSpeed()
motorB.setSpeed()
motorA.stop()
motorB.stop()
except KeyboardInterrupt:
print "Shutdown motor"
motorA.shutdown()
motorB.shutdown()
if __name__ == '__main__':
TestMotor()
|
DIRACGrid/RESTDIRAC
|
RESTSystem/private/RESTApp.py
|
Python
|
gpl-3.0
| 2,840 | 0.044366 |
import ssl
import sys
from tornado import web, httpserver, ioloop, process, autoreload
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from RESTDIRAC.RESTSystem.Base.RESTHandler import RESTHandler
from RESTDIRAC.ConfigurationSystem.Client.Helpers import RESTConf
class RESTApp( object ):
def __init__( self ):
self.__handlers = {}
def _logRequest( self, handler ):
status = handler.get_status()
if status < 400:
logm = gLogger.notice
elif status < 500:
logm = gLogger.warn
else:
logm = gLogger.error
request_time = 1000.0 * handler.request.request_time()
logm( "%d %s %.2fms" % ( status, handler._request_summary(), request_time ) )
def __reloadAppCB( self ):
gLogger.notice( "\n !!!!!! Reloading web app...\n" )
def bootstrap( self ):
gLogger.always( "\n === Bootstrapping REST Server === \n" )
ol = ObjectLoader( [ 'DIRAC', 'RESTDIRAC' ] )
result = ol.getObjects( "RESTSystem.API", parentClass = RESTHandler, recurse = True )
if not result[ 'OK' ]:
return result
self.__handlers = result[ 'Value' ]
if not self.__handlers:
return S_ERROR( "No handlers found" )
self.__routes = [ ( self.__handlers[ k ].getRoute(), self.__handlers[k] ) for k in self.__handlers if self.__handlers[ k ].getRoute() ]
gLogger.info( "Routes found:" )
for t in sorted( self.__routes ):
gLogger.info( " - %s : %s" % ( t[0], t[1].__name__ ) )
balancer = RESTConf.balancer()
kw = dict( debug = RESTConf.debug(), log_function = self._logRequest )
    if balancer and RESTConf.numProcesses() not in ( 0, 1 ):
      process.fork_processes( RESTConf.numProcesses(), max_restarts = 0 )
kw[ 'debug' ] = False
if kw[ 'debug' ]:
gLogger.always( "Starting in debug mode" )
    self.__app = web.Application( self.__routes, **kw )
port = RESTConf.port()
if balancer:
gLogger.notice( "Configuring REST HTTP service for balancer %s on port %s" % ( balancer, port ) )
self.__sslops = False
else:
gLogger.notice( "Configuring REST HTTPS service on port %s" % port )
self.__sslops = dict( certfile = RESTConf.cert(),
keyfile = RESTConf.key(),
cert_reqs = ssl.CERT_OPTIONAL,
ca_certs = RESTConf.generateCAFile() )
self.__httpSrv = httpserver.HTTPServer( self.__app, ssl_options = self.__sslops )
self.__httpSrv.listen( port )
return S_OK()
def run( self ):
port = RESTConf.port()
if self.__sslops:
url = "https://0.0.0.0:%s" % port
else:
url = "http://0.0.0.0:%s" % port
gLogger.always( "Starting REST server on %s" % url )
autoreload.add_reload_hook( self.__reloadAppCB )
ioloop.IOLoop.instance().start()
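# Hypothetical usage sketch (not part of the original file): a service script would
# typically bootstrap the app (discovering RESTHandler subclasses and building the
# Tornado routes), then hand control to the IOLoop:
#
#     app = RESTApp()
#     result = app.bootstrap()
#     if not result['OK']:
#         gLogger.fatal(result['Message'])
#     else:
#         app.run()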
|
alipsgh/tornado
|
drift_detection/page_hinkley.py
|
Python
|
mit
| 2,052 | 0.001462 |
"""
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
---
*** The Page Hinkley (PH) Method Implementation ***
Paper: Page, Ewan S. "Continuous inspection schemes."
Published in: Biometrika 41.1/2 (1954): 100-115.
URL: http://www.jstor.org/stable/2333009
"""
from dictionary.tornado_dictionary import TornadoDic
from drift_detection.detector import SuperDetector
class PH(SuperDetector):
"""The Page Hinkley (PH) drift detection method class."""
DETECTOR_NAME = TornadoDic.PH
def __init__(self, min_instance=30, delta=0.005, lambda_=50, alpha=1 - 0.0001):
super().__init__()
self.MINIMUM_NUM_INSTANCES = min_instance
self.m_n = 1
self.x_mean = 0.0
        self.sum = 0.0
self.delta = delta
self.lambda_ = lambda_
self.alpha = alpha
def run(self, pr):
pr = 1 if pr is False else 0
warning_status = False
drift_status = False
# 1. UPDATING STATS
self.x_mean = self.x_mean + (pr - self.x_mean) / self.m_n
self.sum = self.alpha * self.sum + (pr - self.x_mean - self.delta)
self.m_n += 1
# 2. UPDATING WARNING AND DRIFT STATUSES
if self.m_n >= self.MINIMUM_NUM_INSTANCES:
if self.sum > self.lambda_:
drift_status = True
return warning_status, drift_status
def reset(self):
super().reset()
self.m_n = 1
self.x_mean = 0.0
self.sum = 0.0
def get_settings(self):
return [str(self.MINIMUM_NUM_INSTANCES) + "." + str(self.delta) + "." +
str(self.lambda_) + "." + str(self.alpha),
"$n_{min}$:" + str(self.MINIMUM_NUM_INSTANCES) + ", " +
"$\delta$:" + str(self.delta).upper() + ", " +
"$\lambda$:" + str(self.lambda_).upper() + ", " +
"$\\alpha$:" + str(self.alpha).upper()]
|
andela/troupon
|
troupon/troupon/settings/development.py
|
Python
|
mit
| 420 | 0.002381 |
"""
Development specific settings for troupon project.
"""
from .base import *
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'troupon',
        'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
|
mitya57/debian-buildbot
|
buildbot/db/connector.py
|
Python
|
gpl-2.0
| 5,291 | 0.000756 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import textwrap
from buildbot import config
from buildbot.db import buildrequests
from buildbot.db import builds
from buildbot.db import buildsets
from buildbot.db import buildslaves
from buildbot.db import changes
from buildbot.db import enginestrategy
from buildbot.db import model
from buildbot.db import pool
from buildbot.db import schedulers
from buildbot.db import sourcestamps
from buildbot.db import sourcestampsets
from buildbot.db import state
from buildbot.db import users
from twisted.application import internet
from twisted.application import service
from twisted.internet import defer
from twisted.python import log
class DatabaseNotReadyError(Exception):
pass
upgrade_message = textwrap.dedent("""\
The Buildmaster database needs to be upgraded before this version of
buildbot can run. Use the following command-line
buildbot upgrade-master path/to/master
to upgrade the database, and try starting the buildmaster again. You may
want to make a backup of your buildmaster before doing so.
""").strip()
class DBConnector(config.ReconfigurableServiceMixin, service.MultiService):
# The connection between Buildbot and its backend database. This is
# generally accessible as master.db, but is also used during upgrades.
#
# Most of the interesting operations available via the connector are
# implemented in connector components, available as attributes of this
# object, and listed below.
# Period, in seconds, of the cleanup task. This master will perform
# periodic cleanup actions on this schedule.
CLEANUP_PERIOD = 3600
def __init__(self, master, basedir):
service.MultiService.__init__(self)
self.setName('db')
self.master = master
self.basedir = basedir
# not configured yet - we don't build an engine until the first
# reconfig
self.configured_url = None
# set up components
self._engine = None # set up in reconfigService
self.pool = None # set up in reconfigService
self.model = model.Model(self)
self.changes = changes.ChangesConnectorComponent(self)
self.schedulers = schedulers.SchedulersConnectorComponent(self)
self.sourcestamps = sourcestamps.SourceStampsConnectorComponent(self)
self.sourcestampsets = sourcestampsets.SourceStampSetsConnectorComponent(self)
self.buildsets = buildsets.BuildsetsConnectorComponent(self)
self.buildrequests = buildrequests.BuildRequestsConnectorComponent(self)
self.state = state.StateConnectorComponent(self)
self.builds = builds.BuildsConnectorComponent(self)
self.buildslaves = buildslaves.BuildslavesConnectorComponent(self)
self.users = users.UsersConnectorComponent(self)
        self.cleanup_timer = internet.TimerService(self.CLEANUP_PERIOD,
                                                   self._doCleanup)
self.cleanup_timer.setServiceParent(self)
def setup(self, check_version=True, verbose=True):
db_url = self.configured_url = self.master.config.db['db_url']
log.msg("Setting up database with URL %r" % (db_url,))
# set up the engine and pool
self._engine = enginestrategy.create_engine(db_url,
basedir=self.basedir)
self.pool = pool.DBThreadPool(self._engine, verbose=verbose)
# make sure the db is up to date, unless specifically asked not to
if check_version:
d = self.model.is_current()
def check_current(res):
if not res:
for l in upgrade_message.split('\n'):
log.msg(l)
raise DatabaseNotReadyError()
d.addCallback(check_current)
else:
d = defer.succeed(None)
return d
def reconfigService(self, new_config):
# double-check -- the master ensures this in config checks
assert self.configured_url == new_config.db['db_url']
return config.ReconfigurableServiceMixin.reconfigService(self,
new_config)
def _doCleanup(self):
"""
Perform any periodic database cleanup tasks.
@returns: Deferred
"""
# pass on this if we're not configured yet
if not self.configured_url:
return
d = self.changes.pruneChanges(self.master.config.changeHorizon)
d.addErrback(log.err, 'while pruning changes')
return d
|
yuhangwang/ninjag-python
|
test/frontend/build_dep/test_5.py
|
Python
|
mit
| 304 | 0 |
import ninjag
from ninjag.tk.ioTK import read_all
def test():
f_input = "input/in5.yaml"
f_answer = "output/out5.ninja"
f_solution = "solution/sol5.ninja"
ninjag.main(f_answer, [f_input])
answer = read_all(f_answer)
solution = read_all(f_solution)
    assert answer == solution
|
gunan/tensorflow
|
tensorflow/python/keras/saving/saved_model/load.py
|
Python
|
apache-2.0
| 40,001 | 0.006875 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras SavedModel deserialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import types
from tensorflow.python.eager import context
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.keras.saving.saved_model import constants
from tensorflow.python.keras.saving.saved_model import json_utils
from tensorflow.python.keras.saving.saved_model import utils
from tensorflow.python.keras.saving.saved_model.serialized_attributes import CommonEndpoints
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import load as tf_load
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking.tracking import delete_tracking
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util.lazy_loader import LazyLoader
# To avoid circular dependencies between keras/engine and keras/saving,
# code in keras/saving must delay imports.
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
# pylint:disable=g-inconsistent-quotes
models_lib = LazyLoader("models_lib", globals(),
"tensorflow.python.keras.models")
base_layer = LazyLoader(
"base_layer", globals(),
"tensorflow.python.keras.engine.base_layer")
layers_module = LazyLoader(
"layers_module", globals(),
"tensorflow.python.keras.layers")
input_layer = LazyLoader(
"input_layer", globals(),
"tensorflow.python.keras.engine.input_layer")
network_lib = LazyLoader(
"network_lib", globals(),
"tensorflow.python.keras.engine.network")
training_lib = LazyLoader(
"training_lib", globals(),
"tensorflow.python.keras.engine.training")
training_lib_v1 = LazyLoader(
"training_lib_v1", globals(),
"tensorflow.python.keras.engine.training_v1")
metrics = LazyLoader("metrics", globals(),
"tensorflow.python.keras.metrics")
recurrent = LazyLoader(
"recurrent", globals(),
"tensorflow.python.keras.layers.recurrent")
# pylint:enable=g-inconsistent-quotes
PUBLIC_ATTRIBUTES = CommonEndpoints.all_functions.union(
CommonEndpoints.all_checkpointable_objects)
PUBLIC_ATTRIBUTES.add(constants.KERAS_ATTR)
KERAS_OBJECT_IDENTIFIERS = (
'_tf_keras_layer', '_tf_keras_input_layer', '_tf_keras_network',
'_tf_keras_model', '_tf_keras_sequential', '_tf_keras_metric',
'_tf_keras_rnn_layer')
def load(path, compile=True): # pylint: disable=redefined-builtin
"""Loads Keras objects from a SavedModel.
Any Keras layer or model saved to the SavedModel will be loaded back
as Keras objects. Other objects are loaded as regular trackable objects (same
as `tf.saved_model.load`).
Currently, Keras saving/loading only retains the Keras object's weights,
losses, and call function.
The loaded model can be re-compiled, but the original optimizer, compiled loss
functions, and metrics are not retained. This is temporary, and `model.save`
will soon be able to serialize compiled models.
Args:
path: Path to SavedModel.
compile: If true, compile the model after loading it.
Returns:
Object loaded from SavedModel.
"""
# TODO(kathywu): Add saving/loading of optimizer, compiled losses and metrics.
# TODO(kathywu): Add code to load from objects that contain all endpoints
model = tf_load.load_internal(path, loader_cls=KerasObjectLoader)
# pylint: disable=protected-access
if isinstance(model, training_lib.Model) and compile:
# TODO(kathywu): Use compiled objects from SavedModel, instead of
# creating new objects from the training config.
training_config = model._serialized_attributes['metadata'].get(
'training_config', None)
if training_config is not None:
model.compile(**saving_utils.compile_args_from_training_config(
training_config))
else:
logging.warning('No training configuration found in save file, so the '
'model was *not* compiled. Compile it manually.')
# pylint: enable=protected-access
# Force variables and resources to initialize.
if not context.executing_eagerly():
sess = backend.get_session() # Variables are initialized by this call.
sess.run(ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS))
return model
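# Hypothetical usage sketch (not part of the original file); the path below is an
# assumption: any directory produced by `model.save(path, save_format='tf')` works.
#
#     model = load('/tmp/my_saved_model', compile=True)
#     model.predict(...)  # weights, losses and the call function are restored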
def _is_graph_network(layer):
"""Determines whether the layer is a graph network."""
# pylint: disable=protected-access
if isinstance(layer, RevivedNetwork):
return False
elif isinstance(layer, network_lib.Network):
return (layer._is_graph_network or
isinstance(layer, models_lib.Sequential))
return False
class KerasObjectLoader(tf_load.Loader):
"""Loader that recreates Keras objects (e.g. layers, models).
Layers and models are revived from either the config or SavedModel following
these rules:
1. If object is a graph network (i.e. Sequential or Functional) then it will
be initialized using the structure from the config only after the children
layers have been created. Graph networks must be initialized with inputs
and outputs, so all child layers must be created beforehand.
2. If object's config exists and the class can be found, then revive from
config.
3. Object may have already been created if its parent was revived from config.
In this case, do nothing.
4. If nothing of the above applies, compose the various artifacts from the
SavedModel to create a subclassed layer or model. At this time, custom
metrics are not supported.
"""
def __init__(self, *args, **kwargs):
# Maps node id -> (node, revive setter function)
# Nodes recreated from the config may generate other nodes. This list
# records all nodes that were generated directly/indirectly from the config,
# so that they do not get recreated multiple times.
self._nodes_recreated_from_config = {}
self._all_nodes_recreated_from_config = (
object_identity.ObjectIdentityWeakSet())
# Store all node ids that have already been traversed when tracking nodes
# that were recreated from the config.
self._traversed_nodes_from_config = []
# Maps model id -> (blank model obj, list of child layer or their node ids)
# This tracks all layers in functional and sequential models. These models
# are only reconstructed after all of their child layers have been created.
self.model_layer_dependencies = {}
self._models_to_reconstruct = []
super(KerasObjectLoader, self).__init__(*args, **kwargs)
# Now that the node object has been fully loaded, and the checkpoint has
# been restored, the object no longer needs to track objects added from
# SerializedAttributes. (Note that saving a training checkpoint still
# functions correctly, beca
|
gnu-user/mcsc-6030-project
|
codes/benchmarks/baseline.py
|
Python
|
gpl-3.0
| 1,605 | 0 |
#!/usr/bin/env python2
###############################################################################
#
# Set a baseline for all benchmarks using numpy's serial matrix multiplication
#
# Copyright (C) 2015, Jonathan Gillett
# All rights reserved.
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import numpy as np
from time import time
from docopt import docopt
from helpers import gen_matrix, usage, schema
from schema import SchemaError
if __name__ == '__main__':
args = docopt(usage)
try:
        args = schema.validate(args)
except SchemaError as e:
exit(e)
# Generate the dynamic matrices for the test
dim, dtype, mtype = args['DIM'], args['--dtype'], args['--mtype']
A = gen_matrix(dim, dim, dtype, mtype)
B = gen_matrix(dim, dim, dtype, mtype)
# Calculate the execution time for the baseline
start = time()
C = np.dot(A, B)
end = time()
print "%0.3f" % (end-start,)
|
bluszcz/basketpinfo
|
python/process_to_rrdtool.py
|
Python
|
bsd-3-clause
| 812 | 0.011084 |
#!/usr/bin/env python
"""
basketPInfo data procesors (rrdtool output)
Rafal Zawadzki <bluszcz@bluszcz.net>
BSD License (license.txt)
"""
import sys
def exit_failure():
" Nice info on failur
|
e "
print "usage: %s int\n" % sys.argv[0]
print "int should be 2 (humidity) or 3 (temperature)"
    sys.exit(-1)
if len(sys.argv)!=2:
exit_failure()
ARG = int(sys.argv[1])
if ARG not in (2, 3):
exit_failure()
FILENAME = "/home/pi/logs/temphum.txt"
HANDLER = open(FILENAME)
def transpose_data(data):
" Parses data "
return [ll.lstrip('\t') for ll in data.strip().split(',')]
for line in (transpose_data(l) for l in HANDLER.xreadlines()):
if len(line)>1:
try:
print 'rrdtool update temp.rrd %s:%s' % (line[0], line[ARG])
        except (IndexError, KeyError):
pass
|
vicnet/weboob
|
modules/ing/api/login.py
|
Python
|
lgpl-3.0
| 5,888 | 0.002887 |
# -*- coding: utf-8 -*-
# Copyright(C) 2019 Sylvie Ye
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from PIL import Image, ImageFilter
import random
from weboob.tools.captcha.virtkeyboard import SimpleVirtualKeyboard
from weboob.browser.pages import JsonPage
from weboob.browser.filters.json import Dict
class INGVirtKeyboard(SimpleVirtualKeyboard):
# from parent
tile_margin = 10
convert = 'RGB'
# for children
safe_tile_margin = 10
small_img_size = (15, 14)
alter_img_params = {
'radius': 2,
'percent': 95,
'threshold': 3,
'limit_pixel': 200
}
# for matching_symbols_coords, indexes are cases place like this
# --- --- --- --- ---
# |0| |1| |2| |3| |4|
# --- --- --- --- ---
# --- --- --- --- ---
# |5| |6| |7| |8| |9|
# --- --- --- --- ---
matching_symbols_coords = {
'0': (3, 3, 93, 91),
'1': (99, 3, 189, 91),
'2': (196, 3, 286, 91),
'3': (293, 3, 383, 91),
'4': (390, 3, 480, 91),
'5': (3, 98, 93, 186),
'6': (99, 98, 189, 186),
'7': (196, 98, 286, 186),
'8': (293, 98, 383, 186),
'9': (390, 98, 480, 186),
}
symbols = {
'0': ('7b4989b431e631ec79df5d71aecb1a47','e2522e1f7476ad6430219a73b10799b0', 'f7db285c5c742c3a348e332c0e9f7f3e',),
'1': ('9f1b03aa9a6f9789714c38eb90a43a11', '86bc0e7e1173472928e746db874b38c3',),
'2': ('3a7d1ba32f4326a02f717f71262ba02b', 'afc2a00289ba9e362c4e9333c14a574a',),
'3': ('203bfd122f474eb9c5c278eeda01bed4', 'c1daa556a1eff1fd18817dbef39792f8',),
'4': ('c09b323e5a80a195d9cb0c3000f3d7ec', 'f020eaf7cdffefec065d3b2801ed73e2', '5e194b0aae3b8f02ebbf9cdec5c37239',),
'5': ('1749dc3f2e302cd3562a0558755ab030', 'b64163e3f5f7d83ff1baad8c4d1bc37b',),
'6': ('0888a7dc9085fcf09d56363ac253a54a', 'e269686d10f95678caf995de6834f74b', '8c505dad47cf6029921fca5fb4b0bc8d',),
'7': ('75aaa903b8277b82c458c3540208a009', 'e97b0c0e01d77dd480b8a5f5c138a268',),
'8': ('f5fa36d16f55b72ba988eb87fa1ed753', '118a52a6a480b5db5eabb0ea26196db3',),
'9': ('62f91d10650583cb6146d25bb9ac161d', 'fd81675aa1c26cbf5bb6c9f1bcdbbdf9',),
}
def __init__(self, file, cols, rows, browser):
# use matching_symbols_coords because margins between tiles are not equals
super(INGVirtKeyboard, self).__init__(file=file, cols=cols, rows=rows, matching_symbols_coords=self.matching_symbols_coords, browser=browser)
def process_tiles(self):
for tile in self.tiles:
# format tile object like:
# `tile.original_img`: original tile image size
# `tile.coords`: original tile image coords
# `tile.image`: resized and altered image tile
# `tile.md5`: image tile resized hash
tile.original_img = tile.image
tile.image = tile.image.resize(self.small_img_size, resample=Image.BILINEAR)
# convert to monochrome image
tile.image = tile.image.convert('L')
# See ImageFilter.UnsharpMask from Pillow
tile.image = tile.image.filter(ImageFilter.UnsharpMask(
radius=self.alter_img_params['radius'],
percent=self.alter_img_params['percent'],
threshold=self.alter_img_params['threshold'])
)
            tile.image = Image.eval(tile.image, lambda px: 0 if px <= self.alter_img_params['limit_pixel'] else 255)
def cut_tiles(self, tile_margin=None):
assert self.tiles, 'There are no tiles to process'
super(INGVirtKeyboard, self).cut_tiles(tile_margin)
# alter tile
self.process_tiles()
def password_tiles_coord(self, password):
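        # Match each password digit to a keyboard tile by its image hash and return a
        # random coordinate inside that tile as the click position.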
        password_tiles = []
for digit in password:
for tile in self.tiles:
if tile.md5 in self.symbols[digit]:
password_tiles.append(tile)
break
else:
# Dump file only when the symbol is not found
self.dump_tiles(self.path)
raise Exception("Symbol '%s' not found; all symbol hashes are available in %s"
% (digit, self.path))
formatted_password = []
for tile in password_tiles:
formatted_password.append([
random.uniform(tile.coords[0], tile.coords[2]),
random.uniform(tile.coords[1], tile.coords[3]),
])
return formatted_password
class LoginPage(JsonPage):
@property
def is_logged(self):
return 'firstName' in self.doc
def get_password_coord(self, img, password):
assert 'pinPositions' in self.doc, 'Virtualkeyboard position has failed'
assert 'keyPadUrl' in self.doc, 'Virtualkeyboard image url is missing'
pin_position = Dict('pinPositions')(self.doc)
image = BytesIO(img)
vk = INGVirtKeyboard(image, cols=5, rows=2, browser=self.browser)
password_random_coords = vk.password_tiles_coord(password)
# pin positions (website side) start at 1, our positions start at 0
return [password_random_coords[index-1] for index in pin_position]
|
jeremiahyan/odoo
|
addons/product_matrix/__manifest__.py
|
Python
|
gpl-3.0
| 888 | 0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': "Product Matrix",
'summary': """
Technical module: Matrix Implementation
""",
'description': """
Please refer to Sale Matrix or Purchase Matrix for the use of this module.
""",
'category': 'Sales/Sales',
'version': '1.0',
'depends': ['account'],
# Account dependency for section_and_note widget.
'data': [
'views/matrix_templates.xml',
],
'demo': [
'data/product_matrix_demo.xml',
],
|
'assets': {
'web.assets_backend': [
'product_matrix/static/src/js/section_and_note_widget.js',
            'product_matrix/static/src/scss/product_matrix.scss',
],
'web.assets_qweb': [
'product_matrix/static/src/xml/**/*',
],
},
'license': 'LGPL-3',
}
|
thast/EOSC513
|
DC/SimultaneousSources/Update_W_each_3it_5s_rademacher/Update_W_each_3it_5s_rademacher.py
|
Python
|
mit
| 8,698 | 0.022189 |
from SimPEG import Mesh, Regularization, Maps, Utils, EM
from SimPEG.EM.Static import DC
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import copy
#import pandas as pd
#from scipy.sparse import csr_matrix, spdiags, dia_matrix,diags
#from scipy.sparse.linalg import spsolve
from scipy.stats import norm,multivariate_normal
import sys
path ="../pymatsolver/"
path = "../../../Documents/pymatsolver/"
sys.path.append(path)
from pymatsolver import PardisoSolver
#from scipy.interpolate import LinearNDInterpolator, interp1d
#from sklearn.mixture import GaussianMixture
from SimPEG import DataMisfit, Regularization, Optimization, InvProblem, Directives, Inversion
import SimPEG
import scipy.sparse as sp
import os
import glob
#Remove older results
files = glob.glob('./*.npz')
for f in files:
os.remove(f)
#2D model
csx, csy, csz = 0.25,0.25,0.25
# Number of core cells in each direction
ncx, ncz = 123,41
# Number of padding cells to add in each direction
npad = 12
# Vectors of cell lengths in each direction
hx = [(csx,npad, -1.5),(csx,ncx),(csx,npad, 1.5)]
hz= [(csz,npad,-1.5),(csz,ncz)]
# Create mesh
mesh = Mesh.TensorMesh([hx, hz],x0="CN")
# Map mesh coordinates from local to UTM coordiantes
#mesh.x0[2] = mesh.x0[2]-mesh.vectorCCz[-npad-1]
mesh.x0[1] = mesh.x0[1]+csz/2.
#mesh.x0[0] = mesh.x0[0]+csx/2.
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#plt.gca().set_xlim([-20,20])
#plt.gca().set_ylim([-15,0])
#mesh.plotGrid()
#plt.gca().set_aspect('equal')
#plt.show()
print "Mesh Size: ", mesh.nC
#Model Creation
lnsig_air = 1e-8;
x0,z0, r0 = -6., -4., 3.
x1,z1, r1 = 6., -4., 3.
ln_sigback = -5.
ln_sigc = -3.
ln_sigr = -7.
noisemean = 0.
noisevar = 0.0
overburden_extent = 0.
ln_over = -4.
#m = (lnsig_background)*np.ones(mesh.nC);
#mu =np.ones(mesh.nC);
mtrue = ln_sigback*np.ones(mesh.nC) + norm(noisemean,noisevar).rvs(mesh.nC)
overb = (mesh.gridCC[:,1] >-overburden_extent) & (mesh.gridCC[:,1]<=0)
mtrue[overb] = ln_over*np.ones_like(mtrue[overb])+ norm(noisemean,noisevar).rvs(np.prod((mtrue[overb]).shape))
csph = (np.sqrt((mesh.gridCC[:,1]-z0)**2.+(mesh.gridCC[:,0]-x0)**2.))< r0
mtrue[csph] = ln_sigc*np.ones_like(mtrue[csph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[csph]).shape))
#Define the sphere limit
rsph = (np.sqrt((mesh.gridCC[:,1]-z1)**2.+(mesh.gridCC[:,0]-x1)**2.))< r1
mtrue[rsph] = ln_sigr*np.ones_like(mtrue[rsph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[rsph]).shape))
mtrue = Utils.mkvc(mtrue);
mesh.plotGrid()
plt.gca().set_xlim([-10,10])
plt.gca().set_ylim([-10,0])
xyzlim = np.r_[[[-10.,10.],[-10.,1.]]]
actind, meshCore = Utils.meshutils.ExtractCoreMesh(xyzlim,mesh)
plt.hist(mtrue[actind],bins =50,normed=True);
fig0 = plt.figure()
ax0 = fig0.add_subplot(111)
mm = meshCore.plotImage(mtrue[actind],ax = ax0)
plt.colorbar(mm[0])
ax0.set_aspect("equal")
#plt.show()
#Gradient array 1 2D
srclist = []
nSrc = 23
lines = 1
ylines = np.r_[0.]
xlines = np.r_[0.]
z = 0.
#xline
for k in range(lines):
for i in range(nSrc):
if i<=11:
locA = np.r_[-14.+1., z]
locB = np.r_[-8.+2.*i-1., z]
#M = np.c_[np.arange(-12.,-12+2*(i+1),2),np.ones(i+1)*z]
#N = np.c_[np.arange(-10.,-10+2*(i+1),2),np.ones(i+1)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
#print -locA,-locB,"\n",[-M,-N],"\n"
else:
locA = np.r_[-14.+2*(i-11)+1., z]
locB = np.r_[14.-1.,z]
#M = np.c_[np.arange(locA[0]+1.,12.,2),np.ones(nSrc-i)*z]
#N = np.c_[np.arange(locA[0]+3.,14.,2),np.ones(nSrc-i)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print "line2",locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
mapping = Maps.ExpMap(mesh)
survey = DC.Survey(srclist)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
problem.Solver = PardisoSolver
survey.dobs = survey.dpred(mtrue)
survey.std = 0.05*np.ones_like(survey.dobs)
survey.eps = 1e-5*np.linalg.norm(survey.dobs)
print '# of data: ', survey.dobs.shape
class SimultaneousSrc(DC.Src.BaseSrc):
"""
Dipole source
"""
QW = None
Q = None
W = None
def __init__(self, rxList,Q,W, **kwargs):
SimPEG.Survey.BaseSrc.__init__(self, rxList, **kwargs)
def eval(self, prob):
return self.QW
class SimultaneousRx(DC.Rx.BaseRx):
"""
SimultaneousRx receiver
"""
def __init__(self, locs, rxType='phi', **kwargs):
# We may not need this ...
SimPEG.Survey.BaseRx.__init__(self, locs, rxType)
@property
def nD(self):
"""Number of data in the receiver."""
return self.locs.shape[0]
# Not sure why ...
# return int(self.locs[0].size / 2)
def getP(self, mesh, Gloc):
return self.locs
P = []
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
P = rx.getP(mesh,'CC')
#Update W Inversion
nsubSrc = 5
m0 = (-5.)*np.ones(mapping.nP);
miter = m0
n_its = 50
InnerIt = 3
dmisfitsub = []
dmisfitall = []
#beta schedule
beta = 1.
betalist = [beta]
coolingFactor = 2.
coolingRate = 3
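# W is a random Rademacher (+/-1) weighting matrix: each of its nsubSrc columns mixes
# all survey sources into a single simultaneous source (used below as Q.dot(W)).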
W = np.random.randint(0, high=2, size=[survey.nSrc,nsubSrc])*2-1
print W
dmisAll = DataMisfit.l2_DataMisfit(survey)
dmisfitall.append(dmisAll.eval(m0)/survey.nD)
print "Starting Model Dmisfit compared to full dataset: ",dmisAll.eval(m0)/survey.nD
print "Check misfit with true model: ",dmisAll.eval(mtrue)/survey.nD
for it in range(n_its):
problem.unpair()
problem.pair(survey)
Q = problem.getRHS()
sub = problem.getRHS().dot(W)
rx_r = SimultaneousRx(locs=P)
srcList_r = []
for isrc in range(sub.shape[1]):
src_r = SimultaneousSrc([rx_r], Q=Q[:,isrc],W=W[:,isrc],QW =Q.dot(W)[:,isrc])
srcList_r.append(src_r)
survey_r = DC.Survey(srcList_r)
problem.unpair()
problem.pair(survey_r)
d = survey_r.dpred(mtrue)
survey_r.dobs = d
survey_r.std = np.ones_like(d)*0.05
survey_r.eps = 1e-5*np.linalg.norm(survey_r.dobs)
print '# of data: ', survey_r.dobs.shape
regmesh = mesh;
dmis = DataMisfit.l2_DataMisfit(survey_r)
reg = Regularization.Tikhonov(regmesh)#,mapping = mapping)#,indActive=actind)
reg.mref = m0
opt = Optimization.InexactGaussNewton(maxIter=1,tolX=1e-6)
opt.remember('xc')
invProb = InvProblem.BaseInvProblem(dmis, reg, opt)
#beta = Directives.BetaEstimate_ByEig(beta0= 10.,beta0_ratio=1e0)
reg.alpha_s = 1e-6;
invProb.beta = beta
#betaSched = Directives.BetaSchedule(coolingFactor=5, coolingRate=2)
#sav0 = Directives.SaveEveryIteration()
#sav1 = Directives.SaveModelEveryIteration()
#sav2 = Directives.SaveOutputDictEveryIteration()
inv = Inversion.BaseInversion(invProb)#, directiveList=[sav2])#[beta,betaSched])#sav0,sav1,
msimple = inv.run(miter);
beta = invProb.beta
if np.mod(it+1,coolingRate) ==0:
beta = beta/coolingFactor
betalist.append(beta)
miter = copy.deepcopy(msimple)
dmisfitsub.append(dmis.eval(msimple)/survey_r.nD)
print "Dmisfit compared to sub dataset: ",dmis.eval(msimple)/survey_r.nD
print "Check misfit with true model: ",dmis.eval(mtrue)/survey_r.nD
problem.unpair()
problem.pair(survey)
dmisAll = DataMisfit.l2_DataMisfit(survey)
dmisfitall.append(dmisAll.eval(msimple)/survey.nD)
print "Dmisfit compared to full dataset: ",dmisAll.eval
|
ContinuumBridge/hot_drinks_app
|
hot_drinks.py
|
Python
|
mit
| 13,490 | 0.005263 |
#!/usr/bin/env python
# hot_drinks.py
# Copyright (C) ContinuumBridge Limited, 2014-2015 - All Rights Reserved
# Written by Peter Claydon
#
# Default values:
config = {
"hot_drinks": True,
"name": "A Human Being",
"alert": True,
"ignore_time": 120,
"window": 360,
"threshold": 10,
"daily_report_time": "02:00",
"data_send_delay": 1
}
import sys
import os.path
import time
from cbcommslib import CbApp, CbClient
from cbconfig import *
import requests
import json
from twisted.internet import reactor
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from cbutils import nicetime
#from cbutils import timeCorrect
# Can be removed after all bridges are at a version that supports timeCorrect()
def timeCorrect():
if time.time() < 32000000:
return False
else:
return True
CONFIG_FILE = CB_CONFIG_DIR + "hot_drinks.config"
STATE_FILE = CB_CONFIG_DIR + "hot_drinks.state"
CID = "CID164" # Client ID
class HotDrinks():
def __init__(self):
self.bridge_id = "unconfigured"
self.kettleOn = False
self.kettleOffTime = 0
self.s = []
self.waiting = False
self.triggered = False
self.power = None
self.binary = []
self.sensorOnTimes = {}
self.counts = {
"drinksInDay": 0,
"kettlesInDay": 0
}
def initIDs(self, bridge_id, idToName):
self.idToName = idToName
self.bridge_id = bridge_id
self.startMonitor()
def addSensor(self, characteristic, sensorID):
if characteristic == "power":
self.power = sensorID
elif characteristic == "binary":
self.binary.append(sensorID)
self.sensorOnTimes[sensorID] = 0
self.cbLog("debug", "addSensor, sensorOnTimes: " + str(self.sensorOnTimes))
def monitor(self):
try:
values = {
"name": self.bridge_id + "/hot_drinks_in_day",
"points": [[int(now*1000), self.counts["drinksInDay"]]]
}
self.storeValues(values)
values = {
"name": self.bridge_id + "/kettles_in_day",
"points": [[int(now*1000), self.counts["kettlesInDay"]]]
}
self.storeValues(values)
self.counts["drinksInDay"] = 0
self.counts["kettlesInDay"] = 0
self.startMonitor()
except Exception as ex:
self.cbLog("warning", "monitor failed. Exception. Type: " + str(type(ex)) + "exception: " + str(ex.args))
def startMonitor(self):
try:
if not timeCorrect():
reactor.callLater(60, self.startMonitor)
now = time.strftime("%Y %b %d %H:%M", time.localtime()).split()
now[3] = config["daily_report_time"]
midnight_e = time.mktime(time.strptime(" ".join(now), "%Y %b %d %H:%M")) + 86400
wait = midnight_e - time.time() + 60
self.cbLog("debug", "monitor set for " + str(int(wait)) + " seconds")
reactor.callLater(wait, self.monitor)
except Exception as ex:
self.cbLog("warning", "startMonitor failed. Exception. Type: " + str(type(ex)) + "exception: " + str(ex.args))
def loadMonitor(self):
try:
if os.path.isfile(STATE_FILE):
with open(STATE_FILE, 'r') as f:
self.counts = json.load(f)
self.cbLog("debug", "Loaded saved counts: " + str(self.counts))
except Exception as ex:
self.cbLog("warning", "Problem loading stored counts. Exception. Type: " + str(type(ex)) + "exception: " + str(ex.args))
finally:
try:
os.remove(STATE_FILE)
except Exception as ex:
self.cbLog("debug", "Cannot remove stored counts file. Exception. Type: " + str(type(ex)) + "exception: " + str(ex.args))
def saveMonitor(self):
try:
with open(STATE_FILE, 'w') as f:
json.dump(self.counts, f)
self.cbLog("info", "Saved counts")
except Exception as ex:
self.cbLog("warning", "Problem saving counts. Type: " + str(type(ex)) + "exception: " + str(ex.args))
def onChange(self, sensor, timeStamp, value):
try:
#self.cbLog("debug", "onChange. sensor: " + self.idToName[sensor] + ", value: " + str(value) + ", time: " + nicetime(timeStamp) + ", kettleOn: " + str(self.kettleOn))
if not timeCorrect():
self.cbLog("info", "Data not processed as time is not correct")
return
if sensor == self.power:
if value > config["threshold"] and not self.kettleOn:
if timeStamp - self.kettleOffTime > config["ignore_time"]:
self.sensorOnTimes[sensor] = timeStamp
self.kettleOn = True
self.cbLog("debug", "kettle on")
values = {
"name": self.bridge_id + "/kettle",
"points": [[int(timeStamp*1000), 1]]
}
self.storeValues(values)
self.counts["kettlesInDay"] += 1
self.cbLog("debug", "kettlesInDay: " + str(self.counts["kettlesInDay"]))
elif value < config["threshold"] and self.kettleOn:
self.kettleOn = False
self.triggered = False
self.kettleOffTime = timeStamp
self.cbLog("debug", "kettle off")
elif sensor in self.binary and value == "on":
self.sensorOnTimes[sensor] = timeStamp
now = time.time()
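                # A hot drink is only counted when every monitored sensor (kettle power
                # plus the binary sensors) has fired within config["window"] seconds.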
trigger = True
#self.cbLog("debug", "onChange, sensorOnTimes: " + str(self.sensorOnTimes))
for t in self.sensorOnTimes:
if now - self.sensorOnTimes[t] > config["window"]:
trigger = False
if trigger and not self.triggered:
self.cbLog("debug", "triggered")
self.triggered = True
self.counts["drinksInDay"] += 1
self.cbLog("debug", "drinksInDay: " + str(self.counts["drinksInDay"]))
if config["alert"]:
msg = {"m": "alert",
"a": "Hot drinks being made by " + config["name"] + " at " + nicetime(now),
"t": now
}
self.client.send(msg)
|
self.cbLog("debug", "msg send to client: " + str(json.dumps(msg, indent=4)))
values = {
"name": self.bridge_id + "/hot_drinks",
"points": [[int(now*1000), 1]]
}
|
self.storeValues(values)
except Exception as ex:
self.cbLog("warning", "HotDrinks onChange encountered problems. Exception: " + str(type(ex)) + str(ex.args))
def sendValues(self):
msg = {"m": "data",
"d": self.s
}
self.cbLog("debug", "sendValues. Sending: " + str(json.dumps(msg, indent=4)))
self.client.send(msg)
self.s = []
self.waiting = False
def storeValues(self, values):
self.s.append(values)
if not self.waiting:
self.waiting = True
reactor.callLater(config["data_send_delay"], self.sendValues)
class App(CbApp):
def __init__(self, argv):
self.appClass = "monitor"
self.state = "stopped"
self.status = "ok"
self.devices = []
self.devServices = []
self.idToName = {}
self.hotDrinks = HotDrinks()
#CbApp.__init__ MUST be called
CbApp.__init__(self, argv)
def setState(self, action):
if action == "clear_error":
self.state = "running"
else:
self.state = action
msg = {"id": self.id,
"status": "state",
"stat
|
SMAC/corelib
|
smac/amqp/protocol.py
|
Python
|
gpl-3.0
| 13,170 | 0.008276 |
# Copyright (C) 2005-2010 MISG/ICTI/EIA-FR
# See LICENSE for details.
"""
Factories for AMQ clients, Thrift clients and SMAC Clients and servers.
@author: Jonathan Stoppani <jonathan.stoppani@edu.hefr.ch>
"""
import weakref
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet import defer, error
from txamqp.protocol import AMQClient
from txamqp.contrib.thrift.client import ThriftTwistedDelegate
from txamqp.queue import TimeoutDeferredQueue, Closed
from txamqp.contrib.thrift.transport import TwistedAMQPTransport
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from smac.python import log
from smac.amqp.models import Exchange, Queue, IAddress
from smac.conf import topology
from smac.modules import utils
class SMACServerFactory(object):
iprot_factory = TBinaryProtocol.TBinaryProtocolFactory()
oprot_factory = TBinaryProtocol.TBinaryProtocolFactory()
def __init__(self, client, channel=None):
self.client = client
self.channel = channel or 1
if client.check_0_8():
self.reply_to = "reply to"
else:
self.reply_to = "reply-to"
@defer.inlineCallbacks
def build_server(self, delegate, processor, handler, address, queues=None, standalone=True):
processor_name = processor.__name__
log.debug("Creating new server for {0} with ID {1}".format(
processor_name, address.instance))
address = IAddress(address)
if not queues:
queues = topology.queues
if isinstance(self.channel, int):
channel = yield self.client.channel(self.channel)
yield channel.channel_open()
else:
# Assume it's already open!
channel = self.channel
deferreds = []
# Declare all exchanges
exchanges = {}
for k, e in topology.exchanges.iteritems():
e = Exchange(channel, **e)
e.format_name(**dict(address))
e.declare()
exchanges[k] = e
self.responses = Exchange(channel, **topology.exchanges['responses'])
# Declare all queues
qs = []
for q in queues:
q = q.copy()
bindings = q.pop('bindings')
q = Queue(channel, **q)
q.format_name(**dict(address))
q.declare()
deferreds += [q.bind(exchanges[e], k.format(**dict(address))) for e, k in bindings]
qs.append(q)
# Wait for declarations and bindings
yield defer.DeferredList(deferreds)
log.debug("All queues and needed exchanges declared and bound, start listening")
tags = []
for queue in qs:
tag = yield queue.consume()
tags.append(tag)
@defer.inlineCallbacks
def destroy(ref):
log.debug("Server for {0} garbage collected, removing " \
"subscriptions".format(processor_name))
try:
yield defer.DeferredList([channel.basic_cancel(t) for t in tags])
except Exception as e:
pass
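        # Outside standalone mode, wrap the handler in a weakref proxy so the destroy()
        # callback above cancels the queue subscriptions once the handler is collected.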
if not standalone:
handler = weakref.proxy(handler, destroy)
processor = processor.Processor(handler)
for tag in tags:
queue = yield self.client.queue(tag)
self.get_next_message(channel, queue, processor, delegate)
def parse_message(self, msg, channel, queue, processor, delegate):
tag = msg.delivery_tag
try:
sender = msg.content[self.reply_to]
except KeyError:
sender = None
transport_in = TTransport.TMemoryBuffer(msg.content.body)
transport_out = TwistedAMQPTransport(channel, str(self.responses), sender)
iprot = self.iprot_factory.getProtocol(transport_in)
oprot = self.oprot_factory.getProtocol(transport_out)
d = processor.process(iprot, oprot)
d.addErrback(delegate.processing_error)
channel.basic_ack(tag, True)
self.get_next_message(channel, queue, processor, delegate)
def get_next_message(self, channel, queue, processor, delegate):
d = queue.get()
d.addCallback(self.parse_message, channel, queue, processor, delegate)
d.addErrback(self.catch_closed_queue, delegate)
d.addErrback(delegate.queue_error)
def catch_closed_queue(self, failure, delegate):
failure.trap(Closed)
delegate.queue_closed(failure)
class SMACClientFactory(object):
iprot_factory = TBinaryProtocol.TBinaryProtocolFactory()
oprot_factory = TBinaryProtocol.TBinaryProtocolFactory()
def __init__(self, client, channel=None):
self.client = client
self.client_lock = defer.DeferredLock()
self.clients = {}
if client.check_0_8():
self.reply_to = "reply to"
else:
self.reply_to = "reply-to"
self.channel = channel or 1
@defer.inlineCallbacks
def build_client(self, address, service=None, distribution=None, cache=True):
yield self.client_lock.acquire()
try:
address = IAddress(address)
if not service:
service = utils.get_module_from_address(address)
service_name = service.__name__ + address.routing_key
distribution = distribution or address.distribution
if not distribution:
raise ValueError("The distribution mode was not defined and " \
"could not be inferred from the address.")
key = (service, address.routing_key, distribution)
try:
client = self.clients[key]
except KeyError:
log.debug("Creating new client for {0} with routing key {1} and distribution {2}".format(
service.__name__, address.routing_key, distribution))
if isinstance(self.channel, int):
channel = yield self.client.channel(self.channel)
yield channel.channel_open()
else:
# Assume it's already open!
channel = self.channel
|
response_exchange = Exchange(channel, **topology.exchanges['responses'])
                response_queue = Queue(channel, exclusive=True, auto_delete=True)
yield response_queue.declare()
yield response_queue.bind(response_exchange)
consumer_tag = yield response_queue.consume()
service_exchange = Exchange(channel, **topology.exchanges[distribution])
service_exchange.format_name(**dict(address))
yield service_exchange.declare()
amqp_transport = TwistedAMQPTransport(channel, str(service_exchange),
address.routing_key, service_name,
str(response_queue), self.reply_to)
client = service.Client(amqp_transport, self.oprot_factory)
client.address = address
client.factory = self
if cache:
weak_client = client
self.clients[key] = client
else:
@defer.inlineCallbacks
def destroy(ref):
log.debug("Client for {0} garbage collected, removing " \
"subscriptions".format(service_name))
try:
yield channel.basic_cancel(consumer_tag)
except Exception as e:
pass
weak_client = weakref.proxy(client, destroy)
|
CWVanderReyden/originalMyHomeNet
|
recipeBox/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 353 | 0.005666 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
}
complete_apps = ['recipeBox']
|
trondeau/gnuradio-old
|
gr-blocks/python/blocks/qa_stream_mux.py
|
Python
|
gpl-3.0
| 6,241 | 0.005929 |
#!/usr/bin/env python
#
# Copyright 2004,2005,2007,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import os
class test_stream_mux (gr_unittest.TestCase):
def setUp (self):
os.environ['GR_CONF_CONTROLPORT_ON'] = 'False'
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def help_stream_2ff(self, N, stream_sizes):
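        # stream_mux takes stream_sizes[i] items from input i in round-robin order;
        # v0 supplies 1.0s and v1 supplies 2.0s, so the output alternates runs of each.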
v0 = blocks.vector_source_f(N*[1,], False)
v1 = blocks.vector_source_f(N*[2,], False)
mux = blocks.stream_mux(gr.sizeof_float, stream_sizes)
dst = blocks.vector_sink_f ()
self.tb.connect (v0, (mux,0))
self.tb.connect (v1, (mux,1))
self.tb.connect (mux, dst)
self.tb.run ()
return dst.data ()
def help_stream_ramp_2ff(self, N, stream_sizes):
r1 = range(N)
r2 = range(N)
r2.reverse()
v0 = blocks.vector_source_f(r1, False)
v1 = blocks.vector_source_f(r2, False)
mux = blocks.stream_mux(gr.sizeof_float, stream_sizes)
dst = blocks.vector_sink_f ()
self.tb.connect (v0, (mux,0))
self.tb.connect (v1, (mux,1))
self.tb.connect (mux, dst)
self.tb.run ()
return dst.data ()
def test_stream_2NN_ff(self):
N = 40
stream_sizes = [10, 10]
result_data = self.help_stream_2ff(N, stream_sizes)
exp_data = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0)
self.assertEqual (exp_data, result_data)
def test_stream_ramp_2NN_ff(self):
N = 40
stream_sizes = [10, 10]
result_data = self.help_stream_ramp_2ff(N, stream_sizes)
exp_data = ( 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
39.0, 38.0, 37.0, 36.0, 35.0, 34.0, 33.0, 32.0, 31.0, 30.0,
10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0,
20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
19.0, 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0,
30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0)
self.assertEqual (exp_data, result_data)
def test_stream_2NM_ff(self):
N = 40
stream_sizes = [7, 9]
self.help_stream_2ff(N, stream_sizes)
result_data = self.help_stream_2ff(N, stream_sizes)
exp_data = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0)
self.assertEqual (exp_data, result_data)
def test_stream_2MN_ff(self):
N = 37
stream_sizes = [7, 9]
self.help_stream_2ff(N, stream_sizes)
result_data = self.help_stream_2ff(N, stream_sizes)
exp_data = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0)
self.assertEqual (exp_data, result_data)
def test_stream_2N0_ff(self):
N = 30
stream_sizes = [7, 0]
|
        self.help_stream_2ff(N, stream_sizes)
result_data = self.help_stream_2ff(N, stream_sizes)
exp_data = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0)
self.assertEqual (exp_data, result_data)
def test_stream_20N_ff(self):
N = 30
stream_sizes = [0, 9]
self.help_stream_2ff(N, stream_sizes)
result_data = self.help_stream_2ff(N, stream_sizes)
exp_data = (2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0)
self.assertEqual (exp_data, result_data)
if __name__ == '__main__':
gr_unittest.run(test_stream_mux, "test_stream_mux.xml")
|
Akrog/gcs-client
|
gcs_client/common.py
|
Python
|
apache-2.0
| 7,705 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from functools import wraps
import math
import random
import time
from gcs_client import errors as errors
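# Decorator: check that every attribute named in the instance's _required_attributes
# is set before the wrapped method runs, and raise otherwise.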
def is_complete(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
attributes = getattr(self, '_required_attributes') or []
for attribute in attributes:
if not getattr(self, attribute, None):
raise Exception('%(func_name)s needs %(attr)s to be set.' %
{'func_name': f.__name__, 'attr': attribute})
return f(self, *args, **kwargs)
return wrapped
# Generate default codes to retry from transient HTTP errors
DEFAULT_RETRY_CODES = tuple(
code for code, (cls_name, cls) in errors.http_errors.items()
if cls is errors.Transient)
class RetryParams(object):
"""Truncated Exponential Backoff configuration class.
This configuration is used to provide truncated exponential backoff retries
for communications.
The algorithm requires 4 arguments: max retries, initial delay, max backoff
wait time and backoff factor.
    As long as we have pending retries we will wait
        (backoff_factor ^ (n - 1)) * initial_delay
    where n is the retry number. If this wait would exceed the max backoff wait
    time, the max backoff wait time is used instead.
We'll add a random wait time to this delay to help avoid cases where many
clients get synchronized by some situation and all retry at once, sending
requests in synchronized waves.
For example with default values of max_retries=5, initial_delay=1,
max_backoff=32 and backoff_factor=2
- 1st failure: 1 second + random delay [ (2^(1-1)) * 1 ]
- 2nd failure: 2 seconds + random delay [ (2^(2-1)) * 1 ]
- 3rd failure: 4 seconds + random delay [ (2^(3-1)) * 1 ]
- 4th failure: 8 seconds + random delay [ (2^(4-1)) * 1 ]
- 5th failure: 16 seconds + random delay [ (2^(5-1)) * 1 ]
- 6th failure: Fail operation
"""
def __init__(self, max_retries=5, initial_delay=1, max_backoff=32,
backoff_factor=2, randomize=True):
"""Initialize retry configuration.
:param max_retries: Maximum number of retries before giving up.
:type max_retries: int
:param initial_delay: Seconds to wait for the first retry.
:type initial_delay: int or float
:param max_backoff: Maximum number of seconds to wait between retries.
:type max_backoff: int or float
:param backoff_factor: Base to use for the power used to calculate the
delay for the backoff.
:type backoff_factor: int or float
:param randomize: Whether to use randomization of the delay time to
                          avoid synchronized waves.
:type randomize: bool
"""
self.max_retries = max_retries
self.initial_delay = initial_delay
self.max_backoff = max_backoff
self.backoff_factor = backoff_factor
self.randomize = randomize
@classmethod
def get_default(cls):
"""Return default configuration (simpleton patern)."""
|
if not hasattr(cls, 'default'):
cls.default = cls()
return cls.default
@classmethod
def set_default(cls, *args, **kwargs):
"""Set default retry configuration.
        Method accepts a RetryParams instance or the same arguments as the
__init__ method.
"""
default = cls.get_default()
# For RetryParams argument copy dictionary to default instance so all
# references to the default configuration will have new values.
if len(args) == 1 and isinstance(args[0], RetryParams):
default.__dict__.update(args[0].__dict__)
# For individual arguments call __init__ method on default instance
else:
default.__init__(*args, **kwargs)
def retry(param='_retry_params', error_codes=DEFAULT_RETRY_CODES):
"""Truncated Exponential Backoff decorator.
There are multiple ways to use this decorator:
@retry
def my_func(self):
In this case we will try to use `self._retry_params` and if that's not
available we'll use default retry configuration and retry on
DEFAULT_RETRY_CODES status codes.
@retry('_retry_cfg')
def my_func(self):
In this case we will try to use `self._retry_cfg` and if that's
not available we'll use default retry configuration and retry on
DEFAULT_RETRY_CODES status codes.
@retry(RetryParams(5, 1, 32, 2, False))
def my_func(self):
In this case we will use a specific retry configuration and retry on
DEFAULT_RETRY_CODES status codes.
@retry('_retry_cfg', [408, 504])
def my_func(self):
In this case we will try to use `self._retry_cfg` and if that's
not available we'll use default retry configuration and retry only on
timeout status codes.
@retry(RetryParams(5, 1, 32, 2, False), [408, 504])
def my_func(self):
In this case we will use a specific retry configuration and retry only
on timeout status codes.
@retry(error_codes=[408, 504])
def my_func(self):
In this case we will try to use `self._retry_params` and if that's not
available we'll use default retry configuration and retry only on
timeout status codes.
If we pass None as the retry parameter or the value of the attribute on the
instance is None we will not do any retries.
"""
def _retry(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
# If retry configuration is none or a RetryParams instance, use it
if isinstance(param, (type(None), RetryParams)):
retry_params = param
# If it's an attribute name try to retrieve it
else:
retry_params = getattr(self, param, RetryParams.get_default())
delay = 0
random_delay = 0
n = 0 # Retry number
while True:
try:
result = f(self, *args, **kwargs)
return result
except errors.Http as exc:
if (not retry_params or n >= retry_params.max_retries or
exc.code not in error_codes):
raise exc
n += 1
# If we haven't reached maximum backoff yet calculate new delay
if delay < retry_params.max_backoff:
backoff = (math.pow(retry_params.backoff_factor, n-1) *
retry_params.initial_delay)
delay = min(retry_params.max_backoff, backoff)
if retry_params.randomize:
random_delay = random.random() * retry_params.initial_delay
time.sleep(delay + random_delay)
return wrapped
# If no argument has been used
if callable(param):
f, param = param, '_retry_params'
return _retry(f)
return _retry
|
Wireless-Innovation-Forum/Spectrum-Access-System
|
src/harness/reference_models/pre_iap_filtering/inter_sas_duplicate_grant.py
|
Python
|
apache-2.0
| 2,583 | 0.00813 |
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inter-SAS Duplicate Grants removal.
This is a subset of the pre-IAP reference model which implements inter-SAS
duplicate CBSD removal. If a CBSD has registered with multiple SASs then the
CBSD is removed from the FAD objects of the respective SASs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from collections import defaultdict
def interSasDuplicateGrantPurgeReferenceModel(sas_uut_fad, sas_test_harness_fads):
""" Removes CBSDs with grants from more than one SAS from FAD objects.
Checks if a CBSD is registered with more than one SAS and removes the CBSD from
all the FAD objects of all SASs (SAS UUT and SAS Test Harnesses).
Args:
sas_uut_fad: A |FullActivityDump| object containing the FAD records of SAS UUT.
sas_test_harness_fads: A list of |FullActivityDump| objects containing the FAD records
from SAS test harnesses.
"""
# Get all the CBSD Reference ID of all CBSDs from UUT and SAS test Harness FAD objects
cbsd_id_counts = defaultdict(int)
for cbsd in sas_uut_fad.getCbsdRecords():
cbsd_id_counts[cbsd['id']] += 1
for fad in sas_test_harness_fads:
for cbsd in fad.getCbsdRecords():
cbsd_id_counts[cbsd['id']] += 1
# Iterate through the UUT CBSD list and keep only the non duplicate CBSDs
cbsds_to_keep = []
for cbsd in sas_uut_fad.getCbsdRecords():
if cbsd_id_counts[cbsd['id']] == 1:
cbsds_to_keep.append(cbsd)
logging.info('CBSDs to keep in SAS UUT: %s', cbsds_to_keep)
sas_uut_fad.setCbsdRecords(cbsds_to_keep)
# Iterate through the test harness CBSD list and keep only the non duplicate CBSDs
for fad in sas_test_harness_fads:
cbsds_to_keep = []
for cbsd in fad.getCbsdRecords():
if cbsd_id_counts[cbsd['id']] == 1:
cbsds_to_keep.append(cbsd)
logging.info('CBSDs to keep in SAS TH: %s', cbsds_to_keep)
fad.setCbsdRecords(cbsds_to_keep)
|
lixxu/sanic
|
sanic/request.py
|
Python
|
mit
| 11,420 | 0.000088 |
import asyncio
import email.utils
import json
import sys
from cgi import parse_header
from collections import namedtuple
from http.cookies import SimpleCookie
from urllib.parse import parse_qs, unquote, urlunparse
from httptools import parse_url
from sanic.exceptions import InvalidUsage
from sanic.log import error_logger, logger
try:
from ujson import loads as json_loads
except ImportError:
if sys.version_info[:2] == (3, 5):
def json_loads(data):
# on Python 3.5 json.loads only supports str not bytes
return json.loads(data.decode())
else:
json_loads = json.loads
DEFAULT_HTTP_CONTENT_TYPE = "application/octet-stream"
# HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1
# > If the media type remains unknown, the recipient SHOULD treat it
# > as type "application/octet-stream"
class RequestParameters(dict):
"""Hosts a dict with lists as values where get returns the first
value of the list and getlist returns the whole shebang
"""
def get(self, name, default=None):
"""Return the first value, either the default or actual"""
return super().get(name, [default])[0]
def getlist(self, name, default=None):
"""Return the entire list"""
return super().get(name, default)
class StreamBuffer:
def __init__(self, buffer_size=100):
self._queue = asyncio.Queue(buffer_size)
async def read(self):
""" Stop reading when gets None """
payload = await self._queue.get()
self._queue.task_done()
return payload
async def put(self, payload):
await self._queue.put(payload)
def is_full(self):
return self._queue.full()
class Request(dict):
"""Properties of an HTTP request such as URL, headers, etc."""
__slots__ = (
"__weakref__",
"_cookies",
"_ip",
"_parsed_url",
"_port",
"_remote_addr",
"_socket",
"app",
"body",
"endpoint",
"headers",
"method",
"parsed_args",
"parsed_files",
"parsed_form",
"parsed_json",
"raw_url",
"stream",
"transport",
"uri_template",
"version",
)
def __init__(self, url_bytes, headers, version, method, transport):
self.raw_url = url_bytes
# TODO: Content-Encoding detection
self._parsed_url = parse_url(url_bytes)
self.app = None
self.headers = headers
self.version = version
self.method = method
self.transport = transport
# Init but do not inhale
self.body_init()
self.parsed_json = None
self.parsed_form = None
self.parsed_files = None
self.parsed_args = None
self.uri_template = None
self._cookies = None
self.stream = None
self.endpoint = None
def __repr__(self):
return "<{0}: {1} {2}>".format(
self.__class__.__name__, self.method, self.path
)
def __bool__(self):
if self.transport:
return True
return False
def body_init(self):
self.body = []
def body_push(self, data):
self.body.append(data)
def body_finish(self):
self.body = b"".join(self.body)
@property
def json(self):
if self.parsed_json is None:
self.load_json()
return self.parsed_json
def load_json(self, loads=json_loads):
try:
self.parsed_json = loads(self.body)
except Exception:
|
            if not self.body:
return None
raise InvalidUsage("Failed when parsing body as json")
return self.parsed_json
@property
def token(self):
"""Attempt to return the auth header token.
:return: token related to request
"""
prefixes = ("Bearer", "Token")
auth_header = self.headers.get("Authorization")
if auth_header is not None:
for prefix in prefixes:
if prefix in auth_header:
return auth_header.partition(prefix)[-1].strip()
return auth_header
@property
def form(self):
if self.parsed_form is None:
self.parsed_form = RequestParameters()
self.parsed_files = RequestParameters()
content_type = self.headers.get(
"Content-Type", DEFAULT_HTTP_CONTENT_TYPE
)
content_type, parameters = parse_header(content_type)
try:
if content_type == "application/x-www-form-urlencoded":
self.parsed_form = RequestParameters(
parse_qs(self.body.decode("utf-8"))
)
elif content_type == "multipart/form-data":
# TODO: Stream this instead of reading to/from memory
boundary = parameters["boundary"].encode("utf-8")
self.parsed_form, self.parsed_files = parse_multipart_form(
self.body, boundary
)
except Exception:
error_logger.exception("Failed when parsing form")
return self.parsed_form
@property
def files(self):
if self.parsed_files is None:
self.form # compute form to get files
return self.parsed_files
@property
def args(self):
if self.parsed_args is None:
if self.query_string:
self.parsed_args = RequestParameters(
parse_qs(self.query_string)
)
else:
self.parsed_args = RequestParameters()
return self.parsed_args
@property
def raw_args(self):
return {k: v[0] for k, v in self.args.items()}
@property
def cookies(self):
if self._cookies is None:
cookie = self.headers.get("Cookie")
if cookie is not None:
cookies = SimpleCookie()
cookies.load(cookie)
self._cookies = {
name: cookie.value for name, cookie in cookies.items()
}
else:
self._cookies = {}
return self._cookies
@property
def ip(self):
if not hasattr(self, "_socket"):
self._get_address()
return self._ip
@property
def port(self):
if not hasattr(self, "_socket"):
self._get_address()
return self._port
@property
def socket(self):
if not hasattr(self, "_socket"):
self._get_address()
return self._socket
def _get_address(self):
self._socket = self.transport.get_extra_info("peername") or (
None,
None,
)
self._ip = self._socket[0]
self._port = self._socket[1]
@property
def remote_addr(self):
"""Attempt to return the original client ip based on X-Forwarded-For.
:return: original client ip.
"""
if not hasattr(self, "_remote_addr"):
forwarded_for = self.headers.get("X-Forwarded-For", "").split(",")
remote_addrs = [
addr
for addr in [addr.strip() for addr in forwarded_for]
if addr
]
if len(remote_addrs) > 0:
self._remote_addr = remote_addrs[0]
else:
self._remote_addr = ""
return self._remote_addr
@property
def scheme(self):
if (
self.app.websocket_enabled
and self.headers.get("upgrade") == "websocket"
):
scheme = "ws"
else:
scheme = "http"
if self.transport.get_extra_info("sslcontext"):
scheme += "s"
return scheme
@property
def host(self):
# it appears that httptools doesn't return the host
# so pull it from the headers
return self.headers.get("Host", "")
@property
def content_type(self):
return self.headers.get("Content-Type", DEFAULT_HTTP_CONTENT_TYPE)
|
srijannnd/Login-and-Register-App-in-Django
|
simplesocial/accounts/tokens.py
|
Python
|
mit
| 375 | 0.008 |
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils import six
class AccountActivationTokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
return (six.text_type(user.pk) + six.text_type(timestamp)) + six.text_type(user.is_active)
account_activation_token = AccountActivationTokenGenerator()
|
mozilla/pto
|
pto/apps/dates/views.py
|
Python
|
mpl-2.0
| 37,380 | 0.00099 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import traceback
from StringIO import StringIO
import re
import datetime
from urllib import urlencode
from collections import defaultdict
from django import http
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import redirect_to_login
from django.db import transaction
from django.core.urlresolvers import reverse
from django.conf import settings
from django.shortcuts import redirect, get_object_or_404
from django.contrib import messages
from django.db.models import Q
from django.template import Context, loader
from django.core.mail import get_connection, EmailMessage
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.shortcuts import render
from django.views.decorators.http import require_POST
from django.contrib.sites.models import RequestSite
from django.core.cache import cache
from django.db.models import Min, Count
import vobject
from .models import Entry, Hours, BlacklistedUser, FollowingUser, UserKey
from pto.apps.users.models import UserProfile, User
from pto.apps.users.utils import ldap_lookup
from .utils import parse_datetime, DatetimeParseError
from .utils.countrytotals import UnrecognizedCountryError, get_country_totals
import utils
import forms
from .decorators import json_view
from .csv_export import UnicodeWriter as CSVUnicodeWriter
def valid_email(value):
try:
validate_email(value)
return True
except ValidationError:
return False
def handler500(request):
data = {}
if settings.TRACEBACKS_ON_500:
err_type, err_value, err_traceback = sys.exc_info()
out = StringIO()
traceback.print_exc(file=out)
traceback_formatted = out.getvalue()
data['err_type'] = err_type
data['err_value'] = err_value
data['err_traceback'] = traceback_formatted
data['_report_traceback'] = True
else:
data['_report_traceback'] = False
return render(request, '500.html', data, status=500)
def home(request): # aka dashboard
data = {}
data['mobile'] = request.MOBILE # thank you django-mobility (see settings)
if data['mobile']:
# unless an explicit cookie it set, redirect to /mobile/
if not request.COOKIES.get('no-mobile', False):
return redirect(reverse('mobile.home'))
# now do what the login_required would usually do
if not request.user.is_authenticated():
path = request.get_full_path()
return redirect_to_login(path)
data['page_title'] = "Dashboard"
profile = request.user.get_profile()
if profile and profile.country in ('GB', 'FR', 'DE'):
first_day = 1 # 1=Monday
else:
first_day = 0 # default to 0=Sunday
data['first_day'] = first_day
if 'all-rightnow' in request.GET:
MAX_RIGHT_NOWS = 9999
else:
MAX_RIGHT_NOWS = 20
## Commented out whilst we decide whether to keep it at all
#right_nows, right_now_users = get_right_nows()
#data['right_nows'] = right_nows
#data['right_now_users'] = right_now_users
#if len(right_now_users) > MAX_RIGHT_NOWS:
# data['right_now_too_many'] = (len(data['right_now_users'])
# - MAX_RIGHT_NOWS)
# data['right_now_users'] = data['right_now_users'][:MAX_RIGHT_NOWS]
#else:
# data['right_now_too_many'] = None
data.update(get_taken_info(request.user))
data['calendar_url'] = _get_user_calendar_url(request)
cache_key = 'recently_created_%s' % request.user.pk
recently_created = cache.get(cache_key)
if recently_created:
data['recently_created'] = recently_created
cache.delete(cache_key)
return render(request, 'dates/home.html', data)
def _get_user_calendar_url(request):
user_key, __ = UserKey.objects.get_or_create(user=request.user)
base_url = '%s://%s' % (request.is_secure() and 'https' or 'http',
RequestSite(request).domain)
return base_url + reverse('dates.calendar_vcal', args=(user_key.key,))
def get_taken_info(user):
data = {}
profile = user.get_profile()
if profile.country:
data['country'] = profile.country
try:
data['country_totals'] = get_country_totals(profile.country)
except UnrecognizedCountryError:
data['unrecognized_country'] = True
today = datetime.date.today()
start_date = datetime.date(today.year, 1, 1)
last_date = datetime.date(today.year + 1, 1, 1)
from django.db.models import Sum
qs = Entry.objects.filter(
user=user,
start__gte=start_date,
end__lt=last_date
)
agg = qs.aggregate(Sum('total_hours'))
total_hours = agg['total_hours__sum']
if total_hours is None:
total_hours = 0
data['taken'] = _friendly_format_hours(total_hours)
return data
def _friendly_format_hours(total_hours):
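    # Convert a raw hour count into a human-friendly string, using settings.WORK_DAY hours per day.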
days = 1.0 * total_hours / settings.WORK_DAY
hours = total_hours % settings.WORK_DAY
if not total_hours:
return '0 days'
elif total_hours < settings.WORK_DAY:
return '%s hours' % total_hours
elif total_hours == settings.WORK_DAY:
return '1 day'
else:
if not hours:
return '%d days' % days
else:
return '%s days' % days
def get_right_nows():
right_now_users = []
right_nows = defaultdict(list)
_today = datetime.date.today()
for entry in (Entry.objects
.filter(start__lte=_today,
end__gte=_today,
total_hours__gte=0)
.order_by('user__first_name',
'user__last_name',
'user__username')):
if entry.user not in right_now_users:
right_now_users.append(entry.user)
left = (entry.end - _today).days + 1
right_nows[entry.user].append((left, entry))
return right_nows, right_now_users
def get_upcomings(max_days=14):
users = []
upcoming = defaultdict(list)
today = datetime.date.today()
max_future = today + datetime.timedelta(days=max_days)
for entry in (Entry.objects
.filter(start__gt=today,
start__lt=max_future,
total_hours__gte=0)
.order_by('user__first_name',
'user__last_name',
'user__username')):
if entry.user not in users:
users.append(entry.user)
days = (entry.start - today).days + 1
upcoming[entry.user].append((days, entry))
return upcoming, users
def make_entry_title(entry, this_user, include_details=True):
if entry.user != this_user:
if entry.user.first_name:
title = '%s %s - ' % (entry.user.first_name,
entry.user.last_name)
else:
title = '%s - ' % entry.user.username
else:
title = ''
days = 0
    for hour in Hours.objects.filter(entry=entry):
if hour.hours == 8:
days += 1
elif hour.hours == 4:
days += 0.5
if days > 1:
if int(days) == days:
title += '%d days' % days
else:
title += '%s days' % days
if Hours.objects.filter(entry=entry, birthday=True).exists():
title += ' (includes birthday)'
    elif (days == 1 and entry.total_hours == 0 and
          Hours.objects.filter(entry=entry, birthday=True)):
title += 'Birthday!'
elif days == 1 and entry.total_hours == 8:
title += '1 day'
else:
title += '%s hours' % entry.total_hours
if entry.details:
if days == 1:
max_length = 20
else:
max_length = 40
if include_details:
title += ', '
if len(entry.details) > max_length:
title += entry.details[:max_l
|
fopina/django-holidays
|
holidays/tests.py
|
Python
|
mit
| 1,476 | 0 |
from django.test import TestCase
from .utils import is_holiday
from datetime import date, timedelta
class HolidaysTests(TestCase):
longMessage = True
fixtures = ['demo']
def fullYearTest(self, group, year, holidays):
it = date(year, 1, 1)
end = date(year, 12, 31)
delta = timedelta(days=1)
calc_holidays = []
while it <= end:
if is_holiday(group, it):
calc_holidays.append(it)
it += delta
self.assertEquals(calc_holidays, holidays)
def testPortugal2015(self):
self.fullYearTest(
'PT',
2015,
[
date(2015, 1, 1),
date(2015, 4, 3),
date(2015, 4, 5),
date(2015, 4, 25),
                date(2015, 5, 1),
date(2015, 6, 10),
date(2015, 8, 15),
date(2015, 12, 8),
date(2015, 12, 25),
],
)
def testPortugalPorto2015(self):
self.fullYearTest(
'PT-PRT',
2015,
|
[
date(2015, 1, 1),
date(2015, 4, 3),
date(2015, 4, 5),
date(2015, 4, 25),
date(2015, 5, 1),
date(2015, 6, 10),
date(2015, 6, 24),
date(2015, 8, 15),
date(2015, 12, 8),
date(2015, 12, 25),
],
)
|
sfcta/synthpop
|
synthpop/synthesizer.py
|
Python
|
bsd-3-clause
| 6,015 | 0.001829 |
import logging
import sys
import traceback
from collections import namedtuple
import numpy as np
import pandas as pd
from scipy.stats import chisquare
from . import categorizer as cat
from . import draw
from .ipf.ipf import calculate_constraints
from .ipu.ipu import household_weights
logger = logging.getLogger("synthpop")
FitQuality = namedtuple(
'FitQuality',
('people_chisq', 'people_p'))
BlockGroupID = namedtuple(
'BlockGroupID', ('state', 'county', 'tract', 'block_group'))
def enable_logging():
handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def synthesize(h_marg, p_marg, h_jd, p_jd, h_pums, p_pums,
marginal_zero_sub=.01, jd_zero_sub=.001, hh_index_start=0):
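    # Pipeline: substitute zeros in the marginals and joint distributions, run IPF on
    # household and person marginals, run IPU to reweight the PUMS households, then
    # draw the synthetic households.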
# this is the zero marginal problem
h_marg = h_marg.replace(0, marginal_zero_sub)
p_marg = p_marg.replace(0, marginal_zero_sub)
# zero cell problem
h_jd.frequency = h_jd.frequency.replace(0, jd_zero_sub)
p_jd.frequency = p_jd.frequency.replace(0, jd_zero_sub)
# ipf for households
logger.info("Running ipf for households")
h_constraint, _ = calculate_constraints(h_marg, h_jd.frequency)
h_constraint.index = h_jd.cat_id
logger.debug("Household constraint")
logger.debug(h_constraint)
logger.debug(h_constraint.sum())
# ipf for persons
logger.info("Running ipf for persons")
p_constraint, _ = calculate_constraints(p_marg, p_jd.frequency)
p_constraint.index = p_jd.cat_id
logger.debug("Person constraint")
logger.debug(p_constraint)
logger.debug(p_constraint.sum())
# make frequency tables that the ipu expects
household_freq, person_freq = cat.frequency_tables(p_pums, h_pums,
p_jd.cat_id,
h_jd.cat_id)
# do the ipu to match person marginals
logger.info("Running ipu")
import time
t1 = time.time()
best_weights, fit_quality, iterations = household_weights(household_freq,
person_freq,
h_constraint,
p_constraint)
logger.info("Time to run ipu: %.3fs" % (time.time()-t1))
logger.debug("IPU weights:")
logger.debug(best_weights.describe())
logger.debug(best_weights.sum())
logger.debug("Fit quality:")
logger.debug(fit_quality)
logger.debug("Number of iterations:")
logger.debug(iterations)
num_households = int(h_marg.groupby(level=0).sum().mean())
print "Drawing %d households" % num_households
best_chisq = np.inf
return draw.draw_households(
num_households, h_pums, p_pums, household_freq, h_constraint,
p_constraint, best_weights, hh_index_start=hh_index_start)
def synthesize_all(recipe, num_geogs=None, indexes=None,
marginal_zero_sub=.01, jd_zero_sub=.001):
"""
Parameters
----------
write_households_csv, write_persons_csv : str
Name of households and persons csv file to write.
Pass None to return these rather than write.
Returns
-------
households, people : pandas.DataFrame
Only returns these if `write_households_csv` and `write_persons_csv`
are None.
fit_quality : dict of FitQuality
Keys are geographic IDs, values are namedtuples with attributes
``.household_chisq``, ``household_p``, ``people_chisq``,
and ``people_p``.
"""
print "Synthesizing at geog level: '{}' (number of geographies is {})".\
format(recipe.get_geography_name(), recipe.get_num_geographies())
if indexes is None:
indexes = recipe.get_available_geography_ids()
hh_list = []
people_list = []
cnt = 0
fit_quality = {}
hh_index_start = 0
# TODO will parallelization work here?
for geog_id in indexes:
print "Synthesizing geog id:\n", geog_id
h_marg = recipe.get_household_marginal_for_geography(geog_id)
logger.debug("Household marginal")
logger.debug(h_marg)
p_marg = recipe.get_person_marginal_for_geography(geog_id)
logger.debug("Person marginal")
logger.debug(p_marg)
h_pums, h_jd = recipe.\
get_household_joint_dist_for_geography(geog_id)
logger.debug("Household joint distribution")
logger.debug(h_jd)
p_pums, p_jd = recipe.get_person_joint_dist_for_geography(geog_id)
logger.debug("Person joint distribution")
logger.debug(p_jd)
try:
households, people, people_chisq, people_p = \
synthesize(
h_marg, p_marg, h_jd, p_jd, h_pums, p_pums,
marginal_zero_sub=marginal_zero_sub, jd_zero_sub=jd_zero_sub,
hh_index_start=hh_index_start)
if not recipe.write_households(geog_id, households):
hh_list.append(households)
if not recipe.write_persons(geog_id, people):
people_list.append(people)
key = tuple(geog_id.values)
# key = BlockGroupID(
# geog_id['state'], geog_id['county'], geog_id['tract'],
# geog_id['block group'])
fit_quality[key] = FitQuality(people_chisq, people_p)
cnt += 1
if len(households) > 0:
hh_index_start = households.index.values[-1] + 1
if num_geogs is not None and cnt >= num_geogs:
break
except Exception as e:
print "Exception caught: ", sys.exc_info()[0]
|
print traceback.format_exc()
# continue
return (pd.concat(hh_list) if len(hh_list) > 0 else None,
|
pd.concat(people_list, ignore_index=True) if len(people_list) > 0 else None,
fit_quality)
|
sdeleeuw/contagement
|
videos/api/viewsets/video.py
|
Python
|
gpl-3.0
| 1,272 | 0 |
from __future__ import unicode_literals
from rest_framework import viewsets
from rest_framework import permissions
from videos.api.serializers import video as video_serializers
from videos.models import Video
class VideoViewSet(viewsets.ModelViewSet):
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
def get_queryset(self):
queryset = Video.objects.all()
if self.request.method not in permissions.SAFE_METHODS:
if self.request.user.is_authenticated:
queryset = queryset.filter_owner(user=self.request.user)
else:
return queryset.none()
return queryset
def get_serializer_class(self):
if self.request.method in permissions.SAFE_METHODS:
return video_serializers.RetrieveSerializer
return video_serializers.DefaultSerializer
def get_serializer_context(self):
context = super(VideoViewSet, self).get_serializer_context()
if self.request.method not in permissions.SAFE_METHODS \
and not self.request.user.is_superuser:
context['exclude'] = ('sites', )
return context
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
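# A possible wiring sketch (not part of this file; such code would normally live in
# a urls.py). The 'videos' prefix and basename are illustrative, and older DRF
# releases spell the keyword `base_name`.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register(r'videos', VideoViewSet, basename='video')
urlpatterns = router.urls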
|
pybursa/homeworks
|
s_shybkoy/hw1/hw1_task8_ShybkoiSergei.py
|
Python
|
gpl-2.0
| 162 | 0.018519 |
#hw 1/ task8/ Sergei Shybkoi
t = (1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 'a', 'b', 'c')
print "Set:",t
print "Each third element:"
print t[2::3]
print t[-1*len(t)+2:-1:3]
|
yaroslavprogrammer/django-modeltranslation
|
modeltranslation/fields.py
|
Python
|
bsd-3-clause
| 15,602 | 0.002628 |
# -*- coding: utf-8 -*-
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.db.models import fields
from modeltranslation import settings as mt_settings
from modeltranslation.utils import (
get_language, build_localized_fieldname, build_localized_verbose_name, resolution_order)
from modeltranslation.widgets import ClearableWidgetWrapper
SUPPORTED_FIELDS = (
fields.CharField,
# Above implies also CommaSeparatedIntegerField, EmailField, FilePathField, SlugField
# and URLField as they are subclasses of CharField.
fields.TextField,
fields.IntegerField,
# Above implies also BigIntegerField, SmallIntegerField, PositiveIntegerField and
# PositiveSmallIntegerField, as they are subclasses of IntegerField.
fields.BooleanField,
fields.NullBooleanField,
fields.FloatField,
fields.DecimalField,
fields.IPAddressField,
fields.DateField,
fields.DateTimeField,
fields.TimeField,
fields.files.FileField,
fields.files.ImageField,
fields.related.ForeignKey,
# Above implies also OneToOneField
)
try:
SUPPORTED_FIELDS += (fields.GenericIPAddressField,) # Django 1.4+ only
except AttributeError:
pass
class NONE:
"""
Used for fallback options when they are not provided (``None`` can be
given as a fallback or undefined value) or to mark that a nullable value
is not yet known and needs to be computed (e.g. field default).
"""
pass
def create_translation_field(model, field_name, lang, empty_value):
"""
Translation field factory. Returns a ``TranslationField`` based on a
fieldname and a language.
The list of supported fields can be extended by defining a tuple of field
names in the projects settings.py like this::
MODELTRANSLATION_CUSTOM_FIELDS = ('MyField', 'MyOtherField',)
If the class is neither a subclass of fields in ``SUPPORTED_FIELDS``, nor
in ``CUSTOM_FIELDS`` an ``ImproperlyConfigured`` exception will be raised.
"""
if empty_value not in ('', 'both', None, NONE):
raise ImproperlyConfigured('%s is not a valid empty_value.' % empty_value)
field = model._meta.get_field(field_name)
cls_name = field.__class__.__name__
if not (isinstance(field, SUPPORTED_FIELDS) or cls_name in mt_settings.CUSTOM_FIELDS):
raise ImproperlyConfigured(
'%s is not supported by modeltranslation.' % cls_name)
translation_class = field_factory(field.__class__)
return translation_class(translated_field=field, language=lang, empty_value=empty_value)
def field_factory(baseclass):
class TranslationFieldSpecific(TranslationField, baseclass):
pass
# Reflect baseclass name of returned subclass
TranslationFieldSpecific.__name__ = 'Translation%s' % baseclass.__name__
return TranslationFieldSpecific
class TranslationField(object):
"""
The translation field functions as a proxy to the original field which is
wrapped.
For every field defined in the model's ``TranslationOptions`` localized
versions of that field are added to the model depending on the languages
given in ``settings.LANGUAGES``.
If for example there is a model ``News`` with a field ``title`` which is
registered for translation and the ``settings.LANGUAGES`` contains the
``de`` and ``en`` languages, the fields ``title_de`` and ``title_en`` will
be added to the model class. These fields are realized using this
descriptor.
The translation field needs to know which language it contains therefore
that needs to be specified when the field is created.
"""
def __init__(self, translated_field, language, empty_value, *args, **kwargs):
# Update the dict of this field with the content of the original one
# This might be a bit radical?! Seems to work though...
self.__dict__.update(translated_field.__dict__)
# Store the originally wrapped field for later
self.translated_field = translated_field
self.language = language
self.empty_value = empty_value
if empty_value is NONE:
self.empty_value = None if translated_field.null else ''
# Translation are always optional (for now - maybe add some parameters
# to the translation options for configuring this)
if not isinstance(self, fields.BooleanField):
# TODO: Do we really want to enforce null *at all*? Shouldn't this
# better honour the null setting of the translated field?
self.null = True
self.blank = True
# Adjust the name of this field to reflect the language
self.attname = build_localized_fieldname(self.translated_field.name, self.language)
self.name = self.attname
# Copy the verbose name and append a language suffix
# (will show up e.g. in the admin).
self.verbose_name = build_localized_verbose_name(translated_field.verbose_name, language)
# ForeignKey support - rewrite related_name
if self.rel and self.related and not self.rel.is_hidden():
import copy
current = self.related.get_accessor_name()
self.rel = copy.copy(self.rel) # Since fields cannot share the same rel object.
# self.related doesn't need to be copied, as it will be recreated in
# ``RelatedField.do_related_class``
if self.rel.related_name is None:
# For implicit related_name use different query field name
loc_related_query_name = build_localized_fieldname(
self.related_query_name(), self.language)
self.related_query_name = lambda: loc_related_query_name
self.rel.related_name = build_localized_fieldname(current, self.language)
self.rel.field = self # Django 1.6
if hasattr(self.rel.to._meta, '_related_objects_cache'):
del self.rel.to._meta._related_objects_cache
# Django 1.5 changed definition of __hash__ for fields to be fine with hash requirements.
# It spoiled our machinery, since TranslationField has the same creation_counter as its
# original field and fields didn't get added to sets.
# So here we override __eq__ and __hash__ to fix the issue while retaining fine with
# http://docs.python.org/2.7/reference/datamodel.html#object.__hash__
def __eq__(self, other):
if isinstance(other, fields.Field):
return (self.creation_counter == other.creation_counter and
self.language == getattr(other, 'language', None))
return super(TranslationField, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.creation_counter, self.language))
def get_attname_column(self):
attname = self.get_attname()
if self.translated_field.db_column:
column = build_localized_fieldname(self.translated_field.db_column, self.language)
else:
column = attname
return attname, column
def formfield(self, *args, **kwargs):
"""
Returns proper formfield, according to empty_values setting
(only for ``forms.CharField`` subclasses).
There are 3 different formfields:
- CharField that stores all empty values as empty strings;
- NullCharField that stores all empty values as None (Null);
- NullableField that can store both None and empty string.
By default, if no empty_values was specified in model's translation options,
NullCharField would be used if the original field is nullable, CharField otherwise.
This can be overridden by setting empty_values to '' or None.
Setting 'both' will result in NullableField being used.
Textual widgets (subclassing ``TextInput`` or ``Textarea``) used for
nullable fields are enriched with a clear checkbox, allowing ``None``
values to be preserved rather than saved as empty strings.
The ``forms.CharField`` somewhat surprising behaviour is documented as a
"w
|
com4/py-editdist
|
test.py
|
Python
|
isc
| 1,856 | 0.033405 |
#!/usr/bin/env python
# Copyright (c) 2006 Damien Miller <djm@mindrot.org>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# $Id$
import editdist
import unittest
import random
test_vectors = (
( 'abc', 'abc', 0 ),
( 'abc', 'ab', 1 ),
( 'abc', 'abcd', 1 ),
( 'abc', 'bc', 1 ),
( 'abc', 'a', 2 ),
( 'abc', '', 3 ),
( '', '', 0 ),
( 'abc', 'acx', 2 ),
( 'abc', 'acxx', 3 ),
( 'abc', 'bcd', 2 ),
( 'a' * 1000, 'a' * 1000, 0 ),
( 'a' * 1000, 'b' * 1000, 1000),
)
def randstring(l):
a = "abcdefghijklmnopqrstuvwxyz"
r = ""
for i in range(0, l):
r += a[random.randint(0, len(a) - 1)]
return r
class TestRadix(unittest.TestCase):
def test_00__test_vectors(self):
for a, b, score in test_vectors:
self.assertEqual(editdist.distance(a, b), score)
def test_01__reversed_test_vectors(self):
for b, a, score in test_vectors:
self.assertEqual(editdist.distance(a, b), score)
def test_02__fuzz(self):
for i in range(0, 32) + range(128, 1024, 128):
for j in range(0, 32):
a = randstring(i)
b = randstring(j)
dist = editdist.distance(a, b)
self.assert_(dist >= 0)
def main():
unittest.main()
if __name__ == '__main__':
main()
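# For reference, a pure-Python Levenshtein distance that should agree with
# editdist.distance on the vectors above (illustrative only, and much slower).
def slow_distance(a, b):
    prev = range(len(b) + 1)
    for i in range(1, len(a) + 1):
        cur = [i]
        for j in range(1, len(b) + 1):
            cost = 0 if a[i - 1] == b[j - 1] else 1
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + cost))
        prev = cur
    return prev[len(b)]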
|
nuclear-wizard/moose
|
python/chigger/tests/wireframe/points.py
|
Python
|
lgpl-2.1
| 936 | 0.019231 |
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import vtk
import chigger
camera = vtk.vtkCamera()
camera.SetViewUp(0.0105, 0.1507, 0.9885)
camera.SetPosition(15.6131, -0.3930, 0.0186)
camera.SetFocalPoint(0.0000, 0.0000, 0.1250)
reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e')
mug = chigger.exodus.ExodusResult(reader, block=[76], representation='points', camera=camera, color=[0,1,0])
window = chigger.RenderWindow(mug, size=[300,300], test=True)
window.update();window.resetCamera() #TODO: This is needed to make results render correctly, not sure why
window.write('points.png')
window.start()
|
google/syzygy
|
syzygy/build/gyp_main.py
|
Python
|
apache-2.0
| 3,560 | 0.009551 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper for the gyp_main that ensures the appropriate include directories
are brought in.
"""
import os
import shlex
import sys
import vs_toolchain_wrapper
script_dir = os.path.dirname(os.path.realpath(__file__))
syzygy_src = os.path.abspath(os.path.join(script_dir, os.pardir, os.pardir))
sys.path.insert(0, os.path.join(syzygy_src, 'tools', 'gyp', 'pylib'))
import gyp
def apply_gyp_environment_from_file(file_path):
"""Reads in a *.gyp_env file and applies the valid keys to os.environ."""
if not os.path.exists(file_path):
return False
with open(file_path, 'rU') as f:
file_contents = f.read()
try:
file_data = eval(file_contents, {'__builtins__': None}, None)
except SyntaxError, e:
e.filename = os.path.abspath(file_path)
raise
supported_vars = (
'GYP_DEFINES',
'GYP_GENERATOR_FLAGS',
'GYP_GENERATORS',
'GYP_MSVS_VERSION',
)
for var in supported_vars:
file_val = file_data.get(var)
if file_val:
if var in os.environ:
print 'INFO: Environment value for "%s" overrides value in %s.' % (
var, os.path.abspath(file_path)
)
else:
os.environ[var] = file_val
return True
def get_output_directory():
"""Returns the output directory that GYP will use."""
# Handle generator flags from the environment.
genflags = shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', ''))
needle = 'output_dir='
for item in genflags:
if item.startswith(needle):
return item[len(needle):]
return 'out'
def apply_syzygy_gyp_env(syzygy_src_path):
if 'SKIP_SYZYGY_GYP_ENV' not in os.environ:
# Update the environment based on syzygy.gyp_env
path = os.path.join(syzygy_src_path, 'syzygy.gyp_env')
applied_env_from_file = apply_gyp_environment_from_file(path)
if (not applied_env_from_file or not os.environ.get('GYP_GENERATORS')):
# Default to ninja if no generator has explicitly been set.
os.environ['GYP_GENERATORS'] = 'ninja'
if (not applied_env_from_file or not os.environ.get('GYP_MSVS_VERSION')):
os.environ['GYP_MSVS_VERSION'] = '2015'
if __name__ == '__main__':
# Get the path of the root 'src' directory.
self_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(self_dir, '..', '..'))
apply_syzygy_gyp_env(src_dir)
assert os.environ.get('GYP_GENERATORS')
if os.environ.get('GYP_GENERATORS') == 'msvs':
print 'ERROR: The \'msvs\' configuration isn\'t supported anymore.'
sys.exit(1)
# Setup the VS toolchain.
vs_runtime_dll_dirs = \
vs_toolchain_wrapper.SetEnvironmentAndGetRuntimeDllDirs()
gyp_rc = gyp.main(sys.argv[1:])
# Copy the VS runtime DLLs to the build directories.
if vs_runtime_dll_dirs:
x64_runtime, x86_runtime = vs_runtime_dll_dirs
vs_toolchain_wrapper.CopyVsRuntimeDlls(
os.path.join(src_dir, get_output_directory()),
(x86_runtime, x64_runtime))
sys.exit(gyp_rc)
|
spiceqa/virt-test
|
qemu/tests/watchdog.py
|
Python
|
gpl-2.0
| 12,256 | 0.000408 |
import logging
import re
import time
import os
from autotest.client.shared import error, utils
from virttest import utils_misc, env_process
@error.context_aware
def run_watchdog(test, params, env):
"""
Configure watchdog, crash the guest and check if watchdog_action occurs.
Test Step:
1. see every function step
Params:
:param test: QEMU test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment.
"""
timeout = int(params.get("login_timeout", '360'))
relogin_timeout = int(params.get("relogin_timeout", '240'))
watchdog_device_type = params.get("watchdog_device_type", "i6300esb")
watchdog_action = params.get("watchdog_action", "reset")
trigger_cmd = params.get("trigger_cmd", "echo c > /dev/watchdog")
# internal function
def _watchdog_device_check(session, watchdog_device):
"""
Check that the watchdog device has been found and initialized successfully;
raise an error if not.
"""
# when using ib700 need modprobe it's driver manually.
if watchdog_device == "ib700":
session.cmd("modprobe ib700wdt")
# when wDT is 6300esb need check pci info
if watchdog_device == "i6300esb":
error.context("checking pci info to ensure have WDT device",
logging.info)
o = session.cmd_output("lspci")
if o:
wdt_pci_info = re.findall(".*6300ESB Watchdog Timer", o)
if not wdt_pci_info:
raise error.TestFail("Can find watchdog pci")
logging.info("Found watchdog pci device : %s" % wdt_pci_info)
# checking watchdog init info using dmesg
error.context("Checking watchdog init info using dmesg", logging.info)
dmesg_info = params.get("dmesg_info", "(i6300ESB|ib700wdt).*init")
(s, o) = session.cmd_status_output(
"dmesg | grep -i '%s' " % dmesg_info)
if s != 0:
error_msg = "Wactchdog device '%s' initialization failed "
raise error.TestError(error_msg % watchdog_device)
logging.info("Watchdog device '%s' add and init successfully"
% watchdog_device)
logging.debug("Init info : '%s'" % o)
def _trigger_watchdog(session, trigger_cmd=None):
"""
Trigger watchdog action
Params:
@session: guest connect session.
@trigger_cmd: cmd trigger the watchdog
"""
if trigger_cmd is not None:
error.context("Trigger Watchdog action using:'%s'." % trigger_cmd,
logging.info)
session.sendline(trigger_cmd)
def _action_check(session, watchdog_action):
"""
Check whether or not the watchdog action occurred; raise an error if the
action did not occur.
"""
# when watchdog action is pause, shutdown, reset, poweroff
# the vm session will lost responsive
response_timeout = int(params.get("response_timeout", '240'))
error.context("Check whether or not watchdog action '%s' take effect"
% watchdog_action, logging.info)
if not utils_misc.wait_for(lambda: not session.is_responsive(),
response_timeout, 0, 1):
if watchdog_action == "none" or watchdog_action == "debug":
logging.info("OK, the guest session is responsive still")
else:
raise error.TestFail(
"Oops, seems action '%s' take no effect, ",
"guest is responsive" % watchdog_action)
# when action is poweroff or shutdown(without no-shutdown option), the vm
# will dead, and qemu exit.
# The others the vm monitor still responsive, can report the vm status.
if (watchdog_action == "poweroff" or (watchdog_action == "shutdown"
and params.get("disable_shutdown") != "yes")):
if not utils_misc.wait_for(lambda: vm.is_dead(),
response_timeout, 0, 1):
raise error.TestFail(
"Oops, seems '%s' action take no effect, ",
"guest is alive!" % watchdog_action)
else:
if watchdog_action == "pause":
f_param = "paused"
elif watchdog_action == "shutdown":
f_param = "shutdown"
else:
f_param = "running"
if not utils_misc.wait_for(
lambda: vm.monitor.verify_status(f_param),
response_timeout, 0, 1):
logging.debug("Monitor status is:%s" % vm.monitor.get_status())
raise error.TestFail(
"Oops, seems action '%s' take no effect, ",
"Wrong monitor status!" % watchdog_action)
# when the action is reset, need can relogin the guest.
if watchdog_action == "reset":
logging.info("Try to login the guest after reboot")
vm.wait_for_login(timeout=relogin_timeout)
logging.info("Watchdog action '%s' come into effect." %
watchdog_action)
# test case
def check_watchdog_support():
"""
check the host qemu-kvm support watchdog device
Test Step:
1. Send qemu command 'qemu -watchdog ?'
2. Check the watchdog type that the host support.
"""
qemu_binary = utils_misc.get_qemu_binary(params)
watchdog_type_check = params.get(
"watchdog_type_check", " -watchdog '?'")
qemu_cmd = qemu_binary + watchdog_type_check
# check the host support watchdog types.
error.context("Checking whether or not the host support WDT '%s'"
% watchdog_device_type, logging.info)
watchdog_device = utils.system_output("%s 2>&1" % qemu_cmd,
retain_output=True)
if watchdog_device:
if re.findall(watchdog_device_type, watchdog_device, re.I):
logging.info("The host support '%s' type watchdog device" %
watchdog_device_type)
else:
raise error.TestFail("Host not support watchdog device type %s "
% watchdog_device_type)
logging.info("The host support watchdog device type is: '%s'"
% watchdog_device)
else:
raise error.TestFail("No watchdog device support in the host!")
def guest_boot_with_watchdog():
"""
check the guest can boot with watchdog device
Test Step:
1. Boot guest with watchdog device
2. Check watchdog device have been initialized successfully in guest
"""
_watchdog_device_check(session, watchdog_device_type)
def watchdog_action_test():
"""
Watchdog action test
Test Step:
1. Boot guest with watchdog device
2. Check watchdog device have been initialized successfully in guest
3. Trigger watchdog action through open /dev/watchdog
4. Ensure watchdog_action takes effect.
"""
_watchdog_device_check(session, watchdog_device_type)
_trigger_watchdog(session, trigger_cmd)
_action_check(session, watchdog_action)
def magic_close_support():
"""
Magic close the watchdog action.
Test Step:
1. Boot guest with watchdog device
2. Check watchdog device have been initialized successfully in guest
3. Inside guest, trigger watchdog action"
4. Inside guest, before heartbeat expires, close this action"
5. Wait heartbeat timeout check the watchdog action deactive.
"""
response_timeout = int(params.get("response_timeout", '240'))
magic_cmd = params.get("magic_close_cmd", "echo V > /dev/watchdog")
_watchdog_device_check(session, watchdog_device_type)
_trigger_watchdog(session, trigger_cmd)
|
iharsh234/MIT6.00x
|
pset6-P2-FindBestShift.py
|
Python
|
mit
| 475 | 0 |
def findBestShift(wordList, text):
text = "".join((char if char.isalpha() else " ") for char in text).split()
max_valid = 0
best_shift = 0
for shift in range(26):
num_valid = 0
for word in text:
plaintext = applyShift(word, shift)
if isWord(wordList, plaintext):
num_valid += 1
if num_valid > max_valid:
max_valid = num_valid
best_shift = shift
return best_shift
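# findBestShift relies on two problem-set helpers that are not defined in this file;
# a rough sketch of their assumed behaviour (simplified, lowercase letters only):
def applyShift(word, shift):
    return "".join(chr((ord(c) - ord('a') + shift) % 26 + ord('a')) for c in word.lower())

def isWord(wordList, word):
    return word.lower() in wordList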
|
StefanBruens/libsigrokdecode
|
decoders/rc_encode/pd.py
|
Python
|
gpl-3.0
| 6,428 | 0.007156 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2018 Steve R <steversig@virginmedia.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
bitvals = ('0', '1', 'f', 'U')
def decode_bit(edges):
# Datasheet says long pulse is 3 times short pulse.
lmin = 2 # long min multiplier
lmax = 5 # long max multiplier
eqmin = 0.5 # equal min multiplier
eqmax = 1.5 # equal max multiplier
if ( # 0 -___-___
(edges[1] >= edges[0] * lmin and edges[1] <= edges[0] * lmax) and
(edges[2] >= edges[0] * eqmin and edges[2] <= edges[0] * eqmax) and
(edges[3] >= edges[0] * lmin and edges[3] <= edges[0] * lmax)):
return '0'
elif ( # 1 ---_---_
(edges[0] >= edges[1] * lmin and edges[0] <= edges[1] * lmax) and
(edges[0] >= edges[2] * eqmin and edges[0] <= edges[2] * eqmax) and
(edges[0] >= edges[3] * lmin and edges[0] <= edges[3] * lmax)):
return '1'
elif ( # float ---_-___
(edges[1] >= edges[0] * lmin and edges[1] <= edges[0] * lmax) and
(edges[2] >= edges[0] * lmin and edges[2] <= edges[0]* lmax) and
(edges[3] >= edges[0] * eqmin and edges[3] <= edges[0] * eqmax)):
return 'f'
else:
return 'U'
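# Example (illustrative): `edges` holds four successive pulse widths in samples.
# A short-long / short-long pattern such as [10, 31, 9, 30] falls into the '0'
# branch above, while its mirror [31, 10, 30, 9] decodes as '1'.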
def pinlabels(bit_count):
if bit_count <= 6:
return 'A%i' % (bit_count - 1)
else:
return 'A%i/D%i' % (bit_count - 1, 12 - bit_count)
def decode_model(model, bits):
if model == 'maplin_l95ar':
address = 'Addr' # Address pins A0 to A5
for i in range(0, 6):
address += ' %i:' % (i + 1) + ('on' if bits[i][0] == '0' else 'off')
button = 'Button'
# Button pins A6/D5 to A11/D0
if bits[6][0] == '0' and bits[11][0] == '0':
button += ' A ON/OFF'
elif bits[7][0] == '0' and bits[11][0] == '0':
button += ' B ON/OFF'
elif bits[9][0] == '0' and bits[11][0] == '0':
button += ' C ON/OFF'
elif bits[8][0] == '0' and bits[11][0] == '0':
button += ' D ON/OFF'
else:
button += ' Unknown'
return ['%s' % address, bits[0][1], bits[5][2], \
'%s' % button, bits[6][1], bits[11][2]]
class Decoder(srd.Decoder):
api_version = 3
id = 'rc_encode'
name = 'RC encode'
longname = 'Remote control encoder'
desc = 'PT2262/HX2262/SC5262 remote control encoder protocol.'
license = 'gplv2+'
inputs = ['logic']
outputs = []
tags = ['IC', 'IR']
channels = (
{'id': 'data', 'name': 'Data', 'desc': 'Data line'},
)
annotations = (
('bit-0', 'Bit 0'),
('bit-1', 'Bit 1'),
('bit-f', 'Bit f'),
('bit-U', 'Bit U'),
('bit-sync', 'Bit sync'),
('pin', 'Pin'),
('code-word-addr', 'Code word address'),
('code-word-data', 'Code word data'),
)
annotation_rows = (
('bits', 'Bits', (0, 1, 2, 3, 4)),
('pins', 'Pins', (5,)),
('code-words', 'Code words', (6, 7)),
)
options = (
{'id': 'remote', 'desc': 'Remote', 'default': 'none',
'values': ('none', 'maplin_l95ar')},
)
def __init__(self):
self.reset()
def reset(self):
self.samplenumber_last = None
self.pulses = []
self.bits = []
self.labels = []
self.bit_count = 0
self.ss = None
self.es = None
self.state = 'IDLE'
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.model = self.options['remote']
def putx(self, data):
self.put(self.ss, self.es, self.out_ann, data)
def decode(self):
while True:
pin = self.wait({0: 'e'})
self.state = 'DECODING'
if not self.samplenumber_last: # Set counters to start of signal.
self.samplenumber_last = self.samplenum
self.ss = self.samplenum
continue
if self.bit_count < 12: # Decode A0 to A11.
self.bit_count += 1
for i in range(0, 4): # Get four pulses for each bit.
if i > 0:
pin = self.wait({0: 'e'}) # Get next 3 edges.
samples = self.samplenum - self.samplenumber_last
self.pulses.append(samples) # Save the pulse width.
self.samplenumber_last = self.samplenum
self.es = self.samplenum
self.bits.append([decode_bit(self.pulses), self.ss,
self.es]) # Save states and times.
idx = bitvals.index(decode_bit(self.pulses))
self.putx([idx, [decode_bit(self.pulses)]]) # Write decoded bit.
self.putx([5, [pinlabels(self.bit_count)]]) # Write pin labels.
self.pulses = []
self.ss = self.samplenum
else:
if self.model != 'none':
self.labels = decode_model(self.model, self.bits)
self.put(self.labels[1], self.labels[2], self.out_ann,
[6, [self.labels[0]]]) # Write model decode.
self.put(self.labels[4], self.labels[5], self.out_ann,
[7, [self.labels[3]]]) # Write model decode.
samples = self.samplenum - self.samplenumber_last
pin = self.wait({'skip': 8 * samples}) # Wait for end of sync bit.
self.es = self.samplenum
self.putx([4, ['Sync']]) # Write sync label.
self.reset() # Reset and wait for next set of pulses.
self.state = 'DECODE_TIMEOUT'
if not self.state == 'DECODE_TIMEOUT':
self.samplenumber_last = self.samplenum
|
franciscod/python-telegram-bot
|
telegram/parsemode.py
|
Python
|
gpl-2.0
| 1,054 | 0 |
#!/usr/bin/env python
# pylint: disable=R0903
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents a Telegram
Message Parse Modes."""
class ParseMode(object):
"""This object represents a Telegram Message Parse Modes."""
MARKDOWN = 'Markdown'
HTML = 'HTML'
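# Typical use (sketch): pass one of these constants as the parse_mode argument when
# sending a message, e.g. bot.sendMessage(chat_id=chat_id, text='*bold*',
# parse_mode=ParseMode.MARKDOWN).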
|
sofianehaddad/ot-svn
|
python/test/t_BarPlot_std.py
|
Python
|
mit
| 1,975 | 0.000506 |
#! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
# Instantiate one distribution object
dim = 1
meanPoint = NumericalPoint(dim, 1.0)
meanPoint[0] = 0.5
sigma = NumericalPoint(dim, 1.0)
sigma[0] = 2.0
R = CorrelationMatrix(dim)
distribution1 = Normal(meanPoint, sigma, R)
# Instantiate another distribution object
meanPoint[0] = -1.5
sigma[0] = 4.0
distribution2 = Normal(meanPoint, sigma, R)
# Test for sampling
size = 2000
nBars = 20
sample1 = distribution1.getSample(size)
sample2 = distribution2.getSample(size)
# Construct histograms
epsilon = 0.1
min1 = sample1.getMin()[0]
max1 = sample1.getMax()[0] + epsilon
min2 = sample2.getMin()[0]
max2 = sample2.getMax()[0] + epsilon
tmp = NumericalPoint(2)
tmp[0] = (max1 - min1) / nBars
data1 = NumericalSample(nBars, tmp)
tmp[0] = (max2 - min2) / nBars
data2 = NumericalSample(nBars, tmp)
for i in range(size):
index = long(floor((sample1[i, 0] - min1) / (max1 - min1) * nBars))
data1[index, 1] += 1
index = long(floor((sample2[i, 0] - min2) / (max2 - min2) * nBars))
data2[index, 1] += 1
# Create an empty graph
myGraph = Graph("Some barplots", "y", "frequency", True, "topleft")
# Create the first barplot
myBarPlot1 = BarPlot(data1, min1, "blue", "shaded", "dashed", "histogram1")
# Then, draw it
myGraph.add(myBarPlot1)
myGraph.draw("Graph_BarPlot_a_OT", 640, 480)
# Check that the correct files have been generated by computing their
# checksum
# Create the second barplot
myBarPlot2 = BarPlot(data2, min2, "red", "solid", "solid", "histogram2")
# Add it to the graph and draw everything
myGraph.add(myBarPlot2)
myGraph.draw("Graph_BarPlot_b_OT", 640, 480)
except:
import sys
print "t_BarPlot_std.py", sys.exc_type, sys.exc_value
|
igor-rodrigues01/casv
|
casv/core/migrations/0010_auto_20150804_1030.py
|
Python
|
agpl-3.0
| 443 | 0.002257 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20150729_1745'),
]
operations = [
migrations.AlterField(
model_name='areasoltura',
name='cpf',
field=models.CharField(max_length=11, verbose_name='CPF', null=True, blank=True),
)
]
|
antoinecarme/pyaf
|
tests/artificial/transf_Anscombe/trend_PolyTrend/cycle_7/ar_/test_artificial_32_Anscombe_PolyTrend_7__0.py
|
Python
|
bsd-3-clause
| 261 | 0.088123 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 0);
|
benfinke/ns_python
|
nssrc/com/citrix/netscaler/nitro/resource/config/cr/crvserver_crpolicy_binding.py
|
Python
|
apache-2.0
| 7,634 | 0.037595 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class crvserver_crpolicy_binding(base_resource) :
""" Binding class showing the crpolicy that can be bound to crvserver.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._hits = 0
self._name = ""
self._targetvserver = ""
self.___count = 0
@property
def priority(self) :
ur"""The priority for the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""The priority for the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Policies bound to this vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""
|
Policies bound to this vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the cache redirection virtual server to which to bind the cache redirection policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the cache redirection virtual server to which to bind the cache redirection policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def targetvserver(self) :
ur"""Name of the virtual server to which content is forwarded. Applicable only if the policy is a map policy and the cache redirection virtual server is of type REVERSE.
"""
try :
return self._targetvserver
except Exception as e:
raise e
@targetvserver.setter
def targetvserver(self, targetvserver) :
ur"""Name of the virtual server to which content is forwarded. Applicable only if the policy is a map policy and the cache redirection virtual server is of type REVERSE.
"""
try :
self._targetvserver = targetvserver
except Exception as e:
raise e
@property
def hits(self) :
ur"""Number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(crvserver_crpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.crvserver_crpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = crvserver_crpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetvserver = resource.targetvserver
updateresource.priority = resource.priority
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [crvserver_crpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetvserver = resource[i].targetvserver
updateresources[i].priority = resource[i].priority
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = crvserver_crpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [crvserver_crpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch crvserver_crpolicy_binding resources.
"""
try :
obj = crvserver_crpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of crvserver_crpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = crvserver_crpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count crvserver_crpolicy_binding resources configued on NetScaler.
"""
try :
obj = crvserver_crpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of crvserver_crpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = crvserver_crpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class crvserver_crpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.crvserver_crpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.crvserver_crpolicy_binding = [crvserver_crpolicy_binding() for _ in range(length)]
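# A minimal usage sketch (assumes an authenticated nitro_service instance named
# `client` and an existing CR vserver called 'cr_vs1'; both names are illustrative).
def print_bound_policies(client, vserver_name="cr_vs1"):
    bindings = crvserver_crpolicy_binding.get(client, vserver_name)
    for binding in bindings or []:
        print("%s -> priority %s, hits %s" % (binding.policyname, binding.priority, binding.hits))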
|
OSAlt/secret-santa
|
santa_lib/__init__.py
|
Python
|
mit
| 559 | 0.003578 |
__author__ = 'sfaci'
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
Vagab0nd/SiCKRAGE
|
tests/notifier_tests.py
|
Python
|
gpl-3.0
| 9,061 | 0.002097 |
"""
Test notifiers
"""
import unittest
from sickchill.oldbeard import db
from sickchill.oldbeard.notifiers.emailnotify import Notifier as EmailNotifier
from sickchill.oldbeard.notifiers.prowl import Notifier as ProwlNotifier
from sickchill.tv import TVEpisode, TVShow
from sickchill.views.home import Home
from tests import test_lib as test
# noinspection PyProtectedMember
class NotifierTests(test.SickChillTestDBCase):
"""
Test notifiers
"""
@classmethod
def setUpClass(cls):
num_legacy_shows = 3
num_shows = 3
num_episodes_per_show = 5
cls.mydb = db.DBConnection()
cls.legacy_shows = []
cls.shows = []
# Per-show-notifications were originally added for email notifications only. To add
# this feature to other notifiers, it was necessary to alter the way text is stored in
# one of the DB columns. Therefore, to test properly, we must create some shows that
# store emails in the old method (legacy method) and then other shows that will use
# the new method.
for show_counter in range(100, 100 + num_legacy_shows):
show = TVShow(1, show_counter)
show.name = "Show " + str(show_counter)
show.episodes = []
for episode_counter in range(0, num_episodes_per_show):
episode = TVEpisode(show, test.SEASON, episode_counter)
episode.name = "Episode " + str(episode_counter + 1)
episode.quality = "SDTV"
show.episodes.append(episode)
show.saveToDB()
cls.legacy_shows.append(show)
for show_counter in range(200, 200 + num_shows):
show = TVShow(1, show_counter)
show.name = "Show " + str(show_counter)
show.episodes = []
for episode_counter in range(0, num_episodes_per_show):
episode = TVEpisode(show, test.SEASON, episode_counter)
episode.name = "Episode " + str(episode_counter + 1)
episode.quality = "SDTV"
show.episodes.append(episode)
show.saveToDB()
cls.shows.append(show)
def setUp(self):
"""
Set up tests
"""
self._debug_spew("\n\r")
@unittest.skip('Not yet implemented')
def test_boxcar(self):
"""
Test boxcar notifications
"""
pass
@unittest.skip('Cannot call directly without a request')
def test_email(self):
"""
Test email notifications
"""
email_notifier = EmailNotifier()
# Per-show-email notifications were added early on and utilized a different format than the other notifiers.
# Therefore, to test properly (and ensure backwards compatibility), this routine will test shows that use
# both the old and the new storage methodology
legacy_test_emails = "email-1@address.com,email2@address.org,email_3@address.tv"
test_emails = "email-4@address.com,email5@address.org,email_6@address.tv"
for show in self.legacy_shows:
showid = self._get_showid_by_showname(show.show_name)
self.mydb.action("UPDATE tv_shows SET notify_list = ? WHERE show_id = ?", [legacy_test_emails, showid])
for show in self.shows:
showid = self._get_showid_by_showname(show.show_name)
Home.saveShowNotifyList(show=showid, emails=test_emails)
# Now, iterate through all shows using the email list generation routines that are used in the notifier proper
shows = self.legacy_shows + self.shows
for show in shows:
for episode in show.episodes:
ep_name = episode._format_pattern('%SN - %Sx%0E - %EN - ') + episode.quality
show_name = email_notifier._parseEp(ep_name)
recipients = email_notifier._generate_recipients(show_name)
self._debug_spew("- Email Notifications for " + show.name + " (episode: " + episode.name + ") will be sent to:")
for email in recipients:
self._debug_spew("-- " + email.strip())
self._debug_spew("\n\r")
return True
@unittest.skip('Not yet implemented')
def test_emby(self):
"""
Test emby notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_freemobile(self):
"""
Test freemobile notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_growl(self):
"""
Test growl notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_kodi(self):
"""
Test kodi notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_libnotify(self):
"""
Test libnotify notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_nma(self):
"""
Test nma notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_nmj(self):
"""
Test nmj notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_nmjv2(self):
"""
Test nmjv2 notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_plex(self):
"""
Test plex notifications
"""
pass
@unittest.skip('Cannot call directly without a request')
def test_prowl(self):
"""
Test prowl notifications
"""
prowl_notifier = ProwlNotifier()
# Prowl per-show-notifications only utilize the new methodology for storage; therefore, the list of legacy_shows
# will not be altered (to preserve backwards compatibility testing)
test_prowl_apis = "11111111111111111111,22222222222222222222"
for show in self.shows:
showid = self._get_showid_by_showname(show.show_name)
Home.saveShowNotifyList(show=showid, prowlAPIs=test_prowl_apis)
# Now, iterate through all shows using the Prowl API generation routines that are used in the notifier proper
for show in self.shows:
for episode in show.episodes:
ep_name = episode._format_pattern('%SN - %Sx%0E - %EN - ') + episode.quality
show_name = prowl_notifier._parse_episode(ep_name)
recipients = prowl_notifier._generate_recipients(show_name)
self._debug_spew("- Prowl Notifications for " + show.name + " (episode: " + episode.name + ") will be sent to:")
for api in recipients:
self._debug_spew("-- " + api.strip())
self._debug_spew("\n\r")
return True
@unittest.skip('Not yet implemented')
def test_pushalot(self):
"""
Test pushalot notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_pushbullet(self):
"""
Test pushbullet notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_pushover(self):
"""
Test pushover notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_pytivo(self):
"""
Test pytivo notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_synoindex(self):
"""
Test synoindex notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_synologynotifier(self):
"""
Test synologynotifier notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_trakt(self):
"""
Test trakt notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_tweet(self):
"""
Test tweet notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_twilio(self):
"""
Test twilio notifications
"""
pass
@staticmethod
def _debug_spew(text):
"""
|
OmnesRes/onco_lnc
|
lncrna/cox/COAD/patient_info.py
|
Python
|
mit
| 6,839 | 0.021787 |
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_coad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
elif re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
else:
pass
else:
if re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','clinical','nationwidechildrens.org_clinical_patient_coad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the COAD patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
f=open(os.path.join(BASE_DIR,'tcga_data','COAD','lncrna','COAD.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
##create a dictionary mapping patient to all of their lncrna expression data
patient_dict={}
for index, i in enumerate(patients):
patient_dict[i[:12]]=''
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
if i[0] in patient_dict:
clinical_and_files.append(i)
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
f.close()
|
Ultimaker/Uranium
|
UM/PluginObject.py
|
Python
|
lgpl-3.0
| 1,812 | 0.002208 |
# Copyright (c) 2018 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from typing import Optional, Dict, Any
class PluginObject:
"""Base class for objects that can be provided by a plugin.
This class should be inherited by any class that can be provided
by a plugin. Its only function is to serve as a mapping between
the plugin and the object.
"""
def __init__(self, *args, **kwags) -> None:
self._plugin_id = None # type: Optional[str]
self._version = None # type: Optional[str]
self._metadata = {} # type: Dict[str, Any]
self._name = None # type: Optional[str]
# This returns a globally unique id for this plugin object.
# It prepends it's set name (which should be locally (eg; within the plugin) unique) with the plugin_id, making it
# globally unique.
def getId(self) -> str:
result = self.getPluginId()
if self._name:
result += "_%s" % self._name
return result
def setPluginId(self, plugin_id: str) -> None:
self._plugin_id = plugin_id
# The metadata of the plugin is set at the moment it is loaded.
def setMetaData(self, metadata: Dict[str, Any]) -> None:
self._metadata = metadata
def getMetaData(self) -> Dict[str, Any]:
return self._metadata
def getPluginId(self) -> str:
if not self._plugin_id:
raise ValueError("The plugin ID needs to be set before the plugin can be used")
return self._plugin_id
def setVersion(self, version: str) -> None:
self._version = version
def getVersion(self) -> str:
if not self._version:
raise ValueError("The plugin version needs to be set before the plugin can be used")
return self._version
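# A minimal sketch of how a plugin-provided object is expected to be initialised;
# the ExampleTool / ExamplePlugin names and the version string are illustrative.
if __name__ == "__main__":
    class ExampleTool(PluginObject):
        pass

    tool = ExampleTool()
    tool.setPluginId("ExamplePlugin")
    tool.setVersion("1.0.0")
    assert tool.getId() == "ExamplePlugin"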
|
maarons/pressui
|
cherrypy/server.py
|
Python
|
mit
| 1,340 | 0.009701 |
from cherrypy.process.plugins import PIDFile
import argparse
import cherrypy
from PressUI.cherrypy.PressConfig import PressConfig
import PressUI.cherrypy.PressProduction as PressProduction
parser = argparse.ArgumentParser()
parser.add_argument(
'--production',
help = 'Run app in production mode',
action = 'store_true',
)
parser.add_argument(
'--port',
help = 'Run app on this port (defaults to %(default)d)',
default = 8080,
)
parser.add_argument(
'config',
help = 'Path to config file for this app',
)
def quickstart(app, app_name, fun_callback = None):
args = parser.parse_args()
PressConfig.init(args.config)
if fun_callback is not None:
fun_callback()
cherrypy.config.update({
'server.socket_port': args.port,
'server.socket_host': '127.0.0.1',
'tools.gzip.on': True,
})
if args.production:
cherrypy.config.update({
'environment': 'production',
'tools.proxy.on': True,
'log.access_file': '/tmp/{}.access.log'.format(app_name),
'log.error_file': '/tmp/{}.error.log'.format(app_name),
})
PIDFile(
cherrypy.engine,
'/tmp/{}.pid'.format(app_name),
).subscribe()
PressProduction.set_production(True)
cherrypy.quickstart(app())
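# A minimal sketch of an app entry point using quickstart; Root and 'exampleapp'
# are illustrative names, and the config path is taken from the command line.
class Root(object):
    @cherrypy.expose
    def index(self):
        return 'hello'

if __name__ == '__main__':
    quickstart(Root, 'exampleapp')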
|
aychedee/kubrick
|
kubrick/secrets.py
|
Python
|
isc
| 333 | 0 |
# copyright: (c) 2012 by Hansel Dunlop.
# license: ISC, see LICENSE for more details.
#
# secret keys for virtual server providers
# Users of this package will need to add their
# keys to this file for it to work
#
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
RACKSPACE_USERNAME = ''
RACKSPACE_API_KEY = ''
KEY_FILENAME = ''
|
digitalocean/netbox
|
netbox/extras/templatetags/plugins.py
|
Python
|
apache-2.0
| 2,412 | 0.001658 |
from django import template as template_
from django.conf import settings
from django.utils.safestring import mark_safe
from extras.plugins import PluginTemplateExtension
from extras.registry import registry
register = template_.Library()
def _get_registered_content(obj, method, template_context):
"""
Given an object and a PluginTemplateExtension method name and the template context, return all the
registered content for the object's model.
"""
html = ''
context = {
'object': obj,
'request': template_context['request'],
'settings': template_context['settings'],
'csrf_token': template_context['csrf_token'],
'perms': template_context['perms'],
}
model_name = obj._meta.label_lower
template_extensions = registry['plugin_template_extensions'].get(model_name, [])
for template_extension in template_extensions:
# If the class has not overridden the specified method, we can skip it (because we know it
# will raise NotImplementedError).
if getattr(template_extension, method) == getattr(PluginTemplateExtension, method):
continue
# Update context with plugin-specific configuration parameters
plugin_name = template_extension.__module__.split('.')[0]
context['config'] = settings.PLUGINS_CONFIG.get(plugin_name, {})
# Call the method to render content
instance = template_extension(context)
content = getattr(instance, method)()
html += content
return mark_safe(html)
@register.simple_tag(takes_context=True)
def plugin_buttons(context, obj):
"""
Render all buttons registered by plugins
"""
return _get_registered_content(obj, 'buttons', context)
@register.simple_tag(takes_context=True)
def plugin_left_page(context, obj):
"""
Render all left page content registered by plugins
"""
return _get_registered_content(obj, 'left_page', context)
@register.simple_tag(takes_context=True)
def plugin_right_page(context, obj):
"""
Render all right page content registered by plugins
"""
return _get_registered_content(obj, 'right_page', context)
@register.simple_tag(takes_context=True)
def plugin_full_width_page(context, obj):
"""
Render all full width page content registered by plugins
"""
return _get_registered_content(obj, 'full_width_page', context)
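# A hedged usage sketch (not part of this file): in a NetBox page template the
# tags registered above would typically be loaded and called with the current
# object. The library name "plugins" is an assumption taken from this file's
# location under templatetags/.
#
#     {% load plugins %}
#     {% plugin_buttons object %}
#     {% plugin_left_page object %}
#     {% plugin_right_page object %}
#     {% plugin_full_width_page object %}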
|
Hemisphere-Project/Telemir-DatabitMe
|
Telemir-EEG/TeleMir_171013/Fake_TeleMir_CB.py
|
Python
|
gpl-2.0
| 6,881 | 0.034443 |
# -*- coding: utf-8 -*-
"""
TeleMir development version with fake acquisition device
run in a terminal:
python examples/test_osc_receive.py
"""
from pyacq import StreamHandler, FakeMultiSignals
from pyacq.gui import Oscilloscope, Oscilloscope_f, TimeFreq, TimeFreq2
from TeleMir.gui import Topoplot, KurtosisGraphics, freqBandsGraphics, spaceShipLauncher, Topoplot_imp
from TeleMir.gui import ScanningOscilloscope,SpectrumGraphics
from TeleMir.analyses import TransmitFeatures
#from TeleMir.example import test_osc_receive
import msgpack
#~ import gevent
#~ import zmq.green as zmq
from PyQt4 import QtCore,QtGui
#from multiprocessing import Process
import zmq
import msgpack
import time
import numpy as np
import os
def teleMir_CB():
streamhandler = StreamHandler()
# Configure and start
#~ dev = FakeMultiSignals(streamhandler = streamhandler)
#~ dev.configure( #name = 'Test dev',
#~ nb_channel = 14,
#~ sampling_rate =128.,
#~ buffer_length = 10.,
#~ packet_size = 1,
#~ )
#~ dev.initialize()
#~ dev.start()
filename = '/home/ran/Projets/pyacq_emotiv_recording/alex/Emotiv Systems Pty Ltd #SN201105160008860.raw'
#filename = '/home/ran/Projets/pyacq_emotiv_recording/caro/Emotiv Systems Pty Ltd #SN201105160008860.raw'
#filename = '/home/mini/pyacq_emotiv_recording/simple_blink/Emotiv Systems Pty Ltd #SN201105160008860.raw'
filenameImp = '/home/ran/Projets/EEG_recordings/anneLise/Emotiv Systems Pty Ltd #SN200709276578911.raw'
filenameXY = '/home/ran/Projets/EEG_recordings/anneLise/Emotiv Systems Pty Ltd #SN200709276578912.raw'
precomputed = np.fromfile(filename , dtype = np.float32).reshape(-1, 14).transpose()
precomputedImp = np.fromfile(filenameImp , dtype = np.float32).reshape(-1, 14).transpose()
precomputedXY = np.fromfile(filenameXY , dtype = np.float32).reshape(-1, 2).transpose()
# Configure and start signal
dev = FakeMultiSignals(streamhandler = streamhandler)
dev.configure( #name = 'Test dev',
nb_channel = 14,
sampling_rate =128.,
buffer_length = 30.,
packet_size = 1,
precomputed = precomputed,
)
dev.initialize()
dev.start()
#~ # Configure and start imp
#~ devImp = FakeMultiSignals(streamhandler = streamhandler)
#~ devImp.configure( #name = 'Test dev',
#~ nb_channel = 14,
#~ sampling_rate =128.,
#~ buffer_length = 30.,
#~ packet_size = 1,
#~ precomputed = precomputedImp,
#~ )
#~ devImp.initialize()
#~ devImp.start()
# Configure and start gyroXY
devXY = FakeMultiSignals(streamhandler = streamhandler)
devXY.configure( #name = 'Test dev',
nb_channel = 2,
sampling_rate =128.,
buffer_length = 30.,
packet_size = 1,
precomputed = precomputedXY,
)
devXY.initialize()
devXY.start()
## Configure and start output stream (for extracted feature)
fout = TransmitFeatures(streamhandler = streamhandler)
fout.configure( #name = 'Test fout',
nb_channel = 14, # np.array([1:5])
nb_feature = 6,
nb_pts = 128,
sampling_rate =10.,
buffer_length = 10.,
packet_size = 1,
)
fout.initialize(stream_in = dev.streams[0], stream_xy = devXY.streams[0])
fout.start()
#Osc server
#p = Process(target=., args=('bob',))
#color = 'summer'
    # Blue
    #color = 'jet'
    # Red
    color = 'hot'
    # green/yellow
#color = 'summer'
    app = QtGui.QApplication([])
# Impedances
    w_imp=Topoplot_imp(stream = dev.streams[0], type_Topo= 'imp')
w_imp.show()
# freqbands
w_sp_bd=freqBandsGraphics(stream = dev.streams[0], interval_length = 3., channels = [12])
w_sp_bd.run()
# signal
w_oscilo=Oscilloscope(stream = dev.streams[0])
w_oscilo.show()
w_oscilo.set_params(xsize = 10, mode = 'scroll')
w_oscilo.auto_gain_and_offset(mode = 2)
w_oscilo.gain_zoom(100)
#w_oscilo.set_params(colors = 'jet')
select_chan = np.ones(14, dtype = bool)
w_oscilo.automatic_color(cmap_name = 'jet', selected = select_chan)
    # parameters
w_feat1=Oscilloscope_f(stream = fout.streams[0])
w_feat1.show()
w_feat1.set_params(colormap = color)
#w_feat1.auto_gain_and_offset(mode = 1)
#w_feat1.set_params(xsize = 10, mode = 'scroll')
#~ select_feat = np.ones(6, dtype = bool)
#~ # print select
#~ #w_oscilo.set_params(colormap = 'automn', selected = select)
#~ w_feat1.automatic_color(cmap_name = 'jet', selected = select_feat)
w_feat1.showFullScreen()
w_feat1.set_params(xsize = 10, mode = 'scroll')
#~ select_feat = np.ones(4, dtype = bool)
#~ w_feat1.automatic_color(cmap_name = 'jet', selected = select_feat)
    # topography
w_topo=Topoplot(stream = dev.streams[0], type_Topo= 'topo')
w_topo.show()
    # time-frequency 1
w_Tf=TimeFreq(stream = dev.streams[0])
w_Tf.show()
w_Tf.set_params(xsize = 10)
w_Tf.change_param_tfr(f_stop = 45, f0 = 1)
w_Tf.set_params(colormap = color)
#w_Tf.clim_changed(20)
#w_Tf.change_param_channel(clim = 20)
    # time-frequency 2
w_Tf2=TimeFreq2(stream = dev.streams[0])
w_Tf2.show()
w_Tf2.set_params(xsize = 10)
w_Tf2.change_param_tfr(f_stop = 45, f0 = 1)
w_Tf2.set_params(colormap = color)
# kurtosis
#w_ku=KurtosisGraphics(stream = dev.streams[0], interval_length = 1.)
#w_ku.run()
    ## Much less fluid
    # Spectrum
#~ w_sp=SpectrumGraphics(dev.streams[0],3.,channels=[11,12])
#~ w_sp.run()
w1 = spaceShipLauncher(dev.streams[0])
w1.run()
w1.showFullScreen()
app.exec_()
    # Stop and release the device
fout.stop()
fout.close()
print 'ici'
dev.stop()
dev.close()
print 'ici'
devXY.stop()
devXY.close()
print 'ici'
    # devImp is only created in the commented-out block above, so skip its teardown
    #~ devImp.stop()
    #~ devImp.close()
print 'ici'
if __name__ == '__main__':
teleMir_CB()
|
thomaslaurenson/Vestigium
|
dfxml/Objects.py
|
Python
|
gpl-2.0
| 127,176 | 0.005198 |
"""
This file re-creates the major DFXML classes with an emphasis on type safety, serializability, and de-serializability.
With this module, reading disk images or DFXML files is done with the parse or iterparse functions. Writing DFXML files can be done with the DFXMLObject.print_dfxml function.
"""
__version__ = "0.4.5"
#Remaining roadmap to 1.0.0:
# * Documentation.
# * User testing.
# * Compatibility with the DFXML schema, version >1.1.1.
import logging
import re
import copy
import xml.etree.ElementTree as ET
import subprocess
import dfxml
import os
import sys
import struct
_logger = logging.getLogger(os.path.basename(__file__))
#Contains: (namespace, local name) qualified XML element name pairs
_warned_elements = set([])
_warned_byterun_attribs = set([])
#Contains: Unexpected 'facet' values on byte_runs elements.
_warned_byterun_facets = set([])
#Issue some log statements only once per program invocation.
_nagged_alloc = False
_warned_byterun_badtypecomp = False
XMLNS_REGXML = "http://www.forensicswiki.org/wiki/RegXML"
def _ET_tostring(e):
"""Between Python 2 and 3, there are some differences in the ElementTree library's tostring() behavior. One, the method balks at the "unicode" encoding in 2. Two, in 2, the XML prototype's output with every invocation. This method serves as a wrapper to deal with those issues."""
if sys.version_info[0] < 3:
tmp = ET.tostring(e, encoding="UTF-8")
if tmp[0:2] == "<?":
#Trim away first line; it's an XML prototype. This only appears in Python 2's ElementTree output.
return tmp[ tmp.find("?>\n")+3 : ]
else:
return tmp
else:
return ET.tostring(e, encoding="unicode")
def _boolcast(val):
"""Takes Boolean values, and 0 or 1 in string or integer form, and casts them all to Boolean. Preserves nulls. Balks at everything else."""
if val is None:
return None
if val in [True, False]:
return val
_val = val
if val in ["0", "1"]:
_val = int(val)
if _val in [0, 1]:
return _val == 1
_logger.debug("val = " + repr(val))
raise ValueError("Received a not-straightforwardly-Boolean value. Expected some form of 0, 1, True, or False.")
def _bytecast(val):
"""Casts a value as a byte string. If a character string, assumes a UTF-8 encoding."""
if val is None:
return None
if isinstance(val, bytes):
return val
return _strcast(val).encode("utf-8")
def _intcast(val):
"""Casts input integer or string to integer. Preserves nulls. Balks at everything else."""
if val is None:
return None
if isinstance(val, int):
return val
if isinstance(val, str):
if val[0] == "-":
if val[1:].isdigit():
return int(val)
else:
if val.isdigit():
return int(val)
_logger.debug("val = " + repr(val))
raise ValueError("Received a non-int-castable value. Expected an integer or an integer as a string.")
def _read_differential_annotations(annodict, element, annoset):
"""
Uses the shorthand-to-attribute mappings of annodict to translate attributes of element into annoset.
"""
#_logger.debug("annoset, before: %r." % annoset)
#Start with inverting the dictionary
_d = { annodict[k].replace("delta:",""):k for k in annodict }
#_logger.debug("Inverted dictionary: _d = %r" % _d)
for attr in element.attrib:
#_logger.debug("Looking for differential annotations: %r" % element.attrib)
(ns, an) = _qsplit(attr)
if an in _d and ns == dfxml.XMLNS_DELTA:
#_logger.debug("Found; adding %r." % _d[an])
annoset.add(_d[an])
#_logger.debug("annoset, after: %r." % annoset)
def _qsplit(tagname):
"""Requires string input. Returns namespace and local tag name as a pair. I could've sworn this was a basic implementation gimme, but ET.QName ain't it."""
_typecheck(tagname, str)
if tagname[0] == "{":
i = tagname.rfind("}")
return ( tagname[1:i], tagname[i+1:] )
else:
return (None, tagname)
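# Illustrative examples (not in the original source): for a tag qualified with
# the RegXML namespace defined above,
#     _qsplit("{http://www.forensicswiki.org/wiki/RegXML}key")
# returns ("http://www.forensicswiki.org/wiki/RegXML", "key"), while an
# unqualified tag, _qsplit("key"), returns (None, "key").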
def _strcast(val):
if val is None:
return None
return str(val)
def _typecheck(obj, classinfo):
if not isinstance(obj, classinfo):
_logger.info("obj = " + repr(obj))
if isinstance(classinfo, tuple):
raise TypeError("Expecting object to be one of the types %r." % (classinfo,))
else:
raise TypeError("Expecting object to be of type %r." % classinfo)
class DFXMLObject(object):
def __init__(self, *args, **kwargs):
self.command_line = kwargs.get("command_line")
self.version = kwargs.get("version")
self.sources = kwargs.get("sources", [])
self.dc = kwargs.get("dc", dict())
self.externals = kwargs.get("externals", OtherNSElementList())
self._namespaces = dict()
self._volumes = []
self._files = []
input_volumes = kwargs.get("volu
|
mes") or []
input_files = kwargs.get("files") or []
for v in input_volumes:
self.append(v)
for f in input_files:
self.append(f)
#Add default namespaces
self.add_namespace("", dfxml.XMLNS_DFXML)
self.add_namespace("dc", dfxml.XMLNS_DC)
def __iter__(self):
"""Yields all VolumeObjects, recursively their FileObjects, and the FileOb
|
jects directly attached to this DFXMLObject, in that order."""
for v in self._volumes:
yield v
for f in v:
yield f
for f in self._files:
yield f
def add_namespace(self, prefix, url):
self._namespaces[prefix] = url
ET.register_namespace(prefix, url)
def append(self, value):
if isinstance(value, VolumeObject):
self._volumes.append(value)
elif isinstance(value, FileObject):
self._files.append(value)
else:
_logger.debug("value = %r" % value)
raise TypeError("Expecting a VolumeObject or a FileObject. Got instead this type: %r." % type(value))
def iter_namespaces(self):
"""Yields (prefix, url) pairs of each namespace registered in this DFXMLObject."""
for prefix in self._namespaces:
yield (prefix, self._namespaces[prefix])
def populate_from_Element(self, e):
if "version" in e.attrib:
self.version = e.attrib["version"]
for ce in e.findall(".//*"):
(cns, cln) = _qsplit(ce.tag)
if cln == "command_line":
self.command_line = ce.text
elif cln == "image_filename":
self.sources.append(ce.text)
elif cns not in [dfxml.XMLNS_DFXML, ""]:
#Put all non-DFXML-namespace elements into the externals list.
self.externals.append(ce)
def print_dfxml(self, output_fh=sys.stdout):
"""Memory-efficient DFXML document printer. However, it assumes the whole element tree is already constructed."""
pe = self.to_partial_Element()
dfxml_wrapper = _ET_tostring(pe)
dfxml_foot = "</dfxml>"
#Check for an empty element
if dfxml_wrapper.strip()[-3:] == " />":
dfxml_head = dfxml_wrapper.strip()[:-3] + ">"
elif dfxml_wrapper.strip()[-2:] == "/>":
dfxml_head = dfxml_wrapper.strip()[:-2] + ">"
else:
dfxml_head = dfxml_wrapper.strip()[:-len(dfxml_foot)]
output_fh.write("""<?xml version="1.0"?>\n""")
output_fh.write(dfxml_head)
output_fh.write("\n")
_logger.debug("Writing %d volume objects." % len(self._volumes))
for v in self._volumes:
v.print_dfxml(output_fh)
output_fh.write("\n")
_logger.debug("Writing %d file objects." % len(self._files))
for f in self._files:
e = f.to_Element()
output_fh.write(_ET_tostring(e))
output_fh.write("\n")
output_fh.write(dfxml_foot)
output_fh.write("\n")
def to_Element(self):
outel = self.to_partial_Element()
for e
|
rubendura/django-rest-framework
|
tests/test_response.py
|
Python
|
bsd-2-clause
| 10,811 | 0.002312 |
from __future__ import unicode_literals
from django.conf.urls import include, url
from django.test import TestCase
from django.utils import six
from rest_framework import generics, routers, serializers, status, viewsets
from rest_framework.renderers import (
BaseRenderer, BrowsableAPIRenderer, JSONRenderer
)
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from tests.models import BasicModel
# Serializer used to test BasicModel
class BasicModelSerializer(serializers.ModelSerializer):
class Meta:
model = BasicModel
class MockPickleRenderer(BaseRenderer):
media_type = 'application/pickle'
class MockJsonRenderer(BaseRenderer):
media_type = 'application/json'
class MockTextMediaRenderer(BaseRenderer):
media_type = 'text/html'
DUMMYSTATUS = status.HTTP_200_OK
DUMMYCONTENT = 'dummycontent'
def RENDERER_A_SERIALIZER(x):
return ('Renderer A: %s' % x).encode('ascii')
def RENDERER_B_SERIALIZER(x):
return ('Renderer B: %s' % x).encode('ascii')
class RendererA(BaseRenderer):
media_type = 'mock/renderera'
format = "formata"
def render(self, data, media_type=None, renderer_context=None):
return RENDERER_A_SERIALIZER(data)
class RendererB(BaseRenderer):
media_type = 'mock/rendererb'
format = "formatb"
def render(self, data, media_type=None, renderer_context=None):
return RENDERER_B_SERIALIZER(data)
class RendererC(RendererB):
media_type = 'mock/rendererc'
format = 'formatc'
charset = "rendererc"
class MockView(APIView):
renderer_classes = (RendererA, RendererB, RendererC)
def get(self, request, **kwargs):
return Response(DUMMYCONTENT, status=DUMMYSTATUS)
class MockViewSettingContentType(APIView):
renderer_classes = (RendererA, RendererB, RendererC)
def get(self, request, **kwargs):
return Response(DUMMYCONTENT, status=DUMMYSTATUS, content_type='setbyview')
class HTMLView(APIView):
renderer_classes = (BrowsableAPIRenderer, )
def get(self, request, **kwargs):
return Response('text')
class HTMLView1(APIView):
renderer_classes = (BrowsableAPIRenderer, JSONRenderer)
def get(self, request, **kwargs):
return Response('text')
class HTMLNewModelViewSet(viewsets.ModelViewSet):
serializer_class = BasicModelSerializer
queryset = BasicModel.objects.all()
class HTMLNewModelView(generics.ListCreateAPIView):
renderer_classes = (BrowsableAPIRenderer,)
permission_classes = []
serializer_class = BasicModelSerializer
queryset = BasicModel.objects.all()
new_model_viewset_router = routers.DefaultRouter()
new_model_viewset_router.register(r'', HTMLNewModelViewSet)
urlpatterns = [
    url(r'^setbyview$', MockViewSettingContentType.as_view(renderer_classes=[RendererA, RendererB, RendererC])),
url(r'^.*\.(?P<format>.+)$', MockView.as_view(renderer_classes=[RendererA, RendererB, RendererC])),
url(r'^$', MockView.as_view(renderer_classes=[RendererA, RendererB, RendererC])),
url(r'^html$', HTMLView.as_view()),
url(r'^html1$', HTMLView1.as_view()),
url(r'^html_new_model$', HTMLNewModelView.as_view()),
url(r'^html_new_model_viewset', include(new_model_viewset_router.urls)),
url(r'^restframework', include('rest_framework.urls', namespace='rest_framework'))
]
# TODO: Clean tests below - remove duplicates with above, better unit testing, ...
class RendererIntegrationTests(TestCase):
"""
End-to-end testing of renderers using an ResponseMixin on a generic view.
"""
urls = 'tests.test_response'
def test_default_renderer_serializes_content(self):
"""If the Accept header is not set the default renderer should serialize the response."""
resp = self.client.get('/')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_head_method_serializes_no_content(self):
"""No response must be included in HEAD requests."""
resp = self.client.head('/')
self.assertEqual(resp.status_code, DUMMYSTATUS)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, six.b(''))
def test_default_renderer_serializes_content_on_accept_any(self):
"""If the Accept header is set to */* the default renderer should serialize the response."""
resp = self.client.get('/', HTTP_ACCEPT='*/*')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for the default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererA.media_type)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_non_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for a non-default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_accept_query(self):
"""The '_accept' query string should behave in the same way as the Accept header."""
param = '?%s=%s' % (
api_settings.URL_ACCEPT_OVERRIDE,
RendererB.media_type
)
resp = self.client.get('/' + param)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_query(self):
"""If a 'format' query is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_kwargs(self):
"""If a 'format' keyword arg is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/something.formatb')
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
"""If both a 'format' query and a matching Accept header specified,
the renderer with the matching format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format,
HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
class Issue122Tests(TestCase):
"""
Tests that covers #122.
"""
urls = 'tests.test_response'
|
adamtheturtle/flocker
|
flocker/dockerplugin/test/test_api.py
|
Python
|
apache-2.0
| 6,785 | 0.000147 |
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Tests for the Volumes Plugin API provided by the plugin.
"""
from uuid import uuid4, UUID
from twisted.web.http import OK
from twisted.internet import reactor
from .._api import VolumePlugin, DEFAULT_SIZE
from ...apiclient import FakeFlockerClient, Dataset
from ...control._config import dataset_id_from_name
from ...restapi.testtools import buildIntegrationTests, APIAssertionsMixin
class APITestsMixin(APIAssertionsMixin):
"""
Helpers for writing tests for the Docker Volume Plugin API.
"""
NODE_A = uuid4()
NODE_B = uuid4()
def initialize(self):
"""
Create initial objects for the ``VolumePlugin``.
"""
self.flocker_client = FakeFlockerClient()
def test_pluginactivate(self):
"""
``/Plugins.Activate`` indicates the plugin is a volume driver.
"""
# Docker 1.8, at least, sends "null" as the body. Our test
# infrastructure has the opposite bug so just going to send some
# other garbage as the body (12345) to demonstrate that it's
# ignored as per the spec which declares no body.
return self.assertResult(b"POST", b"/Plugin.Activate", 12345, OK,
{u"Implements": [u"VolumeDriver"]})
def test_remove(self):
"""
``/VolumeDriver.Remove`` returns a successful result.
"""
return self.assertResult(b"POST", b"/VolumeDriver.Remove",
{u"Name": u"vol"}, OK, {u"Err": None})
def test_unmount(self):
"""
``/VolumeDriver.Unmount`` returns a successful result.
"""
return self.assertResult(b"POST", b"/VolumeDriver.Unmount",
{u"Name": u"vol"}, OK, {u"Err": None})
def create(self, name):
"""
Call the ``/VolumeDriver.Create`` API to create a volume with the
given name.
:param unicode name: The name of the volume to create.
:return: ``Deferred`` that fires when the volume that was created.
"""
return self.assertResult(b"POST", b"/VolumeDriver.Create",
{u"Name": name}, OK, {u"Err": None})
def test_create_creates(self):
"""
``/VolumeDriver.Create`` creates a new dataset in the configuration.
"""
name = u"myvol"
d = self.create(name)
d.addCallback(
lambda _: self.flocker_client.list_datasets_configuration())
d.addCallback(self.assertItemsEqual, [
Dataset(dataset_id=UUID(dataset_id_from_name(name)),
primary=self.NODE_A,
maximum_size=DEFAULT_SIZE,
metadata={u"name": name})])
return d
def test_create_duplicate_name(self):
"""
If a dataset with the given name already exists,
``/VolumeDriver.Create`` succeeds without create a new volume.
"""
name = u"thename"
# Create a dataset out-of-band with matching name but non-matching
# dataset ID:
d = self.flocker_client.create_dataset(
self.NODE_A, DEFAULT_SIZE, metadata={u"name": name})
d.addCallback(lambda _: self.create(name))
d.addCallback(
lambda _: self.flocker_client.list_datasets_configuration())
d.addCallback(lambda results: self.assertEqual(len(results), 1))
return d
def test_create_duplicate_name_race_condition(self):
"""
If a dataset with the given name is created while the
``/VolumeDriver.Create`` call is in flight, the call does not
result in an error.
"""
name = u"thename"
# Create a dataset out-of-band with matching dataset ID and name
# which the docker plugin won't be able to see.
def create_after_list():
# Clean up the patched version:
del self.flocker_client.list_datasets_configuration
# But first time we're called, we create dataset and lie about
# its existence:
d = self.flocker_client.create_dataset(
self.NODE_A, DEFAULT_SIZE,
metadata={u"name": name},
dataset_id=UUID(dataset_id_from_name(name)))
d.addCallback(lambda _: [])
return d
self.flocker_client.list_datasets_configuration = create_after_list
return self.create(name)
def test_mount(self):
"""
``/VolumeDriver.Mount`` sets the primary of the dataset with matching
name to the current node and then waits for the dataset to
actually arrive.
"""
name = u"myvol"
dataset_id = UUID(dataset_id_from_name(name))
# Create dataset on a different node:
d = self.flocker_client.create_dataset(
self.NODE_B, DEFAULT_SIZE, metadata={u"name": name},
dataset_id=dataset_id)
# After two polling intervals the dataset arrives as state:
reactor.callLater(VolumePlugin._POLL_INTERVAL,
self.flocker_client.synchronize_state)
        d.addCallback(lambda _:
                      self.assertResult(
                          b"POST", b"/VolumeDriver.Mount",
{u"Name": name}, OK,
{u"Err": None,
u"Mountpoint": u"/flocker/{}".format(dataset_id)}))
d.addCallback(lambda _: self.flocker_client.list_datasets_state())
d.addCallback(lambda ds: self.assertEqual(
[self.NODE_A], [d.primary for d in ds
if d.dataset_id == dataset_id]))
return d
def test_path(self):
"""
``/VolumeDriver.Path`` returns the mount path of the given volume.
"""
name = u"myvol"
dataset_id = UUID(dataset_id_from_name(name))
d = self.create(name)
# After a polling interval the dataset arrives as state:
reactor.callLater(VolumePlugin._POLL_INTERVAL,
self.flocker_client.synchronize_state)
d.addCallback(lambda _: self.assertResponseCode(
b"POST", b"/VolumeDriver.Mount", {u"Name": name}, OK))
d.addCallback(lambda _:
self.assertResult(
b"POST", b"/VolumeDriver.Path",
{u"Name": name}, OK,
{u"Err": None,
u"Mountpoint": u"/flocker/{}".format(dataset_id)}))
return d
def _build_app(test):
test.initialize()
return VolumePlugin(reactor, test.flocker_client, test.NODE_A).app
RealTestsAPI, MemoryTestsAPI = buildIntegrationTests(
APITestsMixin, "API", _build_app)
|
kdyq007/cmdb-api
|
core/__init__.py
|
Python
|
gpl-2.0
| 299 | 0.003344 |
# -*- coding:utf-8 -*-
from attribute import attribute
from ci_type import citype
from ci_type_relation import cityperelation
from ci_relation import cirelation
from ci import ci
from history import history
from account import account
from special import special
from dns_record import dnsrecord
|
Fokko/incubator-airflow
|
airflow/contrib/sensors/bigquery_sensor.py
|
Python
|
apache-2.0
| 1,138 | 0 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.gcp.sensors.bigquery`."""
import warnings
# pylint: disable=unused-import
from airflow.gcp.sensors.bigquery import BigQueryTableSensor # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.gcp.sensors.bigquery`.",
DeprecationWarning, stacklevel=2
)
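# A hedged migration sketch (not part of the original module): downstream code
# should import the sensor from the non-deprecated path named in the warning
# above, i.e.
#     from airflow.gcp.sensors.bigquery import BigQueryTableSensor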
|
JohnGriffiths/ConWhAt
|
setup.py
|
Python
|
bsd-3-clause
| 1,353 | 0.005913 |
#!/usr/bin/env python
from setuptools import setup, find_packages
import versioneer
setup(name='conwhat', #version=versioneer.get_version(),
description='python library for connectome-based white matter atlas analyses in neuroimaging',
long_description='python library for connectome-based white matter atlas analyses in neuroimaging',
|
      keywords='white matter, tractography, MRI, DTI, diffusion, python',
author='John David Griffiths',
author_email='j.davidgriffiths@gmail.com',
url='https://github.com/JohnGriffiths/conwhat',
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=['numpy', 'setuptools'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
entry_points={
"console_scripts": [
"conwhat=conwhat.__main__:main",
]
},
#cmdclass=versioneer.get_cmdclass()
)
|
stweil/letsencrypt
|
certbot-compatibility-test/certbot_compatibility_test/util.py
|
Python
|
apache-2.0
| 1,520 | 0 |
"""Utility functions for Certbot plugin tests."""
import argparse
import copy
import os
import re
import shutil
import tarfile
import josepy as jose
from certbot._internal import constants
from certbot.tests import util as test_util
from certbot_compatibility_test import errors
_KEY_BASE = "rsa2048_key.pem"
KEY_PATH = test_util.vector_path(_KEY_BASE)
KEY = test_util.load_pyopenssl_private_key(_KEY_BASE)
JWK = jose.JWKRSA(key=test_util.load_rsa_private_key(_KEY_BASE))
IP_REGEX = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
def create_le_config(parent_dir):
"""Sets up LE dirs in parent_dir and returns the config dict"""
config = copy.deepcopy(constants.CLI_DEFAULTS)
le_dir = os.path.join(parent_dir, "certbot")
os.mkdir(le_dir)
for dir_name in ("config", "logs", "work"):
full_path = os.path.join(le_dir, dir_name)
os.mkdir(full_path)
full_name = dir_name + "_dir"
config[full_name] = full_path
config["domains"] = None
return argparse.Namespace(**config)
def extract_configs(configs, parent_dir):
"""Extracts configs to a new dir under parent_dir and returns it"""
    config_dir = os.path.join(parent_dir, "configs")
if os.path.isdir(configs):
|
shutil.copytree(configs, config_dir, symlinks=True)
elif tarfile.is_tarfile(configs):
with tarfile.open(configs, "r") as tar:
tar.extractall(config_dir)
else:
raise errors.Error("Unknown configurations file type")
return config_dir
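# A hedged usage sketch (paths invented for illustration): given either a
# directory of configurations or a tarball, extract_configs() copies or unpacks
# it under parent_dir and returns the new directory:
#
#     config_dir = extract_configs("/tmp/apache-configs.tar.gz", "/tmp/test-run")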
|
zjuchenyuan/BioWeb
|
Lib/Bio/NMR/NOEtools.py
|
Python
|
mit
| 3,420 | 0.000877 |
# Copyright 2004 by Bob Bussell. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""NOEtools: For predicting NOE coordinates from assignment data.
The input and output are modelled on nmrview peaklists.
This modules is suitable for directly generating an nmrview
peaklist with predicted crosspeaks directly from the
input assignment peaklist.
"""
from . import xpktools
def predictNOE(peaklist, originNuc, detectedNuc, originResNum, toResNum):
"""Predict the i->j NOE position based on self peak (diagonal) assignments
Parameters
----------
peaklist : xprtools.Peaklist
List of peaks from which to derive predictions
originNuc : str
Name of originating nucleus.
originResNum : int
Index of originating residue.
detectedNuc : str
Name of detected nucleus.
toResNum : int
Index of detected residue.
Returns
-------
returnLine : str
The .xpk file entry for the predicted crosspeak.
Examples
--------
Using predictNOE(peaklist,"N15","H1",10,12)
where peaklist is of the type xpktools.peaklist
would generate a .xpk file entry for a crosspeak
that originated on N15 of residue 10 and ended up
as magnetization detected on the H1 nucleus of
residue 12
Notes
    -----
The initial peaklist is assumed to be diagonal (self peaks only)
and currently there is no checking done to insure that this
assumption holds true. Check your peaklist for errors and
off diagonal peaks before attempting to use predictNOE.
"""
returnLine = "" # The modified line to be returned to the caller
datamap = _data_map(peaklist.datalabels)
# Construct labels for keying into dictionary
originAssCol = datamap[originNuc + ".L"] + 1
originPPMCol = datamap[originNuc + ".P"] + 1
    detectedPPMCol = datamap[detectedNuc + ".P"] + 1
# Make a list of the data lines involving the detected
if str(toResNum) in peaklist.residue_dict(detectedNuc) \
and str(originResNum) in peaklist.residue_dict(detectedNuc):
detectedList = peaklist.residue_dict(detectedNuc)[str(toResNum)]
originList = peaklist.residue_dict(detectedNuc)[str(originResNum)]
returnLine = detectedList[0]
for line in detectedList:
            aveDetectedPPM = _col_ave(detectedList, detectedPPMCol)
aveOriginPPM = _col_ave(originList, originPPMCol)
originAss = originList[0].split()[originAssCol]
returnLine = xpktools.replace_entry(returnLine, originAssCol + 1, originAss)
returnLine = xpktools.replace_entry(returnLine, originPPMCol + 1, aveOriginPPM)
return returnLine
def _data_map(labelline):
# Generate a map between datalabels and column number
# based on a labelline
i = 0 # A counter
datamap = {} # The data map dictionary
labelList = labelline.split() # Get the label line
# Get the column number for each label
for i in range(len(labelList)):
datamap[labelList[i]] = i
return datamap
def _col_ave(list, col):
# Compute average values from a particular column in a string list
total = 0.0
n = 0
for element in list:
total += float(element.split()[col])
n += 1
return total / n
|
etherkit/OpenBeacon2
|
macos/venv/lib/python3.8/site-packages/macholib/framework.py
|
Python
|
gpl-3.0
| 1,125 | 0 |
"""
Generic framework path manipulation
"""
import re
__all__ = ["framework_info"]
_STRICT_FRAMEWORK_RE = re.compile(
r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>[-_A-Za-z0-9]+).framework/
(?:Versions/(?P<version>[^/]+)/)?
(?P=shortname)
(?:_(?P<suffix>[^_]+))?
)$
"""
)
def framework_info(filename):
"""
A framework name can take one of the following four forms:
Location/Name.framework/Versions/SomeVersion/Name_Suffix
Location/Name.framework/Versions/SomeVersion/Name
Location/Name.framework/Name_Suffix
Location/Name.framework/Name
returns None if not found, or a mapping equivalent to:
dict(
location='Location',
name='Name.framework/Versions/SomeVersion/Name_Suffix',
shortname='Name',
version='SomeVersion',
suffix='Suffix',
)
Note that SomeVersion and Suffix are optional and may be None
if not present
"""
is_framework = _STRICT_FRAMEWORK_RE.match(filename)
    if not is_framework:
        return None
return is_framework.groupdict()
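# A hedged usage sketch (not part of macholib; the path below is invented
# purely to illustrate the mapping described in the docstring):
if __name__ == "__main__":
    info = framework_info(
        "/Library/Frameworks/Python.framework/Versions/2.7/Python")
    # Expected mapping: location='/Library/Frameworks',
    # name='Python.framework/Versions/2.7/Python', shortname='Python',
    # version='2.7', suffix=None (the name carries no _Suffix part).
    print(info)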
|