| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
---|---|---|---|---|---|---|---|---|
alexliyu/CDMSYSTEM | pyroute2/netns/nslink.py | Python | mit | 10,211 | 0 |
'''
NetNS, network namespaces support
=================================
Pyroute2 provides basic network namespaces support. The core
class is `NetNS`.
Please be aware that, in order to run system calls, the library
uses the `ctypes` module. It can fail on platforms where SELinux
is enforced. If the Python interpreter that loads this module
dumps core, one can check the SELinux state with the `getenforce`
command.
By default, NetNS creates the requested netns if it doesn't exist,
or uses the existing one. To control this behaviour, one can use
flags as for the `open(2)` system call::
    # create a new netns or fail, if it already exists
    netns = NetNS('test', flags=os.O_CREAT | os.O_EXCL)
# create a new netns or use existing one
netns = NetNS('test', flags=os.O_CREAT)
# the same as above, the default behaviour
netns = NetNS('test')
NetNS supports the standard IPRoute API, so it can be used instead of
IPRoute, e.g., in IPDB::
# start the main network settings database:
ipdb_main = IPDB()
# start the same for a netns:
ipdb_test = IPDB(nl=NetNS('test'))
# create VETH
ipdb_main.create(ifname='v0p0', kind='veth', peer='v0p1').commit()
# move peer VETH into the netns
with ipdb_main.interfaces.v0p1 as veth:
veth.net_ns_fd = 'test'
    # please keep in mind that a netns move clears all the settings
    # on a VETH interface pair, so one should run the netns assignment
    # as a separate operation only
# assign addresses
    # please notice that `v0p1` is already in the `test` netns,
    # so it should be accessed via `ipdb_test`
with ipdb_main.interfaces.v0p0 as veth:
veth.add_ip('172.16.200.1/24')
veth.up()
with ipdb_test.interfaces.v0p1 as veth:
veth.add_ip('172.16.200.2/24')
veth.up()
Please also review the test code under `tests/test_netns.py` for
more examples.
To remove a network namespace, one can use one of two approaches::
# The approach 1)
#
from pyroute2 import NetNS
netns = NetNS('test')
netns.close()
netns.remove()
# The approach 2)
#
from pyroute2.netns import remove
remove('test')
When using NetNS, one should stop it first with `close()`, and only
after that run `remove()`.
classes and functions
---------------------
'''
import os
import errno
import atexit
import select
import struct
import threading
import traceback
from socket import SOL_SOCKET
from socket import SO_RCVBUF
from pyroute2.config import MpPipe
from pyroute2.config import MpProcess
from pyroute2.iproute import IPRoute
from pyroute2.netlink.nlsocket import NetlinkMixin
from pyroute2.netlink.rtnl import IPRSocketMixin
from pyroute2.iproute import IPRouteMixin
from pyroute2.netns import setns
from pyroute2.netns import remove
def NetNServer(netns, rcvch, cmdch, flags=os.O_CREAT):
'''
    The netns server is supposed to be started automatically by NetNS.
    It has two communication channels: one simplex channel to forward
    incoming netlink packets, `rcvch`, and another synchronous duplex
    channel to get commands and send back responses, `cmdch`.
    Channels should support the standard socket API, should be compatible
    with poll/select and should be able to transparently pickle objects.
    NetNS uses `multiprocessing.Pipe` for this purpose, but it can be
    any other implementation with a compatible API.
    The first parameter, `netns`, is a netns name. Depending on the
    `flags`, the netns can be created automatically. The `flags` semantics
    are exactly the same as for the `open(2)` system call.
...
The server workflow is simple. The startup sequence::
1. Create or open a netns.
2. Start `IPRoute` instance. It will be used only on the low level,
the `IPRoute` will not parse any packet.
3. Start poll/select loop on `cmdch` and `IPRoute`.
    On startup, the server sends the status via `cmdch`: it is
    `None` if all is OK, or some exception otherwise.
Further data handling, depending on the channel, server side::
1. `IPRoute`: read an incoming netlink packet and send it unmodified
to the peer via `rcvch`. The peer, polling `rcvch`, can handle
the packet on its side.
2. `cmdch`: read tuple (cmd, argv, kwarg). If the `cmd` starts with
"send", then take `argv[0]` as a packet buffer, treat it as one
netlink packet and substitute PID field (offset 12, uint32) with
its own. Strictly speaking, it is not mandatory for modern netlink
implementations, but it is required by the protocol standard.
'''
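    # Client-side round-trip sketch (illustrative only, derived from the
    # docstring above; `get_links` is a real IPRoute method, but any proxied
    # method name would work the same way):
    #
    #     cmdch.send(('get_links', (), {}))  # server runs ipr.get_links()
    #     links = cmdch.recv()               # result, or an exception object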
try:
nsfd = setns(netns, flags)
except OSError as e:
cmdch.send(e)
return e.errno
except Exception as e:
cmdch.send(OSError(errno.ECOMM, str(e), netns))
return 255
#
try:
ipr = IPRoute()
rcvch_lock = ipr._sproxy.lock
ipr._s_channel = rcvch
poll = select.poll()
poll.register(ipr, select.POLLIN | select.POLLPRI)
poll.register(cmdch, select.POLLIN | select.POLLPRI)
except Exception as e:
cmdch.send(e)
return 255
# all is OK so far
cmdch.send(None)
# 8<-------------------------------------------------------------
while True:
events = poll.poll()
for (fd, event) in events:
if fd == ipr.fileno():
bufsize = ipr.getsockopt(SOL_SOCKET, SO_RCVBUF) // 2
with rcvch_lock:
rcvch.send(ipr.recv(bufsize))
elif fd == cmdch.fileno():
try:
cmdline = cmdch.recv()
if cmdline is None:
poll.unregister(ipr)
poll.unregister(cmdch)
ipr.close()
os.close(nsfd)
return
(cmd, argv, kwarg) = cmdline
if cmd[:4] == 'send':
# Achtung
#
# It's a hack, but we just have to do it: one
# must use actual pid in netlink messages
#
# FIXME: there can be several messages in one
# call buffer; but right now we can ignore it
msg = argv[0][:12]
msg += struct.pack("I", os.getpid())
msg += argv[0][16:]
argv = list(argv)
argv[0] = msg
cmdch.send(getattr(ipr, cmd)(*argv, **kwarg))
except Exception as e:
e.tb = traceback.format_exc()
cmdch.send(e)
class NetNSProxy(object):
netns = 'default'
flags = os.O_CREAT
def __init__(self, *argv, **kwarg):
self.cmdlock = threading.Lock()
self.rcvch, rcvch = MpPipe()
self.cmdch, cmdch = MpPipe()
self.server = MpProcess(target=NetNServer,
args=(self.netns, rcvch, cmdch, self.flags))
self.server.start()
error = self.cmdch.recv()
if error is not None:
self.server.join()
raise error
else:
atexit.register(self.close)
def recv(self, bufsize, flags=0):
return self.rcvch.recv()
def close(self):
self.cmdch.send(None)
self.server.join()
def proxy(self, cmd, *argv, **kwarg):
with self.cmdlock:
            self.cmdch.send((cmd, argv, kwarg))
response = self.cmdch.recv()
if isinstance(response, Exception):
raise response
return response
def fileno(self):
return self.rcvch.fileno()
def bind(self, *argv, **kwarg):
if 'async' in kwarg:
kwarg['async'] = False
return self.proxy('bind', *argv, **kwarg)
def send(self, *argv, **kwarg):
return self.proxy('send', *argv, **kwarg)
def sendto(self, *argv, **kwarg):
return self.proxy('sendto', *argv, **kwarg)
def getsockopt(self, *argv, **kwarg):
        return self.proxy('getsockopt', *argv, **kwarg)
neocogent/electrum | electrum/scripts/block_headers.py | Python | mit | 955 | 0.003141 |
#!/usr/bin/env python3
# A simple script that connects to a server and displays block headers
import time
import asyncio
from electrum.network import Network
from electrum.util import print_msg, json_encode, create_and_start_event_loop, log_exceptions
# start network
loop, stopping_fut, loop_thread = create_and_start_event_loop()
network = Network()
network.start()
# wait until connected
while not network.is_connected():
time.sleep(1)
print_msg("waiting for network to get connected...")
header_queue = asyncio.Queue()
@log_exceptions
async def f():
try:
await network.interface.session.subscribe('blockchain.headers.subscribe', [], header_queue)
# 3. wait for results
while network.is_connected():
            header = await header_queue.get()
print_msg(json_encode(header))
finally:
stopping_fut.set_result(1)
# 2. send the subscription
asyncio.run_coroutine_threadsafe(f(), loop)
supriyasawant/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/models.py | Python | agpl-3.0 | 72,774 | 0.007379 |
# imports from python libraries
import os
import hashlib
import datetime
import json
from itertools import chain # Using from_iterable()
# imports from installed packages
from django.contrib.auth.models import User
from django.db import models
from django_mongokit import connection
from django_mongokit import get_database
from django_mongokit.document import DjangoDocument
from mongokit import IS
from mongokit import OR
from mongokit import INDEX_ASCENDING, INDEX_DESCENDING
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
# imports from application folders/files
from gnowsys_ndf.settings import RCS_REPO_DIR
from gnowsys_ndf.settings import RCS_REPO_DIR_HASH_LEVEL
from gnowsys_ndf.settings import MARKUP_LANGUAGE
from gnowsys_ndf.settings import MARKDOWN_EXTENSIONS
from gnowsys_ndf.settings import GSTUDIO_GROUP_AGENCY_TYPES, GSTUDIO_AUTHOR_AGENCY_TYPES
from gnowsys_ndf.settings import META_TYPE
from gnowsys_ndf.ndf.rcslib import RCS
from django.dispatch import receiver
from registration.signals import user_registered
NODE_TYPE_CHOICES = (
('Nodes'),
('Attribute Types'),
('Attributes'),
('Relation Types'),
('Relations'),
('GSystem Types'),
('GSystems'),
('Node Specification'),
('Attribute Specification'),
('Relation Specification'),
('Intersection'),
('Complement'),
('Union'),
('Process Types'),
('Process')
)
TYPES_OF_GROUP = (
('ANONYMOUS'),
('PUBLIC'),
('PRIVATE')
)
EDIT_POLICY = (
('NON_EDITABLE'),
('EDITABLE_MODERATED'),
('EDITABLE_NON_MODERATED')
)
SUBSCRIPTION_POLICY = (
('OPEN'),
('BY_REQUEST'),
('BY_INVITATION'),
)
EXISTANCE_POLICY = (
('ANNOUNCED'),
('NOT_ANNOUNCED')
)
LIST_MEMBER_POLICY = (
('DISCLOSED_TO_MEM'),
('NOT_DISCLOSED_TO_MEM')
)
ENCRYPTION_POLICY = (
('ENCRYPTED'),
('NOT_ENCRYPTED')
)
DATA_TYPE_CHOICES = (
"None",
"bool",
"basestring",
"unicode",
"int",
"float",
"long",
"datetime.datetime",
"list",
"dict",
"ObjectId",
"IS()"
)
my_doc_requirement = u'storing_orignal_doc'
reduced_doc_requirement = u'storing_reduced_doc'
to_reduce_doc_requirement = u'storing_to_be_reduced_doc'
indexed_word_list_requirement = u'storing_indexed_words'
# CUSTOM DATA-TYPE DEFINITIONS
STATUS_CHOICES_TU = IS(u'DRAFT', u'HIDDEN', u'PUBLISHED', u'DELETED')
STATUS_CHOICES = tuple(str(qtc) for qtc in STATUS_CHOICES_TU)
QUIZ_TYPE_CHOICES_TU = IS(u'Short-Response', u'Single-Choice', u'Multiple-Choice')
QUIZ_TYPE_CHOICES = tuple(str(qtc) for qtc in QUIZ_TYPE_CHOICES_TU)
# FRAME CLASS DEFINITIONS
@receiver(user_registered)
def user_registered_handler(sender, user, request, **kwargs):
tmp_hold = node_collection.collection.node_holder()
dict_to_hold = {}
dict_to_hold['node_type'] = 'Author'
dict_to_hold['userid'] = user.id
agency_type = request.POST.get("agency_type", "")
if agency_type:
dict_to_hold['agency_type'] = agency_type
else:
# Set default value for agency_type as "Other"
dict_to_hold['agency_type'] = "Other"
dict_to_hold['group_affiliation'] = request.POST.get("group_affiliation", "")
tmp_hold.details_to_hold = dict_to_hold
tmp_hold.save()
return
@connection.register
class Node(DjangoDocument):
'''Everything is a Node. Other classes should inherit this Node class.
According to the specification of GNOWSYS, all nodes, including
types, metatypes and members of types, edges of nodes, should all
be Nodes.
    Members of this class must belong to one of the NODE_TYPE_CHOICES.
Some in-built Edge names (Relation types) are defined in this
class: type_of, member_of, prior_node, post_node, collection_set,
group_set.
type_of is used to express generalization of Node. And member_of
to express its type. This type_of should not be confused with
_type. The latter expresses the Python classes defined in this
program that the object inherits. The former (type_of) is about
the data the application represents.
    _type is useful in segregating the nodes from the mongodb
    collection, where all nodes are stored.
    prior_node is to express that the current node depends in some way
    on other node(s). post_node is seldom used. Currently we use it
    to define sub-Groups, and to set replies to a post in the Forum App.
    Nodes are published in one group or another, or in more than one
    group. The groups in which a node is published are expressed in
    group_set.
'''
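    # Illustrative (hypothetical) example of the three kinds of typing above:
    # a "Python" node might have member_of -> [ObjectId of a GSystemType],
    # type_of -> [ObjectId of a more general GSystemType], while
    # _type == u'GSystem' names the Python class the document belongs to.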
objects = models.Manager()
collection_name = 'Nodes'
structure = {
'_type': unicode, # check required: required field, Possible
# values are to be taken only from the list
# NODE_TYPE_CHOICES
'name': unicode,
'altnames': unicode,
'plural': unicode,
'prior_node': [ObjectId],
'post_node': [ObjectId],
'language': unicode,
'type_of': [ObjectId], # check required: only ObjectIDs of GSystemType
'member_of': [ObjectId], # check required: only ObjectIDs of
# GSystemType for GSystems, or only
# ObjectIDs of MetaTypes for
# GSystemTypes
'access_policy': unicode, # check required: only possible
# values are Public or Private. Why
# is this unicode?
'created_at': datetime.datetime,
'created_by': int, # test required: only ids of Users
'last_update': datetime.datetime,
'modified_by': int, # test required: only ids of Users
'contributors': [int], # test required: set of all ids of
# Users of created_by and modified_by
# fields
'location': [dict], # check required: this dict should be a
# valid GeoJason format
'content': unicode,
'content_org': unicode,
'group_set': [ObjectId], # check required: should not be
# empty. For type nodes it should be
# set to a Factory Group called
# Administration
'collection_set': [ObjectId], # check required: to exclude
# parent nodes as children, use
# MPTT logic
'property_order': [], # Determines the order & grouping in
# which attribute(s)/relation(s) are
# displayed in the form
'start_publication': datetime.datetime,
'tags': [unicode],
'featured': bool,
'url': unicode,
'comment_enabled': bool,
'login_required': bool,
# 'password': basestring,
'status': STATUS_CHOICES_TU,
'rating':[{'score':int,
'user_id':int,
'ip_address':basestring}]
}
required_fields = ['name', '_type'] # 'group_set' to be included
# here after the default
# 'Administration' group is
# ready.
default_values = {'created_at': datetime.datetime.utcnow, 'status': u'DRAFT'}
use_dot_notation = True
########## Setter(@x.setter) & Getter(@property) ##########
@property
def user_details_dict(self):
"""Retrieves names of created-by & modified-by users from the given
node, and appends those to 'user_details' dict-variable
"""
user_details = {}
if self.created_by:
user_details['created_by'] = User.objects.get(pk=self.created_by).username
contributor_names = []
for each_pk in self.contributors:
contributor_names.append(User.objects.get(pk=each_pk).username)
# user_details['modified_by'] = contributor_names
user_detail
virtualopensystems/neutron | neutron/tests/unit/nec/test_db.py | Python | apache-2.0 | 7,264 | 0 |
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import contextlib
import random
from neutron.common import constants as q_const
from neutron.openstack.common import uuidutils
from neutron.plugins.nec.common import exceptions as nexc
from neutron.plugins.nec.db import api as ndb
from neutron.plugins.nec.db import models as nmodels # noqa
from neutron.tests.unit.nec import test_nec_plugin
class NECPluginV2DBTestBase(test_nec_plugin.NecPluginV2TestCase):
"""Class conisting of NECPluginV2 DB unit tests."""
def setUp(self):
"""Setup for tests."""
super(NECPluginV2DBTestBase, self).setUp()
self.session = self.context.session
def get_ofc_item_random_params(self):
"""create random parameters for ofc_item test."""
ofc_id = uuidutils.generate_uuid()
neutron_id = uuidutils.generate_uuid()
none = uuidutils.generate_uuid()
return ofc_id, neutron_id, none
@contextlib.contextmanager
def portinfo_random_params(self):
with self.port() as port:
params = {'port_id': port['port']['id'],
'datapath_id': hex(random.randint(0, 0xffffffff)),
'port_no': random.randint(1, 100),
'vlan_id': random.randint(q_const.MIN_VLAN_TAG,
q_const.MAX_VLAN_TAG),
'mac': ':'.join(["%02x" % random.randint(0, 0xff)
for x in range(6)])
}
yield params
class NECPluginV2DBOfcMappingTest(NECPluginV2DBTestBase):
def test_add_ofc_item(self):
"""test add OFC item."""
o, q, n = self.get_ofc_item_random_params()
tenant = ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
self.assertEqual(tenant.ofc_id, o)
self.assertEqual(tenant.neutron_id, q)
def test_add_ofc_item_duplicate_entry(self):
o, q, n = self.get_ofc_item_random_params()
ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
self.assertRaises(nexc.NECDBException,
ndb.add_ofc_item,
self.session, 'ofc_tenant', q, o)
def test_get_ofc_item(self):
o, q, n = self.get_ofc_item_random_params()
ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
tenant = ndb.get_ofc_item(self.session, 'ofc_tenant', q)
self.assertEqual(tenant.ofc_id, o)
self.assertEqual(tenant.neutron_id, q)
def test_get_ofc_item_for_nonexisting_entry(self):
self.assertIsNone(
ndb.get_ofc_item(self.session, 'ofc_tenant', 'non-exist-id'))
def test_get_ofc_id(self):
o, q, n = self.get_ofc_item_random_params()
ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
tenant_id = ndb.get_ofc_id(self.session, 'ofc_tenant', q)
self.assertEqual(tenant_id, o)
def test_get_ofc_id_for_nonexisting_entry(self):
self.assertRaises(nexc.OFCMappingNotFound,
ndb.get_ofc_id,
self.session, 'ofc_tenant', 'non-exist-id')
def test_exists_ofc_item(self):
o, q, n = self.get_ofc_item_random_params()
self.assertFalse(ndb.exists_ofc_item(self.session, 'ofc_tenant', q))
ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
self.assertTrue(ndb.exists_ofc_item(self.session, 'ofc_tenant', q))
ndb.del_ofc_item(self.session, 'ofc_tenant', q)
self.assertFalse(ndb.exists_ofc_item(self.session, 'ofc_tenant', q))
def test_find_ofc_item(self):
o, q, n = self.get_ofc_item_random_params()
ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
tenant = ndb.find_ofc_item(self.session, 'ofc_tenant', o)
self.assertEqual(tenant.ofc_id, o)
self.assertEqual(tenant.neutron_id, q)
def test_find_ofc_item_for_nonexisting_entry(self):
self.assertIsNone(
ndb.find_ofc_item(self.session, 'ofc_tenant', 'non-existi-id'))
def test_del_ofc_item(self):
o, q, n = self.get_ofc_item_random_params()
ndb.add_ofc_item(self.session, 'ofc_tenant', q, o)
self.assertTrue(ndb.del_ofc_item(self.session, 'ofc_tenant', q))
self.assertIsNone(ndb.get_ofc_item(self.session, 'ofc_tenant', q))
self.assertIsNone(ndb.find_ofc_item(self.session, 'ofc_tenant', o))
def test_del_ofc_item_for_nonexisting_entry(self):
self.assertFalse(
ndb.del_ofc_item(self.session, 'ofc_tenant', 'non-existi-id'))
class NECPluginV2DBPortInfoTest(NECPluginV2DBTestBase):
def _compare_portinfo(self, portinfo, expected):
self.assertEqual(portinfo.id, expected['port_id'])
self.assertEqual(portinfo.datapath_id, expected['datapath_id'])
self.assertEqual(portinfo.port_no, expected['port_no'])
self.assertEqual(portinfo.vlan_id, expected['vlan_id'])
self.assertEqual(portinfo.mac, expected['mac'])
def _add_portinfo(self, session, params):
return ndb.add_portinfo(session, params['port_id'],
params['datapath_id'], params['port_no'],
params['vlan_id'], params['mac'])
def testd_add_portinfo(self):
"""test add portinfo."""
with self.portinfo_random_params() as params:
portinfo = self._add_portinfo(self.session, params)
self._compare_portinfo(portinfo, params)
exception_raised = False
try:
self._add_portinfo(self.session, params)
except nexc.NECDBException:
exception_raised = True
self.assertTrue(exception_raised)
def teste_get_portinfo(self):
"""test get portinfo."""
with self.portinfo_random_params() as params:
self._add_portinfo(self.session, params)
portinfo = ndb.get_portinfo(self.session, params['port_id'])
self._compare_portinfo(portinfo, params)
nonexist_id = uuidutils.generate_uuid()
portinfo_none = ndb.get_portinfo(self.session, nonexist_id)
self.assertIsNone(portinfo_none)
def testf_del_portinfo(self):
"""test delete portinfo."""
with self.portinfo_random_params() as params:
self._add_portinfo(self.session, params)
portinfo = ndb.get_portinfo(self.session, params['port_id'])
self.assertEqual(portinfo.id, params['port_id'])
ndb.del_portinfo(self.session, params['port_id'])
portinfo_none = ndb.get_portinfo(self.session, params['port_id'])
self.assertIsNone(portinfo_none)
kklmn/xrt | examples/withRaycing/14_SoftiMAX/__init__.py | Python | mit | 16,930 | 0.002662 |
# -*- coding: utf-8 -*-
r"""
.. _SoftiMAX:
SoftiMAX at MAX IV
------------------
The images below are produced by scripts in
``\examples\withRaycing\14_SoftiMAX``.
The beamline will have two branches:
- STXM (Scanning Transmission X-ray Microscopy) and
- CXI (Coherent X-ray Imaging),
see the scheme provided by K. Thånell.
.. imagezoom:: _images/softiMAX_layout.*
STXM branch
~~~~~~~~~~~
.. rubric:: Rays vs. hybrid
The propagation through the first optical elements – from undulator to front
end (FE) slit, to M1, to M2 and to plane grating (PG) – is done with rays:
+------------+------------+------------+------------+
| FE | M1 | M2 | PG |
+============+============+============+============+
| |st_rFE| | |st_rM1| | |st_rM2| | |st_rPG| |
+------------+------------+------------+------------+
.. |st_rFE| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-00-FE.*
.. |st_rM1| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-01-M1local.*
.. |st_rM2| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-02-M2local.*
.. |st_rPG| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-02a-PGlocal.*
:loc: upper-right-corner
Starting from PG – to M3, to exit slit, to Fresnel zone plate (FZP) and to
variously positioned sample screen – the propagation is done by rays or waves,
as compared below. Although the M3 footprint looks imperfect (not black at
the periphery), the field at the normal surfaces (exit slit, FZP (not shown)
and sample screen) is of perfect quality. At the best focus, rays and waves
result in a similar image. Notice the micron-sized depth of focus.
+-----------+---------------------+---------------------+
| | rays | wave |
+===========+=====================+=====================+
| M3 | |st_rM3| | |st_hM3| |
+-----------+---------------------+---------------------+
| exit slit | |st_rES| | |st_hES| |
+-----------+---------------------+---------------------+
| sample | |st_rS| | |st_hS| |
+-----------+---------------------+---------------------+
.. |st_rM3| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-03-M3local.*
.. |st_hM3| imagezoom:: _images/stxm-2D-2-hybr-0emit-0enSpread-monoE-03-M3local.*
:loc: upper-right-corner
.. |st_rES| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-04-ExitSlit.*
.. |st_hES| imagezoom:: _images/stxm-2D-2-hybr-0emit-0enSpread-monoE-04-ExitSlit.*
:loc: upper-right-corner
.. |st_rS| animation:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-06i-ExpFocus-Is
.. |st_hS| imagezoom:: _images/stxm-2D-2-hybr-0emit-0enSpread-monoE-06i-ExpFocus-Is
:loc: upper-right-corner
.. rubric:: Influence of emittance
Non-zero emittance radiation is treated in xrt by incoherent addition of single
electron intensities. The single-electron (filament) fields are considered
fully coherent and result from filament trajectories (one per repeat)
that attain positional and angular shifts within the given emittance
distribution. The following images are calculated for the exit slit and the
focus screen for zero and non-zero emittance
(for MAX IV 3 GeV ring: ε\ :sub:`x`\ =263 pm·rad,
β\ :sub:`x`\ =9 m, ε\ :sub:`z`\ =8 pm·rad, β\ :sub:`z`\ =2 m). At the real
emittance, the horizontal focal size increases by ~75%. A finite energy band,
as determined by vertical size of the exit slit, results in somewhat bigger
broadening due to a chromatic dependence of the focal length.
+-----------+---------------------+---------------------+---------------------+
| | 0 emittance | real emittance | |refeb| |
+===========+=====================+=====================+=====================+
| exit slit | |st_hESb| | |st_hES2| | |st_hES3| |
+-----------+---------------------+---------------------+---------------------+
| sample | |st_hSb| | |st_hS2| | |st_hS3| |
+-----------+---------------------+---------------------+---------------------+
.. |refeb| replace:: real emittance, finite energy band
.. |st_hESb| imagezoom:: _images/stxm-2D-2-hybr-0emit-0enSpread-monoE-04-ExitSlit.*
.. |st_hES2| imagezoom:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-04-ExitSlit.*
.. |st_hS2| animation:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-06i-ExpFocus-Is
.. |st_hES3| imagezoom:: _images/stxm-2D-2-hybr-non0e-0enSpread-wideE-04-ExitSlit.*
:loc: upper-right-corner
.. |st_hSb| imagezoom:: _images/stxm-2D-2-hybr-0emit-0enSpread-monoE-06i-ExpFocus-Is
.. |st_hS3| animation:: _images/stxm-2D-2-hybr-non0e-0enSpread-wideE-06i-ExpFocus-Is
:loc: upper-right-corner
.. rubric:: Correction of emittance effects
The increased focal size can be amended by closing the exit slit. With flux
loss of about 2/3, the focal size is almost restored.
+-----------+--------------------+--------------------+
| | 80 µm exit slit | 20 µm exit slit |
+===========+====================+====================+
| exit slit | |st_hES2b| | |st_hES4| |
+-----------+--------------------+--------------------+
| sample | |st_hS2b| | |st_hS4| |
+-----------+--------------------+--------------------+
.. |st_hES2b| imagezoom:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-04-ExitSlit.*
.. |st_hES4| imagezoom:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-025H-04-ExitSlit.*
:loc: upper-right-corner
.. |st_hS2b| animation:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-06i-ExpFocus-Is
.. |st_hS4| animation:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-025H-06i-ExpFocus-Is
:loc: upper-right-corner
.. rubric:: Coherence signatures
The beam improvement can also be viewed via the coherence properties by the
four available methods (see :ref:`coh_signs`). As the horizontal exit slit
becomes smaller, one can observe the increase of the coherent fraction ζ and
the increase of the primary (coherent) mode weight. The width of degree of
coherence (DoC) relative to the width of the intensity distribution determines
the coherent beam fraction. Both widths vary with varying screen position
around the focal point such that their ratio is not invariant, so that the
coherent fraction also varies, which is counter-intuitive. An important
advantage of the eigen-mode or PCA methods is a simple definition of the
coherent fraction as the eigenvalue of the zeroth mode (component); this
eigenvalue appears to be invariant around the focal point, see below. Note that
the methods 2 and 3 give equal results. The method 4 that gives the degree of
transverse coherence (DoTC) is also invariant around the focal point, see DoTC
values on the pictures of Principal Components.
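(With the mode eigenvalues λ\ :sub:`j` sorted in descending order and
normalized so that their sum is 1, a normalization we assume here rather
than take from the text, this definition reads simply ζ = λ\ :sub:`0`.)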
+-----------+--------------------------+--------------------------+
| | 80 µm exit slit | 20 µm exit slit |
+===========+==========================+==========================+
| method 1 | |st_hS80m1| | |st_hS20m1| |
+-----------+--------------------------+--------------------------+
| method 2 | |st_hS80m3| | |st_hS20m3| |
+-----------+--------------------------+--------------------------+
| method 3, | |st_hS80m4| | |st_hS20m4| |
| method 4b | | |
+-----------+--------------------------+--------------------------+
.. |st_hS80m1| animation:: _images/stxm-IDOC-2D-2-hybr-non0e-0enSpread-monoE
.. |st_hS20m1| animation:: _images/stxm-IDOC-2D-2-hybr-non0e-0enSpread-monoE-025H
:loc: upper-right-corner
.. |st_hS80m3| animation:: _images/stxm-Modes-2D-2-hybr-non0e-0enSpread-monoE
.. |st_hS20m3| animation:: _images/stxm-Modes-2D-2-hybr-non0e-0enSpread-monoE-025H
:loc: upper-right-corner
.. |st_hS80m4| animation:: _images/stxm-PCA-2D-2-hybr-non0e-0enSpread-monoE
.. |st_hS20m4| animation:: _images/stxm-PCA-2D-2-hybr-non0e-0enSpread-monoE-025H
:loc: upper-right-corner
CXI branch
~~~~~~~~~~
.. rubric:: 2D vs 1D
Although the sample screen images are of good quality (the dark field is almost
black), the mirror footprints may
mdameenh/elysia | fplassist/update_database.py | Python | bsd-3-clause | 10,609 | 0.010651 |
# -*- coding: utf-8 -*-
import requests
from datetime import datetime
from fplassist.models import Team_Info, Player_Info, Player_Basic_Stats, Player_Detailed_Stats, FPL_Config
def get_data(api_url):
api_response = requests.get(api_url)
try:
api_response.raise_for_status()
api_data = api_response.json()
    except (requests.exceptions.RequestException, ValueError):  # HTTP or JSON error
print("Error: There was an error while requesting the http-api. \
errorcode: %s" % (str(api_response.status_code)))
return False
if api_data:
return api_data
else:
return False
def update_database():
print("\n############----------> FPL Helper Script <----------############\n")
print("Initiating script...\n")
print("Connecting to database...")
print("Identifying next game week...")
event_data = get_data("https://fantasy.premierleague.com/drf/events")
    next_gameweeks = [gw for gw in event_data if gw["is_next"]]
    next_gameweek = next_gameweeks[0]["id"] if next_gameweeks else None
    if next_gameweek is None:
        print("There was a problem identifying next game week!")
        return False
print("Collecting team information...")
team_data = get_data("https://fantasy.premierleague.com/drf/teams")
print("Collecting fixture information...")
fixture_data = get_data("https://fantasy.premierleague.com/drf/fixtures")
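    # For each team: sum its own fixture-difficulty rating minus the
    # opponent's over the next five gameweeks, average the total, and bucket
    # the result into a 0-4 score (lower = easier run of fixtures).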
for team in team_data:
difficulty = 0
for gameweek in range(next_gameweek, next_gameweek+5):
fixtures = [fixture for fixture in fixture_data if fixture["event"] == gameweek]
for fixture in fixtures:
if fixture["team_h"] == team["id"]:
difficulty += (fixture["team_h_difficulty"] - fixture["team_a_difficulty"])
elif fixture["team_a"] == team["id"]:
difficulty += (fixture["team_a_difficulty"] - fixture["team_h_difficulty"])
t_diff = difficulty/5.0
if t_diff <= -4.0:
f_difficulty = 0
elif t_diff < -1.0:
f_difficulty = 1
elif t_diff < 0.0:
f_difficulty = 2
elif t_diff < 2.0:
f_difficulty = 3
        else:  # t_diff > 2.0; clamp anything above into the hardest bucket
            f_difficulty = 4
try:
team_entry = Team_Info.objects.get(team_id=team["id"])
team_entry.fixture_difficulty = f_difficulty
except Team_Info.DoesNotExist:
team_entry = Team_Info(team_id=team["id"], team_name=team["name"],
short_name=team["short_name"],
fixture_difficulty=f_difficulty)
team_entry.save()
print("Team and Fixture Difficulty information stored successfully!")
print("Collecting player information...")
player_types = get_data("https://fantasy.premierleague.com/drf/element-types")
print("Collecting player base stats...")
players = get_data("https://fantasy.premierleague.com/drf/elements")
for player in players:
print(player["web_name"])
player_cost = "%.1f" % (int(player["now_cost"])/10.0)
position_long = [pos for pos in player_types if pos["id"] == player["element_type"]][0]["singular_name"]
position_short = [pos for pos in player_types if pos["id"] == player["element_type"]][0]["singular_name_short"]
if not player["news"]:
p_news = "Match Fit!"
else:
p_news = player["news"]
player_deep_cumul = {'influence':0, 'creativity':0, 'threat':0, 'ict_index':0,
'open_play_crosses':0, 'big_chances_created':0, 'clearances_blocks_interceptions':0, 'recoveries':0,
'key_passes':0, 'tackles':0, 'winning_goals':0, 'attempted_passes':0, 'completed_passes':0,
'penalties_conceded':0, 'big_chances_missed':0, 'tackled':0, 'offside':0,
'target_missed':0, 'fouls':0, 'dribbles':0}
player_deep = get_data("https://fantasy.premierleague.com/drf/element-summary/%d" % (player["id"]))["history"]
_points_history = []
_ict_history = []
for deep_stat in player_deep:
_points_history.append(deep_stat["total_points"])
_ict_history.append(deep_stat["ict_index"])
for deep_attr in player_deep_cumul:
player_deep_cumul[deep_attr] += float(deep_stat[deep_attr])
try:
player_info = Player_Info.objects.get(player_id=player["id"])
player_info.team_id, player_info.availability, player_info.news = player["team"], player["status"], p_news
except Player_Info.DoesNotExist:
player_info = Player_Info(player_id=player["id"], player_name=player["web_name"],
pos_short=position_short, pos_long=position_long,
team_id=player["team"], availability=player["status"],
news=p_news, player_photo=player["photo"].split(".")[0])
player_info.save()
try:
player_base_stats = Player_Basic_Stats.objects.get(player_id=player["id"])
player_base_stats.points = player["total_points"]
player_base_stats.minutes = player["minutes"]
player_base_stats.cost = player_cost
player_base_stats.tsb = player["selected_by_percent"]
player_base_stats.ppg = player["points_per_game"]
player_base_stats.goals = player["goals_scored"]
player_base_stats.assists = player["assists"]
player_base_stats.cleansheet = player["clean_sheets"]
player_base_stats.saves = player["saves"]
player_base_stats.bps = player["bps"]
player_base_stats.transfer_in = player["transfers_in_event"]
player_base_stats.transfer_out = player["transfers_out_event"]
player_base_stats.form = player["form"]
except Player_Basic_Stats.DoesNotExist:
player_base_stats = Player_Basic_Stats(player_id=player["id"], points=player["total_points"],
minutes=player["minutes"], cost=player_cost,
tsb=player["selected_by_percent"],
ppg=player["points_per_game"], goals=player["goals_scored"],
assists=player["assists"], cleansheet=player["clean_sheets"],
saves=player["saves"], bps=player["bps"],
transfer_in=player["transfers_in_event"],
transfer_out=player["transfers_out_event"],
form=player["form"])
player_base_stats.save()
try:
            player_detailed = Player_Detailed_Stats.objects.get(player_id=player["id"])
player_detailed.ict_index = player_deep_cumul["ict_index"]
player_detailed.open_play_crosses = player_deep_cumul["open_play_crosses"]
player_detailed.big_chances_created = player_deep_cumul["big_chances_created"]
            player_detailed.clearances_blocks_interceptions = player_deep_cumul["clearances_blocks_interceptions"]
player_detailed.recoveries = player_deep_cumul["recoveries"]
player_detailed.key_passes = player_deep_cumul["key_passes"]
player_detailed.tackles = player_deep_cumul["tackles"]
player_detailed.winning_goals = player_deep_cumul["winning_goals"]
player_detailed.attempted_passes = player_deep_cumul["attempted_passes"]
player_detailed.completed_passes = player_deep_cumul["completed_passes"]
player_detailed.penalties_conceded = player_deep_cumul["penalties_conceded"]
player_detailed.big_chances_missed = player_deep_cumul["big_chances_missed"]
player_detailed.tackled = player_deep_cumul["tackled"]
player_detailed.offside = player_deep_cumul["offside"]
            player_detailed.target_missed = player_deep_cumul["target_missed"]
JensTimmerman/radical.pilot | docs/source/conf.py | Python | mit | 12,807 | 0.006637 |
# -*- coding: utf-8 -*-
#
# RADICAL-Pilot documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 3 21:55:42 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import imp
import sys
import os
import radical.utils as ru
import pprint
import subprocess as sp
script_dir = os.path.dirname(os.path.realpath(__file__))
################################################################################
cmd = "git branch | grep '*' | cut -f 2 -d \ " \
+ " | sed -e 's/readthedocs.tutorial/tutorial/g' " \
+ " | sed -e 's/readthedocs/release/g'"
mytag = sp.Popen(cmd, shell=True, stdout=sp.PIPE).stdout.read().strip()
if 'detached' in mytag :
cmd = "git branch | grep '*' | cut -f 2 -d '/' | cut -f 1 -d ')'" \
+ " | sed -e 's/readthedocs.tutorial/tutorial/g' " \
+ " | sed -e 's/readthedocs/release/g'"
mytag = sp.Popen(cmd, shell=True, stdout=sp.PIPE).stdout.read().strip()
tags.add (mytag)
################################################################################
##
print "* Generating resource configuration docs: resources.rst"
print "* using tag: %s" % mytag
try:
os.remove("{0}/resources.rst".format(script_dir))
except OSError:
pass
with open("{0}/resources.rst".format(script_dir), "w") as resources_rst:
resources_rst.write("""
.. _chapter_resources:
List of Pre-Configured Resources
================================
""")
configs = os.listdir("{0}/../../src/radical/pilot/configs/".format(script_dir))
for config in configs:
        if config.endswith(".json") is False:
            continue # skip all non-json files
if config.startswith("aliases") is True:
continue # skip alias files
print " * %s" % config
try:
json_data = ru.read_json_str("../../src/radical/pilot/configs/%s" % config)
except Exception, ex:
print " * JSON PARSING ERROR: %s" % str(ex)
continue
resources_rst.write("{0}\n".format(config[:-5].upper()))
resources_rst.write("{0}\n\n".format("="*len(config[:-5])))
for host_key, resource_config in json_data.iteritems():
resource_key = "%s.%s" % (config[:-5], host_key)
print " * %s" % resource_key
try:
default_queue = resource_config["default_queue"]
except Exception, ex:
default_queue = None
try:
working_dir = resource_config["default_remote_workdir"]
except Exception, ex:
working_dir = "$HOME"
try:
python_interpreter = resource_config["python_interpreter"]
except Exception, ex:
python_interpreter = None
try:
access_schemas = resource_config["schemas"]
except Exception, ex:
access_schemas = ['n/a']
            resources_rst.write("{0}\n".format(host_key.upper()))
resources_rst.write("{0}\n\n".format("*"*len(host_key)))
resources_rst.write("{0}\n\n".format(resource_config["description"]))
resources_rst.write("* **Resource label** : ``{0}``\n".format(resource_key))
resources_rst.write("* **Raw config** : :download:`{0} <../../src/radical/pilot/configs/{0}>`\n".format(config))
if resource_config["notes"] != "None":
resources_rst.write("* **Note** : {0}\n".format(resource_config["notes"]))
resources_rst.write("* **Default values** for ComputePilotDescription attributes:\n\n")
resources_rst.write(" * ``queue : {0}``\n".format(default_queue))
resources_rst.write(" * ``sandbox : {0}``\n".format(working_dir))
resources_rst.write(" * ``access_schema : {0}``\n\n".format(access_schemas[0]))
resources_rst.write("* **Available schemas** : ``{0}``\n".format(', '.join(access_schemas)))
resources_rst.write("\n")
##
################################################################################
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src/'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks']
[extensions]
todo_include_todos=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
rst_epilog = """
"""
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RADICAL-Pilot'
copyright = u'2014, The RADICAL Group at Rutgers University'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
try:
import radical.pilot
version = radical.pilot.version
release = radical.pilot.version
except Exception as e:
print 'Could not determine version: %s' % e
version = "UNKNOWN"
release = "UNKNOWN"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_themes"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
extlinks = {'issue': ('https://github.com/radical-cybertools/radical.pilot/issues/%s',
'issue ')}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = "armstrong"
html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"collapsiblesidebar" : "true",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
plotly/python-api | packages/python/plotly/plotly/validators/funnel/_cliponaxis.py | Python | mit | 450 | 0.002222 |
import _plotly_utils.basevalidators
class CliponaxisValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="cliponaxis", parent_name="funnel", **kwargs):
super(CliponaxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
mytliulei/Scapy | scapy/layers/sctp.py | Python | apache-2.0 | 17,954 | 0.011474 |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## Copyright (C) 6WIND <olivier.matz@6wind.com>
## This program is published under a GPLv2 license
"""
SCTP (Stream Control Transmission Protocol).
"""
import struct
from scapy.packet import *
from scapy.fields import *
from scapy.layers.inet import IP
from scapy.layers.inet6 import IP6Field
from scapy.layers.inet6 import IPv6
IPPROTO_SCTP=132
# crc32-c (Castagnoli) (crc32c_poly=0x1EDC6F41)
crc32c_table = [
0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
]
def crc32c(buf):
crc = 0xffffffff
for c in buf:
crc = (crc>>8) ^ crc32c_table[(crc^(ord(c))) & 0xFF]
crc = (~crc) & 0xffffffff
# reverse endianness
return struct.unpack(">I",struct.pack("<I", crc))[0]
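# How the module uses this (see SCTP.post_build below): when `chksum` is
# None, the CRC is computed over the fully built packet and the four bytes
# at offset 8 of the common header are replaced with the big-endian result.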
# old checksum (RFC2960)
"""
BASE = 65521 # largest prime smaller than 65536
def update_adler32(adler, buf):
s1 = adler & 0xffff
s2 = (adler >> 16) & 0xffff
print s1,s2
for c in buf:
        print ord(c)
s1 = (s1 + ord(c)) % BASE
s2 = (s2 + s1) % BASE
print s1,s2
return (s2 << 16) + s1
def sctp_checksum(buf):
return update_adler32(1, buf)
"""
sctpchunktypescls = {
0 : "SCTPChunkData",
1 : "SCTPChunkInit",
2 : "SCTPChunkInitAck",
3 : "SCTPChunkSACK",
4 : "SCTPChunkHeartbeatReq",
5 : "SCTPChunkHeartbeatAck",
6 : "SCTPChunkAbort",
7 : "SCTPChunkShutdown",
8 : "SCTPChunkShutdownAck",
9 : "SCTPChunkError",
10 : "SCTPChunkCookieEcho",
11 : "SCTPChunkCookieAck",
14 : "SCTPChunkShutdownComplete",
}
sctpchunktypes = {
0 : "data",
1 : "init",
2 : "init-ack",
3 : "sack",
4 : "heartbeat-req",
5 : "heartbeat-ack",
6 : "abort",
7 : "shutdown",
8 : "shutdown-ack",
9 : "error",
10 : "cookie-echo",
11 : "cookie-ack",
14 : "shutdown-complete",
}
sctpchunkparamtypescls = {
1 : "SCTPChunkParamHearbeatInfo",
5 : "SCTPChunkParamIPv4Addr",
6 : "SCTPChunkParamIPv6Addr",
7 : "SCTPChunkParamStateCookie",
8 : "SCTPChunkParamUnrocognizedParam",
9 : "SCTPChunkParamCookiePreservative",
11 : "SCTPChunkParamHostname",
12 : "SCTPChunkParamSupportedAddrTypes",
32768 : "SCTPChunkParamECNCapable",
49152 : "SCTPChunkParamFwdTSN",
49158 : "SCTPChunkParamAdaptationLayer",
}
sctpchunkparamtypes = {
1 : "heartbeat-info",
5 : "IPv4",
6 : "IPv6",
7 : "state-cookie",
8 : "unrecognized-param",
9 : "cookie-preservative",
11 : "hostname",
12 : "addrtypes",
32768 : "ecn-capable",
49152 : "fwd-tsn-supported",
49158 : "adaptation-layer",
}
############## SCTP header
# Dummy class to guess payload type (variable parameters)
class _SCTPChunkGuessPayload:
def default_payload_class(self,p):
if len(p) < 4:
return conf.padding_layer
else:
t = ord(p[0])
return globals().get(sctpchunktypescls.get(t, "Raw"), conf.raw_layer)
class SCTP(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ShortField("sport", None),
ShortField("dport", None),
XIntField("tag", None),
XIntField("chksum", None), ]
def answers(self, other):
if not isinstance(other, SCTP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.dport) and
(self.dport == other.sport)):
return 0
return 1
def post_build(self, p, pay):
p += pay
if self.chksum is None:
crc = crc32c(str(p))
p = p[:8]+struct.pack(">I", crc)+p[12:]
return p
############## SCTP Chunk variable params
class ChunkParamField(PacketListField):
def __init__(self, name, default, count_from=None, length_from=None):
PacketListField.__init__(self, name, default, conf.raw_layer, count_from=count_from, length_from=length_from)
def m2i(self, p, m):
cls = conf.raw_layer
if len(m) >= 4:
t = ord(m[0]) * 256 + ord(m[1])
cls = globals().get(sctpchunkparamtypescls.get(t, "Raw"), conf.raw_layer)
return cls(m)
# dummy class to avoid Raw() after Chunk params
class _SCTPChunkParam:
def extract_padding(self, s):
return "",s[:]
class SCTPChunkParamHearbeatInfo(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 1, sctpchunkparamtypes),
FieldLenField("len", None, length_of="data",
adjust = lambda pkt,x:x+4),
PadField(StrLenField("data", "",
length_from=lambda pkt: pkt.len-4),
4, padwith="\x00"),]
class SCTPChunkParamIPv4Addr(_SCTPChunkParam, Packet):
fields_desc = [ ShortE
netixx/python-tools | tools/debugger.py | Python | apache-2.0 | 674 | 0.004451 |
"""Module to debug python programs"""
import sys
import traceback
def getAllStacks():
code = []
for threadId, stack in sys._current_frames().iteritems():
code.append("\n# ThreadID: %s" % threadId)
        for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename,
lineno, name))
if line:
code.append(" %s" % (line.strip()))
return code
def strStacks():
out = "\n*** STACKTRACE - START ***\n"
out += "\n".join(getAllStacks())
out += "\n*** ST
|
ACKTRACE - END ***\n"
return out
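# Usage sketch (hypothetical): dump the stacks of all live threads, e.g.
# from a watchdog thread or a signal handler:
#   sys.stderr.write(strStacks())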
mgracer48/panda3d | direct/src/showbase/ProfileSession.py | Python | bsd-3-clause | 12,549 | 0.002789 |
from panda3d.core import TrueClock
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.PythonUtil import (
StdoutCapture, _installProfileCustomFuncs,_removeProfileCustomFuncs,
_getProfileResultFileInfo, _setProfileResultsFileInfo)
import __builtin__
import profile
import pstats
class PercentStats(pstats.Stats):
# prints more useful output when sampled durations are shorter than a millisecond
# lots of this is copied from Python's pstats.py
def setTotalTime(self, tt):
# use this to set 'total time' to base time percentages on
# allows profiles to show timing based on percentages of duration of another profile
self._totalTime = tt
def add(self, *args, **kArgs):
pstats.Stats.add(self, *args, **kArgs)
# DCR -- don't need to record filenames
self.files = []
def print_stats(self, *amount):
for filename in self.files:
print filename
if self.files: print
indent = ' ' * 8
for func in self.top_level:
print indent, func_get_function_name(func)
print indent, self.total_calls, "function calls",
if self.total_calls != self.prim_calls:
print "(%d primitive calls)" % self.prim_calls,
# DCR
#print "in %.3f CPU seconds" % self.total_tt
print "in %s CPU milliseconds" % (self.total_tt * 1000.)
if self._totalTime != self.total_tt:
print indent, 'percentages are of %s CPU milliseconds' % (self._totalTime * 1000)
print
width, list = self.get_print_list(amount)
if list:
self.print_title()
for func in list:
self.print_line(func)
print
# DCR
#print
return self
def f8(self, x):
if self._totalTime == 0.:
# profiling was too quick for clock resolution...
return ' Inf%'
return "%7.2f%%" % ((x*100.) / self._totalTime)
@staticmethod
def func_std_string(func_name): # match what old profile produced
return "%s:%d(%s)" % func_name
def print_line(self, func):
cc, nc, tt, ct, callers = self.stats[func]
c = str(nc)
# DCR
f8 = self.f8
if nc != cc:
c = c + '/' + str(cc)
print c.rjust(9),
print f8(tt),
if nc == 0:
print ' '*8,
else:
print f8(tt/nc),
print f8(ct),
if cc == 0:
print ' '*8,
else:
print f8(ct/cc),
# DCR
#print func_std_string(func)
print PercentStats.func_std_string(func)
class ProfileSession:
# class that encapsulates a profile of a single callable using Python's standard
# 'profile' module
#
# defers formatting of profile results until they are requested
#
# implementation sidesteps memory leak in Python profile module,
    # and redirects file output to RAM file for efficiency
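    #
    # usage sketch (hypothetical call site; getResults() is defined further
    # down in this class, outside the excerpt shown here):
    #   session = ProfileSession('myTask', func=myCallable)
    #   result = session.run()
    #   print session.getResults()
    #   session.release()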
TrueClock = TrueClock.getGlobalPtr()
notify = directNotify.newCategory("ProfileSession")
def __init__(self, name, func=None, logAfterProfile=False):
self._func = func
self._name = name
self._logAfterProfile = logAfterProfile
self._filenameBase = 'profileData-%s-%s' % (self._name, id(self))
self._refCount = 0
# if true, accumulate profile results every time we run
# if false, throw out old results every time we run
self._aggregate = False
self._lines = 500
self._sorts = ['cumulative', 'time', 'calls']
self._callInfo = True
self._totalTime = None
self._reset()
self.acquire()
def getReference(self):
# call this when you want to store a new reference to this session that will
# manage its acquire/release reference count independently of an existing reference
self.acquire()
return self
def acquire(self):
self._refCount += 1
def release(self):
self._refCount -= 1
if not self._refCount:
self._destroy()
def _destroy(self):
del self._func
del self._name
del self._filenameBase
del self._filenameCounter
del self._filenames
del self._duration
del self._filename2ramFile
del self._resultCache
del self._successfulProfiles
def _reset(self):
self._filenameCounter = 0
self._filenames = []
# index of next file to be added to stats object
self._statFileCounter = 0
self._successfulProfiles = 0
self._duration = None
self._filename2ramFile = {}
self._stats = None
self._resultCache = {}
def _getNextFilename(self):
filename = '%s-%s' % (self._filenameBase, self._filenameCounter)
self._filenameCounter += 1
return filename
def run(self):
# make sure this instance doesn't get destroyed inside self._func
self.acquire()
if not self._aggregate:
self._reset()
# if we're already profiling, just run the func and don't profile
if 'globalProfileSessionFunc' in __builtin__.__dict__:
self.notify.warning('could not profile %s' % self._func)
result = self._func()
if self._duration is None:
self._duration = 0.
else:
# put the function in the global namespace so that profile can find it
assert hasattr(self._func, '__call__')
__builtin__.globalProfileSessionFunc = self._func
__builtin__.globalProfileSessionResult = [None]
# set up the RAM file
self._filenames.append(self._getNextFilename())
filename = self._filenames[-1]
_installProfileCustomFuncs(filename)
# do the profiling
Profile = profile.Profile
statement = 'globalProfileSessionResult[0]=globalProfileSessionFunc()'
sort = -1
retVal = None
# this is based on profile.run, the code is replicated here to allow us to
# eliminate a memory leak
prof = Profile()
try:
prof = prof.run(statement)
except SystemExit:
pass
# this has to be run immediately after profiling for the timings to be accurate
# tell the Profile object to generate output to the RAM file
prof.dump_stats(filename)
# eliminate the memory leak
del prof.dispatcher
# store the RAM file for later
profData = _getProfileResultFileInfo(filename)
self._filename2ramFile[filename] = profData
# calculate the duration (this is dependent on the internal Python profile data format.
# see profile.py and pstats.py, this was copied from pstats.Stats.strip_dirs)
maxTime = 0.
for cc, nc, tt, ct, callers in profData[1].itervalues():
if ct > maxTime:
maxTime = ct
self._duration = maxTime
# clean up the RAM file support
_removeProfileCustomFuncs(filename)
# clean up the globals
result = globalProfileSessionResult[0]
del __builtin__.__dict__['globalProfileSessionFunc']
del __builtin__.__dict__['globalProfileSessionResult']
self._successfulProfiles += 1
if self._logAfterProfile:
self.notify.info(self.getResults())
self.release()
return result
def getDuration(self):
return self._duration
def profileSucceeded(self):
return self._successfulProfiles > 0
def _restoreRamFile(self, filename):
# set up the RAM file
_installProfileCustomFuncs(filename)
# install the stored RAM file from self.run()
_setProfileResultsFileInfo(filename, self._filename2ramFile[filename])
def _discardRamFile(self, filename):
# take down the RAM file
_removeProfileCustomFuncs(filename)
# and
|
KeyWeeUsr/plyer
|
plyer/platforms/macosx/filechooser.py
|
Python
|
mit
| 3,427 | 0 |
'''
Mac OS X file chooser
---------------------
'''
from plyer.facades import FileChooser
from pyobjus import autoclass, objc_arr, objc_str
from pyobjus.dylib_manager import load_framework, INCLUDE
load_framework(INCLUDE.AppKit)
NSURL = autoclass('NSURL')
NSOpenPanel = autoclass('NSOpenPanel')
NSSavePanel = autoclass('NSSavePanel')
NSOKButton = 1
class MacFileChooser(object):
'''A native implementation of file chooser dialogs using Apple's API
through pyobjus.
Not implemented features:
* filters (partial, wildcards are converted to extensions if possible.
      Pass the Mac-specific "use_extensions" if you can provide
      Mac OS X-compatible extensions, to avoid the automatic conversion)
    * multiple (not implemented for the save dialog; available in the open dialog)
* icon
* preview
'''
mode = "open"
path = None
multiple = False
filters = []
preview = False
title = None
icon = None
show_hidden = False
use_extensions = False
def __init__(self, **kwargs):
# Simulate Kivy's behavior
for i in kwargs:
setattr(self, i, kwargs[i])
def run(self):
panel = None
if self.mode in ("open", "dir"):
panel = NSOpenPanel.openPanel()
else:
panel = NSSavePanel.savePanel()
panel.setCanCreateDirectories_(True)
panel.setCanChooseDirectories_(self.mode == "dir")
panel.setCanChooseFiles_(self.mode != "dir")
panel.setShowsHiddenFiles_(self.show_hidden)
if self.title:
panel.setTitle_(objc_str(self.title))
if self.mode != "save" and self.multiple:
panel.setAllowsMultipleSelection_(True)
# Mac OS X does not support wildcards unlike the other platforms.
# This tries to convert wildcards to "extensions" when possible,
        # and sets the panel to also allow other file types, just to be safe.
if len(self.filters) > 0:
filthies = []
for f in self.filters:
if type(f) == str:
if not self.use_extensions:
if f.strip().endswith("*"):
continue
pystr = f.strip().split("*")[-1].split(".")[-1]
filthies.append(objc_str(pystr))
else:
                    for s in f[1:]:
                        if not self.use_extensions:
                            if s.strip().endswith("*"):
                                continue
                        pystr = s.strip().split("*")[-1].split(".")[-1]
                        filthies.append(objc_str(pystr))
ftypes_arr = objc_arr(filthies)
panel.setAllowedFileTypes_(ftypes_arr)
panel.setAllowsOtherFileTypes_(not self.use_extensions)
if self.path:
url = NSURL.fileURLWithPath_(self.path)
panel.setDirectoryURL_(url)
if panel.runModal():
if self.mode == "save" or not self.multiple:
return [panel.filename().UTF8String()]
else:
return [i.UTF8String() for i in panel.filenames()]
return None
class MacOSXFileChooser(FileChooser):
    '''FileChooser implementation for Mac OS X, using NSOpenPanel/NSSavePanel via pyobjus.
'''
def _file_selection_dialog(self, **kwargs):
return MacFileChooser(**kwargs).run()
def instance():
return MacOSXFileChooser()
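# A minimal usage sketch (added; not part of the original module). It assumes
# an interactive macOS session where the AppKit panels can be shown; the
# filters use the wildcard form that run() converts to extensions, as
# described in the MacFileChooser docstring above.
if __name__ == '__main__':
    chooser = MacFileChooser(mode="open", multiple=True,
                             filters=["*.txt", "*.py"])
    print(chooser.run())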
|
dkasak/notify-osd-customizable
|
examples/summary-body.py
|
Python
|
gpl-3.0
| 3,917 | 0.032678 |
#!/usr/bin/python
################################################################################
##3456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789
## 10 20 30 40 50 60 70 80
##
## Info:
## Example of how to use libnotify correctly and at the same time comply to
## the new jaunty notification spec (read: visual guidelines)
##
## Run:
## chmod +x summary-body.py
## ./summary-body.py
##
## Copyright 2009 Canonical Ltd.
##
## Author:
## Mirco "MacSlow" Mueller <mirco.mueller@canonical.com>
##
## This program is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License version 3, as published
## by the Free Software Foundation.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranties of
## MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
## PURPOSE. See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import sys
import pynotify
# even in Python this is globally nasty :), do something nicer in your own code
capabilities = {'actions': False,
'body': False,
'body-hyperlinks': False,
'body-images': False,
'body-markup': False,
'icon-multi': False,
'icon-static': False,
'sound': False,
'image/svg+xml': False,
'x-canonical-private-synchronous': False,
'x-canonical-append': False,
'x-canonical-private-icon-only': False,
'x-canonical-truncation': False}
def initCaps ():
caps = pynotify.get_server_caps ()
if caps is None:
print "Failed to receive server caps."
sys.exit (1)
for cap in caps:
capabilities[cap] = True
def printCaps ():
info = pynotify.get_server_info ()
print "Name: " + info["name"]
print "Vendor: " + info["vendor"]
print "Version: " + info["version"]
print "Spec. Version: " + info["spec-version"]
caps = pynotify.get_server_caps ()
if caps is None:
print "Failed to receive server caps."
sys.exit (1)
print "Supported capabilities/hints:"
if capabilities['actions']:
print "\tactions"
if capabilities['body']:
print "\tbody"
if capabilities['body-hyperlinks']:
print "\tbody-hyperlinks"
if capabilities['body-images']:
print "\tbody-images"
if capabilities['body-markup']:
print "\tbody-markup"
if capabilities['icon-multi']:
print "\ticon-multi"
if capabilities['icon-static']:
        print "\ticon-static"
if capabilities['sound']:
print "\tsound"
if capabilities['image/svg+xml']:
print "\timage/svg+xml"
if capabilities['x-canonical-private-synchronous']:
print "\tx-canonical-private-synchronous"
if capabilities['x-canonical-append']:
print "\tx-canonical-append"
    if capabilities['x-canonical-private-icon-only']:
print "\tx-canonical-private-icon-only"
if capabilities['x-canonical-truncation']:
print "\tx-canonical-truncation"
print "Notes:"
if info["name"] == "notify-osd":
print "\tx- and y-coordinates hints are ignored"
print "\texpire-timeout is ignored"
print "\tbody-markup is accepted but filtered"
else:
print "\tnone"
if __name__ == '__main__':
if not pynotify.init ("summary-body"):
sys.exit (1)
    # call this so we can safely use the capabilities dictionary later
initCaps ()
# show what's supported
printCaps ()
# try the summary-body case
n = pynotify.Notification ("Totem",
"This is a superfluous notification")
n.show ()
|
sanguinariojoe/FreeCAD
|
src/Mod/OpenSCAD/InitGui.py
|
Python
|
lgpl-2.1
| 5,293 | 0.013603 |
# OpenSCAD gui init module
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
import FreeCAD
class OpenSCADWorkbench ( Workbench ):
"OpenSCAD workbench object"
def __init__(self):
self.__class__.Icon = FreeCAD.getResourceDir() + "Mod/OpenSCAD/Resources/icons/OpenSCADWorkbench.svg"
self.__class__.MenuText = "OpenSCAD"
self.__class__.ToolTip = (
"OpenSCAD is an application for creating solid 3D CAD.\n"
"FreeCAD utizes OpenSCAD's capability as a script-only based modeller that uses its own description language\n"
"Note: the Mesh workbench heavily uses the boolean operations of this workbench because they are quite robust"
)
def Initialize(self):
def QT_TRANSLATE_NOOP(scope, text):
return text
import OpenSCAD_rc,OpenSCADCommands
commands = ['OpenSCAD_ReplaceObject','OpenSCAD_RemoveSubtree',
'OpenSCAD_RefineShapeFeature','OpenSCAD_MirrorMeshFeature',
            'OpenSCAD_ScaleMeshFeature','OpenSCAD_ResizeMeshFeature','OpenSCAD_IncreaseToleranceFeature',
'OpenSCAD_Edgestofaces', 'OpenSCAD_ExpandPlacements','OpenSCAD_ExplodeGroup']
toolbarcommands = ['OpenSCAD_ReplaceObject','OpenSCAD_RemoveSubtree',
            'OpenSCAD_ExplodeGroup','OpenSCAD_RefineShapeFeature',
'OpenSCAD_IncreaseToleranceFeature']
import PartGui
parttoolbarcommands = ['Part_CheckGeometry','Part_Primitives',
'Part_Builder','Part_Cut','Part_Fuse','Part_Common',
'Part_Extrude','Part_Revolve']
import FreeCAD
param = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/OpenSCAD")
openscadfilename = param.GetString('openscadexecutable')
if not openscadfilename:
import OpenSCADUtils
openscadfilename = OpenSCADUtils.searchforopenscadexe()
            if openscadfilename: #automatic search was successful
FreeCAD.addImportType("OpenSCAD Format (*.scad)","importCSG")
param.SetString('openscadexecutable',openscadfilename) #save the result
if openscadfilename:
commands.extend(['OpenSCAD_AddOpenSCADElement', 'OpenSCAD_MeshBoolean',
'OpenSCAD_Hull','OpenSCAD_Minkowski'])
toolbarcommands.extend(['OpenSCAD_AddOpenSCADElement', 'OpenSCAD_MeshBoolean',
'OpenSCAD_Hull','OpenSCAD_Minkowski'])
else:
FreeCAD.Console.PrintWarning('OpenSCAD executable not found\n')
self.appendToolbar(QT_TRANSLATE_NOOP('Workbench','OpenSCADTools'),toolbarcommands)
self.appendMenu('OpenSCAD',commands)
        self.appendToolbar(QT_TRANSLATE_NOOP('Workbench','OpenSCAD Part tools'),parttoolbarcommands)
#self.appendMenu('OpenSCAD',["AddOpenSCADElement"])
###self.appendCommandbar("&Generic Tools",["ColorCodeShape"])
FreeCADGui.addIconPath(":/icons")
FreeCADGui.addLanguagePath(":/translations")
FreeCADGui.addPreferencePage(":/ui/openscadprefs-base.ui","OpenSCAD")
def GetClassName(self):
return "Gui::PythonWorkbench"
Gui.addWorkbench(OpenSCADWorkbench())
# Not all of the GUI tests will require an OpenSCAD binary (CSG import and export don't)
FreeCAD.__unit_test__ += ["TestOpenSCADGui"]
|
bkahlert/seqan-research
|
raw/workshop11/workshop2011-data-20110925/trunk/misc/seqan_instrumentation/py2exe/dist/classes/__init__.py
|
Python
|
mit
| 81 | 0 |
__all__ = [
    'diff', 'dirs', 'flushfile', 'id',
    'stats', 'sync'
]
|
google/struct2tensor
|
struct2tensor/prensor_value.py
|
Python
|
apache-2.0
| 9,379 | 0.00853 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A concrete prensor tree.
PrensorValue represents a tree where all the nodes are represented as ndarrays,
instead of tensors.
prensor = ...
assert isinstance(prensor, struct2tensor.Prensor)
with tf.Session() as sess:
  prensor_value = sess.run(prensor)
assert isinstance(prensor_value, struct2tensor.PrensorValue)
"""
import collections
from typing import FrozenSet, Iterator, Mapping, Optional, Sequence, Union
import numpy as np
from struct2tensor import path
from struct2tensor import prensor
import tensorflow as tf
from tensorflow.python.client import session as session_lib # pylint: disable=g-direct-tensorflow-import
class RootNodeValue(object):
"""The value of the root."""
__slots__ = ["_size"]
def __init__(self, size: np.int64):
"""Creates a root node.
Args:
size: how many root objects there are.
"""
self._size = size
@property
def size(self):
return self._size
@property
def is_repeated(self):
return True
def schema_string(self):
return "repeated"
def data_string(self):
return "size: {}".format(self._size)
def __str__(self):
return "RootNode"
class ChildNodeValue(object):
"""The value of an intermediate node."""
__slots__ = ["_parent_index", "_is_repeated"]
def __init__(self, parent_index: np.ndarray, is_repeated: bool):
"""Creates a child node.
Args:
parent_index: a 1-D int64 ndarray where parent_index[i] represents the
parent index of the ith child.
is_repeated: a bool indicating if there can be more than one child per
parent.
"""
self._parent_index = parent_index
self._is_repeated = is_repeated
@property
def size(self):
"""Returns the size, as if this was the root prensor.
Returns:
A 1-D ndarray of size 1.
"""
return tf.shape(self.parent_index, out_type=tf.int64)
@property
def parent_index(self):
return self._parent_index
@property
def is_repeated(self):
return self._is_repeated
def schema_string(self) -> str:
return "repeated" if self.is_repeated else "optional"
def data_string(self):
return "parent_index: {}".format(self._parent_index)
def __str__(self):
return "ChildNode {} {}".format(self.schema_string(), self.data_string())
class LeafNodeValue(object):
"""The value of a leaf node."""
__slots__ = ["_parent_index", "_values", "_is_repeated"]
def __init__(self, parent_index: np.ndarray, values: np.ndarray,
is_repeated: bool):
"""Creates a leaf node.
Args:
parent_index: a 1-D int64 ndarray where parent_index[i] represents the
parent index of values[i]
values: a 1-D ndarray of equal length to parent_index.
is_repeated: a bool indicating if there can be more than one child per
parent.
"""
self._parent_index = parent_index
self._values = values
self._is_repeated = is_repeated
@property
def parent_index(self):
return self._parent_index
@property
def is_repeated(self):
return self._is_repeated
@property
def values(self):
return self._values
def data_string(self):
return "parent_index: {} values: {}".format(self._parent_index,
self._values)
def schema_string(self) -> str:
return u"{} {}".format("repeated" if self.is_repeated else "optional",
str(self.values.dtype))
def __str__(self):
return "{} {}".format("repeated" if self.is_repeated else "optional",
str(self.values.dtype))
NodeValue = Union[RootNodeValue, ChildNodeValue, LeafNodeValue] # pylint: disable=invalid-name
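# Illustration (added): how a ragged field maps onto LeafNodeValue. For three
# root objects whose field holds [[1, 2], [], [3]], parent_index pairs each
# value with the index of its parent row:
#
#   LeafNodeValue(parent_index=np.array([0, 0, 2], dtype=np.int64),
#                 values=np.array([1, 2, 3]),
#                 is_repeated=True)
#
# data_string() on that node prints "parent_index: [0 0 2] values: [1 2 3]".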
class PrensorValue(object):
"""A tree of NodeValue objects."""
__slots__ = ["_node", "_children"]
def __init__(self, node: NodeValue,
children: "collections.OrderedDict[path.Step, PrensorValue]"):
"""Construct a PrensorValue.
Do not call directly, instead call materialize(...) below.
Args:
node: the NodeValue of the root.
children: a map from edge to subtree.
"""
self._node = node
self._children = children
# TODO(martinz): This could be Value.
@property
def node(self) -> NodeValue:
"""The node of the root of the subtree."""
return self._node
def get_child(self, field_name: path.Step) -> Optional["PrensorValue"]:
"""Gets the child at field_name."""
return self._children.get(field_name)
def is_leaf(self) -> bool:
"""True iff the node value is a LeafNodeValue."""
return isinstance(self._node, LeafNodeValue)
def get_child_or_error(self, field_name: path.Step) -> "PrensorValue":
"""Gets the child at field_name."""
result = self._children.get(field_name)
if result is not None:
return result
raise ValueError("Field not found: {}".format(str(field_name)))
def get_descendant(self, p: path.Path) -> Optional["PrensorValue"]:
"""Finds the descendant at the path."""
result = self
for field_name in p.field_list:
result = result.get_child(field_name)
if result is None:
return None
return result
def get_descendant_or_error(self, p: path.Path) -> "PrensorValue":
"""Finds the descendant at the path."""
result = self.get_descendant(p)
if result is None:
raise ValueError("Missing path: {}".format(str(p)))
return result
def get_children(self) -> Mapping[path.Step, "PrensorValue"]:
"""A map from field name to subtree."""
return self._children
def get_descendants(self) -> Mapping[path.Path, "PrensorValue"]:
"""A map from paths to all subtrees."""
result = {path.Path([]): self}
for k, v in self._children.items():
subtree_descendants = v.get_descendants()
for k2, v2 in subtree_descendants.items():
result[path.Path([k]).concat(k2)] = v2
return result
def field_names(self) -> FrozenSet[path.Step]:
"""Returns the field names of the children."""
return frozenset(self._children.keys())
def _string_helper(self, field_name: str) -> Sequence[str]:
"""Helper for __str__ that outputs a list of lines."""
result = [
"{} {} {}".format(self.node.schema_string(), str(field_name),
self.node.data_string())
]
for k, v in self._children.items():
recursive = v._string_helper(k) # pylint: disable=protected-access
result.extend([" {}".format(x) for x in recursive])
return result
def _schema_string_helper(self, field_name: str) -> Sequence[str]:
"""Helper for __str__ that outputs a list of lines."""
result = [u"{} {}".format(self.node.schema_string(), str(field_name))]
for k, v in self._children.items():
recursive = v._string_helper(k) # pylint: disable=protected-access
result.extend([u" {}".format(x) for x in recursive])
return result
def schema_string(self):
"""Returns a string representing the schema of the Prensor."""
return u"\n".join(self._schema_string_helper(""))
def __str__(self):
"""Returns a string representing the schema of the Prensor."""
return "\n".join(self._string_helper(""))
def _prensor_value_from_type_spec_and_component_values(
prensor_type_spec: prensor._PrensorTypeSpec,
component_values: Iterator[Union[int, np.ndarray]]) -> PrensorValue:
"""Creates a PrensorValue from a _PrensorTypeSpec and components."""
# pylint: disable=protected-access
if prensor_type_spec._node_type == prensor_type_spec._NodeType.ROOT:
node = RootNodeValue(next(component_values))
elif prensor_type_spec._node_type == prensor
|
airanmehr/bio
|
Scripts/TimeSeriesPaper/Plot/DirectvsRNN.py
|
Python
|
mit
| 1,198 | 0.040902 |
'''
Copyleft Oct 24, 2015 Arya Iranmehr, PhD Student, Bafna's Lab, UC San Diego, Email: airanmehr@gmail.com
'''
import pandas as pd
import pylab as plt
import matplotlib as mpl
from matplotlib.cm import *
import os,sys;home=os.path.expanduser('~') +'/'
import numpy as np  # used below for np.arange and np.exp
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size':20})
mpl.rc('text', usetex=True)
x=np.arange(0,1,1e-5)[1:-1]
s=0.01
def sig(z): return 1./(1+np.exp(-z))
fig=plt.figure(figsize=(30,10), dpi=100)
fig.hold(True)
y_appr=np.log(x)- np.log(1-x)
y=np.log(x)- (1+s)*np.log(1-x)
x.shape
df=pd.DataFrame([x,y,y_appr],index=['x','y','z']).T
plt.subplot(1,2,1)
plt.plot(y_appr,x, color='red',linewidth=2, label='$\sigma(st/2-c)$')
plt.plot(y,x, color='blue',linewidth=2,label='$x_t$')
plt.xlim([-5,5]);plt.legend(loc='upper left')
plt.ylabel('$x_t$')
plt.xlabel('$st/2-c$')
plt.grid()
plt.subplot(1,2,2)
print (y_appr-y)
plt.plot(y,sig(y)-x, linewidth=2,label='Error');plt.legend(loc='upper left')
plt.ylabel('$|x_t-\sigma(st/2-c)|$')
plt.xlabel('$st/2-c$')
plt.xlim([-5,10])
plt.grid()
plt.suptitle('Approximation vs Exact Value of $x_t$ for $s=${}'.format(s))
# plt.savefig(home+'out/vineet/plots/apprx.png')
plt.show()
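# Note (added): the figure compares the exact relation
#   st/2 - c = ln(x_t) - (1 + s) * ln(1 - x_t)
# with its small-s approximation ln(x_t / (1 - x_t)), whose inverse is the
# logistic sig() defined above; the right-hand panel plots the error between
# sig(st/2 - c) and x_t over the same range.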
|
ParadropLabs/Paradrop
|
paradrop/daemon/paradrop/core/chute/chute_storage.py
|
Python
|
apache-2.0
| 3,342 | 0.001795 |
from __future__ import print_function
###################################################################
# Copyright 2013-2017 All Rights Reserved
# Authors: The Paradrop Team
###################################################################
import sys
from paradrop.base import settings
from paradrop.lib.utils.pd_storage import PDStorage
from .chute import Chute
class ChuteStorage(PDStorage):
"""
ChuteStorage class.
This class holds onto the list of Chutes on this AP.
    It extends the PDStorage class, which allows us to save the chuteList to disk transparently.
"""
# Class variable of chute list so all instances see the same thing
chuteList = dict()
def __init__(self, filename=None, save_timer=settings.FC_CHUTESTORAGE_SAVE_TIMER):
if(not filename):
filename = settings.FC_CHUTESTORAGE_FILE
PDStorage.__init__(self, filename, save_timer)
# Has it been loaded?
if(len(ChuteStorage.chuteList) == 0):
self.loadFromDisk()
def setAttr(self, attr):
"""Save our attr however we want (as class variable for all to see)"""
ChuteStorage.chuteList = attr
def getAttr(self):
"""Get our attr (as class variable for all to see)"""
return ChuteStorage.chuteList
def getChuteList(self):
"""Return a list of the names of the chutes we know of."""
return ChuteStorage.chuteList.values()
def getChute(self, name):
"""Returns a reference to a chute we have in our cache, or None."""
return ChuteStorage.chuteList.get(name, None)
def deleteChute(self, ch):
"""Deletes a chute from the chute storage. Can be sent the chute object, or the chute name."""
if (isinstance(ch, Chute)):
del ChuteStorage.chuteList[ch.name]
else:
del ChuteStorage.chuteList[ch]
self.saveToDisk()
def saveChute(self, ch):
"""
Saves the chute provided in our internal chuteList.
Also since we just received a new chute to hold onto we should save our ChuteList to disk.
"""
# check if there is a version of the chute already
oldch = ChuteStorage.chuteList.get(ch.name, None)
if(oldch != None):
# we should merge these chutes so we don't lose any data
oldch.__dict__.update(ch.__dict__)
# TODO: do we need to deal with cache separate? Old code we did
else:
ChuteStorage.chuteList[ch.name] = ch
self.saveToDisk()
def clearChuteStorage(self):
ChuteStorage.chuteList.clear()
self.saveToDisk()
#
# Functions we override to implement PDStorage Properly
#
def attrSaveable(self):
"""Returns True if we should save the ChuteList, otherwise False."""
return (type(ChuteStorage.chuteList) == dict)
@classmethod
def get_chute(cls, name):
return cls.chuteList[name]
if(__name__ == '__main__'): # pragma: no cover
def usage():
print('Usage: $0 -ls : print chute storage details')
exit(0)
try:
if(sys.argv[1] != '-ls'):
usage()
except Exception as e:
print(e)
usage()
cs = ChuteStorage()
chutes = cs.getChuteList()
for ch in chutes:
print(ch)
|
xuweiliang/Codelibrary
|
openstack_dashboard/dashboards/admin/networks/agents/tables.py
|
Python
|
apache-2.0
| 3,868 | 0.001551 |
# Copyright 2014 Kylincloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from horizon.utils import filters as utils_filters
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class DeleteDHCPAgent(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete DHCP Agent",
u"Delete DHCP Agents",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted DHCP Agent",
u"Deleted DHCP Agents",
count
)
policy_rules = (("network", "delete_agent"),)
def delete(self, request, obj_id):
network_id = self.table.kwargs['network_id']
network_info = api.neutron.network_get(request, network_id)
try:
api.neutron.remove_network_from_dhcp_agent(request, obj_id,
network_id)
api.nova.systemlogs_create(request,
network_info.name,
record_action.DELETEAGENT)
except Exception as e:
msg = _('Failed to delete agent: %s') % e
LOG.info(msg)
api.nova.systemlogs_create(request, network_info.name,
record_action.DELETEAGENT, result=False, detail=msg)
redirect = reverse('horizon:admin:networks:detail',
args=[network_id])
exceptions.handle(request, msg, redirect=redirect)
class AddDHCPAgent(tables.LinkAction):
name = "add"
verbose_name = _("Add DHCP Agent")
url = "horizon:admin:networks:adddhcpagent"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "update_agent"),)
def get_link_url(self, datum=None):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id,))
def get_agent_status(agent):
if agent.admin_state_up:
return _('Enabled')
return _('Disabled')
def get_agent_state(agent):
if agent.alive:
return _('Up')
return _('Down')
class DHCPAgentsFilterAction(tables.FilterAction):
name = "agents"
class DHCPAgentsTable(tables.DataTable):
id = tables.Column('id', verbose_name=_('ID'), hidden=True)
host = tables.Column('host', verbose_name=_('Host'))
status = tables.Column(get_agent_status, verbose_name=_('Status'))
state = tables.Column(get_agent_state, verbose_name=_('Admin State'))
heartbeat_timestamp = tables.Column('heartbeat_timestamp',
verbose_name=_('Updated At'),
filters=(utils_filters.parse_isotime,
filters.timesince))
class Meta(object):
name = "agents"
verbose_name = _("DHCP Agents")
table_actions = (AddDHCPAgent, DeleteDHCPAgent,
DHCPAgentsFilterAction,)
row_actions = (DeleteDHCPAgent,)
hidden_title = False
|
xyproto/gosignal
|
pyo/examples/effects/03_detuned_waveguides.py
|
Python
|
gpl-3.0
| 513 | 0.02729 |
#!/usr/bin/env python
# encoding: utf-8
"""
Detuned waveguide bank.
"""
from pyo import *
import random
s = Server(sr=44100, nchnls=2, buffersize=512, duplex=0).boot()
src = SfPlayer("../snds/ounkmaster.aif", loop=True, mul=.1)
lf = Sine(freq=[random.uniform(.005, .015) for i in range(8)],
mul=[.02,.04,.06,.08,.1,.12,.14,.16],
add=[50,100,150,200,250,300,350,400])
lf2 = Sine(.005, mul=.2, add=.7)
det_wg = AllpassWG(src, freq=lf, feed=.999, detune=lf2, mul=.25).out()
s.gui(locals())
|
xfouloux/Flexget
|
flexget/plugins/input/sonarr.py
|
Python
|
mit
| 5,907 | 0.00237 |
from __future__ import unicode_literals, division, absolute_import
from urlparse import urlparse
import logging
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
log = logging.getLogger('sonarr')
class Sonarr(object):
schema = {
'type': 'object',
'properties': {
'base_url': {'type': 'string'},
'port': {'type': 'number', 'default': 80},
'api_key': {'type': 'string'},
'include_ended': {'type': 'boolean', 'default': True},
'only_monitored': {'type': 'boolean', 'default': True},
'include_data': {'type': 'boolean', 'default': False}
},
'required': ['api_key', 'base_url'],
'additionalProperties': False
}
def on_task_input(self, task, config):
"""
This plugin returns ALL of the shows monitored by Sonarr.
        It returns ended shows by default and does not return unmonitored
        shows by default.
Syntax:
sonarr:
base_url=<value>
port=<value>
api_key=<value>
include_ended=<yes|no>
only_monitored=<yes|no>
include_data=<yes|no>
Options base_url and api_key are required.
        Use with an input plugin like discover and/or configure_series.
Example:
download-tv-task:
configure_series:
settings:
quality:
- 720p
from:
sonarr:
base_url: http://localhost
port: 8989
api_key: MYAPIKEY1123
discover:
what:
- emit_series: yes
from:
torrentz: any
download:
/download/tv
Note that when using the configure_series plugin with Sonarr
you are basically synced to it, so removing a show in Sonarr will
        remove it in flexget as well, which could be positive or negative,
depending on your usage.
"""
parsedurl = urlparse(config.get('base_url'))
url = '%s://%s:%s%s/api/series' % (parsedurl.scheme, parsedurl.netloc, config.get('port'), parsedurl.path)
        headers = {'X-Api-Key': config['api_key']}
try:
json = task.requests.get(url, headers=headers).json()
except RequestException as e:
raise plugin.PluginError('Unable to connect to Sonarr at %s://%s:%s%s. Error: %s'
% (parsedurl.scheme, parsedurl.netloc, config.get('port'),
parsedurl.path, e))
entries = []
# Dictionary based on Sonarr's quality list.
        qualities = {0: '',
1: 'sdtv',
2: 'dvdrip',
3: '1080p webdl',
4: '720p hdtv',
5: '720p webdl',
6: '720p bluray',
7: '1080p bluray',
8: '480p webdl',
9: '1080p hdtv',
10: '1080p bluray'}
# Retrieves Sonarr's profile list if include_data is set to true
if config.get('include_data'):
url2 = '%s://%s:%s%s/api/profile' % (parsedurl.scheme, parsedurl.netloc, config.get('port'), parsedurl.path)
try:
profiles_json = task.requests.get(url2, headers=headers).json()
except RequestException as e:
raise plugin.PluginError('Unable to connect to Sonarr at %s://%s:%s%s. Error: %s'
% (parsedurl.scheme, parsedurl.netloc, config.get('port'),
parsedurl.path, e))
for show in json:
fg_quality = '' # Initializes the quality parameter
entry = None
if show['monitored'] or not config.get('only_monitored'): # Checks if to retrieve just monitored shows
if config.get('include_ended') or show['status'] != 'ended': # Checks if to retrieve ended shows
if config.get('include_data'): # Check if to retrieve quality & path
for profile in profiles_json:
if profile['id'] == show['profileId']: # Get show's profile data from all possible profiles
current_profile = profile
fg_quality = qualities[current_profile['cutoff']['id']] # Sets profile cutoff quality as show's quality
entry = Entry(title=show['title'],
url='',
series_name=show['title'],
tvdb_id=show.get('tvdbId'),
tvrage_id=show.get('tvRageId'),
# configure_series plugin requires that all settings will have the configure_series prefix
configure_series_quality=fg_quality)
if entry.isvalid():
entries.append(entry)
else:
log.error('Invalid entry created? %s' % entry)
# Test mode logging
if entry and task.options.test:
log.info("Test mode. Entry includes:")
log.info(" Title: %s" % entry["title"])
log.info(" URL: %s" % entry["url"])
log.info(" Show name: %s" % entry["series_name"])
log.info(" TVDB ID: %s" % entry["tvdb_id"])
log.info(" TVRAGE ID: %s" % entry["tvrage_id"])
log.info(" Quality: %s" % entry["configure_series_quality"])
# continue
return entries
@event('plugin.register')
def register_plugin():
plugin.register(Sonarr, 'sonarr', api_ver=2)
|
chillpop/RELAX-HARDER
|
effects/base.py
|
Python
|
mit
| 10,042 | 0.004979 |
from __future__ import print_function
import numpy
import time
import traceback
import colorsys
import random
class EffectLayer(object):
"""Abstract base class for one layer of an LED light effect. Layers operate on a shared framebuffer,
adding their own contribution to the buffer and possibly blending or overlaying with data from
prior layers.
The 'frame' passed to each render() function is an array of LEDs. Each LED is a 3-element list
with the red, green, and blue components each as floating point values with a normalized
brightness range of [0, 1]. If a component is beyond this range, it will be clamped during
conversion to the hardware color format.
"""
transitionFadeTime = 1.0
maximum_errors = 5
def render(self, params, frame):
raise NotImplementedError("Implement render() in your EffectLayer subclass")
def safely_render(self, params, frame):
if not hasattr(self, 'error_count'):
self.error_count = 0
try:
if self.error_count < EffectLayer.maximum_errors:
self.render(params, frame)
except Exception as err:
error_log = open('error.log','a')
error_log.write(time.asctime(time.gmtime()) + " UTC" + " : ")
traceback.print_exc(file=error_log)
print("ERROR:", err, "in", self)
self.error_count += 1
if self.error_count >= EffectLayer.maximum_errors:
print("Disabling", self, "for throwing too many errors")
class HeadsetResponsiveEffectLayer(EffectLayer):
"""A layer effect that responds to the MindWave headset in some way.
    Two major differences from EffectLayer:
    1) Constructor expects four parameters:
    -- respond_to: the name of a field in EEGInfo (threads.HeadsetThread.EEGInfo).
Currently this means either 'attention' or 'meditation'
-- smooth_response_over_n_secs: to avoid rapid fluctuations from headset
noise, averages the response metric over this many seconds
-- minimum_response_level: if the response level is below this, the layer isn't rendered
-- inverse: If this is true, the layer will respond to (1-response_level)
instead of response_level
2) Subclasses now only implement the render_responsive() function, which
is the same as EffectLayer's render() function but has one extra
parameter, response_level, which is the current EEG value of the indicated
field (assumed to be on a 0-1 scale, or None if no value has been read yet).
"""
def __init__(self, respond_to, smooth_response_over_n_secs=0, minimum_response_level=None, inverse=False):
# Name of the eeg field to influence this effect
if respond_to not in ('attention', 'meditation'):
raise Exception('respond_to was "%s" -- should be "attention" or "meditation"'
% respond_to)
self.respond_to = respond_to
self.smooth_response_over_n_secs = smooth_response_over_n_secs
self.measurements = []
self.timestamps = []
self.last_eeg = None
self.last_response_level = None
self.minimum_response_level = minimum_response_level
# We want to smoothly transition between values instead of jumping
# (as the headset typically gives one reading per second)
self.fading_to = None
self.inverse = inverse
def start_fade(self, new_level):
if not self.last_response_level:
self.last_response_level = new_level
else:
self.fading_to = new_level
def end_fade(self):
self.last_response_level = self.fading_to
self.fading_to = None
def calculate_response_level(self, params, use_eeg2=False):
now = time.time()
response_level = None
# Update our measurements, if we have a new one
eeg = params.eeg2 if use_eeg2 else params.eeg1
if eeg and eeg != self.last_eeg and eeg.on:
if self.fading_to:
self.end_fade()
# Prepend newest measurement and timestamp
self.measurements[:0] = [getattr(eeg, self.respond_to)]
self.timestamps[:0] = [now]
self.last_eeg = eeg
# Compute the parameter to send to our rendering function
N = len(self.measurements)
idx = 0
while idx < N:
dt = self.timestamps[0] - self.timestamps[idx]
if dt >= self.smooth_response_over_n_secs:
self.measurements = self.measurements[:(idx + 1)]
self.timestamps = self.timestamps[:(idx + 1)]
break
idx += 1
self.start_fade(sum(self.measurements) * 1.0 / len(self.measurements))
response_level = self.last_response_level
elif self.fading_to:
# We assume one reading per second, so a one-second fade
fade_progress = now - self.timestamps[0]
if fade_progress >= 1:
self.end_fade()
response_level = self.last_response_level
else:
response_level = (
fade_progress * self.fading_to +
(1 - fade_progress) * self.last_response_level)
if response_level and self.inverse:
response_level = 1 - response_level
return response_level
def render(self, params, frame):
response_level = self.calculate_response_level(params)
        if self.minimum_response_level is None or response_level >= self.minimum_response_level:
self.render_responsive(params, frame, response_level)
def render_responsive(self, params, frame, response_level):
raise NotImplementedError(
"Implement render_responsive() in your HeadsetResponsiveEffectLayer subclass")
########################################################
# Simple EffectLayer implementations and examples
########################################################
class ColorLayer(EffectLayer):
"""Simplest layer, draws a static RGB color"""
def __init__(self, color):
self.color = color
def render(self, params, frame):
frame[:] += self.color
class RGBLayer(EffectLayer):
"""Simplest layer, draws a static RGB color cube."""
def render(self, params, frame):
length = len(frame)
step_size = 1.0 / length
hue = 0.0
for pixel in xrange(0, length):
frame[pixel] = colorsys.hsv_to_rgb(hue, 1, 1)
hue += step_size
class MultiplierLayer(EffectLayer):
""" Renders two layers in temporary frames, then adds the product of those frames
to the frame passed into its render method
"""
def __init__(self, layer1, layer2):
self.layer1 = layer1
self.layer2 = layer2
def render(self, params, frame):
temp1 = numpy.zeros(frame.shape)
temp2 = numpy.zeros(frame.shape)
self.layer1.render(params, temp1)
self.layer2.render(params, temp2)
numpy.multiply(temp1, temp2, temp1)
numpy.add(frame, temp1, frame)
class BlinkyLayer(EffectLayer):
"""Test our timing accuracy: Just blink everything on and off every other frame."""
on = False
def render(self, params, frame):
self.on = not self.on
frame[:] += self.on
class ColorBlinkyLayer(EffectLayer):
on = False
def render(self, params, frame):
self.on = not self.on
color = numpy.array(colorsys.hsv_to_rgb(random.random(),1,1))
if self.on:
frame[:] += color
class SnowstormLayer(EffectLayer):
transitionFadeTime = 1.0
def render(self, params, frame):
numpy.add(frame, numpy.random.rand(params.num_pixels, 1), frame)
class TechnicolorSnowstormLayer(EffectLayer):
transitionFadeTime = 1.5
def render(self, params, frame):
numpy.add(frame, numpy.random.rand(params.num_pixels, 3), frame)
class WhiteOutLayer(EffectLayer):
""" Sets everything to white """
transitionFadeTime = 0.5
def render(self, params
|
nashif/zephyr
|
boards/xtensa/intel_adsp_cavs15/tools/adsplog.py
|
Python
|
apache-2.0
| 8,018 | 0.000499 |
#!/usr/bin/python3
#
# Copyright (c) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import time
import subprocess
import mmap
# Log reader for the trace output buffer on a ADSP device.
#
# When run with no arguments, it will detect the device, dump the
# contents of the trace buffer and continue to poll for more output.
# The "--no-history" argument can be passed to suppress emission of the
# history, and emit only new output. This can be useful for test
# integration where the user does not want to see any previous runs in
# the log output.
#
# The trace buffer is inside a shared memory region exposed by the
# audio PCI device as a BAR at index 4. The hardware provides 4 128k
# "windows" starting at 512kb in the BAR which the DSP firmware can
# map to 4k-aligned locations within its own address space. By
# protocol convention log output is an 8k region at window index 3.
#
# The 8k window is treated as an array of 64-byte "slots", each of
# which is prefixed by a magic number, which should be 0x55aa for log
# data, followed by a 16 bit "ID" number, followed by a null-terminated
# string in the final 60 bytes (or 60 non-null bytes of log data).
# The DSP firmware will write sequential IDs into the buffer starting
# from an ID of zero in the first slot, and wrapping at the end.
MAP_SIZE = 8192
SLOT_SIZE = 64
NUM_SLOTS = int(MAP_SIZE / SLOT_SIZE)
SLOT_MAGIC = 0x55aa
WIN_OFFSET = 0x80000
WIN_IDX = 3
WIN_SIZE = 0x20000
LOG_OFFSET = WIN_OFFSET + WIN_IDX * WIN_SIZE
# List of known ADSP devices by their PCI IDs
DEVICES = ["8086:5a98"]
mem = None
for dev in DEVICES:
# Find me a way to do this detection as cleanly in python as shell, I
# dare you.
barfile = subprocess.Popen(["sh", "-c",
"echo -n "
"$(dirname "
f" $(fgrep PCI_ID={dev.upper()} "
" /sys/bus/pci/devices/*/uevent))"
"/resource4"],
stdout=subprocess.PIPE).stdout.read()
if not os.path.exists(barfile):
continue
if not os.access(barfile, os.R_OK):
sys.stderr.write(f"ERROR: Cannot open {barfile} for reading.")
sys.exit(1)
fd = open(barfile)
mem = mmap.mmap(fd.fileno(), MAP_SIZE, offset=LOG_OFFSET,
prot=mmap.PROT_READ)
if mem is None:
sys.stderr.write("ERROR: No ADSP device found.")
sys.exit(1)
# Returns a tuple of (id, msg) if the slot is valid, or (-1, "") if
# the slot does not contain firmware trace data
def read_slot(slot, mem):
off = slot * SLOT_SIZE
magic = (mem[off + 1] << 8) | mem[off]
sid = (mem[off + 3] << 8) | mem[off + 2]
if magic != SLOT_MAGIC:
return (-1, "")
# This dance because indexing large variable-length slices of
# the mmap() array seems to produce garbage....
msgbytes = []
for i in range(4, SLOT_SIZE):
b = mem[off+i]
if b == 0:
break
msgbytes.append(b)
msg = bytearray(len(msgbytes))
for i, elem in enumerate(msgbytes):
msg[i] = elem
return (sid, msg.decode(encoding="utf-8", errors="ignore"))
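# Equivalent decode using struct.unpack_from (added as a cross-check sketch;
# read_slot() above is what the tool actually uses). It assumes the
# little-endian slot layout described in the header comment: u16 magic,
# u16 id, then up to 60 bytes of NUL-terminated text.
def read_slot_struct(slot, mem):
    import struct
    off = slot * SLOT_SIZE
    magic, sid = struct.unpack_from("<HH", mem, off)
    if magic != SLOT_MAGIC:
        return (-1, "")
    payload = bytes(mem[off + 4:off + SLOT_SIZE]).split(b"\0", 1)[0]
    return (sid, payload.decode("utf-8", errors="ignore"))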
def read_hist(start_slot):
id0, msg = read_slot(start_slot, mem)
# An invalid slot zero means no data has ever been placed in the
# trace buffer, which is likely a system reset condition. Back
# off for one second, because continuing to read the buffer has
# been observed to hang the flash process (which I think can only
# be a hardware bug).
if start_slot == 0 and id0 < 0:
sys.stdout.write("===\n=== [ADSP Device Reset]\n===\n")
sys.stdout.flush()
time.sleep(1)
return (0, 0, "")
# Start at zero and read forward to get the last data in the
# buffer. We are always guaranteed that slot zero will contain
    # valid data if any slot contains valid data.
last_id = id0
final_slot = start_slot
for i in range(start_slot + 1, NUM_SLOTS):
id, s = read_slot(i, mem)
if id != ((last_id + 1) & 0xffff):
break
msg += s
final_slot = i
last_id = id
final_id = last_id
# Now read backwards from the end to get the prefix blocks from
# the last wraparound
last_id = id0
for i in range(NUM_SLOTS - 1, final_slot, -1):
id, s = read_slot(i, mem)
if id < 0:
break
# Race protection: the other side might have clobbered the
# data after we read the ID, make sure it hasn't changed.
id_check = read_slot(i, mem)[0]
if id_check != id:
break
if ((id + 1) & 0xffff) == last_id:
msg = s + msg
last_id = id
# If we were unable to read forward from slot zero, but could read
# backward, then this is a wrapped buffer being currently updated
# into slot zero. See comment below.
if final_slot == start_slot and last_id != id0:
return None
return ((final_slot + 1) % NUM_SLOTS, (final_id + 1) & 0xffff, msg)
# Returns a tuple containing the next slot to expect data in, the ID
# that slot should hold, and the full string history of trace data
# from the buffer. Start with slot zero (which is always part of the
# current string if there is any data at all) and scan forward and
# back to find the maximum extent.
def trace_history():
# This loop is a race protection for the situation where the
# buffer has wrapped and new data is currently being placed into
# slot zero. In those circumstances, slot zero will have a valid
# magic number but its sequence ID will not correlate with the
# previous and next slots.
ret = None
while ret is None:
ret = read_hist(0)
if ret is None:
ret = read_hist(1)
return ret
# Loop, reading the next slot if it has new data. Otherwise check the
# full buffer and see if history is discontiguous (i.e. what is in the
# buffer should be a proper suffix of what we have stored). If it
# doesn't match, then just print it (it's a reboot or a ring buffer
# overrun). If nothing has changed, then sleep a bit and keep
# polling.
def main():
next_slot, next_id, last_hist = trace_history()
# We only have one command line argument, to suppress the history
# dump at the start (so CI runs don't see e.g. a previous device
# state containing logs from another test, and get confused)
if len(sys.argv) < 2 or sys.argv[1] != "--no-history":
sys.stdout.write(last_hist)
while True:
id, smsg = read_slot(next_slot, mem)
if id == next_id:
next_slot = int((next_slot + 1) % NUM_SLOTS)
next_id = (id + 1) & 0xffff
last_hist += smsg
sys.stdout.write(smsg)
else:
slot2, id2, msg2 = trace_history()
# Device reset:
if slot2 == 0 and id2 == 0 and msg2 == "":
next_id = 1
next_slot = slot2
last_hist = ""
if not last_hist.endswith(msg2):
# On a mismatch, go back and check one last time to
# address the race where a new slot wasn't present
# just JUST THEN but is NOW.
id3, s3 = read_slot(next_slot, mem)
if id3 == next_id:
next_slot = int((next_slot + 1) % NUM_SLOTS)
next_id = (next_id + 1) & 0xffff
last_hist += s3
sys.stdout.write(s3)
continue
# Otherwise it represents discontiguous data, either a
            # reset or an overrun, just dump what we have and
# start over.
next_slot = slot2
last_hist = msg2
sys.stdout.write(msg2)
else:
sys.stdout.flush()
time.sleep(0.10)
if __name__ == "__main__":
main()
|
vinco/django-socialregistration
|
socialregistration/views.py
|
Python
|
mit
| 10,506 | 0.006663 |
from django.conf import settings
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import View, TemplateView
from socialregistration.clients.oauth import OAuthError
from socialregistration.mixins import SocialRegistration
GENERATE_USERNAME = getattr(settings, 'SOCIALREGISTRATION_GENERATE_USERNAME', False)
USERNAME_FUNCTION = getattr(settings, 'SOCIALREGISTRATION_GENERATE_USERNAME_FUNCTION',
'socialregistration.utils.generate_username')
FORM_CLASS = getattr(settings, 'SOCIALREGISTRATION_SETUP_FORM',
'socialregistration.forms.UserForm')
INITAL_DATA_FUNCTION = getattr(settings, 'SOCIALREGISTRATION_INITIAL_DATA_FUNCTION',
None)
class Setup(SocialRegistration, View):
"""
Setup view to create new Django users from third party APIs.
"""
template_name = 'socialregistration/setup.html'
def get_form(self):
"""
Return the form to be used. The return form is controlled
with ``SOCIALREGISTRATION_SETUP_FORM``.
"""
return self.import_attribute(FORM_CLASS)
def get_username_function(self):
"""
Return a function that can generate a username. The function
is controlled with ``SOCIALREGISTRATION_GENERATE_USERNAME_FUNCTION``.
"""
return self.import_attribute(USERNAME_FUNCTION)
def get_initial_data(self, request, user, profile, client):
"""
Return initial data for the setup form. The function can be
controlled with ``SOCIALREGISTRATION_INITIAL_DATA_FUNCTION``.
:param request: The current request object
:param user: The unsaved user object
:param profile: The unsaved profile object
:param client: The API client
"""
if INITAL_DATA_FUNCTION:
func = self.import_attribute(INITAL_DATA_FUNCTION)
return func(request, user, profile, client)
return {}
def generate_username_and_redirect(self, request, user, profile, client):
"""
Generate a username and then redirect the user to the correct place.
This method is called when ``SOCIALREGISTRATION_GENERATE_USERNAME``
is set.
:param request: The current request object
:param user: The unsaved user object
:param profile: The unsaved profile object
:param client: The API client
"""
func = self.get_username_function()
user.username = func(user, profile, client)
user.set_unusable_password()
user.save()
profile.user = user
profile.save()
user = profile.authenticate()
self.send_connect_signal(request, user, profile, client)
self.login(request, user)
self.send_login_signal(request, user, profile, client)
self.delete_session_data(request)
return HttpResponseRedirect(self.get_next(request))
def get(self, request):
"""
When signing a new user up - either display a setup form, or
generate the username automatically.
"""
try:
user, profile, client = self.get_session_data(request)
except KeyError:
return self.render_to_response(dict(
error=_("Social profile is missing from your session.")))
if GENERATE_USERNAME:
return self.generate_username_and_redirect(request, user, profile, client)
form = self.get_form()(initial=self.get_initial_data(request, user, profile, client))
return self.render_to_response(dict(form=form))
def post(self, request):
"""
Save the user and profile, login and send the right signals.
"""
try:
user, profile, client = self.get_session_data(request)
except KeyError:
return self.render_to_response(dict(
error=_("A social profile is missing from your session.")))
form = self.get_form()(request.POST, request.FILES,
initial=self.get_initial_data(request, user, profile, client))
if not form.is_valid():
return self.render_to_response(dict(form=form))
user, profile = form.save(request, user, profile, client)
user = profile.authenticate()
self.send_connect_signal(request, user, profile, client)
self.login(request, user)
self.send_login_signal(request, user, profile, client)
self.delete_session_data(request)
return HttpResponseRedirect(self.get_next(request))
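# A hypothetical generator (added sketch) for the
# SOCIALREGISTRATION_GENERATE_USERNAME_FUNCTION hook: it receives the same
# (user, profile, client) triple that generate_username_and_redirect() passes
# to the configured function. The name and strategy are illustrative only.
def example_generate_username(user, profile, client):
    import uuid
    base = (getattr(user, 'first_name', '') or 'user').lower().replace(' ', '_')
    return '%s_%s' % (base, uuid.uuid4().hex[:8])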
class Logout(View):
"""
Log the user out of Django. This **does not** log the user out
of third party sites.
"""
def get(self, request):
logout(request)
url = getattr(settings, 'LOGOUT_REDIRECT_URL', '/')
return HttpResponseRedirect(url)
class OAuthRedirect(SocialRegistration, View):
"""
Base class for both OAuth and OAuth2 redirects.
:param client: The API client class that should be used.
:param template_name: The error template.
"""
# The OAuth{1,2} client to be used
client = None
# The template to render in case of errors
template_name = None
def post(self, request):
"""
Create a client, store it in the user's session and redirect the user
to the API provider to authorize our app and permissions.
"""
request.session['next'] = self.get_next(request)
client = self.get_client()()
request.session[self.get_client().get_session_key()] = client
try:
return HttpResponseRedirect(client.get_redirect_url())
except OAuthError, error:
return self.render_to_response({'error': error})
class OAuthCallback(SocialRegistration, View):
"""
    Base class for OAuth and OAuth2 callback views.
:param client: The API client class that should be used.
:param template_name: The error template.
"""
# The OAuth{1,2} client to be used
client = None
# The template to render in case of errors
template_name = None
def get_redirect(self):
"""
Return a URL that will set up the correct models if the
OAuth flow succeeded. Subclasses **must** override this
method.
"""
raise NotImplementedError
def get(self, request):
"""
Called after the user is redirected back to our application.
Tries to:
- Complete the OAuth / OAuth2 flow
- Redirect the user to another view that deals with login, connecting
or user creation.
"""
try:
client = request.session[self.get_client().get_session_key()]
client.complete(dict(request.GET.items()))
request.session[self.get_client().get_session_key()] = client
return HttpResponseRedirect(self.get_redirect())
except KeyError:
return self.render_to_response({'error': "Session expired."})
except OAuthError, error:
return self.render_to_response({'error': error})
class SetupCallback(SocialRegistration, TemplateView):
"""
Base class for OAuth and OAuth2 login / connects / registration.
"""
template_name = 'socialregistration/setup.error.html'
def get(self, request):
"""
Called after authorization was granted and the OAuth flow
successfully completed.
Tries to:
- Connect the remote account if the user is logged in already
- Log the user in if a local profile of the remote account
exists already
- Create a user and profile object if none of the above succeed
and redirect the user further to either capture some data via
form or generate a username automatically
"""
try:
client = requ
|
tiradoe/Giflocker
|
giflocker/locker/admin.py
|
Python
|
lgpl-3.0
| 143 | 0.013986 |
from django.contrib import admin
from locker.models import Gif
class GifAdmin(admin.ModelAdmin):
pass
admin.site.register(Gif, GifAdmin)
|
indictranstech/erpnext
|
erpnext/education/doctype/assessment_result/test_assessment_result.py
|
Python
|
agpl-3.0
| 521 | 0.017274 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.education.api import get_grade
# test_records = frappe.get_test_records('Assessment Result')
class TestAssessmentResult(unittest.TestCase):
def test_grade(self):
grade = get_grade("_Test Grading Scale", 80)
self.assertEquals("A", grade)

        grade = get_grade("_Test Grading Scale", 70)
        self.assertEquals("B", grade)
|
mikedelong/aarhus
|
demos/clusters_from_topics.py
|
Python
|
apache-2.0
| 9,957 | 0.002913 |
# http://stats.stackexchange.com/questions/28904/how-to-cluster-lda-lsi-topics-generated-by-gensim
# coding:utf-8
import cPickle as pickle
import glob
import logging
import os
import scipy
import scipy.sparse
import string
import sys
import time
from collections import defaultdict
import gensim.matutils
import gensim.utils
import numpy
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("english")
data_dir = os.path.join(os.getcwd(), 'data/')
output_dir = os.path.join(os.getcwd(), 'output/')
work_dir = os.path.join(os.getcwd(), 'model', os.path.splitext(os.path.basename(__file__))[0])  # strip the .py extension safely
if not os.path.exists(work_dir):
os.mkdir(work_dir)
os.chdir(work_dir)
logger = logging.getLogger('text_similar')
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# convert to unicode
def to_unicode(arg_text):
result = arg_text.lower()
if not isinstance(result, unicode):
result = result.decode('utf-8', 'ignore')
result = ' '.join(
["".join([character for character in unicode(word) if character not in string.punctuation]) for word in
result.split(' ') if not any([word.startswith('http:'), word.startswith('https:'),
word.startswith('mailto:'), word.endswith('.com'),
word.endswith('.org')])])
return result
def to_unicode_unrolled(arg_text):
t = arg_text.lower()
result = []
if not isinstance(t, unicode):
t = t.decode('utf-8', 'ignore')
for word in t.split(' '):
b0 = word.startswith(u'http:')
b6 = word.startswith(u'<http:')
b1 = word.startswith(u'https:')
b2 = word.startswith(u'mailto:')
b3 = word.endswith(u'.com')
b4 = word.endswith(u'.org')
b5 = any([b0, b1, b2, b3, b4, b6])
if not b5:
word = ' '.join(
["".join([character for character in unicode(word) if character not in string.punctuation])])
result.append(word)
return " ".join(result)
def remove_stopwords_and_stem(arg_text):
result = [stemmer.stem(item) for item in arg_text if item not in stopwords.words('english')]
return result
class TextSimilar(gensim.utils.SaveLoad):
def __init__(self):
self.conf = {}
self.dictionary = None
self.docs = None
self.fname = None
self.lda = None
self.lda_similarity_index = None
self.lda_tfidf = None
self.lda_tfidf_similarity_index = None
self.logent = None
self.logent_similarity_index = None
self.lsi = None
self.lsi_similarity_index = None
self.method = None
self.para = None
self.similar_index = None
self.tfidf = None
def _preprocess(self):
# todo write a more pythonic version of this function and use it
docs = [to_unicode_unrolled(open(f, 'r').read().strip()).split() for f in glob.glob(self.fname)]
logger.debug('ingested files into big array with length %d' % len(docs))
docs = [remove_stopwords_and_stem(item) for item in docs]
logger.debug('removed stopwords and stemmed')
pickle.dump(docs, open(self.conf['fname_docs'], 'wb'))
logger.debug('pickle dump to %s done' % self.conf['fname_docs'])
dictionary = corpora.Dictionary(docs)
dictionary.save(self.conf['fname_dict'])
logger.debug('dictionary save to %s done' % self.conf['fname_dict'])
corpus = [dictionary.doc2bow(doc) for doc in docs]
corpora.MmCorpus.serialize(self.conf['fname_corpus'], corpus)
logger.debug('corpus serialize to %s done' % self.conf['fname_corpus'])
return docs, dictionary, corpus
def _generate_conf(self):
fname = self.fname[self.fname.rfind('/') + 1:]
self.conf['fname_docs'] = '%s.docs' % fname
self.conf['fname_dict'] = '%s.dict' % fname
self.conf['fname_corpus'] = '%s.mm' % fname
def train(self, arg_fname, is_pre=True, method='lsi', **params):
self.fname = arg_fname
self.method = method
self._generate_conf()
if is_pre:
self.docs, self.dictionary, corpus = self._preprocess()
else:
self.docs = pickle.load(open(self.conf['fname_docs']))
self.dictionary = corpora.Dictionary.load(self.conf['fname_dict'])
            corpus = corpora.MmCorpus(self.conf['fname_corpus'])
if params is None:
params = {}
logger.info("training TF-IDF model")
self.tfidf = models.TfidfModel(corpus, id2word=self.dictionary)
corpus_tfidf = self.tfidf[corpus]
if method == 'lsi':
logger.info("training LSI model")
self.lsi = models.LsiModel(corpus_tfidf, id2word=self.dictionary, **params)
self.lsi.print_topics(-1)
self.lsi_similarity_index = similarities.MatrixSimilarity(self.lsi[corpus_tfidf])
self.para = self.lsi[corpus_tfidf]
elif method == 'lda_tfidf':
logger.info("training LDA model")
# try 6 workers here instead of original 8
self.lda_tfidf = models.LdaMulticore(corpus_tfidf, id2word=self.dictionary, workers=6, **params)
self.lda_tfidf.print_topics(-1)
            self.lda_tfidf_similarity_index = similarities.MatrixSimilarity(self.lda_tfidf[corpus_tfidf])
            self.para = self.lda_tfidf[corpus_tfidf]
elif method == 'lda':
logger.info("training LDA model")
# try 6 workers here instead of original 8
self.lda = models.LdaMulticore(corpus, id2word=self.dictionary, workers=6, **params)
self.lda.print_topics(-1)
self.lda_similarity_index = similarities.MatrixSimilarity(self.lda[corpus])
self.para = self.lda[corpus]
elif method == 'logentropy':
logger.info("training a log-entropy model")
self.logent = models.LogEntropyModel(corpus, id2word=self.dictionary)
self.logent_similarity_index = similarities.MatrixSimilarity(self.logent[corpus])
self.para = self.logent[corpus]
else:
msg = "unknown semantic method %s" % method
logger.error(msg)
raise NotImplementedError(msg)
def doc2vec(self, doc):
bow = self.dictionary.doc2bow(to_unicode(doc).split())
if self.method == 'lsi':
return self.lsi[self.tfidf[bow]]
elif self.method == 'lda':
return self.lda[bow]
        elif self.method == 'lda_tfidf':
            return self.lda_tfidf[self.tfidf[bow]]
elif self.method == 'logentropy':
return self.logent[bow]
def find_similar(self, doc, n=10):
vec = self.doc2vec(doc)
sims = self.similar_index[vec]
sims = sorted(enumerate(sims), key=lambda item: -item[1])
for elem in sims[:n]:
idx, value = elem
print (' '.join(self.docs[idx]), value)
def get_vectors(self):
return self._get_vector(self.para)
@staticmethod
def _get_vector(corpus):
def get_max_id():
maxid = -1
for document in corpus:
maxid = max(maxid, max(
[-1] + [fieldid for fieldid, _ in document])) # [-1] to avoid exceptions from max(empty)
return maxid
num_features = 1 + get_max_id()
index = numpy.empty(shape=(len(corpus), num_features), dtype=numpy.float32)
for docno, vector in enumerate(corpus):
if docno % 1000 == 0:
logger.info("PROGRESS: at document #%i/%i" % (docno, len(corpus)))
if isinstance(vector, numpy.ndarray):
pass
elif scipy.sparse.issparse(vector):
vector = vector.toarray().flatten()
else:
vector = gensim.matutils.unitvec(gensim.matutils.sparse2full(vector, num_features))
index[docno] = vector
return index
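# End-to-end sketch (the glob pattern and extra keyword arguments are
# illustrative; num_topics is a standard gensim model parameter):
#   ts = TextSimilar()
#   ts.train('corpus/*.txt', is_pre=True, method='lsi', num_topics=10)
#   ts.find_similar('some query text', n=5)
#   vectors = ts.get_vectors()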
def cluster(vectors, ts, k=30, arg_method=None):
from sklearn.cluster import
|
sonntagsgesicht/regtest
|
.aux/venv/lib/python3.9/site-packages/pygments/styles/abap.py
|
Python
|
apache-2.0
| 727 | 0 |
"""
pygments.styles.abap
~~~~~~~~~~~~~~~~~~~~
    ABAP workbench like style.

    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator
class AbapStyle(Style):
default_style = ""
styles = {
Comment: 'italic #888',
Comment.Special: '#888',
        Keyword: '#00f',
Operator.Word: '#00f',
Name: '#000',
Number: '#3af',
String: '#5a2',
Error: '#F00',
}
|
ibegleris/Single-mode-FOPO
|
src/data_plotters_animators.py
|
Python
|
bsd-3-clause
| 10,230 | 0.006549 |
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy.constants import c
import h5py
import sys
import warnings
warnings.simplefilter("ignore", UserWarning)
font = {'size': 18}
mpl.rc('font', **font)
def w2dbm(W, floor=-100):
"""This function converts a power given in W to a power given in dBm.
Inputs::
W(float): power in units of W
Returns::
Power in units of dBm(float)
"""
    if not isinstance(W, np.ndarray):
        if W > 0:
            return 10. * np.log10(W) + 30
        elif W == 0:
            return floor
        else:
            print(W)
            raise ZeroDivisionError
a = 10. * (np.ma.log10(W)).filled(floor/10-3) + 30
return a
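# Sanity check (illustrative values): 1 W -> +30 dBm, 1 mW -> 0 dBm, and
# exact zeros in an array are clamped to the floor value:
#   w2dbm(np.array([1.0, 1e-3, 0.0]))  # ~ array([ 30.,   0., -100.])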
class Plotter_saver(object):
def __init__(self, plots, filesaves, fv, t):
if plots and filesaves:
self.exporter = self.plotter_saver_both
elif plots and not(filesaves):
self.exporter = self.plotter_only
elif not(plots) and filesaves:
self.exporter = self.saver_only
else:
sys.exit("You are not exporting anything,\
wasted calculation")
#t = t[np.newaxis,3,:]
self.fv, self.t,self.lv = [self.reshape_x_axis(x) for x in (fv,t, 1e-3*c/fv)]
return None
def reshape_x_axis(self, x):
return np.reshape(x, int(x.shape[0]*x.shape[1]))
def initiate_reshape(self, u, U,nm):
u, U = (np.reshape(i, [nm,int(u.shape[0]*u.shape[1])]) for i in (u, U))
return u, U
def plotter_saver_both(self, index, int_fwm, sim_wind, u, U, P0_p, P0_s,
f_p, f_s, ro, mode_names, pump_wave='',
filename=None, title=None, im=0, plots=True):
u,U = self.initiate_reshape(u,U,int_fwm.nm)
self.plotter(index, int_fwm, sim_wind, u, U, P0_p, P0_s,
f_p, f_s, ro, mode_names, pump_wave,
filename, title, im, plots)
self.saver(index, int_fwm, sim_wind, u, U, P0_p, P0_s, f_p, f_s,
ro, mode_names, pump_wave, filename, title,
im, plots)
return None
def plotter_only(self, index, int_fwm, sim_wind, u, U, P0_p, P0_s,
f_p, f_s, ro, mode_names, pump_wave='',
filename=None, title=None, im=0, plots=True):
u,U = self.initiate_reshape(u,U,int_fwm.nm)
self.plotter(index, int_fwm, sim_wind, u, U, P0_p, P0_s,
f_p, f_s, ro, mode_names, pump_wave,
filename, title, im, plots)
return None
def saver_only(self, index, int_fwm, sim_wind, u, U, P0_p, P0_s,
f_p, f_s, ro, mode_names, pump_wave='',
filename=None, title=None, im=0, plots=True):
u,U = self.initiate_reshape(u,U,int_fwm.nm)
self.saver(index, int_fwm, sim_wind, u, U, P0_p, P0_s, f_p, f_s,
ro, mode_names, pump_wave, filename, title,
im, plots)
return None
def plotter(self, index, int_fwm, sim_wind, u, U, P0_p, P0_s,
f_p, f_s, ro, mode_names, pump_wave='',
filename=None, title=None, im=0, plots=True):
"""Plots many modes"""
x, y = 1e-3*c/self.fv, w2dbm(np.abs(U)**2)
xlim, ylim = [800, 1400], [-80, 100]
xlabel, ylabel = r'$\lambda (nm)$', r'$Spectrum (a.u.)$'
filesave = 'output'+pump_wave+'/output' + \
str(index) + '/figures/wavelength/'+filename
plot_multiple_modes(int_fwm.nm, x, y, mode_names,
ylim, xlim, xlabel, ylabel, title, filesave, im)
# Frequency
x, y = self.fv, w2dbm(sim_wind.dt[0]**2*np.abs(U)**2)# - np.max(w2dbm(sim_wind.dt[0]**2*np.abs(U)**2))
xlim, ylim = [np.min(x), np.max(x)], [np.min(y) + 0.1*np.min(y), 1]
xlim, ylim = [np.min(x), np.max(x)], [-50,100]
xlabel, ylabel = r'$f (THz)$', r'$Spectrum (a.u.)$'
filesave = 'output'+pump_wave+'/output' + \
str(index) + '/figures/frequency/'+filename
plot_multiple_modes(int_fwm.nm, x, y, mode_names,
ylim, xlim, xlabel, ylabel, title, filesave, im)
# Time
x, y = self.t, np.abs(u)**2
xlim, ylim = [np.min(x), np.max(x)], [6.8, 7.8]
        xlabel, ylabel = r'$time (ps)$', r'$Spectrum (W)$'
filesave = 'output'+pump_wave+'/output' + \
str(index) + '/figures/time/'+filename
plot_multiple_modes(int_fwm.nm, x, y, mode_names,
ylim, xlim, xlabel, ylabel, title, filesave, im)
return None
    def saver(self, index, int_fwm, sim_wind, u, U, P0_p, P0_s, f_p, f_s,
              ro, mode_names, pump_wave='', filename=None, title=None,
              im=0, plots=True):
"""Dump to HDF5 for postproc"""
if filename[:4] != 'port':
layer = filename[-1]+'/'+filename[:-1]
else:
layer = filename
        if layer[0] == '0':
extra_data = np.array([int_fwm.z, int_fwm.nm,P0_p, P0_s, f_p, f_s, ro])
save_variables('data_large', layer, filepath='output'+pump_wave+'/output'+str(index)+'/data/', U=U, t=self.t,
fv=self.fv, extra_data = extra_data)
else:
save_variables('data_large', layer, filepath='output'+pump_wave+'/output'+str(index)+'/data/', U=U)
return None
def plot_multiple_modes(nm, x, y, mode_names, ylim, xlim, xlabel, ylabel, title, filesave=None, im=None):
"""
Dynamically plots what is asked of it for multiple modes given at set point.
"""
fig = plt.figure(figsize=(20.0, 10.0))
plt.subplots_adjust(hspace=0.1)
for i, v in enumerate(range(nm)):
v = v+1
ax1 = plt.subplot(nm, 1, v)
plt.plot(x, y[i, :], '-', label=mode_names[i])
ax1.legend(loc=2)
ax1.set_ylim(ylim)
ax1.set_xlim(xlim)
if i != nm - 1:
ax1.get_xaxis().set_visible(False)
ax = fig.add_subplot(111, frameon=False)
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
ax.set_title(title)
plt.grid(True)
ax.yaxis.set_label_coords(-0.05, 0.5)
ax.xaxis.set_label_coords(0.5, -0.05)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if type(im) != int:
newax = fig.add_axes([0.8, 0.8, 0.2, 0.2], anchor='NE')
newax.imshow(im)
newax.axis('off')
    if filesave is None:
plt.show()
else:
        plt.savefig(filesave, bbox_inches='tight')
plt.close(fig)
return None
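# Call sketch (mode names and limits are illustrative): stack two modes
# vertically and show the figure interactively by passing filesave=None.
#   plot_multiple_modes(2, x, y, ['LP01', 'LP11'], [-80, 100], [800, 1400],
#                       r'$\lambda (nm)$', 'Spectrum (a.u.)', 'demo', None, 0)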
def animator_pdf_maker(rounds, pump_index):
"""
    Creates the animation and pdf of the FOPO at different parts of the FOPO
    using convert from ImageMagick. Also removes the pngs, so be careful.
"""
print("making pdf's and animations.")
    space = ('wavelength', 'frequency', 'time')
for sp in space:
file_loc = 'output/output'+str(pump_index)+'/figures/'+sp+'/'
strings_large = ['convert '+file_loc+'00.png ']
for i in range(4):
strings_large.append('convert ')
for ro in range(rounds):
for i in range(4):
strings_large[i+1] += file_loc+str(ro)+str(i+1)+'.png '
for w in range(1, 4):
if i == 5:
break
strings_large[0] += file_loc+str(ro)+str(w)+'.png '
for i in range(4):
os.system(strings_large[i]+file_loc+str(i)+'.pdf')
file_loca = file_loc+'portA/'
file_locb = file_loc+'portB/'
string_porta = 'convert '
string_portb = 'convert '
for i in range(rounds):
string_porta += file_loca + str(i) + '.png '
string_portb += file_locb + str(i) + '.png '
string_porta += file_loca+'porta.pdf '
string_portb += file_locb+'portb.pdf '
os.system(string_porta)
os.system(string_portb)
for i in range(4):
os.system(
'convert -delay 30 '+file_loc+str(i)+'.pdf '+file_loc+str(i)+'
|
sanxiatianma/spider
|
src/spider/settings.py
|
Python
|
apache-2.0
| 483 | 0.004141 |
# -*- coding: utf-8 -*-
# Scrapy settings for spider project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'spider'
SPIDER_MODULES = ['spider.spiders']
NEWSPIDER_MODULE = 'spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'spider (+http://www.yourdomain.com)'
|
google/cloudprint_logocert
|
_log.py
|
Python
|
apache-2.0
| 3,118 | 0.003849 |
"""Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Function to provide a logger that writes to stdout, memory, and files.
Logger objects are used to control how and when messages are logged. This
function will perform some general housekeeping and organization. It will also
ensure that existing loggers don't get extra handlers added to them if this
code is called multiple times.
"""
import logging
import os
from StringIO import StringIO
import sys
import time
def GetLogger(name, logdir=None, loglevel='info', stdout=False):
"""Return a new logger, or reference to an existing logge
|
r.
Args:
name: string, name of logger.
logdir: string, path to a directly to place log files.
loglevel: string, debug level of logger.
stdout: boolean, True = send messages to stdout and logfile.
False = only send messages to log file.
Returns:
initialized logger.
Since Python loggers are a singleton, logging.getLogger() will always return
  a reference to the current logger with identical names. This function uses
3 handlers, so if handlers == 0 the logger requires proper configuration
of handlers and log files.
"""
logger = logging.getLogger(name)
if not logger.handlers:
datetime_str = time.strftime('%Y%B%d_%H%M%S', time.localtime())
log_filename = '%s%s%s' % (name, datetime_str, '.log')
if not logdir:
logdir = '/tmp/logfiles'
if not os.path.isdir(logdir):
try:
os.makedirs(logdir)
except IOError:
print 'Error creating log directory!'
sys.exit(1)
logfile = os.path.join(logdir, log_filename)
strlog = StringIO()
c = logging.StreamHandler()
s = logging.StreamHandler(strlog)
h = logging.FileHandler(logfile)
hf = logging.Formatter('%(asctime)s, %(name)s %(levelname)s: %(message)s')
cf = logging.Formatter('%(name)s %(levelname)s: %(message)s')
sf = logging.Formatter('%(name)s %(levelname)s: %(message)s')
logger.addHandler(h)
logger.addHandler(s)
h.setFormatter(hf)
s.setFormatter(sf)
if stdout:
logger.addHandler(c)
c.setFormatter(cf)
levels = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
logger.setLevel(levels.get(loglevel, logging.INFO))
logger.debug(
'Invocation started. Logger %s\nLogger Name: %s\nLog Mode: %s',
logfile, name, loglevel)
else:
logger.debug('Logger %s is already initialized', name)
return logger
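# Usage sketch (logger name and directory are illustrative):
#   log = GetLogger('logocert', logdir='/tmp/logfiles', loglevel='debug',
#                   stdout=True)
#   log.info('probe started')
# A second GetLogger('logocert') call returns the same configured logger.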
|
hrayr-artunyan/shuup
|
shuup_tests/xtheme/test_editor_view.py
|
Python
|
agpl-3.0
| 6,457 | 0.002168 |
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import pytest
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from shuup.apps.provides import override_provides
from shuup.utils.excs import Problem
from shuup.xtheme import XTHEME_GLOBAL_VIEW_NAME
from shuup.xtheme.layout import Layout
from shuup.xtheme.models import SavedViewConfig, SavedViewConfigStatus
from shuup.xtheme.plugins.consts import FALLBACK_LANGUAGE_CODE
from shuup.xtheme.testing import override_current_theme_class
from shuup.xtheme.views.editor import EditorView, ROW_CELL_LIMIT
from shuup_tests.utils import printable_gibberish
from shuup_tests.utils.faux_users import SuperUser
from shuup_tests.utils.forms import get_form_data
from shuup_tests.xtheme.utils import FauxTheme, plugin_override
@contextmanager
def initialize_editor_view(view_name, placeholder_name, request=None):
if request is None:
request = RequestFactory().get("/")
request.user = SuperUser()
if hasattr(request.GET, "_mutable"):
request.GET._mutable = True # Ahem
request.GET.update({
"theme": FauxTheme.identifier,
"view": view_name,
"ph": placeholder_name
})
with plugin_override():
with override_provides("xtheme", ["shuup_tests.xtheme.utils:FauxTheme"]):
with override_current_theme_class(FauxTheme):
yield EditorView(request=request, args=(), kwargs={})
def get_test_layout_and_svc():
svc = SavedViewConfig(
theme_identifier=FauxTheme.identifier,
view_name=printable_gibberish(),
status=SavedViewConfigStatus.CURRENT_DRAFT
)
layout = Layout(FauxTheme, "ph")
layout.add_plugin("text", {"text": "hello"})
svc.set_layout_data(layout.placeholder_name, layout)
svc.save()
return layout, svc
def test_anon_cant_edit(rf):
request = rf.get("/")
request.user = AnonymousUser()
with pytest.raises(Problem):
EditorView.as_view()(request)
def test_unknown_theme_fails(rf):
request = rf.get("/", {"theme": printable_gibberish()})
request.user = SuperUser()
with pytest.raises(Problem):
EditorView.as_view()(request)
@pytest.mark.django_db
def test_editor_view_functions():
layout, svc = get_test_layout_and_svc()
with initialize_editor_view(svc.view_name, layout.placeholder_name) as view_obj:
assert isinstance(view_obj, EditorView)
view_obj.request.GET.update({"x": 0, "y": 0})
view_obj.dispatch(view_obj.request)
assert view_obj.current_cell
assert view_obj.current_cell.serialize() == layout.get_cell(0, 0).serialize()
# Go through the motions of adding and removing stuff programmatically
view_obj.dispatch_change_plugin(plugin="text") # Well it was text to begin with, but...
assert len(view_obj.layout.rows[0]) == 1
view_obj.dispatch_add_cell(y=-1)
assert len(view_obj.layout.rows[0]) == 1
view_obj.dispatch_add_cell(y=0)
assert len(view_obj.layout.rows[0]) == 2
view_obj.dispatch_add_row()
assert len(view_obj.layout) == 2
assert len(view_obj.layout.rows[1]) == 1
view_obj.dispatch_add_cell(y=1)
assert len(view_obj.layout.rows[1]) == 2
view_obj.dispatch_del_cell(x=1, y=1)
assert len(view_obj.layout.rows[1]) == 1
view_obj.dispatch_del_row(y=1)
assert len(view_obj.layout) == 1
@pytest.mark.django_db
def test_editor_save(rf):
layout, svc = get_test_layout_and_svc()
with initialize_editor_view(svc.view_name, layout.placeholder_name) as view_obj:
view_obj.request.GET.update({"x": 0, "y": 0})
view_obj.dispatch(view_obj.request)
assert view_obj.current_cell
assert view_obj.form
assert "general" in view_obj.form.forms
assert "plugin" in view_obj.form.forms
form_data = get_form_data(view_obj.form, prepared=True)
new_text = printable_gibberish()
form_data["plugin-text_%s" % FALLBACK_LANGUAGE_CODE] = new_text
form_data["save"] = "1"
request = rf.post("/pepe/", data=form_data) # sort of rare pepe
request.GET = dict(request.GET, x=0, y=0)
with initialize_editor_view(svc.view_name, layout.placeholder_name, request) as view_obj:
view_obj.dispatch(request)
assert view_obj.form
assert not view_obj.form.errors
assert view_obj.current_cell.config["text"] == {FALLBACK_LANGUAGE_CODE: new_text}
@pytest.mark.django_db
def test_editor_view_commands():
with initialize_editor_view(printable_gibberish(), printable_gibberish()) as view_obj:
view_obj.request.method = "POST"
view_obj.request.POST = {"command": "add_row"}
view_obj._populate_vars() # don't tell anyone we're calling a private method here
assert len(view_obj.layout) == 0
view_obj.dispatch(view_obj.request)
assert len(view_obj.layout) == 1
@pytest.mark.django_db
def test_editor_view_unknown_command():
with initialize_editor_view(printable_gibberish(), printable_gibberish()) as view_obj:
view_obj.request.method = "POST"
view_obj.request.POST = {"command": printable_gibberish()}
with pytest.raises(Problem):
view_obj.dispatch(view_obj.request)
@pytest.mark.django_db
def test_editor_cell_limits():
layout, svc = get_test_layout_and_svc()
with initialize_editor_view(svc.view_name, layout.placeholder_name) as view_obj:
view_obj.request.GET.update({"x": 0, "y": 0})
view_obj.dispatch(view_obj.request)
        for i in range(1, ROW_CELL_LIMIT):
view_obj.dispatch_add_cell(y=0)
assert len(view_obj.layout.rows[0]) == ROW_CELL_LIMIT
with pytest.raises(ValueError):
            view_obj.dispatch_add_cell(y=0)
@pytest.mark.django_db
def test_get_global_placeholder():
request = RequestFactory().get("/")
layout, svc = get_test_layout_and_svc()
with initialize_editor_view(svc.view_name, layout.placeholder_name, request=request) as view_obj:
view_name_1 = view_obj.dispatch(view_obj.request).context_data["view"].view_config.view_name
view_obj.request.GET.update({"x": 0, "y": 0, "global_type": True})
view_name_2 = view_obj.dispatch(view_obj.request).context_data["view"].view_config.view_name
assert view_name_1 != view_name_2
assert view_name_2 == XTHEME_GLOBAL_VIEW_NAME
|
yasoob/PythonRSSReader
|
venv/lib/python2.7/dist-packages/oneconf/paths.py
|
Python
|
mit
| 3,861 | 0.000518 |
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Canonical
#
# Authors:
# Didier Roche <didrocks@ubuntu.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
try:
from configparser import NoSectionError, NoOptionError, RawConfigParser
except ImportError:
# Python 2
from ConfigParser import NoSectionError, NoOptionError, RawConfigParser
from xdg import BaseDirectory as xdg
ONECONF_OVERRIDE_FILE = "/tmp/oneconf.override"
ONECONF_DATADIR = '/usr/share/oneconf/data'
ONECONF_CACHE_DIR = os.path.join(xdg.xdg_cache_home, "oneconf")
PACKAGE_LIST_PREFIX = "package_list"
OTHER_HOST_FILENAME = "other_hosts"
PENDING_UPLOAD_FILENAME = "pending_upload"
HOST_DATA_FILENAME = "host"
LOGO_PREFIX = "logo"
LAST_SYNC_DATE_FILENAME = "last_sync"
_datadir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
# In both Python 2 and 3, _datadir will be a relative path, however, in Python
# 3 it will start with "./" while in Python 2 it will start with just the file
# name. Normalize this, since the path string is used in the logo_checksum
# calculation.
if not os.path.isabs(_datadir) and not _datadir.startswith('./'):
_datadir = os.path.join(os.curdir, _datadir)
if not os.path.exists(_datadir):
# take the paths file if loaded from networksync module
#
# 2014-03-17 barry: It's probably not a good idea to use __file__, since
# the behavior of that has changed between Python 3.3 and 3.4. Prior to
# 3.4, __file__ was a relative path, but in 3.4 it became absolute (which
# it always should have been). Because the file's *path* is the input to
# the logo checksum (as opposed to the file's contents, because...?) this
# value actually matters.
#
# However, making the FAKE_WALLPAPER path below absolute breaks the
# package's build because inside a chroot, the absolute path of __file__
# is unpredictable. LP: #1269898.
#
# The solution then is to make the FAKE_WALLPAPER path relative to the
# current working directory, via os.path.relpath(). So first, we ensure
# it's absolute (for older Pythons) and then relpath it. *That's* the
# path that will be the input to the SHA224 checksum.
parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
_datadir = os.path.join(parent, "data")
if not os.path.exists(_datadir):
_datadir = ONECONF_DATADIR
LOGO_BASE_FILENAME = os.path.join(_datadir, 'images', 'computer.png')
WEBCATALOG_SILO_DIR = "/tmp"
FAKE_WALLPAPER = None # Fake wallpaper for tests
FAKE_WALLPAPER_MTIME = None # Fake wallpaper for tests
config = RawConfigParser()
try:
config.read(ONECONF_OVERRIDE_FILE)
ONECONF_CACHE_DIR = config.get('TestSuite', 'ONECONF_CACHE_DIR')
WEBCATALOG_SILO_DIR = config.get('TestSuite', 'WEBCATALOG_SILO_DIR')
FAKE_WALLPAPER = os.path.relpath(os.path.abspath(os.path.join(
os.path.dirname(_datadir), config.get('TestSuite', 'FAKE_WALLPAPER'))))
try:
FAKE_WALLPAPER_MTIME = config.get('TestSuite', 'FAKE_WALLPAPER_MTIME')
except NoOptionError:
FAKE_WALLPAPER_MTIME = None
except NoSectionError:
pass
WEBCATALOG_SILO_SOURCE = os.path.join(WEBCATALOG_SILO_DIR, "source")
WEBCATALOG_SILO_RESULT = os.path.join(WEBCATALOG_SILO_DIR, "result")
|
Uli1/mapnik
|
scons/scons-local-2.4.0/SCons/Tool/tar.py
|
Python
|
lgpl-2.1
| 2,503 | 0.006392 |
"""SCons.Tool.tar
Tool-specific initialization for tar.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tar.py rel_2.4.0:3365:9259ea1c13d7 2015/09/21 14:03:43 bdbaddog"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
tars = ['tar', 'gtar']
TarAction = SCons.Action.Action('$TARCOM', '$TARCOMSTR')
TarBuilder = SCons.Builder.Builder(action = TarAction,
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$TARSUFFIX',
multi = 1)
def generate(env):
"""Add Builders and construction variables for tar to an Environment."""
try:
bld = env['BUILDERS']['Tar']
except KeyError:
bld = TarBuilder
env['BUILDERS']['Tar'] = bld
env['TAR'] = env.Detect(tars) or 'gtar'
env['TARFLAGS'] = SCons.Util.CLVar('-c')
env['TARCOM'] = '$TAR $TARFLAGS -f $TARGET $SOURCES'
env['TARSUFFIX'] = '.tar'
def exists(env):
return env.Detect(tars)
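# SConstruct usage sketch (target and source names are illustrative):
#   env = Environment(tools=['tar'])
#   env.Tar('dist.tar', ['src', 'README'])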
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Travelport-Czech/apila
|
tasks/DynamoTable.py
|
Python
|
mit
| 6,621 | 0.014197 |
from Task import Task
import os.path
import yaml
import botocore
import name_constructor
import time
import sys
class DynamoTable(Task):
"""Create or remove a table by yaml definition file"""
known_params = {
'name': 'name of the table to be created or removed',
'source': 'full name of the file with the definition (see demo/sample_reservation.yml)',
'state': 'table can be in two states: present (it is the default state) or absent'
}
required_params = ( 'name', 'source' )
required_configs = ('user', 'branch')
task_name = 'dynamo-table'
def __str__(self):
if self.name:
return self.name
else:
return "Create a table '%s' from '%s'" % (self.params['name'], os.path.abspath(self.params['source']))
def run(self, clients, cache):
client = clients.get('dynamodb')
table_name = name_constructor.table_name(self.params['name'], self.config)
if 'state' in self.params and self.params['state'] == 'absent':
return self.make_table_absent(client, table_name)
else:
return self.make_table_present(client, table_name)
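  # Illustrative shape of a 'source' definition file (the values below are
  # made up; see demo/sample_reservation.yml for the real sample). create()
  # consumes exactly these top-level keys:
  #   AttributeDefinitions:
  #     - AttributeName: id
  #       AttributeType: S
  #   KeySchema:
  #     - AttributeName: id
  #       KeyType: HASH
  #   ProvisionedThroughput:
  #     ReadCapacityUnits: 1
  #     WriteCapacityUnits: 1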
def make_table_absent(self, client, table_name):
try:
table_def = client.describe_table(TableName=table_name)['Table']
except botocore.exceptions.ClientError as e:
return (True, '')
self.wait_for_table(client, table_name)
client.delete_table(TableName=table_name)
return (True, self.CHANGED)
def make_table_present(self, client, table_name):
try:
new_def = yaml.load(open(self.params['source']).read())
except Exception as e:
return (False, str(e))
try:
table_def = client.describe_table(TableName=table_name)['Table']
except botocore.exceptions.ClientError as e:
return self.create(client, table_name, new_def)
request = self.build_update_request(table_def, new_def)
if not request:
return (True, '')
self.process_update_request(client, request, table_name)
return (True, self.CHANGED)
def process_update_request(self, client, request, table_name):
if 'GlobalSecondaryIndexUpdates' in request:
for index_request in request['GlobalSecondaryIndexUpdates']:
new_request = { 'TableName': table_name, 'AttributeDefinitions': request['AttributeDefinitions'], 'GlobalSecondaryIndexUpdates': [index_request]}
self.modify_table(client, new_request, table_name)
if 'ProvisionedThroughput' in request:
new_request = { 'TableName': table_name, 'ProvisionedThroughput': request['ProvisionedThroughput'] }
self.modify_table(client, new_request, table_name)
def wait_for_table(self, client, table_name):
def rotate(t):
# animation = ('|', '\\', '-', '/')
animation = (':.. ', '.:. ', '..: ', '.:. ')
sys.stdout.write('\b'*len(animation)+animation[t % len(animation)])
sys.stdout.flush()
retry = 600
sys.stdout.write('\r')
while True:
table_def = client.describe_table(TableName=table_name)['Table']
busy_reason = self.table_busy_reason(table_def)
if busy_reason == '':
break
retry -= 1
if retry < 1:
raise Exception("%s too long." % busy_reason)
rotate(retry)
time.sleep(1)
def modify_table(self, client, request, table_name):
self.wait_for_table(client, table_name)
client.update_table(**request)
def table_busy_reason(self, table_def):
if table_def['TableStatus'] != 'ACTIVE':
return 'Table is in state %s' % table_def['TableStatus']
if 'GlobalSecondaryIndexes' in table_def:
for index in table_def['GlobalSecondaryIndexes']:
if index['IndexStatus'] != 'ACTIVE':
return 'Index %s is in state %s' % (index['IndexName'], index['IndexStatus'])
return ''
def build_update_request(self, table_def, new_def):
request = {}
old_indexes = self.get_indexes_by_name(self.construct_secondary_indexes(table_def['GlobalSecondaryIndexes']))
new_indexes = self.get_indexes_by_name(self.construct_secondary_indexes(new_def['GlobalSecondaryIndexes']))
updates = []
for index_name in old_indexes:
if index_name not in new_indexes:
updates.append({ 'Delete': { 'IndexName': index_name }})
for (index_name, index) in new_indexes.iteritems():
if index_name in old_indexes:
if index != old_indexes[index_name]:
updates.append({ 'Delete': { 'IndexName': index_name }})
updates.append({ 'Create': index})
else:
updates.append({ 'Create': index})
if updates:
request['GlobalSecondaryIndexUpdates'] = updates
request['AttributeDefinitions'] = new_def['AttributeDefinitions']
old_provisioning = self.construct_provisioned_throughput(table_def['ProvisionedThroughput'])
new_provisioning = self.construct_provisioned_throughput(new_def['ProvisionedThroughput'])
if old_provisioning != new_provisioning:
request['ProvisionedThroughput'] = new_provisioning
return request
def get_indexes_by_name(self, indexes):
out = {}
for index in indexes:
out[index['IndexName']] = index
return out
def construct_provisioned_throughput(self, idef):
return {
'ReadCapacityUnits': idef['ReadCapacityUnits'],
'WriteCapacityUnits': idef['WriteCapacityUnits']
}
def construct_secondary_indexes(self, idefs):
outs = []
for idef in idefs:
out = {
'IndexName': idef['IndexName'],
'KeySchema': idef['KeySchema'],
'Projection': idef['Projection']
}
if 'ProvisionedThroughput' in idef:
out['ProvisionedThroughput'] = self.construct_provisioned_throughput(idef['ProvisionedThroughput'])
outs.append(out)
return outs
def create(self, client, table_name, new_def):
params = {
'AttributeDefinitions': new_def['AttributeDefinitions'],
'TableName': table_name,
'KeySchema': new_def['KeySchema'] if 'KeySchema' in new_def else [],
'ProvisionedThroughput': self.construct_provisioned_throughput(new_def['ProvisionedThroughput'])
}
if 'LocalSecondaryIndexes' in new_def:
params['LocalSecondaryIndexes'] = self.construct_secondary_indexes(new_def['LocalSecondaryIndexes'])
if 'GlobalSecondaryIndexes' in new_def:
      params['GlobalSecondaryIndexes'] = self.construct_secondary_indexes(new_def['GlobalSecondaryIndexes'])
if 'StreamSpecification' in new_def:
      params['StreamSpecification'] = new_def['StreamSpecification']
try:
client.create_table(**params)
except botocore.exceptions.ClientError as e:
return (False, str(e))
return (True, self.CREATED)
|
willkg/douglas
|
douglas/tests/test_yeararchives.py
|
Python
|
mit
| 668 | 0 |
from nose.tools import eq_
from douglas.tests import PluginTest
from douglas.plugins import yeararchives
class Test_yeararchives(PluginTest):
def setUp(self):
PluginTest.setUp(self, yeararchives)
def tearDown(self):
PluginTest.tearDown(self)
    def test_parse_path_info(self):
testdata = [
('', None),
('/', None),
('/2003', ('2003', None)),
('/2003/', ('2003', None)),
('/2003/index', ('2003', None)),
('/2003/index.theme', ('2003', 'theme')),
]
for data, expected in testdata:
eq_(yeararchives.parse_path_info(data), expected)
|
IITBinterns13/edx-platform-dev
|
common/lib/xmodule/xmodule/modulestore/mongo/draft.py
|
Python
|
agpl-3.0
| 9,889 | 0.002225 |
from datetime import datetime
from xmodule.modulestore import Location, namedtuple_to_son
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.inheritance import own_metadata
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore.mongo.base import MongoModuleStore
from pytz import UTC
DRAFT = 'draft'
# Things w/ these categories should never be marked as version='draft'
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
def as_draft(location):
"""
Returns the Location that is the draft for `location`
"""
return Location(location).replace(revision=DRAFT)
def as_published(location):
"""
Returns the Location that is the published version for `location`
"""
return Location(location).replace(revision=None)
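# Illustrative mapping (the URL form of Location is an assumption here):
# as_draft() swaps in revision=DRAFT, e.g.
#   i4x://Org/Course/problem/p1  ->  i4x://Org/Course/problem/p1@draft
# and as_published() strips the revision back off.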
def wrap_draft(item):
"""
Sets `item.is_draft` to `True` if the item is a
draft, and `False` otherwise. Sets the item's location to the
non-draft location in either case
"""
    setattr(item, 'is_draft', item.location.revision == DRAFT)
item.location = item.location.replace(revision=None)
return item
class DraftModuleStore(MongoModuleStore):
"""
This mixin modifies a modulestore to give it draft semantics.
That is, edits made to units are stored to locations that have the revision DRAFT,
and when reads are made, they first read with revision DRAFT, and then fall back
    to the baseline revision only if DRAFT doesn't exist.
This module also includes functionality to promote DRAFT modules (and optionally
their children) to published modules.
"""
def get_item(self, location, depth=0):
"""
Returns an XModuleDescriptor instance for the item at location.
If location.revision is None, returns the item with the most
recent revision
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
location: Something that can be passed to Location
depth (int): An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
try:
return wrap_draft(super(DraftModuleStore, self).get_item(as_draft(location), depth=depth))
except ItemNotFoundError:
return wrap_draft(super(DraftModuleStore, self).get_item(location, depth=depth))
def get_instance(self, course_id, location, depth=0):
"""
Get an instance of this location, with policy for course_id applied.
TODO (vshnayder): this may want to live outside the modulestore eventually
"""
try:
return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, as_draft(location), depth=depth))
except ItemNotFoundError:
return wrap_draft(super(DraftModuleStore, self).get_instance(course_id, location, depth=depth))
def get_items(self, location, course_id=None, depth=0):
"""
Returns a list of XModuleDescriptor instances for the items
that match location. Any element of location that is None is treated
as a wildcard that matches any value
location: Something that can be passed to Location
depth: An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of calls to
get_children() to cache. None indicates to cache all descendents
"""
draft_loc = as_draft(location)
draft_items = super(DraftModuleStore, self).get_items(draft_loc, course_id=course_id, depth=depth)
items = super(DraftModuleStore, self).get_items(location, course_id=course_id, depth=depth)
draft_locs_found = set(item.location.replace(revision=None) for item in draft_items)
non_draft_items = [
item
for item in items
if (item.location.revision != DRAFT
and item.location.replace(revision=None) not in draft_locs_found)
]
return [wrap_draft(item) for item in draft_items + non_draft_items]
def clone_item(self, source, location):
"""
Clone a new item that is a copy of the item at the location `source`
and writes it to `location`
"""
if Location(location).category in DIRECT_ONLY_CATEGORIES:
raise InvalidVersionError(location)
return wrap_draft(super(DraftModuleStore, self).clone_item(source, as_draft(location)))
def update_item(self, location, data, allow_not_found=False):
"""
Set the data in the item specified by the location to
data
location: Something that can be passed to Location
data: A nested dictionary of problem data
"""
draft_loc = as_draft(location)
try:
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
except ItemNotFoundError, e:
if not allow_not_found:
raise e
return super(DraftModuleStore, self).update_item(draft_loc, data)
def update_children(self, location, children):
"""
Set the children for the item specified by the location to
children
location: Something that can be passed to Location
children: A list of child item identifiers
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
return super(DraftModuleStore, self).update_children(draft_loc, children)
def update_metadata(self, location, metadata):
"""
Set the metadata for the item specified by the location to
metadata
location: Something that can be passed to Location
metadata: A nested dictionary of module metadata
"""
draft_loc = as_draft(location)
draft_item = self.get_item(location)
if not getattr(draft_item, 'is_draft', False):
self.clone_item(location, draft_loc)
if 'is_draft' in metadata:
del metadata['is_draft']
return super(DraftModuleStore, self).update_metadata(draft_loc, metadata)
def delete_item(self, location, delete_all_versions=False):
"""
Delete an item from this modulestore
location: Something that can be passed to Location
"""
super(DraftModuleStore, self).delete_item(as_draft(location))
if delete_all_versions:
super(DraftModuleStore, self).delete_item(as_published(location))
return
def get_parent_locations(self, location, course_id):
'''Find all locations that are the parents of this location. Needed
for path_to_location().
returns an iterable of things that can be passed to Location.
'''
return super(DraftModuleStore, self).get_parent_locations(location, course_id)
def publish(self, location, published_by_id):
"""
Save a current draft to the underlying modulestore
"""
draft = self.get_item(location)
draft.cms.published_date = datetime.now(UTC)
draft.cms.published_by = published_by_id
super(DraftModuleStore, self).update_item(location, draft._model_data._kvs._data)
super(DraftModuleStore, self).update_children(location, draft._model_data._kvs._children)
super(DraftModuleStore, self).update_metadata(location, own_metadata(draft))
self.delete_item(location)
def unpublish(self, location):
"""
Turn the published version int
|
nortikin/sverchok
|
nodes/list_struct/shuffle.py
|
Python
|
gpl-3.0
| 3,555 | 0.001125 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import random
import bpy
from bpy.props import BoolProperty, IntProperty, StringProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, changable_sockets
import numpy as np
from numpy import random as np_random, ndarray, array
class ListShuffleNode(bpy.types.Node, SverchCustomTreeNode):
'''
Triggers: Randomize list order
Tooltip: Change randomly the order of the elements in a list
'''
    bl_idname = 'ListShuffleNode'
bl_label = 'List Shuffle'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_LIST_SHUFFLE'
level: IntProperty(name='level_to_Shuffle', default=2, min=1, update=updateNode)
seed: IntProperty(name='Seed', default=0, update=updateNode)
typ: StringProperty(name='typ', default='')
newsock: BoolProperty(name='newsock', default=False)
def draw_buttons(self, context, layout):
layout.prop(self, 'level', text="level")
if 'seed' not in self.inputs:
layout.prop(self, 'seed', text="Seed")
def sv_init(self, context):
self.inputs.new('SvStringsSocket', "data")
self.inputs.new('SvStringsSocket', "seed").prop_name = 'seed'
self.outputs.new('SvStringsSocket', 'data')
def sv_update(self):
if 'data' in self.inputs and self.inputs['data'].links:
inputsocketname = 'data'
outputsocketname = ['data']
changable_sockets(self, inputsocketname, outputsocketname)
def process(self):
if self.outputs[0].is_linked and self.inputs[0].is_linked:
seed = self.inputs['seed'].sv_get(deepcopy=False)[0][0]
random.seed(seed)
np_random.seed(seed)
data = self.inputs['data'].sv_get(deepcopy=False)
output = self.shuffle(data, self.level)
self.outputs['data'].sv_set(output)
def shuffle(self, data, level):
level -= 1
if level:
if level == 1 and isinstance(data, ndarray):
out = np.array(data)
for row in out:
np_random.shuffle(row)
return out
out = []
for l in data:
out.append(self.shuffle(l, level))
return out
elif isinstance(data, list):
l = data.copy()
random.shuffle(l)
return l
elif isinstance(data, tuple):
data = list(data)
random.shuffle(data)
return tuple(data)
elif isinstance(data, ndarray):
out = array(data)
np_random.shuffle(out)
return out
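    # Level semantics sketch (data values are illustrative): with
    # data = [[1, 2, 3], [4, 5, 6]], level=1 shuffles the order of the two
    # sublists, while level=2 shuffles the numbers inside each sublist.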
def register():
bpy.utils.register_class(ListShuffleNode)
def unregister():
bpy.utils.unregister_class(ListShuffleNode)
|
eHealthAfrica/LMIS
|
LMIS/core/api/serializers.py
|
Python
|
gpl-2.0
| 3,856 | 0.001815 |
"""
core/api/serializers.py is the module for core model api data serializers
"""
#import core django module
from django.contrib.auth.models import User, Permission
#import external modules
from rest_framework import serializers
#import project modules
from core.models import (Product, ProductCategory, UnitOfMeasurement, UOMCategory, CompanyCategory, Company,
Currency, Rate, Contact, Address, EmployeeCategory, Employee, ProductPresentation,
ModeOfAdministration, ProductItem, ProductFormulation)
class UserSerializer(serializers.ModelSerializer):
"""
REST API serializer for User model
"""
class Meta:
model = User
class BaseModelSerializer(serializers.ModelSerializer):
"""
Base Model Serializer for models
"""
created_by = UserSerializer(required=False, read_only=True)
modified_by = UserSerializer(required=False, read_only=True)
class ProductCategorySerializer(BaseModelSerializer):
"""
REST API Serializer for ProductCategory model
"""
class Meta:
model = ProductCategory
class ProductSerializer(BaseModelSerializer):
"""
REST API Serializer for Product models
"""
class Meta:
model = Product
class UOMCategorySerializer(BaseModelSerializer):
"""
REST API Serializer for UOMCategory model
"""
class Meta:
model = UOMCategory
class UnitOfMeasurementSerializer(BaseModelSerializer):
"""
REST API Serializer for UnitOfMeasurement model
"""
class Meta:
model = UnitOfMeasurement
class CompanyCategorySerializer(BaseModelSerializer):
"""
REST API serializer for CompanyCategory model
"""
class Meta:
model = CompanyCategory
class CompanySerializer(BaseModelSerializer):
"""
REST API serializer for Company model
"""
class Meta:
model = Company
class CurrencySerializer(BaseModelSerializer):
"""
REST API serializer for Currency model
"""
class Meta:
model = Currency
fields = ('code', 'name', 'symbol', 'symbol_position', 'rates',)
class RateSerializer(BaseModelSerializer):
"""
REST API serializer for Rate model
"""
class Meta:
model = Rate
class ContactSerializer(BaseModelSerializer):
"""
REST API serializer for Contact model
"""
class Meta:
model = Contact
class AddressSerializer(BaseModelSerializer):
"""
REST API serializer for Address model
"""
class Meta:
model = Address
class EmployeeCategorySerializer(BaseModelSerializer):
"""
REST API serializer for EmployeeCategory
"""
class Meta:
model = EmployeeCategory
class EmployeeSerializer(BaseModelSerializer):
"""
REST API serializer for Employee
"""
class Meta:
model = Employee
class PermissionSerializer(BaseModelSerializer):
"""
REST API serializer for Permission model
"""
    class Meta:
model = Permission
class ProductPresentationSerializer(BaseModelSerializer):
"""
REST API serializer for ProductPresentation model
"""
class Meta:
model = ProductPresentation
class ModeOfAdministrationSerializer(BaseModelSerializer):
"""
REST API serializer for ModeOfAdministration model
"""
class Meta:
model = ModeOfAdministration
class ProductItemSerializer(BaseModelSerializer):
"""
REST API serializer for ProductItem model
"""
class Meta:
model = ProductItem
class ProductFormulationSerializer(BaseModelSerializer):
"""
REST API serializer for ProductFormulation model, it can be Lyophilized, Liquid or Not Applicable
"""
class Meta:
model = ProductFormulation
|
SalesforceFoundation/CumulusCI
|
cumulusci/tasks/github/merge.py
|
Python
|
bsd-3-clause
| 11,772 | 0.003058 |
import http.client
from github3 import GitHubError
import github3.exceptions
from cumulusci.core.exceptions import GithubApiNotFoundError
from cumulusci.core.utils import process_bool_arg
from cumulusci.tasks.github.base import BaseGithubTask
class MergeBranch(BaseGithubTask):
task_docs = """
Merges the most recent commit on the current branch into other branches depending on the value of source_branch.
If source_branch is a branch that does not start with the specified branch_prefix, then the commit will be
    merged to all branches that begin with branch_prefix and are not themselves child branches (i.e. branches whose names do not contain '__').
If source_branch begins with branch_prefix, then the commit is merged to all child branches of source_branch.
"""
task_options = {
"commit": {
"description": "The commit to merge into feature branches. Defaults to the current head commit."
},
"source_branch": {
"description": "The source branch to merge from. Defaults to project__git__default_branch."
},
"branch_prefix": {
"description": "A list of prefixes of branches that should receive the merge. Defaults to project__git__prefix_feature"
},
"update_future_releases": {
"description": "If source_branch is a release branch, then merge all future release branches that exist. Defaults to False."
},
}
def _init_options(self, kwargs):
super()._init_options(kwargs)
if "commit" not in self.options:
self.options["commit"] = self.project_config.repo_commit
if "branch_prefix" not in self.options:
self.options[
"branch_prefix"
] = self.project_config.project__git__prefix_feature
if "source_branch" not in self.options:
self.options[
"source_branch"
] = self.project_config.project__git__default_branch
self.options["update_future_releases"] = process_bool_arg(
self.options.get("update_future_releases") or False
)
def _init_task(self):
super()._init_task()
self.repo = self.get_repo()
def _run_task(self):
self._validate_source_branch(self.options["source_branch"])
branches_to_merge = self._get_branches_to_merge()
for branch in branches_to_merge:
self._merge(
branch.name,
self.options["source_branch"],
self.options["commit"],
)
def _validate_source_branch(self, source_branch):
"""Validates that the source branch exists in the repository"""
try:
self.repo.branch(source_branch)
except github3.exceptions.NotFoundError:
message = f"Branch {source_branch} not found"
raise GithubApiNotFoundError(message)
def _get_existing_prs(self, source_branch, branch_prefix):
"""Returns the existing pull requests from the source branch
to other branches that are candidates for merging."""
existing_prs = []
for pr in self.repo.pull_requests(state="open"):
if pr.base.ref.startswith(branch_prefix) and pr.head.ref == source_branch:
existing_prs.append(pr.base.ref)
return existing_prs
def _get_branches_to_merge(self):
"""
If source_branch is the default branch (or a branch that doesn't start with a prefix), we
gather all branches with branch_prefix that are not child branches.
        NOTE: We only include the _next_ closest release branch when automerging from main.
        A change on main may conflict with the current contents of the lowest release branch.
In this case, we would like for that conflict to only need to be resolved once
(not once for each release branch).
If source_branch starts with branch prefix, we gather
all branches with branch_prefix that are direct descendents of source_branch.
If update_future_releases is True, and source_branch is a release branch
then we also collect all future release branches.
"""
repo_branches = list(self.repo.branches())
next_release = self._get_next_release(repo_branches)
update_future_releases = self._update_future_releases(next_release)
child_branches = []
main_descendents = []
release_branches = []
for branch in repo_branches:
# check for adding future release branches
if update_future_releases and self._is_future_release_branch(
branch.name, next_release
):
release_branches.append(branch)
continue
            # check if we are looking at the source_branch
if branch.name == self.options["source_branch"]:
self.logger.debug(f"Skipping branch {branch.name}: is source branch")
continue
# check for branch prefix match
elif not branch.name.startswith(self.options["branch_prefix"]):
self.logger.debug(
f"Skipping branch {branch.name}: does not match prefix '{self.options['branch_prefix']}'"
)
continue
# check if source_branch doesn't have prefix and is not a child (e.g. main)
elif (
not self.options["source_branch"].startswith(
self.options["branch_prefix"]
)
and "__" not in branch.name
):
# only merge to the lowest numbered release branch
# when merging from a branch without a prefix (e.g. main)
if self._is_future_release_branch(branch.name, next_release):
continue
main_descendents.append(branch)
# else, we have a branch that starts with branch_prefix
# check is this branch is a direct descendent
elif self._is_source_branch_direct_descendent(branch.name):
                child_branches.append(branch)
# else not a direct descendent
else:
self.logger.debug(
f"Skipping branch {branch.name}: is not a direct descendent of {self.options['source_branch']}"
)
to_merge = []
if child_branches:
self.logger.debug(
f"Found child branches to update: {[branch.name for branch in child_branches]}"
)
to_merge = child_branches
elif self.options["source_branch"].sta
|
rtswith(self.options["branch_prefix"]):
self.logger.debug(
f"No children found for branch {self.options['source_branch']}"
)
if release_branches:
self.logger.debug(
f"Found future release branches to update: {[branch.name for branch in release_branches]}"
)
to_merge = to_merge + release_branches
if main_descendents:
self.logger.debug(
f"Found descendents of {self.options['source_branch']} to update: {[branch.name for branch in main_descendents]}"
)
to_merge = to_merge + main_descendents
return to_merge
def _get_next_release(self, repo_branches):
"""Returns the integer that corresponds to the lowest release number found on all release branches.
NOTE: We assume that once a release branch is merged that it will be deleted.
"""
release_nums = [
int(branch.name.split("/")[1])
for branch in repo_branches
if self._is_release_branch(branch.name)
]
next_release = sorted(release_nums)[0] if release_nums else None
return next_release
def _update_future_releases(self, next_release):
"""Determines whether or not to update future releases.
Returns True if all of the below checks are True. False otherwise.
Checks:
(1) Did we receive the 'update_future_release' flag?
(2) Is the source_branch a release branc
|
fnp/fnpdjango
|
tests/tests/test_utils_settings.py
|
Python
|
agpl-3.0
| 398 | 0.005038 |
# This file is part of FNPDjango, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See README.md for more information.
#
from django.conf import settings
from django.test import TestCase
class UtilsSettingsTestCase(TestCase):
def test_lazy_ugettext_lazy(self):
self.assertEqual(str(settings.TEST_LAZY_UGETTEXT_LAZY),
"Lazy setting
|
.")
|
chronicle/api-samples-python
|
feeds/create_azure_ad_context_feed.py
|
Python
|
apache-2.0
| 4,356 | 0.008494 |
#!/usr/bin/env python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Executable sample for creating a Azure AD Context Feed.
Creating other feeds requires changing this sample code.
"""
import argparse
import json
from typing import Any, Mapping
from google.auth.transport import requests
from common import chronicle_auth
from common import regions
CHRONICLE_API_BASE_URL = "https://backstory.googleapis.com"
def create_azure_ad_context_feed(http_session: requests.AuthorizedSession,
tokenendpoint: str, clientid: str,
clientsecret: str, retrievedevices: bool,
retrievegroups: bool) -> Mapping[str, Any]:
"""Creates a new Azure AD Context feed.
Args:
http_session: Authorized session for HTTP requests.
tokenendpoint: A string which represents endpoint to connect to.
    clientid: A string which represents Id of the credential to use.
clientsecret: A string which represents secret of the credential to use.
retrievedevices: A boolean to indicate whether to retrieve devices or not.
retrievegroups: A boolean to indicate whether to retrieve groups or not.
Returns:
New Azure AD Feed.
Raises:
    requests.exceptions.HTTPError: HTTP request resulted in an error
(response.status_code >= 400).
"""
url = f"{CHRONICLE_API_BASE_URL}/v1/feeds/"
body = {
"details": {
"feedSourceType": "API",
"logType": "AZURE_AD_CONTEXT",
"azureAdContextSettings": {
"authentication": {
"tokenEndpoint": tokenendpoint,
"clientId": clientid,
"clientSecret": clientsecret
},
"retrieveDevices": retrievedevices,
"retrieveGroups": retrievegroups
}
}
}
response = http_session.request("POST", url, json=body)
# Expected server response:
# {
# "name": "feeds/e0eb5fb0-8fbd-4f0f-b063-710943ad7812",
# "details": {
# "logType": "AZURE_AD_CONTEXT",
# "feedSourceType": "API",
# "azureAdContextSettings": {
# "authentication": {
# "tokenEndpoint": "tokenendpoint.example.com",
# "clientId": "clientid_example",
# "clientSecret": "clientsecret_example"
# },
# "retrieveDevices": true
# }
# },
# "feedState": "PENDING_ENABLEMENT"
# }
if response.status_code >= 400:
print(response.text)
response.raise_for_status()
return response.json()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
chronicle_auth.add_argument_credentials_file(parser)
regions.add_argument_region(parser)
parser.add_argument(
"-te",
"--tokenendpoint",
type=str,
required=True,
help="token endpoint")
parser.add_argument(
"-ci",
"--clientid",
type=str,
required=True,
help="client id")
parser.add_argument(
"-cs",
"--clientsecret",
type=str,
required=True,
help="client secret")
parser.add_argument(
"-rd",
"--retrievedevices",
type=bool,
required=True,
help="retrieve devices")
parser.add_argument(
"-rg",
"--retrievegroups",
type=str,
required=True,
help="retrieve groups")
args = parser.parse_args()
CHRONICLE_API_BASE_URL = regions.url(CHRONICLE_API_BASE_URL, args.region)
session = chronicle_auth.initialize_http_session(args.credentials_file)
new_feed = create_azure_ad_context_feed(session, args.tokenendpoint,
args.clientid, args.clientsecret,
args.retrievedevices,
args.retrievegroups)
print(json.dumps(new_feed, indent=2))
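  # Example invocation sketch (flag values are placeholders; -c and -r are
  # assumed to come from the shared chronicle_auth/regions helpers above):
  #   python3 create_azure_ad_context_feed.py -c credentials.json -r us \
  #       -te https://token.example.com -ci my_client_id -cs my_secret \
  #       -rd True -rg True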
|
openstack/cloudkitty
|
cloudkitty/db/sqlalchemy/alembic/versions/464e951dc3b8_initial_migration.py
|
Python
|
apache-2.0
| 1,314 | 0 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial migration
Revision ID: 464e951dc3b8
Revises: None
Create Date: 2014-08-05 17:41:34.470183
"""
# revision identifiers, used by Alembic.
revision = '464e951dc3b8'
down_revision = None
from alembic import op # noqa: E402
import sqlalchemy as sa # noqa: E402
def upgrade():
op.create_table(
'states',
        sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('state', sa.BigInteger(), nullable=False),
sa.Column('s_metadata', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('name'))
op.create_table(
'modules_state',
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('state', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('name'))
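# Editor's sketch (not part of the original revision): the migration above
# defines upgrade() only. A matching downgrade(), following the usual
# Alembic pattern, would drop the two tables it created:
def downgrade():
    # Neither table references the other, so drop order is arbitrary.
    op.drop_table('modules_state')
    op.drop_table('states')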
|
jotes/pontoon
|
pontoon/test/fixtures/base.py
|
Python
|
bsd-3-clause
| 3,576 | 0.000559 |
import pytest
from pontoon.test import factories
@pytest.fixture
def admin():
"""Admin - a superuser"""
    return factories.UserFactory.create(username="admin", is_superuser=True,)
@pytest.fixture
def client_superuser(client, admin):
"""Provides a client with a logged in superuser. """
client.force_login(admin)
return client
@pytest.fixture
def user_a():
return factories.UserFactory(username="user_a")
@pytest.fixture
def user_b():
return factories.UserFactory(username="user_b")
@pytest.fixture
def user_c():
    return factories.UserFactory(username="user_c")
@pytest.fixture
def member(client, user_a):
"""Provides a `LoggedInMember` with the attributes `user` and `client`
the `client` is authenticated
"""
class LoggedInMember(object):
def __init__(self, user, client):
client.force_login(user)
self.client = client
self.user = user
return LoggedInMember(user_a, client)
@pytest.fixture
def locale_a():
return factories.LocaleFactory(code="kg", name="Klingon",)
@pytest.fixture
def google_translate_locale(locale_a):
"""Set the Google Cloud Translation API locale code for locale_a"""
locale_a.google_translate_code = "bg"
locale_a.save()
return locale_a
@pytest.fixture
def ms_locale(locale_a):
"""Set the Microsoft API locale code for locale_a"""
locale_a.ms_translator_code = "gb"
locale_a.save()
return locale_a
@pytest.fixture
def locale_b():
return factories.LocaleFactory(code="gs", name="Geonosian",)
@pytest.fixture
def project_a():
return factories.ProjectFactory(
slug="project_a", name="Project A", repositories=[],
)
@pytest.fixture
def project_b():
return factories.ProjectFactory(slug="project_b", name="Project B")
@pytest.fixture
def system_project_a():
return factories.ProjectFactory(
slug="system_project_a",
name="System Project A",
repositories=[],
system_project=True,
)
@pytest.fixture
def resource_a(project_a):
return factories.ResourceFactory(
project=project_a, path="resource_a.po", format="po"
)
@pytest.fixture
def resource_b(project_b):
return factories.ResourceFactory(
project=project_b, path="resource_b.po", format="po"
)
@pytest.fixture
def entity_a(resource_a):
return factories.EntityFactory(resource=resource_a, string="entity a")
@pytest.fixture
def entity_b(resource_b):
return factories.EntityFactory(resource=resource_b, string="entity b")
@pytest.fixture
def project_locale_a(project_a, locale_a):
return factories.ProjectLocaleFactory(project=project_a, locale=locale_a,)
@pytest.fixture
def translation_a(locale_a, project_locale_a, entity_a, user_a):
"""Return a translation.
Note that we require the `project_locale_a` fixture because a
valid ProjectLocale is needed in order to query Translations.
"""
translation_a = factories.TranslationFactory(
entity=entity_a,
locale=locale_a,
user=user_a,
string="Translation for entity_a",
)
translation_a.locale.refresh_from_db()
translation_a.entity.resource.project.refresh_from_db()
return translation_a
@pytest.fixture
def tag_a(resource_a, project_a, locale_a):
# Tags require a TranslatedResource to work.
factories.TranslatedResourceFactory.create(resource=resource_a, locale=locale_a)
tag = factories.TagFactory.create(slug="tag", name="Tag", project=project_a,)
tag.resources.add(resource_a)
return tag
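# Editor's sketch (illustrative, not part of the original fixture module):
# pytest injects fixtures by parameter name, so a test composes the pieces
# above simply by naming them.
def test_translation_links_entity_and_locale(translation_a, entity_a, locale_a):
    assert translation_a.entity == entity_a
    assert translation_a.locale == locale_a
    assert translation_a.string == "Translation for entity_a"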
|
eReuse/DeviceHub
|
ereuse_devicehub/resources/device/mobile/settings.py
|
Python
|
agpl-3.0
| 722 | 0 |
import copy
from ereuse_devicehub.resources.device.schema import Device
from ereuse_devicehub.resources.device.settings import DeviceSubSettings
class Mobile(Device):
imei = {
'type': 'string',
'unique': True
}
meid = {
'type': 'string',
'unique': True
}
type = {
'type': 'string',
'allowed': {'Smartphone', 'Tablet'},
'required': True
}
manufacturer = copy.copy(Device.manufacturer)
manufacturer['required'] = True
serialNumber = copy.copy(Device.serialNumber)
    serialNumber['required'] = True
model = copy.copy(Device.model)
model['required'] = True
class MobileSettings(DeviceSubSettings):
_schema = Mobile
|
badock/nova
|
nova/db/discovery/api.py
|
Python
|
apache-2.0
| 232,570 | 0.001303 |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of Discovery backend."""
import collections
import copy
import datetime
import functools
import sys
import threading
import time
import uuid
from oslo.config import cfg
from oslo.db import exception as db_exc
from oslo.db.sqlalchemy import session as db_session
from oslo.db.sqlalchemy import utils as sqlalchemyutils
from oslo.utils import excutils
from oslo.utils import timeutils
import six
# from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import Integer
from sqlalchemy import MetaData
# from sqlalchemy import or_
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true
from sqlalchemy import String
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
# from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _, _LI
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
try:
    from nova import quota
except ImportError:
    # quota is optional for this backend; continue without it.
    pass
# RIAK
from nova.db.discovery.query import or_
from nova.db.discovery.query import and_
import itertools
import traceback
import uuid
import pprint
import riak
import inspect
from inspect import getmembers
from sqlalchemy.util._collections import KeyedTuple
import netaddr
from sqlalchemy.sql.expression import BinaryExpression
from sqlalchemy.orm.evaluator import EvaluatorCompiler
from sqlalchemy.orm.collections import InstrumentedList
from nova.db.discovery import models
import pytz
from desimplifier import ObjectDesimplifier
from utils import find_table_name
from query import RiakModelQuery
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
               help='When set, compute API will consider duplicate host names '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
LOG = logging.getLogger(__name__)
_ENGINE_FACADE = None
_LOCK = threading.Lock()
def _create_facade_lazily():
global _LOCK, _ENGINE_FACADE
if _ENGINE_FACADE is None:
with _LOCK:
if _ENGINE_FACADE is None:
_ENGINE_FACADE = db_session.EngineFacade.from_config(CONF)
return _ENGINE_FACADE
def get_engine(use_slave=False):
facade = _create_facade_lazily()
return facade.get_engine(use_slave=use_slave)
# def get_session(use_slave=False, **kwargs):
# facade = _create_facade_lazily()
# return facade.get_session(use_slave=use_slave, **kwargs)
class ControlledExecution():
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
class FakeSession():
def add(self, *objs):
for obj in objs:
obj.save()
def query(self, *entities, **kwargs):
return RiakModelQuery(*entities, **kwargs)
def begin(self, *args, **kwargs):
return ControlledExecution()
def flush(self, *args, **kwargs):
pass
def get_session(use_slave=False, **kwargs):
# facade = _create_facade_lazily(use_slave)
# return facade.get_session(**kwargs)
return FakeSession()
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_admin_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`nova.context.authorize_project_context` and
:py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_instance_exists_using_uuid(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_uuid as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use context and aggregate_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warn(_("Deadlock detected when running "
"'%(func_name)s': Retrying..."),
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
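# Editor's sketch (hypothetical, not part of the original module): the
# decorators above stack on DB API functions; the context check runs first,
# then the body is retried whenever a DBDeadlock is raised.
@require_context
@_retry_on_deadlock
def _example_instance_lookup(context, instance_uuid):
    # instance_get_by_uuid is the real lookup defined later in this module;
    # this wrapper exists only to illustrate the decorator stack.
    return instance_get_by_uuid(context, instance_uuid)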
# TODO: modified model_query
def model_query(context, *args, **kwargs):
# base_model = kwargs["base_model"]
# models = args
return RiakModelQuery(*args, **kwargs)
def exact_filter(query, model, filters, legal_keys):
"""Applies exact match filtering to a query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param model: model object the query applies to, for IN-style
filtering
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
"""
filter_dict = {}
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if key in ('metadata', 'system_metadata'):
c
|
pioneers/topgear
|
python/forseti2/piemos_field_cmd.py
|
Python
|
apache-2.0
| 2,272 | 0.007042 |
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
import cStringIO as StringIO
import struct
import header
class piemos_field_cmd(object):
__slots__ = ["header", "isFlash", "isStart", "isLeft", "rfid_uid"]
def __init__(self):
self.header = None
self.isFlash = False
self.isStart = False
self.isLeft = False
self.rfid_uid = 0
def encode(self):
buf = StringIO.StringIO()
buf.write(piemos_field_cmd._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
assert self.header._get_packed_fingerprint() == header.header._get_packed_fingerprint()
self.header._encode_one(buf)
buf.write(struct.pack(">bbbq", self.isFlash, self.isStart, self.isLeft, self.rfid_uid))
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = StringIO.StringIO(data)
if buf.read(8) != piemos_field_cmd._get_packed_fingerprint():
raise ValueError("Decode error")
return piemos_field_cmd._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = piemos_field_cmd()
self.header = header.header._decode_one(buf)
self.isFlash, self.isStart, self.isLeft, self.rfid_uid = struct.unpack(">bbbq", buf.read(11))
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if piemos_field_cmd in parents: return 0
        newparents = parents + [piemos_field_cmd]
        tmphash = (0x41930ef51bb056ba+ header.header._get_hash_recursive(newparents)) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if piemos_field_cmd._packed_fingerprint is None:
piemos_field_cmd._packed_fingerprint = struct.pack(">Q", piemos_field_cmd._get_hash_recursive([]))
return piemos_field_cmd._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
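# Editor's sketch (illustrative): a typical encode/decode round trip for
# this generated type. Assumes the generated header.header type
# default-initializes its fields, as this class does.
if __name__ == '__main__':
    msg = piemos_field_cmd()
    msg.header = header.header()
    msg.isStart = True
    data = msg.encode()
    decoded = piemos_field_cmd.decode(data)
    assert decoded.isStart == 1  # 'b' fields unpack as ints, not bools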
|
godlygeek/LightRender
|
simple_rainbow.py
|
Python
|
mit
| 807 | 0.006196 |
#!/usr/bin/env python
from constants import CARTESIAN_COORDS
import colorsys
import sys
class Pattern(object):
center_x, center_y = 0, 0
i = 0
def next_frame(self):
self.i += 1
def get_color(self, x, y):
d = (x ** 2 + y ** 2) ** 0.5
d *= 0.1 # scale the bands
d -= 0.025 * self.i # frame step size
r, g, b = colorsys.hsv_to_rgb(d%1, 1, 1)
        red = 255 * r
green = 255 * g
blue = 255 * b
c = (int(red), int(green), int(blue))
return c
p = Pattern()
for frame in range(6000):
for x, y in CARTESIAN_COORDS:
color = p.get_color(x, y)
r, g, b = color
sys.stdout.write(chr(r))
sys.stdout.write(chr(g))
sys.stdout.write(chr(b))
sys.stdout.flush()
p.next_frame()
|
Uli1/mapnik
|
scons/scons-local-2.4.0/SCons/Script/SConscript.py
|
Python
|
lgpl-2.1
| 24,468 | 0.002738 |
"""SCons.Script.SConscript
This module defines the Python API provided to SConscript and SConstruct
files.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division
__revision__ = "src/engine/SCons/Script/SConscript.py rel_2.4.0:3365:9259ea1c13d7 2015/09/21 14:03:43 bdbaddog"
import SCons
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Environment
import SCons.Errors
import SCons.Node
import SCons.Node.Alias
import SCons.Node.FS
import SCons.Platform
import SCons.SConf
import SCons.Script.Main
import SCons.Tool
import SCons.Util
import collections
import os
import os.path
import re
import sys
import traceback
# The following variables used to live in this module. Some
# SConscript files out there may have referred to them directly as
# SCons.Script.SConscript.*. This is now supported by some special
# handling towards the bottom of the SConscript.__init__.py module.
#Arguments = {}
#ArgList = []
#BuildTargets = TargetList()
#CommandLineTargets = []
#DefaultTargets = []
class SConscriptReturn(Exception):
pass
launch_dir = os.path.abspath(os.curdir)
GlobalDict = None
# global exports set by Export():
global_exports = {}
# chdir flag
sconscript_chdir = 1
def get_calling_namespaces():
"""Return the locals and globals for the function that called
into this module in the current call stack."""
try: 1//0
except ZeroDivisionError:
# Don't start iterating with the current stack-frame to
# prevent creating reference cycles (f_back is safe).
frame = sys.exc_info()[2].tb_frame.f_back
# Find the first frame that *isn't* from this file. This means
# that we expect all of the SCons frames that implement an Export()
# or SConscript() call to be in this file, so that we can identify
# the first non-Script.SConscript frame as the user's local calling
# environment, and the locals and globals dictionaries from that
# frame as the calling namespaces. See the comment below preceding
# the DefaultEnvironmentCall block for even more explanation.
while frame.f_globals.get("__name__") == __name__:
frame = frame.f_back
return frame.f_locals, frame.f_globals
def compute_exports(exports):
"""Compute a dictionary of exports given one of the parameters
to the Export() function or the exports argument to SConscript()."""
loc, glob = get_calling_namespaces()
retval = {}
try:
for export in exports:
if SCons.Util.is_Dict(export):
retval.update(export)
else:
try:
retval[export] = loc[export]
except KeyError:
retval[export] = glob[export]
except KeyError, x:
raise SCons.Errors.UserError("Export of non-existent variable '%s'"%x)
return retval
class Frame(object):
"""A frame on the SConstruct/SConscript call stack"""
def __init__(self, fs, exports, sconscript):
self.globals = BuildDefaultGlobals()
self.retval = None
self.prev_dir = fs.getcwd()
self.exports = compute_exports(exports) # exports from the calling SConscript
# make sure the sconscript attr is a Node.
if isinstance(sconscript, SCons.Node.Node):
self.sconscript = sconscript
elif sconscript == '-':
self.sconscript = None
else:
self.sconscript = fs.File(str(sconscript))
# the SConstruct/SConscript call stack:
call_stack = []
# For documentation on the methods in this file, see the scons man-page
def Return(*vars, **kw):
retval = []
try:
fvars = SCons.Util.flatten(vars)
for var in fvars:
for v in var.split():
retval.append(call_stack[-1].globals[v])
except KeyError, x:
raise SCons.Errors.UserError("Return of non-existent variable '%s'"%x)
if len(retval) == 1:
call_stack[-1].retval = retval[0]
else:
call_stack[-1].retval = tuple(retval)
stop = kw.get('stop', True)
if stop:
raise SConscriptReturn
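# Editor's sketch (illustrative): from the user's side, Export(), SConscript()
# and Return() pair up across build files roughly like this:
#
#   # SConstruct
#   env = Environment()
#   Export('env')
#   objs = SConscript('src/SConscript')
#
#   # src/SConscript
#   Import('env')
#   objs = env.Object('main.c')
#   Return('objs')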
stack_bottom = '% Stack boTTom %' # hard to define a variable w/this name :)
def _SConscript(fs, *files, **kw):
top = fs.Top
sd = fs.SConstruct_dir.rdir()
exports = kw.get('exports', [])
# evaluate each SConscript file
results = []
for fn in files:
call_stack.append(Frame(fs, exports, fn))
old_sys_path = sys.path
try:
SCons.Script.sconscript_reading = SCons.Script.sconscript_reading + 1
if fn == "-":
                exec sys.stdin in call_stack[-1].globals
else:
                if isinstance(fn, SCons.Node.Node):
f = fn
else:
f = fs.File(str(fn))
_file_ = None
# Change directory to the top of the source
# tree to make sure the os's cwd and the cwd of
# fs match so we can open the SConscript.
fs.chdir(top, change_os_dir=1)
if f.rexists():
actual = f.rfile()
_file_ = open(actual.get_abspath(), "r")
elif f.srcnode().rexists():
actual = f.srcnode().rfile()
_file_ = open(actual.get_abspath(), "r")
elif f.has_src_builder():
# The SConscript file apparently exists in a source
# code management system. Build it, but then clear
# the builder so that it doesn't get built *again*
# during the actual build phase.
f.build()
f.built()
f.builder_set(None)
if f.exists():
_file_ = open(f.get_abspath(), "r")
if _file_:
# Chdir to the SConscript directory. Use a path
# name relative to the SConstruct file so that if
# we're using the -f option, we're essentially
# creating a parallel SConscript directory structure
# in our local directory tree.
#
# XXX This is broken for multiple-repository cases
# where the SConstruct and SConscript files might be
# in different Repositories. For now, cross that
# bridge when someone comes to it.
try:
src_dir = kw['src_dir']
except KeyError:
ldir = fs.Dir(f.dir.get_path(sd))
else:
ldir = fs.Dir(src_dir)
if not ldir.is_under(f.dir):
# They specified a source directory, but
# it's above the SConscript directory.
# Do the sensible thing and just use the
                            # SConscript directory.
ldir = fs.Dir(f.dir.g
|
VHAINNOVATIONS/BCDS
|
Model/scripts/ear_validation/python/aggregateContention.py
|
Python
|
apache-2.0
| 9,448 | 0.037786 |
import os
import re
import cx_Oracle
import collections
import datetime
earContentionCode = [2200,2210,2220,3140,3150,4130,4210,4700,4920,5000,5010,5710,6850]
#Primary query, Look for all claims/contentions where the participant has at least one contention with an ear-related contention code.
#Organize them based first by participant id, then claim id and finally by profile date descending.
SQL="select rcc.ptcpnt_vet_id, \
bnft_claim_id, \
date_of_claim, \
prfil_dt, \
claim_ro_number, \
cntntn_id, \
cntntn_clsfcn_id, \
cntntn_clmant_txt, \
p.dob, \
end_prdct_type_cd \
from combo_rating_corp_claim rcc \
left join ah2626_person p on p.ptcpnt_vet_id = rcc.ptcpnt_vet_id \
inner join v_ear_claim_source cs on cs.vet_id = rcc.ptcpnt_vet_id and cs.claim_id = rcc.bnft_claim_id \
where prfil_dt >= date_of_claim \
order by rcc.ptcpnt_vet_id desc,bnft_claim_id,prfil_dt"
class AggregateContention:
def __init__(self):
self.VET_ID = None
self.CLAIM_ID = None
self.DOB = 0
self.END_PRODUCT_CODE = None
self.RO_NUMBER = 0
self.CLAIM_DATE = None
self.MAX_PROFILE_DATE = None
self.CONTENTION_COUNT = 0
self.EAR_CONTENTION_COUNT = 0
self.C2200 = 0
self.C2210 = 0
self.C2220 = 0
self.C3140 = 0
self.C3150 = 0
self.C4130 = 0
self.C4210 = 0
self.C4700 = 0
self.C4920 = 0
self.C5000 = 0
self.C5010 = 0
self.C5710 = 0
self.C6850 = 0
self.TXT_LOSS = 0
self.TXT_TINITU = 0
def __str__(self):
from pprint import pprint
return str(vars(self))
class Contention:
def __init__(self, ptcpnt_vet_id, bnft_claim_id, claim_date, prfil_dt, claim_ro_number, cntntn_id, cntntn_clsfcn_id, cntntn_clmant_txt, dob, end_prdct_type_cd):
self.ptcpnt_vet_id = ptcpnt_vet_id
self.bnft_claim_id = bnft_claim_id
self.claim_date = claim_date
self.prfil_dt = prfil_dt
self.claim_ro_number = claim_ro_number
self.cntntn_id = cntntn_id
self.cntntn_clsfcn_id = cntntn_clsfcn_id
self.cntntn_clmant_txt = cntntn_clmant_txt
if not dob is None:
self.dob = int(dob)
else:
self.dob = None
self.end_prdct_type_cd = end_prdct_type_cd
def __str__(self):
from pprint import pprint
return str(vars(self))
connection = cx_Oracle.connect('developer/D3vVV0Rd@127.0.0.1:1521/DEV.BCDSS')
writeCursor = connection.cursor()
writeCursor.prepare('INSERT INTO DEVELOPER.V_EAR_AGGREGATE_CONTENTION (VET_ID, CLAIM_ID, END_PRODUCT_CODE, CLAIM_DATE, CONTENTION_COUNT, EAR_CONTENTION_COUNT, C2200,C2210, C2220,C3140,C3150,C4130,C4210,C4700,C4920,C5000,C5010,C5710, C6850, TXT_LOSS, TXT_TINITU, DOB, RO_NUMBER, MAX_PROFILE_DATE) \
VALUES (:VET_ID, :CLAIM_ID, :END_PRODUCT_CODE, :CLAIM_DATE, :CONTENTION_COUNT, :EAR_CONTENTION_COUNT, \
:C2200, :C2210, :C2220, :C3140, :C3150, :C4130 , :C4210, :C4700, :C4920, :C5000, :C5010, :C5710, :C6850, \
:TXT_LOSS, :TXT_TINITU, \
:DOB, :RO_NUMBER, :MAX_PROFILE_DATE)')
print(str(datetime.datetime.now()))
cursor = connection.cursor()
cursor.execute(SQL)
aggregateContention = None
counterAggregateContention = None
totalContentions = None
totalEarContentions = None
maxProfileDate = None
currBenefitClaim = -1
currParticipant = -1
counter = 0
for row in cursor:
if counter == 1000: #Commit every 1000 records. Improvement would be to look into aggregate inserts
connection.commit()
counter=0
contention = Contention(row[0],row[1],row[2],row[3],row[4],row[5],row[6], row[7], row[8], row[9]) #Map loose fields into a Contention object. (Contention is a convenience object)
if currBenefitClaim != contention.bnft_claim_id: #Process insert statement and reset aggregation variables when claim id changes
if currBenefitClaim != -1: #Skip if first time through
#Perform all aggregation calculations before inserting and resetting
aggregateContention.CONTENTION_COUNT = sum(totalContentions.values())
aggregateContention.EAR_CONTENTION_COUNT = sum(totalEarContentions.values())
aggregateContention.MAX_PROFILE_DATE = maxProfileDate[currBenefitClaim]
            writeCursor.execute(None, {'VET_ID' :aggregateContention.VET_ID, 'CLAIM_ID' :aggregateContention.CLAIM_ID, 'END_PRODUCT_CODE' :aggregateContention.END_PRODUCT_CODE, 'CLAIM_DATE' :aggregateContention.CLAIM_DATE, 'CONTENTION_COUNT' :aggregateContention.CONTENTION_COUNT, 'EAR_CONTENTION_COUNT' :aggregateContention.EAR_CONTENTION_COUNT,
'C2200' :counterAggregateContention.C2200, 'C2210' :counterAggregateContention.C2210, 'C2220' :counterAggregateContention.C2220, 'C3140' :counterAggregateContention.C3140, 'C3150' :counterAggregateContention.C3150, 'C4130' :counterAggregateContention.C4130, 'C4210' :counterAggregateContention.C4210, 'C4700' :counterAggregateContention.C4700, 'C4920' :counterAggregateContention.C4920, 'C5000' :counterAggregateContention.C5000, 'C5010' :counterAggregateContention.C5010, 'C5710' :counterAggregateContention.C5710, 'C6850' :counterAggregateContention.C6850,
'TXT_LOSS' :counterAggregateContention.TXT_LOSS, 'TXT_TINITU' :counterAggregateContention.TXT_TINITU,
'DOB' :aggregateContention.DOB, 'RO_NUMBER' :aggregateContention.RO_NUMBER, 'MAX_PROFILE_DATE' :aggregateContention.MAX_PROFILE_DATE})
counter += 1
currBenefitClaim = contention.bnft_claim_id #Reset claim id
if currParticipant != contention.ptcpnt_vet_id :
currParticipant = contention.ptcpnt_vet_id #Reset participant id
counterAggregateContention = AggregateContention()
#Capture all claim/person level items that do not change per contention
aggregateContention = AggregateContention()
aggregateContention.VET_ID = contention.ptcpnt_vet_id
aggregateContention.CLAIM_ID = currBenefitClaim
aggregateContention.RO_NUMBER = contention.claim_ro_number
aggregateContention.DOB = contention.dob
aggregateContention.CLAIM_DATE = contention.claim_date
aggregateContention.END_PRODUCT_CODE = contention.end_prdct_type_cd
#Reset the counters
        totalContentions = collections.Counter()
        totalEarContentions = collections.Counter()
        maxProfileDate = collections.Counter()
maxProfileDate[currBenefitClaim] = contention.prfil_dt #If a claim has multiple profile dates, because of the sorting, we always end up with the most recent profile date
totalContentions[currBenefitClaim] += 1 #For every contention add one
if contention.cntntn_clsfcn_id in earContentionCode:
totalEarContentions[currBenefitClaim] +=1 #For any contention that is ear-related, add one
    #Use regex to look for keyword hits in the contention text and increment the matching counter
if re.search("Loss",contention.cntntn_clmant_txt,re.IGNORECASE):
counterAggregateContention.TXT_LOSS += 1
if re.search("Tinnitus",contention.cntntn_clmant_txt,re.IGNORECASE):
counterAggregateContention.TXT_TINITU += 1
    #Test the classification codes and increment the matching per-code counter
if contention.cntntn_clsfcn_id == 2200:
counterAggregateContention.C2200 += 1
if contention.cntntn_clsfcn_id == 2210:
counterAggregateContention.C2210 += 1
if contention.cntntn_clsfcn_id == 2220:
counterAggregateContention.C2220 += 1
if contention.cntntn_clsfcn_id == 3140:
counterAggregateContention.C3140 += 1
if contention.cntntn_clsfcn_id == 3150:
counterAggregateContention.C3150 += 1
if contention.cntntn_clsfcn_id == 4130:
counterAggregateContention.C4130 += 1
if contention.cntntn_clsfcn_id == 4210:
counterAggregateContention.C4210 += 1
if contention.cntntn_clsfcn_id == 4700:
counterAggregateContention.C4700 += 1
if contention.cntntn_clsfcn_id == 4920:
counterAggregateContention.C4920 += 1
if contention.cntntn_clsfcn_id == 5000:
counterAggregateContention.C5000 += 1
if contention.cntntn_clsfcn_id == 5010:
counterAggregateContention.C5010 += 1
if contention.cntntn_clsfcn_id == 5710:
counterAggregateContention.C5710 += 1
if contention.cntntn_clsfcn_id == 6850:
counterAggregateContention.C6850 += 1
#This block intentionally sits after the for loop (note Python's indentation) in order to capture the last claim.
aggregateContention.CONTENTION_COUNT = sum(totalContentions.values())
aggregateContention.EAR_CONTENTION_COUNT = sum(totalEarContentions.values())
aggregateContention.MAX_PROFILE_DATE = maxProfileDate[currBenefitClaim]
|
slarosa/QGIS
|
python/plugins/sextante/algs/Explode.py
|
Python
|
gpl-2.0
| 3,641 | 0.006042 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
Explode.py
---------------------
Date : August 2012
    Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from sextante.core.GeoAlgorithm import GeoAlgorithm
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from sextante.parameters.ParameterVector import ParameterVector
from sextante.core.QGisLayers import QGisLayers
from sextante.outputs.OutputVector import OutputVector
class Explode(GeoAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
#===========================================================================
# def getIcon(self):
# return QtGui.QIcon(os.path.dirname(__file__) + "/../images/toolbox.png")
#===========================================================================
def processAlgorithm(self, progress):
vlayer = QGisLayers.getObjectFromUri(self.getParameterValue(self.INPUT))
output = self.getOutputFromName(self.OUTPUT)
vprovider = vlayer.dataProvider()
fields = vprovider.fields()
writer = output.getVectorWriter(fields, QGis.WKBLineString, vlayer.crs() )
outFeat = QgsFeature()
inGeom = QgsGeometry()
nElement = 0
features = QGisLayers.features(vlayer)
nFeat = len(features)
for feature in features:
nElement += 1
progress.setPercentage((nElement*100)/nFeat)
inGeom = feature.geometry()
atMap = feature.attributes()
segments = self.extractAsSingleSegments( inGeom )
outFeat.setAttributes( atMap )
for segment in segments:
outFeat.setGeometry(segment)
writer.addFeature(outFeat)
del writer
def extractAsSingleSegments( self, geom ):
segments = []
if geom.isMultipart():
multi = geom.asMultiPolyline()
for polyline in multi:
segments.extend( self.getPolylineAsSingleSegments(polyline))
else:
segments.extend( self.getPolylineAsSingleSegments(geom.asPolyline()))
return segments
def getPolylineAsSingleSegments(self, polyline):
segments = []
for i in range(len(polyline)-1):
ptA = polyline[i]
ptB = polyline[i+1]
segment = QgsGeometry.fromPolyline([ptA, ptB])
segments.append(segment)
return segments
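    # Editor's note (illustrative): for a polyline [p0, p1, p2] the method
    # above returns two single-segment geometries, (p0, p1) and (p1, p2),
    # which is the "exploded" form of the input line.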
def defineCharacteristics(self):
self.name = "Explode lines"
self.group = "Vector geometry tools"
self.addParameter(ParameterVector(self.INPUT, "Input layer",ParameterVector.VECTOR_TYPE_LINE))
self.addOutput(OutputVector(self.OUTPUT, "Output layer"))
|
appsembler/edx-platform
|
openedx/core/djangoapps/appsembler/settings/settings/production_cms.py
|
Python
|
agpl-3.0
| 1,171 | 0.003416 |
"""
Settings for Appsembler on CMS in Production.
"""
import sentry_sdk
from openedx.core.djangoapps.appsembler.settings.settings import production_common
def plugin_settings(settings):
"""
    Appsembler CMS overrides for both production AND devstack.
Make sure those are compatible for devstack via defensive coding.
This file, however, won't run in test environments.
"""
production_common.plugin_settings(settings)
settings.APPSEMBLER_SECRET_KEY = settings.AUTH_TOKENS.get("APPSEMBLER_SECRET_KEY")
    settings.INTERCOM_APP_ID = settings.AUTH_TOKENS.get("INTERCOM_APP_ID")
settings.INTERCOM_APP_SECRET = settings.AUTH_TOKENS.get("INTERCOM_APP_SECRET")
settings.FEATURES['ENABLE_COURSEWARE_INDEX'] = True
settings.FEATURES['ENABLE_LIBRARY_INDEX'] = True
settings.ELASTIC_FIELD_MAPPINGS = {
"start_date": {
"type": "date"
}
}
if settings.SENTRY_DSN:
sentry_sdk.set_tag('app', 'cms')
settings.SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
settings.XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5
settings.HIJACK_LOGIN_REDIRECT_URL = '/home'
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.1/Lib/test/test_bigaddrspace.py
|
Python
|
mit
| 1,284 | 0.003894 |
from test import support
from test.support import bigaddrspacetest, MAX_Py_ssize_t
import unittest
import operator
import sys
class StrTest(unittest.TestCase):
@bigaddrspacetest
def test_concat(self):
s1 = 'x' * MAX_Py_ssize_t
self.assertRaises(OverflowError, operator.add, s1, '?')
@bigaddrspacetest
def test_optimized_concat(self):
x = 'x' * MAX_Py_ssize_t
try:
x = x + '?' # this statement uses a fast path in ceval.c
except OverflowError:
pass
else:
self.fail("should have raised OverflowError")
try:
x += '?' # this statement uses a fast path in ceval.c
except OverflowError:
pass
else:
self.fail("should have raised OverflowError")
self.assertEquals(len(x), MAX_Py_ssize_t)
### the following test is pending a patch
# (http://mail.python.org/pipermail/python-dev/2006-July/067774.html)
#@bigaddrspacetest
#def test_repeat(self):
# self.assertRaises(OverflowError, operator.mul, 'x', MAX_Py_ssize_t + 1)
def test_main():
support.run_unittest(StrTest)
if __name__ == '__main__':
if len(sys.argv) > 1:
        support.set_memlimit(sys.argv[1])
test_main()
|
Rediker-Software/litle-sdk-for-python
|
litleSdkPythonTest/functional/TestAuth.py
|
Python
|
mit
| 7,012 | 0.011266 |
#Copyright (c) 2011-2012 Litle & Co.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
class TestAuth(unittest.TestCase):
def testSimpleAuthWithCard(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100000000000000"
card.expDate = "1210"
card.type = 'VI'
authorization.card = card
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(authorization)
self.assertEquals("000",response.response)
def testSimpleAuthWithPaypal(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '12344'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
paypal = litleXmlFields.payPal()
paypal.payerId = "1234"
paypal.token = "1234"
paypal.transactionId = '123456'
authorization.paypal = paypal
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(authorization)
self.assertEquals("Approved",response.message)
def testSimpleAuthWithSecondaryAmountAndApplepay(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '1234'
authorization.amount = 110
authorization.orderSource = 'ecommerce'
authorization.secondaryAmount = '10'
applepay = litleXmlFields.applepayType()
applepay.data = "4100000000000000"
applepay.signature = "sign"
applepay.version = '1'
header=litleXmlFields.applepayHeaderType()
header.applicationData='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
header.ephemeralPublicKey ='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
header.publicKeyHash='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
header.transactionId='1024'
applepay.header=header
authorization.applepay = applepay
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(authorization)
self.assertEquals("Insufficient Funds",response.message)
self.assertEquals(110,response.applepayResponse.transactionAmount)
def testPosWithoutCapabilityAndEntryMode(self):
authorization = litleXmlFields.authorization()
authorization.orderId = '123456'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
pos = litleXmlFields.pos()
pos.cardholderId = "pin"
authorization.pos = pos
card = litleXmlFields.cardType()
card.number = "4100000000000002"
card.expDate = "1210"
card.type = 'VI'
card.cardValidationNum = '1213'
authorization.card = card
        litle = litleOnlineRequest(config)
with self.assertRaises(Exception):
litle.sendRequest(authorization)
def testAccountUpdate(self):
        authorization = litleXmlFields.authorization()
        authorization.orderId = '12344'
authorization.amount = 106
authorization.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.number = "4100100000000000"
card.expDate = "1210"
card.type = 'VI'
card.cardValidationNum = '1213'
authorization.card = card
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(authorization)
self.assertEquals("4100100000000000",response.accountUpdater.originalCardInfo.number)
def testTrackData(self):
authorization = litleXmlFields.authorization()
authorization.id = 'AX54321678'
authorization.reportGroup = 'RG27'
authorization.orderId = '12z58743y1'
authorization.amount = 12522
authorization.orderSource = 'retail'
billToAddress = litleXmlFields.contact()
billToAddress.zip = '95032'
authorization.billToAddress = billToAddress
card = litleXmlFields.cardType()
card.track = "%B40000001^Doe/JohnP^06041...?;40001=0604101064200?"
authorization.card = card
pos = litleXmlFields.pos()
pos.capability = 'magstripe'
pos.entryMode = 'completeread'
pos.cardholderId = 'signature'
authorization.pos = pos
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(authorization)
self.assertEquals('Approved', response.message)
def testListOfTaxAmounts(self):
authorization = litleXmlFields.authorization()
authorization.id = '12345'
authorization.reportGroup = 'Default'
authorization.orderId = '67890'
authorization.amount = 10000
authorization.orderSource = 'ecommerce'
enhanced = litleXmlFields.enhancedData()
dt1 = litleXmlFields.detailTax()
dt1.taxAmount = 100
enhanced.detailTax.append(dt1)
dt2 = litleXmlFields.detailTax()
dt2.taxAmount = 200
enhanced.detailTax.append(dt2)
authorization.enhancedData = enhanced
card = litleXmlFields.cardType()
card.number = '4100000000000001'
card.expDate = '1215'
card.type = 'VI'
authorization.card = card
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(authorization)
self.assertEquals('Approved', response.message)
def suite():
suite = unittest.TestSuite()
suite = unittest.TestLoader().loadTestsFromTestCase(TestAuth)
return suite
if __name__ =='__main__':
unittest.main()
|
loanzen/probe-py
|
probe/tests.py
|
Python
|
mit
| 1,727 | 0.006369 |
import unittest
from probe import SearchApi, CompanyApi
class TestProbeAPI(unittest.TestCase):
def test_search_company_loanzen(self):
api = SearchApi()
companies = api.companies_get('1.1', filters='{"nameStartsWith": "loanzen"}')
#print type(companies.data), companies.data.companies
self.assertFalse(len(companies.data.companies) == 0)
def test_search_authorized_signatory(self):
api = SearchApi()
directors = api.authorized_signatories_get('1.1', filters='{"pan": "ANQPK6045G"}')
#print directors.data.authorized_signatories
self.assertFalse(len(directors.data.authorized_signatories) == 0)
def test_get_company_details_empty(self):
api = CompanyApi()
        company = api.companies_cin_get('1.1', 'U24239DL2002PTC114413')
#print company.data.company
self.assertEquals(company.data.company.cin, 'U24239DL2002PTC114413')
def test_get_company_authorized_signatories(self):
api = CompanyApi()
signatories = api.companies_cin_authorized_signatories_get('1.1', 'U24239DL2002PTC114413')
        #print signatories.data.authorized_signatories
self.assertFalse(len(signatories.data.authorized_signatories) == 0)
def test_get_company_charges(self):
api = CompanyApi()
charges = api.companies_cin_charges_get('1.1', 'U24239DL2002PTC114413')
#print charges.data.charges
self.assertFalse(len(charges.data.charges) == 0)
def test_get_company_financials(self):
api = CompanyApi()
financials = api.companies_cin_financials_get('1.1', 'U24239DL2002PTC114413')
print financials.data.financials
if __name__ == '__main__':
unittest.main()
|
ingenieroariel/inasafe
|
safe_qgis/keywords_dialog.py
|
Python
|
gpl-3.0
| 24,200 | 0.000579 |
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**GUI Keywords Dialog.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.. todo:: Check raster is single band
"""
__author__ = 'tim@linfiniti.com'
__version__ = '0.5.0'
__revision__ = '$Format:%H$'
__date__ = '21/02/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSignature
from odict import OrderedDict
from safe_qgis.keywords_dialog_base import Ui_KeywordsDialogBase
from safe_qgis.keyword_io import KeywordIO
from safe_qgis.help import Help
from safe_qgis.utilities import getExceptionWithStacktrace
from safe_qgis.exceptions import InvalidParameterException
from safe.common.exceptions import InaSAFEError
# Don't remove this even if it is flagged as unused by your ide
# it is needed for qrc:/ url resolution. See Qt Resources docs.
import safe_qgis.resources # pylint: disable=W0611
#see if we can import pydev - see development docs for details
try:
from pydevd import * # pylint: disable=F0401
print 'Remote debugging is enabled.'
DEBUG = True
except ImportError:
print 'Debugging was disabled'
class KeywordsDialog(QtGui.QDialog, Ui_KeywordsDialogBase):
"""Dialog implementation class for the Risk In A Box keywords editor."""
def __init__(self, parent, iface, theDock=None):
"""Constructor for the dialog.
.. note:: In QtDesigner the advanced editor's predefined keywords
list should be shown in english always, so when adding entries to
cboKeyword, be sure to choose :safe_qgis:`Properties<<` and untick
the :safe_qgis:`translatable` property.
Args:
* parent - parent widget of this dialog
* iface - a Quantum GIS QGisAppInterface instance.
* theDock - Optional dock widget instance that we can notify of
changes to the keywords.
Returns:
not applicable
Raises:
no exceptions explicitly raised
"""
QtGui.QDialog.__init__(self, parent)
self.setupUi(self)
self.setWindowTitle(self.tr(
'InaSAFE %s Keywords Editor' % __version__))
self.keywordIO = KeywordIO()
# note the keys should remain untranslated as we need to write
# english to the keywords file. The keys will be written as user data
# in the combo entries.
# .. seealso:: http://www.voidspace.org.uk/python/odict.html
self.standardExposureList = OrderedDict([('population [density]',
self.tr('population [density]')),
('population [count]',
self.tr('population [count]')),
('building',
self.tr('building')),
('building [osm]',
self.tr('building [osm]')),
('building [sigab]',
self.tr('building [sigab]')),
('roads',
self.tr('roads'))])
self.standardHazardList = OrderedDict([('earthquake [MMI]',
self.tr('earthquake [MMI]')),
('tsunami [m]',
self.tr('tsunami [m]')),
('tsunami [wet/dry]',
self.tr('tsunami [wet/dry]')),
('tsunami [feet]',
self.tr('tsunami [feet]')),
('flood [m]',
self.tr('flood [m]')),
('flood [wet/dry]',
self.tr('flood [wet/dry]')),
('flood [feet]', self.tr('flood [feet]')),
                                               ('tephra [kg2/m2]',
self.tr('tephra [kg2/m2]'))])
# Save reference to the QGIS interface and parent
self.iface = iface
self.parent = parent
self.dock = theDock
# Set up things for context help
myButton = self.buttonBox.button(QtGui.QDialogButtonBox.Help)
QtCore.QObject.connect(myButton, QtCore.SIGNAL('clicked()'),
self.showHelp)
self.helpDialog = None
        # set some initial ui state:
self.pbnAdvanced.setChecked(True)
self.pbnAdvanced.toggle()
self.radPredefined.setChecked(True)
self.adjustSize()
        #myButton = self.buttonBox.button(QtGui.QDialogButtonBox.Ok)
#myButton.setEnabled(False)
self.layer = self.iface.activeLayer()
if self.layer:
self.loadStateFromKeywords()
def showHelp(self):
"""Load the help text for the keywords safe_qgis"""
if not self.helpDialog:
self.helpDialog = Help(self.iface.mainWindow(), 'keywords')
self.helpDialog.show()
# prevents actions being handled twice
    @pyqtSignature('bool')
def on_pbnAdvanced_toggled(self, theFlag):
"""Automatic slot executed when the advanced button is toggled.
.. note:: some of the behaviour for hiding widgets is done using
the signal/slot editor in designer, so if you are trying to figure
out how the interactions work, look there too!
Args:
theFlag - boolean indicating the new checked state of the button
Returns:
None.
Raises:
no exceptions explicitly raised."""
if theFlag:
self.pbnAdvanced.setText(self.tr('Hide advanced editor'))
else:
self.pbnAdvanced.setText(self.tr('Show advanced editor'))
self.adjustSize()
# prevents actions being handled twice
@pyqtSignature('bool')
def on_radHazard_toggled(self, theFlag):
"""Automatic slot executed when the hazard radio is toggled.
Args:
theFlag - boolean indicating the new checked state of the button
Returns:
None.
Raises:
no exceptions explicitly raised."""
if not theFlag:
return
self.setCategory('hazard')
self.updateControlsFromList()
# prevents actions being handled twice
@pyqtSignature('bool')
def on_radExposure_toggled(self, theFlag):
"""Automatic slot executed when the hazard radio is toggled on.
Args:
theFlag - boolean indicating the new checked state of the button
Returns:
None.
Raises:
no exceptions explicitly raised."""
if not theFlag:
return
self.setCategory('exposure')
self.updateControlsFromList()
# prevents actions being handled twice
@pyqtSignature('int')
def on_cboSubcategory_currentIndexChanged(self, theIndex=None):
"""Automatic slot executed when the subcategory is changed.
When the user changes the subcategory, we will extract the
subcategory and dataype or unit (depending on if it is a hazard
or exposure subcategory) from the [] after the name.
Args:
None
Returns:
None.
Raises:
no exceptions explicitly raised."""
del theIndex
myItem = self.cboSubcategory.itemData(
self.cboSubcategory.currentIndex()).toString()
myText = str(myItem)
if myText == self.tr('Not Set'):
self.removeItemByKey('subcategory')
return
myTokens = myText.split(' ')
if len(myTokens) < 1:
|
geggo/pyface
|
pyface/ui/qt4/python_editor.py
|
Python
|
bsd-3-clause
| 6,248 | 0.004001 |
#------------------------------------------------------------------------------
# Copyright (c) 2007, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD license.
# However, when used with the GPL version of PyQt the additional terms described in the PyQt GPL exception also apply
#
# Author: Riverbank Computing Limited
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
# Standard library imports.
import sys
# Major package imports.
from pyface.qt import QtCore, QtGui
# Enthought library imports.
from traits.api import Bool, Event, provides, Unicode
# Local imports.
from pyface.i_python_editor import IPythonEditor, MPythonEditor
from pyface.key_pressed_event import KeyPressedEvent
from pyface.widget import Widget
from pyface.ui.qt4.code_editor.code_widget import AdvancedCodeWidget
@provides(IPythonEditor)
class PythonEditor(MPythonEditor, Widget):
""" The toolkit specific implementation of a PythonEditor. See the
IPythonEditor interface for the API documentation.
"""
#### 'IPythonEditor' interface ############################################
dirty = Bool(False)
path = Unicode
show_line_numbers = Bool(True)
#### Events ####
changed = Event
key_pressed = Event(KeyPressedEvent)
###########################################################################
# 'object' interface.
###########################################################################
def __init__(self, parent, **traits):
super(PythonEditor, self).__init__(**traits)
self.control = self._create_control(parent)
###########################################################################
# 'PythonEditor' interface.
###########################################################################
def load(self, path=None):
""" Loads the contents of the editor.
"""
if path is None:
path = self.path
# We will have no path for a new script.
if len(path) > 0:
f = open(self.path, 'r')
text = f.read()
f.close()
else:
text = ''
self.control.code.setPlainText(text)
self.dirty = False
def save(self, path=None):
""" Saves the contents of the editor.
"""
if path is None:
path = self.path
f = open(path, 'w')
f.write(self.control.code.toPlainText())
f.close()
self.dirty = False
def select_line(self, lineno):
""" Selects the specified line.
"""
self.control.code.set_line_column(lineno, 0)
        self.control.code.moveCursor(QtGui.QTextCursor.EndOfLine,
                                     QtGui.QTextCursor.KeepAnchor)
###########################################################################
# Trait handlers.
###########################################################################
    def _path_changed(self):
self._changed_path()
def _show_line_numbers_changed(self):
if self.control is not None:
self.control.code.line_number_widget.setVisible(
self.show_line_numbers)
self.control.code.update_line_number_width()
###########################################################################
# Private interface.
###########################################################################
def _create_control(self, parent):
""" Creates the toolkit-specific control for the widget.
"""
self.control = control = AdvancedCodeWidget(parent)
self._show_line_numbers_changed()
# Install event filter to trap key presses.
event_filter = PythonEditorEventFilter(self, self.control)
self.control.installEventFilter(event_filter)
self.control.code.installEventFilter(event_filter)
# Connect signals for text changes.
control.code.modificationChanged.connect(self._on_dirty_changed)
control.code.textChanged.connect(self._on_text_changed)
# Load the editor's contents.
self.load()
return control
def _on_dirty_changed(self, dirty):
""" Called whenever a change is made to the dirty state of the
document.
"""
self.dirty = dirty
def _on_text_changed(self):
""" Called whenever a change is made to the text of the document.
"""
self.changed = True
class PythonEditorEventFilter(QtCore.QObject):
""" A thin wrapper around the advanced code widget to handle the key_pressed
Event.
"""
def __init__(self, editor, parent):
super(PythonEditorEventFilter, self).__init__(parent)
self.__editor = editor
def eventFilter(self, obj, event):
""" Reimplemented to trap key presses.
"""
if self.__editor.control and obj == self.__editor.control and \
event.type() == QtCore.QEvent.FocusOut:
# Hack for Traits UI compatibility.
self.__editor.control.emit(QtCore.SIGNAL('lostFocus'))
elif self.__editor.control and obj == self.__editor.control.code and \
event.type() == QtCore.QEvent.KeyPress:
# Pyface doesn't seem to be Unicode aware. Only keep the key code
# if it corresponds to a single Latin1 character.
kstr = event.text()
            try:
                kcode = ord(str(kstr))
            except (TypeError, ValueError, UnicodeEncodeError):
                # Not a single Latin-1 character; fall back to 0.
                kcode = 0
mods = event.modifiers()
            self.__editor.key_pressed = KeyPressedEvent(
alt_down = ((mods & QtCore.Qt.AltModifier) ==
QtCore.Qt.AltModifier),
control_down = ((mods & QtCore.Qt.ControlModifier) ==
QtCore.Qt.ControlModifier),
shift_down = ((mods & QtCore.Qt.ShiftModifier) ==
QtCore.Qt.ShiftModifier),
key_code = kcode,
event = event)
return super(PythonEditorEventFilter, self).eventFilter(obj, event)
|
jordanemedlock/psychtruths
|
temboo/core/Library/Facebook/Actions/Fitness/Walks/UpdateWalk.py
|
Python
|
apache-2.0
| 4,891 | 0.005316 |
# -*- coding: utf-8 -*-
###############################################################################
#
# UpdateWalk
# Updates an existing walk action.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateWalk(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdateWalk Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(UpdateWalk, self).__init__(temboo_session, '/Library/Facebook/Actions/Fitness/Walks/UpdateWalk')
def new_input_set(self):
return UpdateWalkInputSet()
def _make_result_set(self, result, path):
return UpdateWalkResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdateWalkChoreographyExecution(session, exec_id, path)
class UpdateWalkInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdateWalk
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final step of the OAuth process.)
"""
super(UpdateWalkInputSet, self)._set_input('AccessToken', value)
def set_ActionID(self, value):
"""
Set the value of the ActionID input for this Choreo. ((required, string) The id of the action to update.)
"""
super(UpdateWalkInputSet, self)._set_input('ActionID', value)
def set_Course(self, value):
"""
Set the value of the Course input for this Choreo. ((optional, string) The URL or ID for an Open Graph object representing the course.)
"""
super(UpdateWalkInputSet, self)._set_input('Course', value)
def set_EndTime(self, value):
"""
Set the value of the EndTime input for this Choreo. ((optional, date) The time that the user ended the action (e.g. 2013-06-24T18:53:35+0000).)
"""
super(UpdateWalkInputSet, self)._set_input('EndTime', value)
def set_ExpiresIn(self, value):
"""
Set the value of the ExpiresIn input for this Choreo. ((optional, integer) The amount of time (in milliseconds) from the publish_time that the action will expire.)
"""
super(UpdateWalkInputSet, self)._set_input('ExpiresIn', value)
def set_Message(self, value):
"""
Set the value of the Message input for this Choreo. ((optional, string) A message attached to this action. Setting this parameter requires enabling of message capabilities.)
"""
super(UpdateWalkInputSet, self)._set_input('Message', value)
def set_Place(self, value):
"""
Set the value of the Place input for this Choreo. ((optional, string) The URL or ID for an Open Graph object representing the location associated with this action.)
"""
super(UpdateWalkInputSet, self)._set_input('Place', value)
def set_Tags(self, value):
"""
Set the value of the Tags input for this Choreo. ((optional, string) A comma separated list of other profile IDs that also performed this action.)
"""
super(UpdateWalkInputSet, self)._set_input('Tags', value)
class UpdateWalkResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdateWalk Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((boolean) The response from Facebook.)
"""
return self._output.get('Response', None)
class UpdateWalkChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateWalkResultSet(response, path)
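# Usage sketch (illustrative, not part of the original SDK file). Assumes a
# valid TembooSession; the credential and ID strings below are placeholders.
#
# from temboo.core.session import TembooSession
# session = TembooSession('ACCOUNT', 'APP_KEY_NAME', 'APP_KEY_VALUE')
# choreo = UpdateWalk(session)
# inputs = choreo.new_input_set()
# inputs.set_AccessToken('FACEBOOK_OAUTH_TOKEN')
# inputs.set_ActionID('10101')
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())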
|
antoinecarme/pyaf
|
tests/artificial/transf_Quantization/trend_PolyTrend/cycle_12/ar_12/test_artificial_32_Quantization_PolyTrend_12_12_0.py
|
Python
|
bsd-3-clause
| 267 | 0.086142 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32, FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12);
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/traitlets/tests/test_traitlets.py
|
Python
|
bsd-2-clause
| 66,467 | 0.005793 |
# encoding: utf-8
"""Tests for traitlets.traitlets."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#
# Adapted from enthought.traits, Copyright (c) Enthought, Inc.,
# also under the terms of the Modified BSD License.
import pickle
import re
import sys
from ._warnings import expected_warnings
from unittest import TestCase
import pytest
from pytest import mark
from traitlets import (
HasTraits, MetaHasTraits, TraitType, Any, Bool, CBytes, Dict, Enum,
Int, CInt, Long, CLong, Integer, Float, CFloat, Complex, Bytes, Unicode,
TraitError, Union, All, Undefined, Type, This, Instance, TCPAddress,
List, Tuple, ObjectName, DottedObjectName, CRegExp, link, directional_link,
ForwardDeclaredType, ForwardDeclaredInstance, validate, observe, default,
observe_compat, BaseDescriptor, HasDescriptors,
)
import six
def change_dict(*ordered_values):
change_names = ('name', 'old', 'new', 'owner', 'type')
return dict(zip(change_names, ordered_values))
#-----------------------------------------------------------------------------
# Helper classes for testing
#-----------------------------------------------------------------------------
class HasTraitsStub(HasTraits):
def notify_change(self, change):
self._notify_name = change['name']
self._notify_old = change['old']
self._notify_new = change['new']
self._notify_type = change['type']
#-----------------------------------------------------------------------------
# Test classes
#-----------------------------------------------------------------------------
class TestTraitType(TestCase):
def test_get_undefined(self):
class A(HasTraits):
a = TraitType
a = A()
with self.assertRaises(TraitError):
a.a
def test_set(self):
class A(HasTraitsStub):
a = TraitType
a = A()
a.a = 10
self.assertEqual(a.a, 10)
self.assertEqual(a._notify_name, 'a')
self.assertEqual(a._notify_old, Undefined)
self.assertEqual(a._notify_new, 10)
def test_validate(self):
class MyTT(TraitType):
def validate(self, inst, value):
return -1
class A(HasTraitsStub):
tt = MyTT
a = A()
a.tt = 10
self.assertEqual(a.tt, -1)
def test_default_validate(self):
class MyIntTT(TraitType):
def validate(self, obj, value):
if isinstance(value, int):
return value
self.error(obj, value)
class A(HasTraits):
tt = MyIntTT(10)
a = A()
self.assertEqual(a.tt, 10)
# Defaults are validated when the HasTraits is instantiated
class B(HasTraits):
tt = MyIntTT('bad default')
self.assertRaises(TraitError, B)
def test_info(self):
class A(HasTraits):
tt = TraitType
a = A()
self.assertEqual(A.tt.info(), 'any value')
def test_error(self):
class A(HasTraits):
tt = TraitType
a = A()
self.assertRaises(TraitError, A.tt.error, a, 10)
def test_deprecated_dynamic_initializer(self):
class A(HasTraits):
x = Int(10)
def _x_default(self):
return 11
class B(A):
x = Int(20)
class C(A):
def _x_default(self):
return 21
a = A()
self.assertEqual(a._trait_values, {})
self.assertEqual(a.x, 11)
self.assertEqual(a._trait_values, {'x': 11})
b = B()
self.assertEqual(b.x, 20)
self.assertEqual(b._trait_values, {'x': 20})
c = C()
self.assertEqual(c._trait_values, {})
self.assertEqual(c.x, 21)
self.assertEqual(c._trait_values, {'x': 21})
# Ensure that the base class remains unmolested when the _default
# initializer gets overridden in a subclass.
a = A()
c = C()
self.assertEqual(a._trait_values, {})
self.assertEqual(a.x, 11)
self.assertEqual(a._trait_values, {'x': 11})
def test_dynamic_initializer(self):
class A(HasTraits):
            x = Int(10)
            @default('x')
            def _default_x(self):
                return 11
class B(A):
x = Int(20)
class C(A):
@default('x')
def _default_x(self):
return 21
a = A()
self.assertEqual(a._trait_values, {})
self.assertEqual(a.x, 11)
self.assertEqual(a._trait_values, {'x': 11})
b = B()
self.assertEqual(b.x, 20)
self.assertEqual(b._trait_values, {'x': 20})
c = C()
self.assertEqual(c._trait_values, {})
self.assertEqual(c.x, 21)
self.assertEqual(c._trait_values, {'x': 21})
# Ensure that the base class remains unmolested when the _default
# initializer gets overridden in a subclass.
a = A()
c = C()
self.assertEqual(a._trait_values, {})
self.assertEqual(a.x, 11)
self.assertEqual(a._trait_values, {'x': 11})
def test_tag_metadata(self):
class MyIntTT(TraitType):
metadata = {'a': 1, 'b': 2}
a = MyIntTT(10).tag(b=3, c=4)
self.assertEqual(a.metadata, {'a': 1, 'b': 3, 'c': 4})
def test_metadata_localized_instance(self):
class MyIntTT(TraitType):
metadata = {'a': 1, 'b': 2}
a = MyIntTT(10)
b = MyIntTT(10)
a.metadata['c'] = 3
# make sure that changing a's metadata didn't change b's metadata
self.assertNotIn('c', b.metadata)
def test_union_metadata(self):
class Foo(HasTraits):
bar = (Int().tag(ta=1) | Dict().tag(ta=2, ti='b')).tag(ti='a')
foo = Foo()
# At this point, no value has been set for bar, so value-specific
# is not set.
self.assertEqual(foo.trait_metadata('bar', 'ta'), None)
self.assertEqual(foo.trait_metadata('bar', 'ti'), 'a')
foo.bar = {}
self.assertEqual(foo.trait_metadata('bar', 'ta'), 2)
self.assertEqual(foo.trait_metadata('bar', 'ti'), 'b')
foo.bar = 1
self.assertEqual(foo.trait_metadata('bar', 'ta'), 1)
self.assertEqual(foo.trait_metadata('bar', 'ti'), 'a')
def test_union_default_value(self):
class Foo(HasTraits):
bar = Union([Dict(), Int()], default_value=1)
foo = Foo()
self.assertEqual(foo.bar, 1)
def test_deprecated_metadata_access(self):
class MyIntTT(TraitType):
metadata = {'a': 1, 'b': 2}
a = MyIntTT(10)
with expected_warnings(["use the instance .metadata dictionary directly"]*2):
a.set_metadata('key', 'value')
v = a.get_metadata('key')
self.assertEqual(v, 'value')
with expected_warnings(["use the instance .help string directly"]*2):
a.set_metadata('help', 'some help')
v = a.get_metadata('help')
self.assertEqual(v, 'some help')
def test_trait_types_deprecated(self):
with expected_warnings(["Traits should be given as instances"]):
class C(HasTraits):
t = Int
def test_trait_types_list_deprecated(self):
with expected_warnings(["Traits should be given as instances"]):
class C(HasTraits):
t = List(Int)
def test_trait_types_tuple_deprecated(self):
with expected_warnings(["Traits should be given as instances"]):
class C(HasTraits):
t = Tuple(Int)
def test_trait_types_dict_deprecated(self):
with expected_warnings(["Traits should be given as instances"]):
class C(HasTraits):
t = Dict(Int)
class TestHasDescriptorsMeta(TestCase):
def test_metaclass(self):
self.assertEqual(type(HasTraits), MetaHasTraits)
class A(HasTraits):
a = Int()
a = A()
self.assertEqual(type(a.__class__), MetaHasTraits)
self.assertEqual(a.a,0)
|
sotetsuk/memozo
|
memozo/memozo.py
|
Python
|
mit
| 5,664 | 0.002119 |
import os
import functools
import codecs
import pickle
from . import utils
class Memozo(object):
def __init__(self, path='./'):
self.base_path = path
        memozo_file = os.path.join(self.base_path, utils.MEMOZO_FILE_NAME)
if not os.path.exists(memozo_file):
with codecs.open(memozo_file, 'w', encoding=utils.ENCODING) as f:
f.write('datetime\thash\tfile name\tfunction name\tparameters\n')
f.write('--------\t----\t---------\t-------------\t----------\n')
def __call__(self, name=None, ext='file'):
def wrapper(func):
_name = func.__name__ if name is None else name
@functools.wraps(func)
def _wrapper(*args, **kwargs):
bound_args = utils.get_bound_args(func, *args, **kwargs)
args_str = utils.get_args_str(bound_args)
sha1 = utils.get_hash(_name, func.__name__, args_str)
file_path = os.path.join(self.base_path, "{}_{}.{}".format(_name, sha1, ext))
if utils.log_exisits(self.base_path, _name, func.__name__, args_str) and os.path.exists(file_path):
with open(file_path, 'r') as f:
obj = f.readlines()
return obj
obj = func(*args, **kwargs)
with open(file_path, 'w') as f:
f.writelines(obj)
utils.write(self.base_path, _name, func.__name__, args_str)
return obj
return _wrapper
return wrapper
def codecs(self, name=None, ext='file', encoding=None):
def wrapper(func):
_name = func.__name__ if name is None else name
@functools.wraps(func)
def _wrapper(*args, **kwargs):
bound_args = utils.get_bound_args(func, *args, **kwargs)
args_str = utils.get_args_str(bound_args)
sha1 = utils.get_hash(_name, func.__name__, args_str)
file_path = os.path.join(self.base_path, "{}_{}.{}".format(_name, sha1, ext))
if utils.log_exisits(self.base_path, _name, func.__name__, args_str) and os.path.exists(file_path):
with codecs.open(file_path, 'r', encoding) as f:
obj = f.readlines()
return obj
obj = func(*args, **kwargs)
with codecs.open(file_path, 'w', encoding) as f:
f.writelines(obj)
utils.write(self.base_path, _name, func.__name__, args_str)
return obj
return _wrapper
return wrapper
def generator(self, name=None, ext='file', line_type='str', delimiter='\t'):
def wrapper(func):
_name = func.__name__ if name is None else name
@functools.wraps(func)
def _wrapper(*args, **kwargs):
# get cached data path
bound_args = utils.get_bound_args(func, *args, **kwargs)
args_str = utils.get_args_str(bound_args)
sha1 = utils.get_hash(_name, func.__name__, args_str)
file_path = os.path.join(self.base_path, "{}_{}.{}".format(_name, sha1, ext))
# if cached data exists, return generator using cached data
if utils.log_exisits(self.base_path, _name, func.__name__, args_str) and os.path.exists(file_path):
def gen_cached_data():
with codecs.open(file_path, 'r', utils.ENCODING) as f:
for line in f:
if line_type == 'tuple':
line = line.split(delimiter)
yield line
return gen_cached_data()
gen = func(*args, **kwargs)
                # if no cached data exists, the generator not only yields each value but also saves it at each iteration
def generator_with_cache(gen, file_path):
with codecs.open(file_path, 'w', utils.ENCODING) as f:
for e in gen:
if line_type == 'str':
f.write(e)
elif line_type == 'tuple':
f.write(delimiter.join(e) + '\n')
yield e
utils.write(self.base_path, _name, func.__name__, args_str)
return generator_with_cache(gen, file_path)
return _wrapper
return wrapper
def pickle(self, name=None, ext='pickle', protocol=None):
def wrapper(func):
_name = func.__name__ if name is None else name
@functools.wraps(func)
def _wrapper(*args, **kwargs):
bound_args = utils.get_bound_args(func, *args, **kwargs)
args_str = utils.get_args_str(bound_args)
sha1 = utils.get_hash(_name, func.__name__, args_str)
file_path = os.path.join(self.base_path, "{}_{}.{}".format(_name, sha1, ext))
if utils.log_exisits(self.base_path, _name, func.__name__, args_str) and os.path.exists(file_path):
with open(file_path, 'rb') as f:
obj = pickle.load(f)
return obj
obj = func(*args, **kwargs)
with open(file_path, 'wb') as f:
pickle.dump(obj, f, protocol=protocol)
utils.write(self.base_path, _name, func.__name__, args_str)
return obj
return _wrapper
return wrapper
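# Usage sketch (illustrative, not part of the original file): caching an
# expensive function with the pickle-based decorator above. The import path,
# function name and argument are assumptions for the example.
#
# from memozo import Memozo
# m = Memozo('./cache/')
#
# @m.pickle(name='squares')
# def squares(n):
#     return [i * i for i in range(n)]
#
# squares(10000)  # first call computes and pickles the result
# squares(10000)  # second call loads the cached pickle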
|
snap-stanford/ogb
|
ogb/graphproppred/__init__.py
|
Python
|
mit
| 302 | 0 |
from .evaluate import Evaluator
from .dataset import GraphPropPredDataset
try:
    from .dataset_pyg import PygGraphPropPredDataset
except ImportError:
pass
try:
from .dataset_dgl import DglGraphPropPredDataset
from .dataset_dgl import collate_dgl
except (ImportError, OSError):
pass
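# Usage sketch (illustrative, not part of the original file). The dataset
# name follows OGB's naming convention and is an assumption here; the first
# call would download the dataset.
#
# dataset = GraphPropPredDataset(name='ogbg-molhiv')
# split_idx = dataset.get_idx_split()
# evaluator = Evaluator(name='ogbg-molhiv')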
|
WarmongeR1/feedly-filter
|
apps/filters/management/commands/load_data.py
|
Python
|
mit
| 1,534 | 0.001304 |
# -*- encoding: utf-8 -*-
import csv
from allauth.socialaccount.models import SocialToken
from django.core.management.base import BaseCommand
from apps.filters.filter import get_api
import os
from django.conf import settings
from yaml import load
from apps.filters.models import Collection, Entry, Category
class Command(BaseCommand):
    help = 'Loads the collection fixtures (YAML index plus CSV files) into the database'
def handle(self, *args, **options):
folder = settings.APPS_DIR.path('filters', 'data').root
config_path = os.path.join(folder, 'collections.yaml')
assert os.path.exists(config_path)
with open(config_path, 'r') as fio:
config = load(fio.read())
for item in config:
collection, _ = Collection.objects.get_or_create(
                title=item['name'],
            )
            if not collection.description:
                collection.description = item['description']
                collection.save()
            with open(os.path.join(folder, item['file']), 'r') as fio:
reader = csv.DictReader(fio)
for i, row in enumerate(reader):
categories = []
for category in row['category'].split(','):
categories.append(Category.objects.get_or_create(title=category.strip())[0])
entry, _ = Entry.objects.get_or_create(value=row['value'], type=row['type'])
entry.category.add(*categories)
collection.entries.add(entry)
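# Data-file format sketch (inferred from the code above, not part of the
# original file): collections.yaml lists collections, each pointing at a CSV
# whose rows carry `value`, `type` and a comma-separated `category` column.
# The concrete names below are invented.
#
# # collections.yaml
# # - name: Ad servers
# #   description: Known ad-serving hosts
# #   file: ad_servers.csv
#
# # ad_servers.csv
# # value,type,category
# # ads.example.com,domain,"ads, tracking"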
|
CtheSky/pycparser
|
examples/cdecl.py
|
Python
|
bsd-3-clause
| 6,339 | 0.001893 |
#-----------------------------------------------------------------
# pycparser: cdecl.py
#
# Example of the CDECL tool using pycparser. CDECL "explains" C type
# declarations in plain English.
#
# The AST generated by pycparser from the given declaration is traversed
# recursively to build the explanation. Note that the declaration must be a
# valid external declaration in C. All the types used in it must be defined with
# typedef, or parsing will fail. The definition can be arbitrary - pycparser
# doesn't really care what the type is defined to be, only that it's a type.
#
# For example:
#
# c_decl = 'typedef int Node; const Node* (*ar)[10];'
#
# explain_c_declaration(c_decl)
# => ar is a pointer to array[10] of pointer to const Node
#
# struct and typedef are expanded when the corresponding arguments are set:
#
# explain_c_declaration(c_decl, expand_typedef=True)
# => ar is a pointer to array[10] of pointer to const int
#
# c_decl = 'struct P {int x; int y;} p;'
#
# explain_c_declaration(c_decl)
# => p is a struct P
#
# explain_c_declaration(c_decl, expand_struct=True)
# => p is a struct P containing {x is a int, y is a int}
#
# Eli Bendersky [http://eli.thegreenplace.net]
# License: BSD
#-----------------------------------------------------------------
import copy
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.extend(['.', '..'])
from pycparser import c_parser, c_ast
def explain_c_declaration(c_decl, expand_struct=False, expand_typedef=False):
""" Parses the declaration in c_decl and returns a text
explanation as a string.
The last external node of the string is used, to allow
earlier typedefs for used types.
"""
parser = c_parser.CParser()
try:
node = parser.parse(c_decl, filename='<stdin>')
except c_parser.ParseError:
e = sys.exc_info()[1]
return "Parse error:" + str(e)
if (not isinstance(node, c_ast.FileAST) or
not isinstance(node.ext[-1], c_ast.Decl)
):
return "Not a valid declaration"
try:
expanded = expand_struct_typedef(node.ext[-1], node,
expand_struct=expand_struct,
expand_typedef=expand_typedef)
except Exception as e:
return "Not a valid declaration: " + str(e)
return _explain_decl_node(expanded)
def _explain_decl_node(decl_node):
""" Receives a c_ast.Decl note and returns its explanation in
English.
"""
storage = ' '.join(decl_node.storage) + ' ' if decl_node.storage else ''
return (decl_node.name +
" is a " +
storage +
_explain_type(decl_node.type))
def _explain_type(decl):
""" Recursively explains a type decl node
"""
typ = type(decl)
if typ == c_ast.TypeDecl:
quals = ' '.join(decl.quals) + ' ' if decl.quals else ''
return quals + _explain_type(decl.type)
elif typ == c_ast.Typename or typ == c_ast.Decl:
return _explain_type(decl.type)
elif typ == c_ast.IdentifierType:
return ' '.join(decl.names)
elif typ == c_ast.PtrDecl:
quals = ' '.join(decl.quals) + ' ' if decl.quals else ''
return quals + 'pointer to ' + _explain_type(decl.type)
elif typ == c_ast.ArrayDecl:
arr = 'array'
if decl.dim: arr += '[%s]' % decl.dim.value
return arr + " of " + _explain_type(decl.type)
elif typ == c_ast.FuncDecl:
if decl.args:
params = [_explain_type(param) for param in decl.args.params]
args = ', '.join(params)
else:
args = ''
return ('function(%s) returning ' % (args) +
_explain_type(decl.type))
elif typ == c_ast.Struct:
decls = [_explain_decl_node(mem_decl) for mem_decl in decl.decls]
members = ', '.join(decls)
return ('struct%s ' % (' ' + decl.name if decl.name else '') +
('containing {%s}' % members if members else ''))
def expand_struct_typedef(cdecl, file_ast, expand_struct=False, expand_typedef=False):
"""Expand struct & typedef in context of file_ast and return a new expanded node"""
decl_copy = copy.deepcopy(cdecl)
_expand_in_place(decl_copy, file_ast, expand_struct, expand_typedef)
return decl_copy
def _expand_in_place(decl, file_ast, expand_struct=False, expand_typedef=False):
"""Recursively expand struct & typedef in place, throw Exception if
undeclared struct or typedef are used
"""
typ = type(decl)
if typ in (c_ast.Decl, c_ast.TypeDecl, c_ast.PtrDecl, c_ast.ArrayDecl):
decl.type = _expand_in_place(decl.type, file_ast, expand_struct, expand_typedef)
elif typ == c_ast.Struct:
if not decl.decls:
struct = _find_struct(decl.name, file_ast)
if not struct:
raise Exception('using undeclared struct %s' % decl.name)
decl.decls = struct.decls
for i, mem_decl in enumerate(decl.decls):
decl.decls[i] = _expand_in_place(mem_decl, file_ast, expand_struct, expand_typedef)
if not expand_struct:
decl.decls = []
elif (typ == c_ast.IdentifierType and
decl.names[0] not in ('int', 'char')):
typedef = _find_typedef(decl.names[0], file_ast)
if not typedef:
raise Exception('using undeclared type %s' % decl.names[0])
if expand_typedef:
return typedef.type
return decl
def _find_struct(name, file_ast):
"""Receives a struct name and return declared struct object in file_ast
"""
for node in file_ast.ext:
if (type(node) == c_ast.Decl and
type(node.type) == c_ast.Struct and
node.type.name == name):
return node.type
def _find_typedef(name, file_ast):
"""Receives a type name and return typedef object in file_ast
"""
for node in file_ast.ext:
if type(node) == c_ast.Typedef and node.name == name:
return node
if __name__ == "__main__":
if len(sys.argv) > 1:
c_decl = sys.argv[1]
else:
c_decl = "char *(*(**foo[][8])())[];"
print("Explaining the declaration: " + c_decl + "\n")
print(explain_c_declaration(c_decl) + "\n")
|
amahabal/PySeqsee
|
farg/core/exceptions.py
|
Python
|
gpl-3.0
| 2,834 | 0.008116 |
# Copyright (C) 2011, 2012 Abhijit Mahabal
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this
# program. If not, see <http://www.gnu.org/licenses/>
"""Exception classes in FARG core.
.. Note::
Exceptions specific to an individual application should live in that application's
directory.
"""
from itertools import takewhile
import traceback
class FargError(Exception):
"""Base class for untrappable errors (indicating bugs)."""
def __init__(self, msg=''):
Exception.__init__(self)
#: Message to be displayed.
self.msg = msg
self.stack_trace = list(takewhile((lambda x: x.find('FargError.__init__') == -1),
traceback.format_stack(limit=8)))
print('FargError: %s:%s' % (msg, self.stack_trace))
def __str__(self):
return 'FargError:' + self.msg + str(self.stack_trace)
class FargException(Exception):
"""Base class for FARG-specific exceptions."""
pass
class BatchModeStopException(Exception):
"""Base class of ways of stopping during batch mode.
Look at the subclasses (in this file) for details.
"""
def __init__(self, *, codelet_count):
Exception.__init__(self)
#: Number of codelets that had been run when the exception was raised.
self.codelet_count = codelet_count
class StoppingConditionMet(BatchModeStopException):
"""
|
When a stopping condition is specified, this indicates that it has been r
|
eached."""
def __str__(self):
return 'StoppingConditionMet after %d codelets' % self.codelet_count
class SuccessfulCompletion(BatchModeStopException):
"""Raised when the problem has been fully solved.
  What "fully solved" means depends on the application. For Seqsee, this
  currently means "Sequence has been extended to all known terms."
"""
pass
class AnswerFoundException(BatchModeStopException):
"""Raised by a subspace when it believes that an answer has been found."""
def __init__(self, answer, *, codelet_count):
BatchModeStopException.__init__(self, codelet_count=codelet_count)
self.answer = answer
class NoAnswerException(BatchModeStopException):
"""Raised by a subspace when it is realized that no answer is forthcoming."""
def __init__(self, *, codelet_count):
BatchModeStopException.__init__(self, codelet_count=codelet_count)
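# Usage sketch (illustrative, not part of the original file): batch-mode
# control flow built on the classes above; the answer and count are invented.
#
# try:
#     raise AnswerFoundException('42', codelet_count=1000)
# except BatchModeStopException as e:
#     print('stopped after %d codelets' % e.codelet_count)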
|
openstack/smaug
|
karbor/services/operationengine/engine/triggers/timetrigger/__init__.py
|
Python
|
apache-2.0
| 1,625 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
time_trigger_opts = [
cfg.IntOpt('min_interval',
default=60 * 60,
help='The minimum interval of two adjacent time points. '
'min_interval >= (max_window_time * 2)'),
cfg.IntOpt('min_window_time',
default=900,
help='The minimum window time'),
cfg.IntOpt('max_window_time',
default=1800,
help='The maximum window time'),
cfg.StrOpt('time_format',
default='calendar',
choices=['crontab', 'calendar'],
help='The type of time format which is used to compute time'),
    cfg.IntOpt('trigger_poll_interval',
               default=15,
               help='Interval, in seconds, in which Karbor will poll for '
                    'trigger events'),
cfg.StrOpt('scheduling_strategy',
default='multi_node',
help='Time trigger scheduling strategy '
)
]
CONF = cfg.CONF
CONF.register_opts(time_trigger_opts)
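# Usage sketch (illustrative, not part of the original file): after
# register_opts() the values are plain attributes on CONF and can be
# overridden in karbor.conf under [DEFAULT].
#
# assert CONF.max_window_time * 2 <= CONF.min_interval
# print(CONF.time_format)  # 'calendar' unless overridden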
|
SunPower/pvfactors
|
pvfactors/geometry/pvrow.py
|
Python
|
bsd-3-clause
| 31,109 | 0 |
"""Module will classes related to PV row geometries"""
import numpy as np
from pvfactors.config import COLOR_DIC
from pvfactors.geometry.base import \
BaseSide, _coords_from_center_tilt_length, PVSegment
from shapely.geometry import GeometryCollection, LineString
from pvfactors.geometry.timeseries import \
TsShadeCollection, TsLineCoords, TsSurface
from pvlib.tools import cosd, sind
class TsPVRow(object):
"""Timeseries PV row class: this class is a vectorized version of the
PV row geometries. The coordinates and attributes (front and back sides)
are all vectorized."""
def __init__(self, ts_front_side, ts_back_side, xy_center, index=None,
full_pvrow_coords=None):
"""Initialize timeseries PV row with its front and back sides.
Parameters
----------
ts_front_side : :py:class:`~pvfactors.geometry.pvrow.TsSide`
Timeseries front side of the PV row
ts_back_side : :py:class:`~pvfactors.geometry.pvrow.TsSide`
Timeseries back side of the PV row
xy_center : tuple of float
x and y coordinates of the PV row center point (invariant)
index : int, optional
index of the PV row (Default = None)
full_pvrow_coords : \
:py:class:`~pvfactors.geometry.timeseries.TsLineCoords`, optional
Timeseries coordinates of the full PV row, end to end
(Default = None)
"""
self.front = ts_front_side
self.back = ts_back_side
self.xy_center = xy_center
self.index = index
self.full_pvrow_coords = full_pvrow_coords
@classmethod
def from_raw_inputs(cls, xy_center, width, rotation_vec,
cut, shaded_length_front, shaded_length_back,
index=None, param_names=None):
"""Create timeseries PV row using raw inputs.
Note: shading will always be zero when pv rows are flat.
Parameters
----------
xy_center : tuple of float
x and y coordinates of the PV row center point (invariant)
width : float
width of the PV rows [m]
rotation_vec : np.ndarray
Timeseries rotation values of the PV row [deg]
cut : dict
Discretization scheme of the PV row. Eg {'front': 2, 'back': 4}.
Will create segments of equal length on the designated sides.
shaded_length_front : np.ndarray
Timeseries values of front side shaded length [m]
shaded_length_back : np.ndarray
Timeseries values of back side shaded length [m]
index : int, optional
Index of the pv row (default = None)
param_names : list of str, optional
List of names of surface parameters to use when creating geometries
(Default = None)
Returns
-------
New timeseries PV row object
"""
# Calculate full pvrow coords
pvrow_coords = TsPVRow._calculate_full_coords(
xy_center, width, rotation_vec)
# Calculate normal vectors
dx = pvrow_coords.b2.x - pvrow_coords.b1.x
dy = pvrow_coords.b2.y - pvrow_coords.b1.y
normal_vec_front = np.array([-dy, dx])
# Calculate front side coords
ts_front = TsSide.from_raw_inputs(
xy_center, width, rotation_vec, cut.get('front', 1),
shaded_length_front, n_vector=normal_vec_front,
param_names=param_names)
# Calculate back side coords
ts_back = TsSide.from_raw_inputs(
xy_center, width, rotation_vec, cut.get('back', 1),
shaded_length_back, n_vector=-normal_vec_front,
param_names=param_names)
return cls(ts_front, ts_back, xy_center, index=index,
full_pvrow_coords=pvrow_coords)
@staticmethod
def _calculate_full_coords(xy_center, width, rotation):
"""Method to calculate the full PV row coordinaltes.
Parameters
----------
xy_center : tuple of float
x and y coordinates of the PV row center point (invariant)
width : float
width of the PV rows [m]
rotation : np.ndarray
Timeseries rotation values of the PV row [deg]
Returns
-------
coords: :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Timeseries coordinates of full PV row
"""
x_center, y_center = xy_center
radius = width / 2.
# Calculate coords
x1 = radius * cosd(rotation + 180.) + x_center
y1 = radius * sind(rotation + 180.) + y_center
x2 = radius * cosd(rotation) + x_center
y2 = radius * sind(rotation) + y_center
coords = TsLineCoords.from_array(np.array([[x1, y1], [x2, y2]]))
return coords
def surfaces_at_idx(self, idx):
"""Get all PV surface geometries in timeseries PV row for a certain
index.
Parameters
----------
idx : int
Index to use to generate PV surface geometries
Returns
-------
list of :py:class:`~pvfactors.geometry.base.PVSurface` objects
List of PV surfaces
"""
pvrow = self.at(idx)
return pvrow.all_surfaces
def plot_at_idx(self, idx, ax, color_shaded=COLOR_DIC['pvrow_shaded'],
color_illum=COLOR_DIC['pvrow_illum'],
with_surface_index=False):
"""Plot timeseries PV row at a certain index.
Parameters
----------
idx : int
Index to use to plot timeseries PV rows
ax : :py:class:`matplotlib.pyplot.axes` object
Axes for plotting
color_shaded : str, optional
Color to use for plotting the shaded surfaces (Default =
COLOR_DIC['pvrow_shaded'])
        color_illum : str, optional
Color to use for plotting the illuminated surfaces (Default =
COLOR_DIC['pvrow_illum'])
with_surface_index : bool, optional
Plot the surfaces with their index values (Default = False)
"""
pvrow = self.at(idx)
pvrow.plot(ax, color_shaded=color_shaded,
                   color_illum=color_illum, with_index=with_surface_index)
def at(self, idx):
"""Generate a PV row geometry for the desired index.
Parameters
----------
idx : int
            Index to use to generate PV row geometry
Returns
-------
pvrow : :py:class:`~pvfactors.geometry.pvrow.PVRow`
"""
front_geom = self.front.at(idx)
back_geom = self.back.at(idx)
original_line = LineString(
self.full_pvrow_coords.as_array[:, :, idx])
pvrow = PVRow(front_side=front_geom, back_side=back_geom,
index=self.index, original_linestring=original_line)
return pvrow
def update_params(self, new_dict):
"""Update timeseries surface parameters of the PV row.
Parameters
----------
new_dict : dict
Parameters to add or update for the surfaces
"""
self.front.update_params(new_dict)
self.back.update_params(new_dict)
@property
def n_ts_surfaces(self):
"""Number of timeseries surfaces in the ts PV row"""
return self.front.n_ts_surfaces + self.back.n_ts_surfaces
@property
def all_ts_surfaces(self):
"""List of all timeseries surfaces"""
return self.front.all_ts_surfaces + self.back.all_ts_surfaces
@property
def centroid(self):
"""Centroid point of the timeseries pv row"""
centroid = (self.full_pvrow_coords.centroid
if self.full_pvrow_coords is not None else None)
return centroid
@property
def length(self):
"""Length of both sides of the timeseries PV row"""
return self.front.length + self.back.length
@property
def highest_point(self):
"""Timeseries point coordinates of highest point of PV row"""
        high_pt = (self.full_pvrow_coords.highest_point
                   if self.full_pvrow_coords is not None else None)
        return high_pt
|
manasapte/pants
|
tests/python/pants_test/backend/jvm/tasks/jvm_binary_task_test_base.py
|
Python
|
apache-2.0
| 3,154 | 0.006658 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants_test.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase
class JvmBinaryTaskTestBase(JvmToolTaskTestBase):
"""
:API: public
"""
def create_artifact(self, org, name, rev, classifier=None, ext=None, materialize=True):
"""
:API: public
:param string org: The maven dependency `groupId`.
:param string name: The maven dependency `artifactId`.
:param string rev: The maven dependency `version`.
:param string classifier: The maven dependency `classifier`.
:param string ext: There is no direct maven parallel, but the maven `packaging` value of the
depended-on artifact for simple cases, and in more complex cases the
extension of the artifact. For example, 'bundle' packaging implies an
extension of 'jar'. Defaults to 'jar'.
:param bool materialize: `False` to populate the returned resolved_jar with a `pants_path` that
does not exist; defaults to `True` and `touch`es the `pants_path`.
:returns: A resolved jar describing the artifact.
:rtype: :class:`pants.backend.jvm.jar_dependency_utils.ResolvedJar`
"""
coordinate = M2Coordinate(org=org, name=name, rev=rev, classifier=classifier, ext=ext)
cache_path = 'not/a/real/cache/path'
jar_name = coordinate.artifact_filename
pants_path = self.create_workdir_file(jar_name) if materialize else os.path.join(self.pants_workdir,
jar_name)
return ResolvedJar(coordinate=coordinate, cache_path=cache_path, pants_path=pants_path)
def iter_files(self, dir_path):
"""Returns an iterator over the files found under the given `dir_path`.
:API: public
:param string dir_path: The path of the directory tree to scan for files.
    :returns: An iterator of the relative paths of files found under `dir_path`.
    :rtype: :class:`collections.Iterator` of string
    """
for root_dir, _, files in os.walk(dir_path):
for f in files:
yield os.path.relpath(os.path.join(root_dir, f), dir_path)
def ensure_classpath_products(self, context):
"""Gets or creates the classpath products expected by `JvmBinaryTask`.
:API: public
:param context: The pants run context to get/create/associate classpath products with.
:type context: :class:`pants.goal.context.Context`
:returns: The classpath products associated with the given `context`
:rtype: :class:`pants.backend.jvm.tasks.classpath_products.ClasspathProducts`
"""
return context.products.get_data('runtime_classpath', init_func=ClasspathProducts.init_func(self.pants_workdir))
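# Usage sketch (illustrative, not part of the original file): a test subclass
# materializing a fake resolved jar; the coordinates below are invented.
#
# class MyJvmBinaryTest(JvmBinaryTaskTestBase):
#   def test_artifact_on_disk(self):
#     jar = self.create_artifact(org='org.example', name='lib', rev='1.0')
#     self.assertTrue(os.path.exists(jar.pants_path))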
|
abhiatgithub/shogun-toolbox
|
examples/undocumented/python_modular/graphical/cluster_kpp.py
|
Python
|
gpl-3.0
| 1,891 | 0.050238 |
"""Graphical example illustrating improvement of convergence of KMeans
when cluster centers are initialized by KMeans++ algorithm.
In this example, 4 vertices of a rectangle are chosen: (0,0) (0,100) (10,0) (10,100).
There are 500 points normally distributed about each vertex.
Therefore, the ideal cluster centers for k=2 are the global minima, i.e. (5,0) and (5,100).
Written (W) 2014 Parijat Mazumdar
"""
from pylab import figure,clf,plot,linspace,pi,show
from numpy import array,ones,zeros,cos,sin,concatenate
from numpy.random import randn
from modshogun import *
k=2
num=500
d1=concatenate((randn(1,num),10.*randn(1,num)),0)
d2=concatenate((randn(1,num),10.*randn(1,num)),0)+array([[10.],[0.]])
d3=concatenate((randn(1,num),10.*randn(1,num)),0)+array([[0.],[100.]])
d4=concatenate((randn(1,num),10.*randn(1,num)),0)+array([[10.],[100.]])
traindata=concatenate((d1,d2,d3,d4),1)
feat_train=RealFeatures(traindata)
distance=EuclideanDistance(feat_train,feat_train)
kmeans=KMeans(k, distance, True)
kmeans.train()
centerspp=kmeans.get_cluster_centers()
radipp=kmeans.get_radiuses()
kmeans.set_use_kmeanspp(False)
kmeans.train()
centers=kmeans.get_cluster_centers()
radi=kmeans.get_radiuses()
figure('KMeans with KMeans++')
clf()
plot(d1[0],d1[1],'rx')
plot(d2[0],d2[1],'bx',hold=True)
plot(d3[0],d3[1],'gx',hold=True)
plot(d4[0],d4[1],'cx',hold=True)
plot(centerspp[0,:], centerspp[1,:], 'ko',hold=True)
for i in xrange(k):
t = linspace(0, 2*pi, 100)
plot(radipp[i]*cos(t)+centerspp[0,i],radipp[i]*sin(t)+centerspp[1,i],'k-', hold=True)
figure('KMeans w/o KMeans++')
clf()
plot(d1[0],d1[1],'rx')
plot(d2[0],d2[1],'bx',hold=True)
plot(d3[0],d3[1],'gx',hold=True)
plot(d4[0],d4[1],'cx',hold=True)
plot(centers[0,:], centers[1,:], 'ko',hold=True)
for i in xrange(k):
t = linspace(0, 2*pi, 100)
plot(radi[i]*cos(t)+centers[0,i],radi[i]*sin(t)+centers[1,i],'k-', hold=True)
show()
|
mdiller/MangoByte
|
cogs/utils/logger.py
|
Python
|
mit
| 962 | 0.025988 |
from __main__ import settings
import logging
import datetime
import os
from pythonjsonlogger import jsonlogger
# if we wanna log disnake stuff https://docs.disnake.dev/en/latest/logging.html?highlight=logger
# we can also get the root logger, which will give us a ton of info for all the libraries we have
if not os.path.exists("logs"):
os.makedirs("logs")
def setup_logger():
logger = logging.getLogger("mangologger")
if settings.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# Console Logging
if settings.debug:
consoleout = logging.StreamHandler()
logger.addHandler(consoleout)
# JSON file logging
timestamp = datetime.datetime.now().strftime("%Y-%m-%d__%I_%M%p")
filehandler = logging.FileHandler(filename=f"logs/mangolog_{timestamp}.log", encoding="utf-8", mode="w")
jsonformatter = jsonlogger.JsonFormatter()
filehandler.setFormatter(jsonformatter)
logger.addHandler(filehandler)
return logger
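# Usage sketch (illustrative, not part of the original file): create the
# logger once at startup; jsonlogger serializes `extra` fields per record.
# The message and extra dict are invented.
#
# logger = setup_logger()
# logger.info("bot ready", extra={"shard": 0})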
|
boooka/GeoPowerOff
|
venv/lib/python2.7/site-packages/grab/transport/kit.py
|
Python
|
apache-2.0
| 4,620 | 0.009307 |
# Copyright: 2013, Grigoriy Petukhov
# Author: Grigoriy Petukhov (http://lorien.name)
# License: BSD
from __future__ import absolute_import
#import email
import logging
#import urllib
#try:
#from cStringIO import StringIO
#except ImportError:
#from io import BytesIO as StringIO
#import threading
import random
#try:
#from urlparse import urlsplit, urlunsplit
#except ImportError:
#from urllib.parse import urlsplit, urlunsplit
#import pycurl
#import tempfile
#import os.path
#from ..base import UploadContent, UploadFile
#from .. import error
from ..response import Response
from ..tools.http import encode_cookies, smart_urlencode, normalize_unicode,\
normalize_http_values, normalize_post_data
from ..tools.user_agent import random_user_agent
from ..base import Grab
from grab.kit import Kit
logger = logging.getLogger('grab.transport.kit')
class KitTransport(object):
"""
Grab network transport powered with QtWebKit module
"""
def __init__(self):
self.kit = Kit()
#def setup_body_file(self, storage_dir, storage_filename):
#if storage_filename is None:
#handle, path = tempfile.mkstemp(dir=storage_dir)
#else:
#path = os.path.join(storage_dir, storage_filename)
#self.body_file = open(path, 'wb')
#self.body_path = path
def reset(self):
self.request_object = {
'url': None,
'cookies': {},
'method': None,
'data': None,
'user_agent': None,
}
self.response = None
#self.response_head_chunks = []
#self.response_body_chunks = []
#self.response_body_bytes_read = 0
#self.verbose_logging = False
#self.body_file = None
#self.body_path = None
## Maybe move to super-class???
self.request_head = ''
self.request_body = ''
self.request_log = ''
    def process_config(self, grab):
        self.request_object['url'] = grab.config['url']
        self.request_object['method'] = grab.request_method.lower()
        if grab.config['cookiefile']:
            grab.load_cookies(grab.config['cookiefile'])
        if grab.config['cookies']:
if not isinstance(grab.config['cookies'], dict):
                raise error.GrabMisuseError('cookies option should be a dict')
self.request_object['cookies'] = grab.config['cookies']
if grab.request_method == 'POST':
if grab.config['multipart_post']:
raise NotImplementedError
elif grab.config['post']:
post_data = normalize_post_data(grab.config['post'], grab.config['charset'])
else:
post_data = None
self.request_object['data'] = post_data
if grab.config['user_agent'] is None:
if grab.config['user_agent_file'] is not None:
with open(grab.config['user_agent_file']) as inf:
lines = inf.read().splitlines()
grab.config['user_agent'] = random.choice(lines)
else:
pass
# I think that it does not make sense
# to create random user agents for webkit transport
#grab.config['user_agent'] = random_user_agent()
self.request_object['user_agent'] = grab.config['user_agent']
def request(self):
req = self.request_object
self.kit_response = self.kit.request(
url=req['url'],
cookies=req['cookies'],
method=req['method'],
data=req['data'],
user_agent=req['user_agent'],
)
def prepare_response(self, grab):
return self.kit_response
def extract_cookies(self):
"""
Extract cookies.
"""
return self.kit_response.cookies
def __getstate__(self):
"""
Reset curl attribute which could not be pickled.
"""
state = self.__dict__.copy()
state['kit'] = None
return state
def __setstate__(self, state):
"""
Create pycurl instance after Grag instance was restored
from pickled state.
"""
state['kit'] = Kit()
self.__dict__ = state
class GrabKit(Grab):
def __init__(self, response_body=None, transport='grab.transport.curl.CurlTransport',
**kwargs):
super(GrabKit, self).__init__(response_body=response_body,
transport='grab.transport.kit.KitTransport',
**kwargs)
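# Usage sketch (illustrative, not part of the original file): GrabKit pairs
# the Grab API with the WebKit transport defined above; `go()` and
# `response.body` follow the standard Grab interface (URL is a placeholder).
#
# g = GrabKit()
# g.go('http://example.com/')
# print(g.response.body)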
|
thomasleese/gantt-charts
|
ganttcharts/__init__.py
|
Python
|
mit
| 56 | 0 |
__version__ = '0.1.0'
__description__ = 'Gantt charts!'
|
timbuchwaldt/bundlewrap
|
bundlewrap/lock.py
|
Python
|
gpl-3.0
| 8,144 | 0.002456 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from getpass import getuser
import json
from os import environ
from pipes import quote
from socket import gethostname
from time import time
from .exceptions import NodeLockedException
from .utils import cached_property, tempfile
from .utils.text import (
blue,
bold,
format_duration,
format_timestamp,
mark_for_translation as _,
parse_duration,
red,
wrap_question,
)
from .utils.ui import io
HARD_LOCK_PATH = "/tmp/bundlewrap.lock"
HARD_LOCK_FILE = HARD_LOCK_PATH + "/info"
SOFT_LOCK_PATH = "/tmp/bundlewrap.softlock.d"
SOFT_LOCK_FILE = "/tmp/bundlewrap.softlock.d/{id}"
def identity():
return environ.get('BW_IDENTITY', "{}@{}".format(
getuser(),
gethostname(),
))
class NodeLock(object):
def __init__(self, node, interactive=False, ignore=False):
self.node = node
self.ignore = ignore
self.interactive = interactive
def __enter__(self):
if self.node.os == 'kubernetes':
# no locking required
return self
with tempfile() as local_path:
if not self.ignore:
with io.job(_("{node} checking hard lock status").format(node=bold(self.node.name))):
result = self.node.run("mkdir " + quote(HARD_LOCK_PATH), may_fail=True)
if result.return_code != 0:
self.node.download(HARD_LOCK_FILE, local_path)
with open(local_path, 'r') as f:
try:
info = json.loads(f.read())
except:
io.stderr(_(
"{warning} corrupted lock on {node}: "
"unable to read or parse lock file contents "
"(clear it with `bw run {node} 'rm -R {path}'`)"
).format(
node=self.node.name,
path=HARD_LOCK_FILE,
warning=red(_("WARNING")),
))
info = {}
expired = False
try:
d = info['date']
except KeyError:
info['date'] = _("<unknown>")
info['duration'] = _("<unknown>")
else:
duration = datetime.now() - datetime.fromtimestamp(d)
info['date'] = format_timestamp(d)
info['duration'] = format_duration(duration)
if duration > parse_duration(environ.get('BW_HARDLOCK_EXPIRY', "8h")):
expired = True
io.debug("ignoring expired hard lock on {}".format(self.node.name))
if 'user' not in info:
info['user'] = _("<unknown>")
if expired or self.ignore or (self.interactive and io.ask(
self._warning_message_hard(info),
False,
epilogue=blue("?") + " " + bold(self.node.name),
)):
pass
else:
raise NodeLockedException(info)
with io.job(_("{node} uploading lock file").format(node=bold(self.node.name))):
if self.ignore:
self.node.run("mkdir -p " + quote(HARD_LOCK_PATH))
with open(local_path, 'w') as f:
f.write(json.dumps({
'date': time(),
'user': identity(),
}))
self.node.upload(local_path, HARD_LOCK_FILE)
return self
def __exit__(self, type, value, traceback):
if self.node.os == 'kubernetes':
# no locking required
return
with io.job(_("{node} removing hard lock").format(node=bold(self.node.name))):
result = self.node.run("rm -R {}".format(quote(HARD_LOCK_PATH)), may_fail=True)
if result.return_code != 0:
io.stderr(_("{x} {node} could not release hard lock").format(
node=bold(self.node.name),
x=red("!"),
))
def _warning_message_hard(self, info):
return wrap_question(
red(_("NODE LOCKED")),
_(
"Looks like somebody is currently using BundleWrap on this node.\n"
"You should let them finish or override the lock if it has gone stale.\n"
"\n"
"locked by {user}\n"
" since {date} ({duration} ago)"
).format(
user=bold(info['user']),
date=info['date'],
duration=info['duration'],
),
bold(_("Override lock?")),
prefix="{x} {node} ".format(node=bold(self.node.name), x=blue("?")),
)
@cached_property
def soft_locks(self):
return softlock_list(self.node)
@cached_property
def my_soft_locks(self):
for lock in self.soft_locks:
if lock['user'] == identity():
yield lock
@cached_property
def other_peoples_soft_locks(self):
for lock in self.soft_locks:
if lock['user'] != identity():
yield lock
def softlock_add(node, lock_id, comment="", expiry="8h", item_selectors=None):
assert node.os != 'kubernetes'
if "\n" in comment:
raise ValueError(_("Lock comments must not contain any newlines"))
if not item_selectors:
item_selectors = ["*"]
expiry_timedelta = parse_duration(expiry)
now = time()
expiry_timestamp = now + expiry_timedelta.days * 86400 + expiry_timedelta.seconds
content = json.dumps({
'comment': comment,
'date': now,
'expiry': expiry_timestamp,
'id': lock_id,
'items': item_selectors,
'user': identity(),
}, indent=None, sort_keys=True)
with tempfile() as local_path:
with open(local_path, 'w') as f:
f.write(content + "\n")
node.run("mkdir -p " + quote(SOFT_LOCK_PATH))
node.upload(local_path, SOFT_LOCK_FILE.format(id=lock_id), mode='0644')
    node.repo.hooks.lock_add(node.repo, node, lock_id, item_selectors, expiry_timestamp, comment)
return lock_id
def softlock_list(node):
if node.os == 'kubernetes':
return []
with io.job(_("{} checking soft locks").format(bold(node.name))):
cat = node.run("cat {}".format(SOFT_LOCK_FILE.format(id="*")), may_fail=True)
if cat.return_code != 0:
return []
result = []
    for line in cat.stdout.decode('utf-8').strip().split("\n"):
try:
result.append(json.loads(line.strip()))
except json.decoder.JSONDecodeError:
io.stderr(_(
"{x} {node} unable to parse soft lock file contents, ignoring: {line}"
).format(
x=red("!"),
node=bold(node.name),
line=line.strip(),
))
for lock in result[:]:
if lock['expiry'] < time():
io.debug(_("removing expired soft lock {id} from node {node}").format(
id=lock['id'],
node=node.name,
))
softlock_remove(node, lock['id'])
result.remove(lock)
return result
def softlock_remove(node, lock_id):
assert node.os != 'kubernetes'
io.debug(_("removing soft lock {id} from node {node}").format(
id=lock_id,
node=node.name,
))
node.run("rm {}".format(SOFT_LOCK_FILE.format(id=lock_id)))
node.repo.hooks.lock_remove(node.repo, node, lock_id)
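# Usage sketch (illustrative, not part of the original file): NodeLock is a
# context manager; `node` is assumed to be a bundlewrap Node, and `apply()`
# stands in for whatever work needs the hard lock.
#
# with NodeLock(node, interactive=True):
#     node.apply()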
|
indykish/servo
|
tests/wpt/harness/wptrunner/executors/process.py
|
Python
|
mpl-2.0
| 701 | 0 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from .base import TestExecutor
class ProcessTestExecutor(TestExecutor):
def __init__(self, *args, **kwargs):
TestExecutor.__init__(self, *args, **kwargs)
self.binary = self.browser.binary
self.interactive = self.browser.interactive
def setup(self, runner):
self.runner = runner
self.runner.send_message("init_succeeded")
return True
def is_alive(self):
return True
def do_test(self, test):
raise NotImplementedError
|
akademikbilisim/ab-kurs-kayit
|
abkayit/training/migrations/0005_auto_20160627_1414.py
|
Python
|
gpl-3.0
| 1,403 | 0.002851 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('training', '0004_auto_20160627_1106'),
]
operations = [
migrations.AddField(
model_name='trainesscourserecord',
name='createdby',
field=models.ForeignKey(related_name='createdby', default=4, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='trainesscourserecord',
name='createtimestamp',
            field=models.DateField(default=django.utils.timezone.now, null=True, verbose_name=b'Creation Timestamp', blank=True),
),
migrations.AddField(
model_name='trainesscourserecord',
name='modifiedby',
field=models.ForeignKey(related_name='modifiedby', default=4, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='trainesscourserecord',
name='modifytimestamp',
field=models.DateField(default=django.utils.timezone.now, null=True, verbose_name=b'Modification Timestamp', blank=True),
),
]
|
OCA/server-tools
|
module_change_auto_install/__init__.py
|
Python
|
agpl-3.0
| 29 | 0 |
from .patch import post_load
| |
pmart123/censible_links
|
censible_links/settings.py
|
Python
|
bsd-3-clause
| 93 | 0 |
SPIDER_MODULES = ['censible_links.spiders']
DEFAULT_ITEM_CLASS = 'censible_links.items.Page'
|
devs1991/test_edx_docmode
|
common/djangoapps/student/tests/test_login.py
|
Python
|
agpl-3.0
| 26,838 | 0.002757 |
'''
Tests for student activation and login
'''
import json
import unittest
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import HttpResponseBadRequest, HttpResponse
import httpretty
from mock import patch
from social.apps.django_app.default.models import UserSocialAuth
from external_auth.models import ExternalAuthMap
from student.tests.factories import UserFactory, RegistrationFactory, UserProfileFactory
from student.views import login_oauth_token
from third_party_auth.tests.utils import (
ThirdPartyOAuthTestMixin,
ThirdPartyOAuthTestMixinFacebook,
ThirdPartyOAuthTestMixinGoogle
)
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class LoginTest(TestCase):
'''
Test student.views.login_user() view
'''
def setUp(self):
super(LoginTest, self).setUp()
# Create one user and save it to the database
self.user = UserFactory.build(username='test', email='test@edx.org')
self.user.set_password('test_password')
self.user.save()
# Create a registration for the user
RegistrationFactory(user=self.user)
# Create a profile for the user
UserProfileFactory(user=self.user)
# Create the test client
self.client = Client()
cache.clear()
# Store the login url
try:
self.url = reverse('login_post')
except NoReverseMatch:
self.url = reverse('login')
def test_login_success(self):
response, mock_audit_log = self._login_response('test@edx.org', 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', u'test@edx.org'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_success_no_pii(self):
response, mock_audit_log = self._login_response('test@edx.org', 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [u'test@edx.org'])
def test_login_success_unicode_email(self):
unicode_email = u'test' + unichr(40960) + u'@edx.org'
self.user.email = unicode_email
self.user.save()
response, mock_audit_log = self._login_response(unicode_email, 'test_password', patched_audit_log='student.models.AUDIT_LOG')
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', unicode_email])
def test_login_fail_no_user_exists(self):
nonexistent_email = u'not_a_user@edx.org'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'ADVANCED_SECURITY': True})
def test_login_fail_incorrect_email_with_advanced_security(self):
nonexistent_email = u'not_a_user@edx.org'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_no_user_exists_no_pii(self):
nonexistent_email = u'not_a_user@edx.org'
response, mock_audit_log = self._login_response(nonexistent_email, 'test_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [nonexistent_email])
def test_login_fail_wrong_password(self):
response, mock_audit_log = self._login_response('test@edx.org', 'wrong_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'test@edx.org', u'invalid'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_wrong_password_no_pii(self):
response, mock_audit_log = self._login_response('test@edx.org', 'wrong_password')
self._assert_response(response, success=False,
value='Email or password is incorrect')
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'invalid'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'test@edx.org'])
    def test_login_not_activated(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response('test@edx.org', 'test_password')
self._assert_response(response, success=False,
value="This account has not been activated")
        self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_not_activated_no_pii(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response('test@edx.org', 'test_password')
self._assert_response(response, success=False,
value="This account has not been activated")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'test'])
def test_login_unicode_email(self):
unicode_email = u'test@edx.org' + unichr(40960)
response, mock_audit_log = self._login_response(unicode_email, 'test_password')
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', unicode_email])
def test_login_unicode_password(self):
unicode_password = u'test_password' + unichr(1972)
response, mock_audit_log = self._login_response('test@edx.org', unicode_password)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'test@edx.org', u'invalid'])
def test_logout_logging(self):
response, _ = self._login_response('test@edx.org', 'test_password')
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 302)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout', u'test'])
def test_login_user_info_cookie(self):
response, _ = self._login_response('test@edx.org', 'test_password')
self._assert_response(response, success=True)
# Verify the format of the "user info" cookie set on login
cookie = self.client.cookies[settings.EDXMKTG_USER_INFO_COOKIE_NAME]
user_info = json.loads(cookie.value)
|
bossiernesto/onyx
|
persistance/documents/json_doc.py
|
Python
|
bsd-3-clause
| 703 | 0.004267 |
try:
import simplejson
except ImportError:
import json as simplejson
from .meta import DocumentMeta, BaseDocumentSession
json_objects = []
class JSONDocument(object):
"""
JSON Document base class
"""
__metaclass__ = DocumentMeta
def __init__(self, **kwargs):
json_objects.append(kwargs)
class Session(BaseDocumentSession):
"""
A class featuring a database session
"""
def commit(self):
"""
Dumps the scraped data to the filesystem
"""
        with open(self.file_name, 'w') as f:
simplejson.dump(json_objects, f)
def close(self):
super(Session,self).close()
json_session = Session()
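# Usage sketch (illustrative, not part of the original file): subclasses
# register their keyword data in `json_objects`; `file_name` is assumed to
# be supplied by BaseDocumentSession, and the field names are invented.
#
# class Page(JSONDocument):
#     pass
#
# Page(url='http://example.com', title='Example')
# json_session.commit()  # dumps all collected objects to file_name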
|
teamfx/openjfx-9-dev-rt
|
modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/layout_tests/servers/web_platform_test_server.py
|
Python
|
gpl-2.0
| 9,422 | 0.003927 |
# Copyright (c) 2014, Canon Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Canon Inc. nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY CANON INC. AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CANON INC. AND ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import sys
import time
from webkitpy.common.system.autoinstall import AutoInstaller
from webkitpy.layout_tests.servers import http_server_base
_log = logging.getLogger(__name__)
def doc_root(port_obj):
doc_root = port_obj.get_option("wptserver_doc_root")
if doc_root is None:
return port_obj.host.filesystem.join("imported", "w3c", "web-platform-tests")
return doc_root
def base_url(port_obj):
config_wk_filepath = port_obj._filesystem.join(port_obj.layout_tests_dir(), "imported", "w3c", "resources", "config.json")
if not port_obj.host.filesystem.isfile(config_wk_filepath):
# This should only be hit by webkitpy unit tests
_log.debug("No WPT config file found")
return "http://localhost:8800/"
json_data = port_obj._filesystem.read_text_file(config_wk_filepath)
config = json.loads(json_data)
ports = config["ports"]
return "http://" + config["host"] + ":" + str(ports["http"][0]) + "/"
class WebPlatformTestServer(http_server_base.HttpServerBase):
def __init__(self, port_obj, name, pidfile=None):
http_server_base.HttpServerBase.__init__(self, port_obj)
self._output_dir = port_obj.results_directory()
self._name = name
self._log_file_name = '%s_process_log.out.txt' % (self._name)
self._wsout = None
self._process = None
self._pid_file = pidfile
if not self._pid_file:
self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
self._servers_file = self._filesystem.join(self._runtime_path, '%s_servers.json' % (self._name))
self._stdout_data = None
self._stderr_data = None
self._filesystem = port_obj.host.filesystem
self._layout_root = port_obj.layout_tests_dir()
self._doc_root = self._filesystem.join(self._layout_root, doc_root(port_obj))
self._resources_files_to_copy = ['testharness.css', 'testharnessreport.js']
current_dir_path = self._filesystem.abspath(self._filesystem.split(__file__)[0])
self._start_cmd = ["python", self._filesystem.join(current_dir_path, "web_platform_test_launcher.py"), self._servers_file]
self._doc_root_path = self._filesystem.join(self._layout_root, self._doc_root)
def _install_modules(self):
modules_file_path = self._filesystem.join(self._doc_root_path, "..", "resources", "web-platform-tests-modules.json")
if not self._filesystem.isfile(modules_file_path):
_log.warning("Cannot read " + modules_file_path)
return
modules = json.loads(self._filesystem.read_text_file(modules_file_path))
for module in modules:
path = module["path"]
name = path.pop()
            AutoInstaller(target_dir=self._filesystem.join(self._doc_root, self._filesystem.sep.join(path))).install(url=module["url"], url_subpath=module["url_subpath"], target_name=name)
def _copy_webkit_test_files(self):
_log.debug('Copying WebKit resources files')
for f in self._resources_files_to_copy:
webkit_filename = self._filesystem.join(self._layout_root, "resources", f)
if self._filesystem.isfile(webkit_filename):
self._filesystem.copyfile(webkit_filename, self._filesystem.join(self._doc_root, "resources", f))
_log.debug('Copying WebKit web platform server config.json')
config_wk_filename = self._filesystem.join(self._layout_root, "imported", "w3c", "resources", "config.json")
if self._filesystem.isfile(config_wk_filename):
config_json = self._filesystem.read_text_file(config_wk_filename).replace("%CERTS_DIR%", self._filesystem.join(self._output_dir, "_wpt_certs"))
self._filesystem.write_text_file(self._filesystem.join(self._doc_root, "config.json"), config_json)
wpt_testharnessjs_file = self._filesystem.join(self._doc_root, "resources", "testharness.js")
layout_tests_testharnessjs_file = self._filesystem.join(self._layout_root, "resources", "testharness.js")
# FIXME: Next line to be removed once all bots have wpt_testharnessjs_file updated correctly. See https://bugs.webkit.org/show_bug.cgi?id=152257.
self._filesystem.copyfile(layout_tests_testharnessjs_file, wpt_testharnessjs_file)
if (not self._filesystem.compare(wpt_testharnessjs_file, layout_tests_testharnessjs_file)):
_log.warning("\n//////////\nWPT tests are not using the same testharness.js file as other WebKit Layout tests.\nWebKit testharness.js might need to be updated according WPT testharness.js.\n//////////\n")
def _clean_webkit_test_files(self):
_log.debug('Cleaning WPT resources files')
for f in self._resources_files_to_copy:
wpt_filename = self._filesystem.join(self._doc_root, "resources", f)
if self._filesystem.isfile(wpt_filename):
self._filesystem.remove(wpt_filename)
_log.debug('Cleaning WPT web platform server config.json')
config_wpt_filename = self._filesystem.join(self._doc_root, "config.json")
if self._filesystem.isfile(config_wpt_filename):
self._filesystem.remove(config_wpt_filename)
def _prepare_config(self):
if self._filesystem.exists(self._output_dir):
output_log = self._filesystem.join(self._output_dir, self._log_file_name)
self._wsout = self._filesystem.open_text_file_for_writing(output_log)
self._install_modules()
self._copy_webkit_test_files()
def _spawn_process(self):
self._stdout_data = None
self._stderr_data = None
if self._wsout:
self._process = self._executive.popen(self._start_cmd, cwd=self._doc_root_path, shell=False, stdin=self._executive.PIPE, stdout=self._wsout, stderr=self._wsout)
else:
self._process = self._executive.popen(self._start_cmd, cwd=self._doc_root_path, shell=False, stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=self._executive.STDOUT)
self._filesystem.write_text_file(self._pid_file, str(self._process.pid))
# Wait a second for the server to actually start so that tests do not start until server is running.
time.sleep(1)
return self._process.pid
def _stop_running_subservers(self):
if self._filesystem.exists(self._servers_file):
try:
json_data = self._filesystem.read_text_file(self._servers_file)
started_servers = json.loads(json_data)
for server in started_servers:
if
|
fcgravalos/CEES-API-v1.0
|
logmessages.py
|
Python
|
apache-2.0
| 1,055 | 0.006635 |
"""
File: logmessages.py
Author: Fernando Crespo Gravalos (cees.project.official@gmail.com)
Date: 2014/06/16
"""
##############
# LOG MESSAGES
##############
## GENERAL ##
DB_ERROR = 'Database error. Check database log file.'
STORE_NOT_FOUND = 'Could not find store in cees database.'
TOKEN_NOT_FOUND = 'Could not find token in database.'
SCHEMA_NOT_FOUND = 'Could not validate request. Schema file not found.'
VALIDATION_ERROR = 'Data not valid.'
## LOGIN ##
LOGGED_IN = 'Shop assistant logged in as '
CREDENTIALS_NOT_FOUND = 'Could not find the email/password provided.'
## ARRIVALS ##
RFID_NOT_FOUND = 'Invalid identifier. RFID not found in cees database.'
CLIENT_NOT_ALLOWED = 'Client does not belong to this customer.'
SENDING_NOTIFICATION = 'Sending push notification.'
CLIENT_NOT_FOUND = 'Client not found as an arrival.'
## TOKEN ##
MISSING_TOKEN = 'There is no Authentication header in request.'
## GCM AND DEVICE REGISTRATION##
UNKNOWN_DEVICE = 'Device not found in database.'
REGISTRATION_NOT_FOUND = 'Registration not found in database.'
|
kiniou/blender-smooth-slides
|
tools/lpod/scripts/lpod-style.py
|
Python
|
gpl-3.0
| 3,854 | 0.003374 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: Hervé Cauwelier <herve@itaapy.com>
# Romain Gauthier <romain@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from the standard library
from optparse import OptionParser
from sys import exit, stdout
# Import from lpod
from lpod import __version__
from lpod.document import odf_get_document
def show_styles(document, automatic=True, common=True, properties=False):
"""Show the different styles of a document and their properties.
"""
output = document.show_styles(automatic=automatic, common=common,
properties=properties)
# Print the styles
encoding = stdout.encoding if stdout.encoding is not None else 'utf-8'
stdout.write(output.encode(encoding))
stdout.flush()
def delete_styles(document, pretty=True):
n = document.delete_styles()
document.save(pretty=pretty)
print n, "styles removed (0 error, 0 warning)."
def merge_styles(document, from_file, pretty=True):
source = odf_get_document(from_file)
document.delete_styles()
document.merge_styles_from(source)
document.save(pretty=pretty)
print "Done (0 error, 0 warning)."
if __name__ == '__main__':
# Options initialisation
usage = '%prog <file>'
description = 'A command line interface to manipulate styles of ' \
'OpenDocument files.'
parser = OptionParser(usage, version=__version__,
description=description)
# --automatic
parser.add_option('-a', '--automatic', dest='automatic',
action='store_true', default=False,
help="show automatic styles only")
# --common
parser.add_option('-c', '--common', dest='common', action='store_true',
default=False, help="show common styles only")
# --properties
parser.add_option('-p', '--properties', dest='properties',
action='store_true', help="show properties of styles")
# --delete
parser.add_option('-d', '--delete', dest='delete',
action='store_true', help="delete all styles (except default)")
# --merge
help = ('copy styles from FILE to <file>. Any style with the same name '
'will be replaced.')
parser.add_option('-m', '--merge-styles-from', dest='merge',
action='store', metavar='FILE', help=help)
# Parse options
options, args = parser.parse_args()
if len(args) != 1:
parser.print_help()
exit(1)
document = odf_get_document(args[0])
if options.delete:
delete_styles(document)
elif options.merge:
merge_styles(document, options.merge)
else:
automatic = options.automatic
common = options.common
if not automatic ^ common:
automatic, common = True, True
show_styles(document, automatic=automatic, common=common,
properties=options.properties)
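# Illustrative invocations (assuming the script is run as lpod-style.py;
# the file names below are placeholders):
#
#     lpod-style.py report.odt                   # show all styles
#     lpod-style.py -a -p report.odt             # automatic styles + properties
#     lpod-style.py -m styles.odt report.odt     # merge styles from styles.odt
#     lpod-style.py -d report.odt                # delete all styles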
|
kingoflolz/hearthbreaker
|
tests/card_tests/shaman_tests.py
|
Python
|
mit
| 30,173 | 0.002453 |
import random
import unittest
from hearthbreaker.cards.spells.neutral import TheCoin
from tests.agents.testing_agents import OneCardPlayingAgent, MinionAttackingAgent, CardTestingAgent, \
PlayAndAttackAgent
from tests.testing_utils import generate_game_for
from hearthbreaker.cards import *
from hearthbreaker.constants import MINION_TYPE
from hearthbreaker.agents.basic_agents import PredictableAgent, DoNothingAgent
class TestShaman(unittest.TestCase):
def setUp(self):
random.seed(1857)
def test_AlAkirTheWindlord(self):
game = generate_game_for(AlAkirTheWindlord, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 15):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Al'Akir the Windlord", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].windfury())
self.assertTrue(game.players[0].minions[0].charge())
self.assertTrue(game.players[0].minions[0].divine_shield)
self.assertTrue(game.players[0].minions[0].taunt)
def test_DustDevil(self):
game = generate_game_for(DustDevil, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Dust Devil", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].windfury())
self.assertEqual(2, game.players[0].upcoming_overload)
game.play_single_turn()
# Overload should cause that we start this turn with 0 mana
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(0, game.players[0].upcoming_overload)
self.assertEqual(0, game.players[0].mana)
self.assertEqual(2, game.players[0].max_mana)
def test_EarthElemental(self):
game = generate_game_for(EarthElemental, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
# Earth Elemental should be played
for turn in range(0, 9):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Earth Elemental", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].taunt)
self.assertEqual(3, game.players[0].upcoming_overload)
def test_FireElemental(self):
game = generate_game_for(FireElemental, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 10):
game.play_single_turn()
self.assertEqual(30, game.players[1].hero.health)
# Fire Elemental should be played, and its battlecry dealing three damage to opponent
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Fire Elemental", game.players[0].minions[0].card.name)
self.assertEqual(27, game.players[1].hero.health)
def test_FlametongueTotem(self):
game = generate_game_for(StonetuskBoar, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
# There should be three Stonetusk Boars on the board
self.assertEqual(3, len(game.players[0].minions))
# add a new Flametongue Totem at index 1
totem = FlametongueTotem()
totem.summon(game.players[0], game, 1)
# The minions to either side should have their attack increased
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[3].calculate_attack())
# When removing the minion at index 0, we should not get an error
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
self.assertEqual(3, len(game.players[0].minions))
# When removing the minion at index 1, we should have a new minion at index 1,
# and its attack should be increased
game.players[0].minions[1].die(None)
game.players[0].minions[1].activate_delayed()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
# Silencing this minion should have no effect on its attack
game.players[0].minions[1].silence()
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
# We should be able to add a boar on either side of the wolf, and their attack should be increased
# The attack of the boar which used to be next to the wolf should decrease
boar = StonetuskBoar()
boar.summon(game.players[0], game, 0)
boar.summon(game.players[0], game, 2)
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[3].calculate_attack())
# Add a new boar on the left of the totem since we haven't tested that yet
boar.summon(game.players[0], game, 1)
self.assertEqual(5, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
game.players[0].minions[1].die(None)
game.players[0].minions[1].activate_delayed()
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
# If the totem is silenced, then the boars to either side should no longer have increased attack
game.players[0].minions[1].silence()
self.assertEqual(1, game.players[0].minions[0].calculate_attack())
self.assertEqual(1, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[3].calculate_attack())
def test_ManaTideTotem(self):
game = generate_game_for([ManaTideTotem, WarGolem], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(25, game.players[0].deck.left)
self.assertEqual(0, len(game.players[0].minions))
# Mana Tide Totem should be played, and we should draw a card at the end of turn
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Mana Tide Totem", game.players[0].minions[0].card.name)
self.assertEqual(23, game.players[0].deck.left)
game.play_single_turn()
# Silence, we should only draw one card next turn
        game.players[0].minions[0].silence()
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(22, game.players[0].deck.left)
def test_UnboundElemental(self):
game = generate_game_for([UnboundElemental, DustDevil, DustDevil], StonetuskBoar, OneCardPlayingAgent,
DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Unbound Elemental", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(4, game.players[0].minions[0].calculate_max_health())
# One Dust Devil should be played, giving the Unbound Elemental +1/+1
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[-1].calculate_attack())
self.assertEqual(5, game.players[0].minions[-1].calculate_max_health())
# Test the silence
game.players[0].minions[-1].silence()
self.assertEqual(2, game.players[0].minions[-1].calculate_attack())
        self.assertEqual(4, game.players[0].minions[-1].calculate_max_health())
|
cwolferh/heat-scratch
|
heat/tests/openstack/manila/test_share.py
|
Python
|
apache-2.0
| 9,631 | 0 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.manila import share as mshare
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
manila_template = """
heat_template_version: 2015-04-30
resources:
test_share:
type: OS::Manila::Share
properties:
share_protocol: NFS
size: 1
access_rules:
- access_to: 127.0.0.1
access_type: ip
access_level: ro
name: basic_test_share
description: basic test share
is_public: True
metadata: {"key": "value"}
"""
class DummyShare(object):
def __init__(self):
self.availability_zone = 'az'
self.host = 'host'
self.export_locations = 'el'
self.share_server_id = 'id'
self.created_at = 'ca'
self.status = 's'
self.project_id = 'p_id'
class ManilaShareTest(common.HeatTestCase):
def setUp(self):
super(ManilaShareTest, self).setUp()
self.fake_share = mock.MagicMock(id="test_share_id")
self.available_share = mock.MagicMock(
id="test_share_id",
status=mshare.ManilaShare.STATUS_AVAILABLE)
self.failed_share = mock.MagicMock(
id="test_share_id",
status=mshare.ManilaShare.STATUS_ERROR)
self.deleting_share = mock.MagicMock(
id="test_share_id",
status=mshare.ManilaShare.STATUS_DELETING)
def _init_share(self, stack_name):
tmp = template_format.parse(manila_template)
self.stack = utils.parse_stack(tmp, stack_name=stack_name)
res_def = self.stack.t.resource_definitions(self.stack)["test_share"]
share = mshare.ManilaShare("test_share", res_def, self.stack)
# replace clients and plugins with mocks
mock_client = mock.MagicMock()
client = mock.MagicMock(return_value=mock_client)
share.client = client
mock_plugin = mock.MagicMock()
client_plugin = mock.MagicMock(return_value=mock_plugin)
share.client_plugin = client_plugin
return share
def _create_share(self, stack_name):
share = self._init_share(stack_name)
share.client().shares.create.return_value = self.fake_share
share.client().shares.get.return_value = self.available_share
scheduler.TaskRunner(share.create)()
return share
def test_share_create(self):
share = self._create_share("stack_share_create")
expected_state = (share.CREATE, share.COMPLETE)
self.assertEqual(expected_state, share.state,
"Share is not in expected state")
self.assertEqual(self.fake_share.id, share.resource_id,
"Expected share ID was not propagated to share")
share.client().shares.allow.assert_called_once_with(
access="127.0.0.1", access_level="ro",
share=share.resource_id, access_type="ip")
args, kwargs = share.client().shares.create.call_args
message_end = " parameter was not passed to manila client"
self.assertEqual(u"NFS", kwargs["share_proto"],
"Share protoc
|
ol" + message_end)
        self.assertEqual(1, kwargs["size"], "Share size" + message_end)
self.assertEqual("basic_test_share", kwargs["name"],
"Share name" + message_end)
self.assertEqual("basic test share", kwargs["description"],
"Share description" + message_end)
self.assertEqual({u"key": u"value"}, kwargs["metadata"],
"Metadata" + message_end)
self.assertTrue(kwargs["is_public"])
share.client().shares.get.assert_called_once_with(self.fake_share.id)
self.assertEqual('shares', share.entity)
def test_share_create_fail(self):
share = self._init_share("stack_share_create_fail")
share.client().shares.get.return_value = self.failed_share
exc = self.assertRaises(exception.ResourceInError,
share.check_create_complete,
self.failed_share)
self.assertIn("Error during creation", six.text_type(exc))
def test_share_create_unknown_status(self):
share = self._init_share("stack_share_create_unknown")
share.client().shares.get.return_value = self.deleting_share
exc = self.assertRaises(exception.ResourceUnknownStatus,
share.check_create_complete,
self.deleting_share)
self.assertIn("Unknown status", six.text_type(exc))
def test_share_check(self):
share = self._create_share("stack_share_check")
scheduler.TaskRunner(share.check)()
expected_state = (share.CHECK, share.COMPLETE)
self.assertEqual(expected_state, share.state,
"Share is not in expected state")
def test_share_check_fail(self):
share = self._create_share("stack_share_check_fail")
share.client().shares.get.return_value = self.failed_share
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(share.check))
self.assertIn("Error: resources.test_share: 'status': expected "
"'['available']'", six.text_type(exc))
def test_share_update(self):
share = self._create_share("stack_share_update")
updated_share_props = copy.deepcopy(share.properties.data)
updated_share_props[mshare.ManilaShare.DESCRIPTION] = "desc"
updated_share_props[mshare.ManilaShare.NAME] = "name"
updated_share_props[mshare.ManilaShare.IS_PUBLIC] = True
share.client().shares.update.return_value = None
after = rsrc_defn.ResourceDefinition(share.name, share.type(),
updated_share_props)
scheduler.TaskRunner(share.update, after)()
kwargs = {
"display_name": "name",
"display_description": "desc",
}
share.client().shares.update.assert_called_once_with(
share.resource_id, **kwargs)
def test_share_update_access_rules(self):
share = self._create_share("stack_share_update_access_rules")
updated_share_props = copy.deepcopy(share.properties.data)
updated_share_props[mshare.ManilaShare.ACCESS_RULES] = [
{mshare.ManilaShare.ACCESS_TO: "127.0.0.2",
mshare.ManilaShare.ACCESS_TYPE: "ip",
mshare.ManilaShare.ACCESS_LEVEL: "ro"}]
share.client().shares.deny.return_value = None
current_rule = {
mshare.ManilaShare.ACCESS_TO: "127.0.0.1",
mshare.ManilaShare.ACCESS_TYPE: "ip",
mshare.ManilaShare.ACCESS_LEVEL: "ro",
"id": "test_access_rule"
}
rule_tuple = collections.namedtuple("DummyRule",
list(current_rule.keys()))
share.client().shares.access_list.return_value = [
rule_tuple(**current_rule)]
after = rsrc_defn.ResourceDefinition(share.name, share.type(),
updated_share_props)
scheduler.TaskRunner(share.update, after)()
share.client().shares.access_list.assert_called_once_with(
share.resource_id)
share.client().shares.allow.assert_called_with(
share=share.resource_id, access_type="ip",
|
horstjens/ThePythonGameBook
|
en/python/battleship/chat_server.py
|
Python
|
gpl-3.0
| 3,173 | 0.007249 |
#!/usr/bin/env python3
"""Server for multithreaded (asynchronous) chat application."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
def accept_incoming_connections():
"""Sets up handling for incoming clients."""
while True:
        client, client_address = SERVER.accept()  # client_address is ip and port
print("client {}:{} has connected with server".format(client_address[0], client_address[1]))
#client.send(bytes("Welcome to Battleships! Please type your name and press enter!", "utf8"))
addresses[client] = client_address
Thread(target=handle_client, args=(client,)).start()
def handle_client(client): # Takes client socket as argument.
"""Handles a single client connection."""
name = client.recv(BUFSIZ).decode("utf8")
#welcome = "Welcome {}! type 'quit' to exit".format(name)
if players[0] is None:
index = 0
client.send(bytes("welcome player1 ","utf8"))
print("welcome player1")
players[0] = name
elif players[1] is None:
index = 1
client.send(bytes("welcome player2 ","utf8"))
print("welcome player2")
players[1] = name
broadcast("player{} ({}) has joined the chat!".format(index+1, name), "server:")
#broadcast(bytes(msg, "utf8"))
clients[client] = name
if players[0] is not None and players[1] is not None:
broadcast("may the game begin!", "server:")
while True:
msg = client.recv(BUFSIZ) # msg is in byte format
#create string:
message = "".join([chr(i) for i in msg])
#if msg != bytes("quit", "utf8"):
# broadcast(msg, "player{} ({}): ".format(index+1,name))#, "utf8")
#else:
if message == "quit":
client.send(bytes("quit", "utf8"))
client.close()
del clients[client]
broadcast("player{}({}) has left the chat".format(index+1, name), "server:") # , "utf
|
8"))
break
if message.lower()=="a2" and Game.turn % 2 == index:
broadcast("mfires at A2", "player{}({})".format(index+1, name))
Game.turn += 1
broadcast("turn {}. It is your turn, player{}".format(Game.turn, index+1))
else:
broadcast(message, "player{} ({}):".format(index+1,name))
def broadcast(msg, prefix=""): # prefix tells who is sending the message.
"""Broadcasts a message to all the clients.
|
converts msg to bytes if necessary"""
msg2 = msg if isinstance(msg, bytes) else bytes(msg, 'utf8')
for sock in clients:
#sock.send(bytes(prefix, "utf8") + msg)
#print("message:", msg, type(msg))
#print("prefix:", prefix)
sock.send(bytes(prefix, "utf8") + msg2)
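# Minimal companion client sketch, not part of the original server: it assumes
# the server above is reachable on localhost:33000 and follows the protocol in
# handle_client() (the first message registers the player's name).
def example_client(name="alice"):
    """Connect, register a name, print the greeting, then leave."""
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect(("localhost", 33000))
    sock.send(bytes(name, "utf8"))           # handle_client() reads the name first
    print(sock.recv(1024).decode("utf8"))    # e.g. "welcome player1 "
    sock.send(bytes("quit", "utf8"))         # server acknowledges and closes
    sock.close()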
class Game:
turn = 1
players = [None, None]
clients = {}
addresses = {}
HOST = ''
PORT = 33000
BUFSIZ = 1024
ADDR = (HOST, PORT)
SERVER = socket(AF_INET, SOCK_STREAM)
SERVER.bind(ADDR)
if __name__ == "__main__":
SERVER.listen(5)
print("Waiting for connection...")
ACCEPT_THREAD = Thread(target=accept_incoming_connections)
ACCEPT_THREAD.start()
ACCEPT_THREAD.join()
SERVER.close()
|
tchar/pushbots
|
pushbots/examples/analytics.py
|
Python
|
mit
| 1,805 | 0 |
# -*- coding: utf-8 -*-
"""
The following examples are used to demonstrate how to get/record
analytics
The method signatures are:
Pushbots.get_analytics()
and
Pushbots.record_analytics(platform=None, data=None)
In which you must specify either platform or data.
"""
from pushbots import Pushbots
def example_get_analytics():
"""Get analytics by calling Pushbots.get_analytics()"""
# Define app_id and secret
my_app_id = 'my_app_id'
my_secret = 'my_secret'
# Create a Pushbots instance
pushbots = Pushbots(app_id=my_app_id, secret=my_secret)
code, message = pushbots.get_analytics()
print('Returned code: {0}'.format(code))
print('Returned message: {0}'.format(message))
def example_record_analytics1():
"""Record analytics by passing platform directly to
Pushbots.record_analytics()
"""
# Define app_id and secret
my_app_id = 'my_app_id'
my_secret = 'my_secret'
# Create a Pushbots instance
pushbots = Pushbots(app_id=my_app_id, secret=my_secret)
# Define platform
platform = Pushbots.PLATFORM_IOS
code, message = pushbots.record_analytics(platform=platform)
print('Returned code: {0}'.format(code))
print('Returned message: {0}'.format(message))
def example_record_analytics2():
"""Record analytics by passing data defined by you to
Pushbots.record_analytics()
"""
# Define app_id and secret
my_app_id = 'my_app_id'
    my_secret = 'my_secret'
# Create a Pushbots instance
pushbots = Pushbots(app_id=my_app_id, secret=my_secret)
# Define data
    data = {'platform': '0'}  # '0' is equivalent to Pushbots.PLATFORM_IOS
code, message = pushbots.record_analytics(data=data)
print('Returned code: {0}'.format(code))
print('Returned message: {0}'.format(message))
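# Hypothetical driver so the examples above can be exercised directly; each
# example still needs a real app_id/secret filled in before it will succeed.
if __name__ == '__main__':
    example_get_analytics()
    example_record_analytics1()
    example_record_analytics2()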
|
ddurieux/alignak
|
alignak/eventhandler.py
|
Python
|
agpl-3.0
| 6,318 | 0.002375 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, h.goebel@goebel-consult.de
# aviau, alexandre.viau@savoirfairelinux.com
# Nicolas Dupeux, nicolas@dupeux.net
# Grégory Starck, g.starck@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# Jean Gabes, naparuba@gmail.com
# Zoran Zaric, zz@zoranzaric.de
# Gerhard Lausser, gerhard.lausser@consol.de
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import time
from action import Action
from alignak.property import IntegerProp, StringProp, FloatProp, BoolProp
from alignak.autoslots import AutoSlots
""" TODO: Add some comment about this class for the doc"""
class EventHandler(Action):
# AutoSlots create the __slots__ with properties and
# running_properties names
__metaclass__ = AutoSlots
my_type = 'eventhandler'
properties = {
'is_a': StringProp(default='eventhandler'),
'type': StringProp(default=''),
'_in_timeout': StringProp(default=False),
'status': StringProp(default=''),
'exit_status': StringProp(default=3),
'output': StringProp(default=''),
'long_output': StringProp(default=''),
't_to_go': StringProp(default=0),
'execution_time': FloatProp(default=0),
'u_time': FloatProp(default=0.0),
's_time': FloatProp(default=0.0),
'env': StringProp(default={}),
'perf_data': StringProp(default=''),
'sched_id': IntegerProp(default=0),
'timeout': IntegerProp(default=10),
'check_time': IntegerProp(default=0),
'command': StringProp(default=''),
'module_type': StringProp(default='fork'),
'worker': StringProp(default='none'),
        'reactionner_tag': StringProp(default='None'),
'is_snapshot': BoolProp(default=False),
}
# id = 0 #Is common to Actions
def __init__(self, command, id=None, ref=None, timeout=10, env={},
module_type='fork', reactionner_tag='None', is_snapshot=False):
self.is_a = 'eventhandler'
self.type = ''
self.status = 'scheduled'
        if id is None:  # id != None is for copy call only
self.id = Action.id
Action.id += 1
self.ref = ref
self._in_timeout = False
self.timeout = timeout
self.exit_status = 3
self.command = command
self.output = ''
self.long_output = ''
self.t_to_go = time.time()
self.check_time = 0
self.execution_time = 0
self.u_time = 0
self.s_time = 0
self.perf_data = ''
        self.env = env
self.module_type = module_type
self.worker = 'none'
self.reactionner_tag = reactionner_tag
self.is_snapshot = is_snapshot
# return a copy of the check but just what is important for execution
# So we remove the ref and all
def copy_shell(self):
# We create a dummy check with nothing in it, just defaults values
return self.copy_shell__(EventHandler('', id=self.id, is_snapshot=self.is_snapshot))
def get_return_from(self, e):
self.exit_status = e.exit_status
self.output = e.output
self.long_output = getattr(e, 'long_output', '')
self.check_time = e.check_time
self.execution_time = getattr(e, 'execution_time', 0.0)
self.perf_data = getattr(e, 'perf_data', '')
def get_outputs(self, out, max_plugins_output_length):
self.output = out
def is_launchable(self, t):
return t >= self.t_to_go
def __str__(self):
return "Check %d status:%s command:%s" % (self.id, self.status, self.command)
def get_id(self):
return self.id
# Call by pickle to dataify the comment
# because we DO NOT WANT REF in this pickleisation!
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
# Inverted function of getstate
def __setstate__(self, state):
cls = self.__class__
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
if not hasattr(self, 'worker'):
self.worker = 'none'
if not getattr(self, 'module_type', None):
self.module_type = 'fork'
# s_time and u_time are added between 1.2 and 1.4
if not hasattr(self, 'u_time'):
self.u_time = 0
self.s_time = 0
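# Minimal construction sketch, not from the original module: the command
# string is a placeholder and ref is left at its default None.
if __name__ == '__main__':
    eh = EventHandler('/usr/local/bin/notify.sh', timeout=5)
    clone = eh.copy_shell()               # execution-only copy, ref stripped
    print(eh)                             # "Check <id> status:scheduled command:..."
    print(eh.is_launchable(time.time()))  # True: t_to_go was set at creation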
|
JulienMcJay/eclock
|
windows/kivy/kivy/tests/test_audio.py
|
Python
|
gpl-2.0
| 1,707 | 0.000586 |
'''
Audio tests
===========
'''
import unittest
import os
SAMPLE_FILE = os.path.join(os.path.dirname(__file__), 'sample1.ogg')
SAMPLE_LENGTH = 1.402
SAMPLE_LENGTH_MIN = SAMPLE_LENGTH * 0.99
SAMPLE_LENGTH_MAX = SAMPLE_LENGTH * 1.01
class AudioTestCase(unittest.TestCase):
def get_sound(self):
import os
assert os.path.exists(SAMPLE_FILE)
from kivy.core import audio
return audio.SoundLoader.load(SAMPLE_FILE)
    def test_length_simple(self):
sound = self.get_sound()
volume = sound.volume = 0.75
length = sound.length
assert SAMPLE_LENGTH_MIN <= length <= SAMPLE_LENGTH_MAX
        # ensure that the gstreamer play/stop doesn't mess up the volume
assert volume == sound.volume
def test_length_playing(self):
import time
sound = self.get_sound()
sound.play()
try:
time.sleep(0.1)
length = sound.length
assert SAMPLE_LENGTH_MIN <= length <= SAMPLE_LENGTH_MAX
finally:
sound.stop()
def test_length_stopped(self):
import time
sound = self.get_sound()
sound.play()
try:
time.sleep(0.1)
finally:
sound.stop()
length = sound.length
assert SAMPLE_LENGTH_MIN <= length <= SAMPLE_LENGTH_MAX
class AudioGstreamerTestCase(AudioTestCase):
def make_sound(self, source):
from kivy.core.audio import audio_gstreamer
return audio_gstreamer.SoundGstreamer(source)
class AudioPygameTestCase(AudioTestCase):
def make_sound(self, source):
from kivy.core.audio import audio_pygame
return audio_pygame.SoundPygame(source)
|
wikimedia/pywikibot-core
|
pywikibot/scripts/version.py
|
Python
|
mit
| 3,279 | 0 |
#!/usr/bin/python3
"""Script to determine the Pywikibot version (tag, revision and date).
.. versionchanged:: 7.0
version script was moved to the framework scripts folder
"""
#
# (C) Pywikibot team, 2007-2021
#
# Distributed under the terms of the MIT license.
#
import codecs
import os
import sys
import pywikibot
from pywikibot.version import get_toolforge_hostname, getversion
class DummyModule:
"""Fake module instance."""
__version__ = 'n/a'
try:
import setuptools
except ImportError:
setuptools = DummyModule()
try:
import mwparserfromhell
except ImportError:
mwparserfromhell = DummyModule()
try:
import wikitextparser
except ImportError:
wikitextparser = DummyModule()
try:
import requests
except ImportError:
requests = DummyModule()
WMF_CACERT = 'MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs'
def main(*args: str) -> None:
"""Print pywikibot version and important settings."""
pywikibot.output('Pywikibot: ' + getversion())
pywikibot.output('Release version: ' + pywikibot.__version__)
pywikibot.output('setuptools version: ' + setuptools.__version__)
pywikibot.output('mwparserfromhell version: '
+ mwparserfromhell.__version__)
pywikibot.output('wikitextparser version: ' + wikitextparser.__version__)
pywikibot.output('requests version: ' + requests.__version__)
    has_wikimedia_cert = False
if (not hasattr(requests, 'certs')
or not hasattr(requests.certs, 'where')
or not callable(requests.certs.where)):
pywikibot.output(' cacerts: not defined')
elif not os.path.isfile(requests.certs.where()):
pywikibot.output(' cacerts: {} (missing)'.format(
requests.certs.where()))
else:
pywikibot.output(' cacerts: ' + requests.certs.where())
with codecs.open(requests.certs.where(), 'r', 'utf-8') as cert_file:
text = cert_file.read()
if WMF_CACERT in text:
has_wikimedia_cert = True
pywikibot.output(' certificate test: {}'
.format('ok' if has_wikimedia_cert else 'not ok'))
if not has_wikimedia_cert:
pywikibot.output(' Please reinstall requests!')
pywikibot.output('Python: ' + sys.version)
toolforge_env_hostname = get_toolforge_hostname()
if toolforge_env_hostname:
pywikibot.output('Toolforge hostname: ' + toolforge_env_hostname)
# check environment settings
settings = {key for key in os.environ if key.startswith('PYWIKIBOT')}
settings.update(['PYWIKIBOT_DIR', 'PYWIKIBOT_DIR_PWB',
'PYWIKIBOT_NO_USER_CONFIG'])
for environ_name in sorted(settings):
pywikibot.output(
'{}: {}'.format(environ_name,
os.environ.get(environ_name, 'Not set') or "''"))
pywikibot.output('Config base dir: ' + pywikibot.config.base_dir)
for family, usernames in pywikibot.config.usernames.items():
if not usernames:
continue
pywikibot.output('Usernames for family {!r}:'.format(family))
for lang, username in usernames.items():
pywikibot.output('\t{}: {}'.format(lang, username))
if __name__ == '__main__':
main()
|
Salamek/DwaPython
|
tests/UserTest.py
|
Python
|
gpl-3.0
| 4,776 | 0.012772 |
# Copyright (C) 2014 Adam Schubert <adam.schubert@sg1-game.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__="Adam Schubert <adam.schubert@sg1-game.net>"
__date__ ="$12.10.2014 2:20:45$"
import tests.DwaTestCase as DwaTestCase
import unittest
import time
class UserTest(DwaTestCase.DwaTestCase):
def setUp(self):
DwaTestCase.DwaTestCase.setUp(self)
self.user = self.d.user()
self.username = self.credential['username'] + 'UserTest' + str(time.time())
  def testCreate(self):
params = {}
params['password'] = self.credential['password']
    params['username'] = self.username
params['nickname'] = DwaTestCase.generateNickname()
params['email'] = self.username + '@divine-warfare.com'
params['active'] = True
#create
message = self.user.create(params)['message']
#delete
userData = self.user.token({'password': params['password'], 'username': params['username']})
delParams = {}
delParams['user_id'] = userData['id']
delParams['user_token'] = userData['token']
self.user.delete(delParams)
self.assertEqual(message, 'User created')
def testDelete(self):
params = {}
params['password'] = self.credential['password']
params['username'] = self.username
params['nickname'] = DwaTestCase.generateNickname()
params['email'] = self.username + '@divine-warfare.com'
params['active'] = True
#create
self.user.create(params)
userData = self.user.token({'password': params['password'], 'username': params['username']})
delParams = {}
delParams['user_id'] = userData['id']
delParams['user_token'] = userData['token']
#delete
message = self.user.delete(delParams)['message']
self.assertEqual(message, 'User deleted')
def testList(self):
data = self.user.list({'limit': 20, 'page': 0})
self.assertEqual(data['message'], 'OK')
self.assertIsNotNone(data['data'])
self.assertIsNotNone(data['pages'])
def testToken(self):
data = self.user.token(self.credential)
self.assertEqual(data['message'], 'Token created')
self.assertEqual(len(data['token']), 32)
self.assertIsNotNone(data['id'])
self.assertRegexpMatches(data['token_expiration'], '(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})')
def testPassword(self):
data_token = self.user.token(self.credential)
data = self.user.password({'old_password': self.credential['password'], 'new_password': self.credential['password'], 'user_token': data_token['token'], 'user_id': data_token['id']})
self.assertEqual(data['message'], 'Password changed')
def testActive(self):
data_token = self.user.token(self.credential)
data = self.user.active({'user_id': data_token['id'], 'active': True, 'user_token': data_token['token']})
self.assertEqual(data['message'], 'User activated')
def testDeactive(self):
data_token = self.user.token(self.credential)
data = self.user.active({'user_id': data_token['id'], 'active': False, 'user_token': data_token['token']})
self.assertEqual(data['message'], 'User deactivated')
#Will fail cos our mailserver checks if maildir exists...
#@unittest.expectedFailure
def testRequestPasswordReset(self):
email = self.credential['username'] + '@example.com';
content_fill = 'abc' * 5333 #16k of shit
data = self.user.request_password_reset({'email': email, 'email_content': 'URL: example.com/password/reset/{reset_token}' + content_fill, 'email_subject': 'Password reset unittest', 'email_from': 'unittest@example.com'})
#self.assertEqual(data['message'], 'Email with reset token has been send')
self.assertEqual(data['message'], 'Email not found')
@unittest.expectedFailure
def testDoPasswordReset(self):
#we use USER token as password reset token, cos we dont have reset token (and we cant have it cos it is only in email) so this call will fail, and that is a good thing :)
data_token = self.user.token(self.credential)
data = self.user.request_password_reset({'reset_token': data_token['token'], 'new_password': 'newPassword'})
self.assertEqual(data['message'], 'Password changed')
|
brnomendes/grader-edx
|
Core/Ranking.py
|
Python
|
mit
| 564 | 0.003546 |
from Models.Submission import Submission
from Core.Database import Database
from Core.Scorer import Score
from sqlalchemy import func, desc
class Ranking():
@staticmethod
def get_all():
session = Database.session()
scores = session.query(Score).order_by(desc(Score.score)).all()
return [{"student_id": s.student_id,
"submissions": session.query(func.count(Submission.id))
|
.filter(Submission.student_id == s.student_id).scalar(),
"score": s.score}
for s in scores]
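# Usage sketch (assumes Database.session() has been configured elsewhere):
#
#     for row in Ranking.get_all():
#         print(row['student_id'], row['submissions'], row['score'])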
|
hbradleyiii/ww
|
ww/settings.py
|
Python
|
mit
| 2,079 | 0.005772 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# name: settings.py
# author: Harold Bradley III
# email: harold@bradleystudio.net
# created on: 01/23/2016
#
# description: The settings file for ww
#
from __future__ import absolute_import, print_function
import os
import time
TEMPLATE_PATH = os.path.dirname(os.path.realpath(__file__)) + '/../templates/'
## Change these settings to your hearts content ##
# Site Settings
SITE_ADMIN_EMAIL = 'email@mail.com'
SITE_ERROR_LOG = 'error.log'
SITE_ACCESS_LOG = 'access.log'
WWW_DIR = '/var/www/'
WWW_USR = 'www-data'
WWW_ADMIN = 'admin_usr'
GITIGNORE_TEMPLATE = TEMPLATE_PATH + 'gitignore.template'
HTA_5G_TEMPLATE = TEMPLATE_PATH + '5g-htaccess.template'
VHOST_PATH = '/etc/apache2/sites-available/'
VHOST_TEMPLATE = TEMPLATE_PATH + 'vhost.template'
VHOST_SSL_TEMPLATE = TEMPLATE_PATH + 'vhost-ssl.template'
MYSQL = {
'host' : 'localhost',
'user' : 'username',
'password' : 'password123',
}
# WordPress Settings
WP_LATEST = 'http://wordpress.org/latest.tar.gz'
WP_SETUP_URL = '/wp-admin/setup-config.php?step=2'
WP_INSTALL_URL = '/wp-admin/install.php?step=2'
WP_HTA_TEMPLATE = TEMPLATE_PATH + 'wordpress-htaccess.template'
WP_HTA_HARDENED_TEMPLATE = TEMPLATE_PATH + 'hardened-wordpress-htaccess.template'
WP_CONFIG_TEMPLATE = TEMPLATE_PATH + 'wp-config.php.template'
WP_ADMIN_USER = 'admin'
WP_ADMIN_EMAIL = 'admin@wp.com'
WP_ADMIN_PW = 'password123' # Please change this.
WP_SALT_URL = 'https://api.wordpress.org/secret-key/1.1/salt/'
# Apache commands
CMD_RESTART_APACHE = 'sudo service apache2 reload'
CMD_ENABLE_CONFIG = 'sudo a2ensite ' # run as: {command} domain
CMD_DISABLE_CONFIG = 'sudo a2dissite ' # run as: {command} domain
CMD_CHECK_IF_ENABLED = "apache2ctl -S | grep ' namevhost {0} '" # See if apache is serving domain ({})
# Try to import local settings. This is a temporary work-around for now.
try:
from .settings_local import *
except ImportError:
print("Can't find settings_local. Using default settings.")
|
sniemi/SamPy
|
sandbox/src1/TCSE3-3rd-examples/src/tools/fileaction.py
|
Python
|
bsd-2-clause
| 3,008 | 0.006981 |
#!/usr/bin/env python
"""
fileaction.py 'display' '*.ps' '*.jpg' '*.gif'
creates a GUI with a list of all PostScript, JPEG, and GIF files in
the directory tree with the current working directory as root.
Clicking on one of the filenames in the list launches the display
program, which displays the image file.
As another example,
fileaction.py 'xanim' '*.mpg' '*.mpeg'
gives an overview of all MPEG files in the directory tree and
the possibility to play selected files with the xanim application.
The general interface is
fileactionGUI.py command filetype1 filetype2 filetype3 ...
"""
from Tkinter import *
import Pmw, os, sys
class FileActionGUI:
def __init__(self, parent, file_patterns, command):
self.master = parent
self.top = Frame(parent)
self.top.pack(expand=True, fill='both')
self.file_patterns = file_patterns
self.files = self.find_files()
self.list1 = Pmw.ScrolledListBox(self.top,
            listbox_selectmode='single', # or 'multiple'
vscrollmode='dynamic', hscrollmode='dynamic',
listbox_width=min(max([len(f) for f in self.files]),40),
listbox_height=min(len(self.files),20),
label_text='files', labelpos='n',
items=self.files,
selectioncommand=self.select)
        self.list1.pack(side='top', padx=10, expand=True, fill='both')
self.command = StringVar(); self.command.set(command)
Pmw.EntryField(self.top,
labelpos='w', label_text='process file with',
entry_width=len(command)+5,
entry_textvariable=self.command).pack(side='top',pady=3)
Button(self.top, text='Quit', width=8, command=self.master.destroy).pack(pady=2)
def select(self):
file = self.list1.getcurselection()[0]
cmd = '%s %s &' % (self.command.get(), file)
os.system(cmd)
def find_files(self):
from scitools.misc import find
def check(filepath, arg):
ext = os.path.splitext(filepath)[1]
import fnmatch # Unix shell-style wildcard matching
for s in self.file_patterns:
if fnmatch.fnmatch(ext, s):
arg.append(filepath)
files = []
find(check, os.curdir, files)
return files
if __name__ == '__main__':
root = Tk()
Pmw.initialise(root)
import scitools.misc; scitools.misc.fontscheme3(root)
try:
command = sys.argv[1]
file_patterns = sys.argv[2:]
except:
print 'Usage: %s file-command filetype1 filetype2 ...' % sys.argv[0]
print "Example: fileactionGUI.py 'display' '*.ps' '*.eps' '*.jpg'"
print 'A GUI with a list of all files matching the specified'
print 'patterns is launched. By clicking on one of the filenames,'
print 'the specified command is run with that file as argument.'
sys.exit(1)
g = FileActionGUI(root, file_patterns, command)
root.mainloop()
|
gwpy/gwpy.github.io
|
docs/v0.5/examples/frequencyseries/variance-3.py
|
Python
|
gpl-3.0
| 260 | 0.003846 |
plot = variance.plot(norm='log', vmin=.5, cmap='plasma')
ax = plot.gca()
ax.grid()
ax.set_xlim(20, 1500)
ax.set_ylim(1e-24, 1e-20)
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel(r'[strain/\rtHz]')
ax.set_title('LIGO-Livingston sensitivity variance')
plot.show()
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractXiaoyuusTranslations.py
|
Python
|
bsd-3-clause
| 244 | 0.028689 |
def extractXiaoyuusTranslations(item):
"""
Xiaoyuu's Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
	return False
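# Illustrative calls (the item shape is inferred from the title lookup above;
# extractVolChapterFragmentPostfix is supplied elsewhere in this project):
#
#     extractXiaoyuusTranslations({'title': 'Chapter 12'})          # -> False
#     extractXiaoyuusTranslations({'title': 'Chapter 13 preview'})  # -> None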
|
nikste/visualizationDemo
|
zeppelin-web/node/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py
|
Python
|
apache-2.0
| 3,325 | 0.002105 |
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
|
fedelemantuano/thug
|
thug/ActiveX/modules/SymantecBackupExec.py
|
Python
|
gpl-2.0
| 2,143 | 0.0056 |
# Symantec BackupExec
# CVE-2007-6016,CVE-2007-6017
import logging
log = logging.getLogger("Thug")
def Set_DOWText0(self, val):
self.__dict__['_DOWText0'] = val
if len(val) > 255:
log.ThugLogging.log_exploit_event(self._window.url,
"Symantec BackupExec ActiveX",
"Overflow in property _DOWText0",
cve = 'CVE-2007-6016')
log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, "CVE-2007-6016", None)
log.DFT.check_shellcode(val)
def Set_DOWText6(self, val):
self.__dict__['_DOWText6'] = val
if len(val) > 255:
log.ThugLogging.log_exploit_event(self._window.url,
"Symantec BackupExec ActiveX",
"Overflow in property _DOWText6",
cve = 'CVE-2007-6016')
log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, "CVE-2007-6016", None)
log.DFT.check_shellcode(val)
def Set_MonthText0(self, val):
self.__dict__['_MonthText0'] = val
if len(val) > 255:
log.ThugLogging.log_exploit_event(self._window.url,
"Symantec BackupExec ActiveX",
"Overflow in property _MonthText6",
cve = 'CVE-2007-6016')
log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, "CVE-2007-6016", None)
log.DFT.check_shellcode(val)
def Set_MonthText11(self, val):
self.__dict__['_MonthText11'] = val
if len(val) > 255:
log.ThugLogging.log_exploit_event(self._window.url,
"Symantec BackupExec ActiveX",
"Overflow in property _MonthText11",
cve = 'CVE-2007-6016')
log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, "CVE-2007-6016", None)
log.DFT.check_shellcode(val)
def Save(self, a, b):
return
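# The four setters above repeat one pattern; a generic sketch of the same
# check (this helper is illustrative, not part of Thug's API):
def _check_overflow(self, prop, val, cve = 'CVE-2007-6016'):
    self.__dict__[prop] = val
    if len(val) > 255:
        log.ThugLogging.log_exploit_event(self._window.url,
                                          "Symantec BackupExec ActiveX",
                                          "Overflow in property %s" % prop,
                                          cve = cve)
        log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, cve, None)
        log.DFT.check_shellcode(val)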
|
emreg00/biana
|
biana/ext/networkx/readwrite/edgelist.py
|
Python
|
gpl-3.0
| 5,622 | 0.011028 |
"""
**********
Edge Lists
**********
Read and write NetworkX graphs as edge lists.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2008 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# Distributed under the terms of the GNU Lesser General Public License
# http://www.gnu.org/copyleft/lesser.html
__all__ = ['read_edgelist', 'write_edgelist']
import codecs
import locale
import string
import sys
import time
from networkx.utils import is_string_like,_get_fh
import networkx
def write_edgelist(G, path, comments="#", delimiter=' '):
"""Write graph as a list of edges.
Parameters
----------
G : graph
A networkx graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
comments : string, optional
The character used to indicate the start of a comment
delimiter : string, optional
The string uses to separate values. The default is whitespace.
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_edgelist(G, "test.edgelist")
>>> fh=open("test.edgelist",'w')
>>> nx.write_edgelist(G,fh)
>>> nx.write_edgelist(G, "test.edgelist.gz")
Notes
-----
The file will use the default text encoding on your system.
It is possible to write files in other encodings by opening
the file with the codecs module. See doc/examples/unicode.py
for hints.
>>> import codecs
>>> fh=codecs.open("test.edgelist",'w',encoding='utf=8') # utf-8 encoding
>>> nx.write_edgelist(G,fh)
See Also
--------
networkx.write_edgelist
"""
fh=_get_fh(path,mode='w')
pargs=comments+" "+string.join(sys.argv,' ')
fh.write("%s\n" % (pargs))
fh.write(comments+" GMT %s\n" % (time.asctime(time.gmtime())))
fh.write(comments+" %s\n" % (
|
G.name))
def make_str(t):
if is_string_like(t): return t
return str(t)
for e in G.edges(data=True):
fh.write(delimiter.join(map(make_str,e))+"\n")
#if G.multigraph:
# u,v,datalist=e
# for d in datalist:
# fh.write(delimiter.join(map(make_str,(u,v,d)))+"\n")
#else:
def read_edgelist(path, comment
|
s="#", delimiter=' ',
create_using=None, nodetype=None, edgetype=None):
"""Read a graph from a list of edges.
Parameters
----------
path : file or string
       File or filename to read.
Filenames ending in .gz or .bz2 will be uncompressed.
comments : string, optional
The character used to indicate the start of a comment
delimiter : string, optional
       The string used to separate values. The default is whitespace.
create_using : Graph container, optional
Use specified Graph container to build graph. The default is
nx.Graph().
nodetype : int, float, str, Python type, optional
Convert node data from strings to specified type
edgetype : int, float, str, Python type, optional
Convert edge data from strings to specified type
Returns
    -------
out : graph
A networkx Graph or other type specified with create_using
Examples
--------
>>> nx.write_edgelist(nx.path_graph(4), "test.edgelist")
>>> G=nx.read_edgelist("test.edgelist")
>>> fh=open("test.edgelist")
>>> G=nx.read_edgelist(fh)
>>> G=nx.read_edgelist("test.edgelist", nodetype=int)
>>> G=nx.read_edgelist("test.edgelist",create_using=nx.DiGraph())
Notes
-----
Since nodes must be hashable, the function nodetype must return hashable
types (e.g. int, float, str, frozenset - or tuples of those, etc.)
Example edgelist file formats
Without edge data::
# source target
a b
a c
d e
    With edge data::
# source target data
a b 1
a c 3.14159
d e apple
"""
if create_using is None:
G=networkx.Graph()
else:
try:
G=create_using
G.clear()
        except Exception:
raise TypeError("Input graph is not a networkx graph type")
fh=_get_fh(path)
    for line in fh.readlines():
        # drop everything from the comment character onward; when no comment
        # is present, find() returns -1, so only the final character (the
        # trailing newline kept by readlines()) is cut before strip()
        line = line[:line.find(comments)].strip()
        if not len(line): continue
# if line.startswith("#") or line.startswith("\n"):
# continue
# line=line.strip() #remove trailing \n
        # split line; should have two or three items
s=line.split(delimiter)
if len(s)==2:
if nodetype is not None:
try:
(u,v)=map(nodetype,s)
            except Exception:
raise TypeError("Failed to convert edge %s to type %s"\
%(s,nodetype))
else:
(u,v)=s
G.add_edge(u,v)
elif len(s)==3:
(u,v,d)=s
if nodetype is not None:
try:
(u,v)=map(nodetype,(u,v))
            except Exception:
raise TypeError("Failed to convert edge (%s, %s) to type %s"\
%(u,v,nodetype))
if d is not None and edgetype is not None:
try:
d=edgetype(d)
                except Exception:
raise TypeError("Failed to convert edge data (%s) to type %s"\
%(d, edgetype))
G.add_edge(u,v,d) # XGraph or XDiGraph
else:
raise TypeError("Failed to read line: %s"%line)
return G
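# Editor's note: a hypothetical round-trip sketch (not part of the original
# module), kept as doctest-style comments; the filename is an assumption.
#
#     >>> G = networkx.path_graph(3)
#     >>> write_edgelist(G, "demo.edgelist")
#     >>> H = read_edgelist("demo.edgelist", nodetype=int)
#     >>> sorted(H.edges()) == sorted(G.edges())
#     True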
|
WmHHooper/aima-python
|
submissions/Flanagin/myKMeans.py
|
Python
|
mit
| 54,288 | 0.000037 |
school_scores = [
[2.2, 1032, 1253, 188, 0],
[1.9, 671, 1622, 418, 12],
[2.1, 3854, 7193, 1184, 16],
[2.2, 457, 437, 57, 0],
[1.8, 25546, 84659, 18839, 240],
[2.2, 2736, 4312, 732, 12],
[2.1, 2646, 17108, 4338, 105],
[1.8, 956, 2718, 731, 19],
[1.8, 316, 1493, 643, 13],
[1.8, 15418, 41559, 9420, 111],
[1.8, 9098, 29098, 5573, 74],
[2, 1012, 3581, 970, 25],
[2, 817, 1167, 257, 2],
[2.1, 3127, 4056, 476, 4],
[2.2, 6003, 18736, 5912, 117],
[2.8, 557, 333, 35, 0],
[2.5, 832, 684, 76, 3],
[2.1, 1390, 1417, 152, 3],
[1.8, 805, 1081, 183, 2],
[2.2, 1617, 4945, 954, 25],
[2.1, 6112, 19826, 6013, 169],
[2, 4536, 28692, 8054, 265],
[2.2, 3032, 3181, 474, 12],
[2.7, 1769, 1908, 172, 6],
[2.3, 344, 232, 30, 0],
[2.6, 1106, 1245, 157, 7],
[2.6, 776, 1233, 238, 5],
[2.5, 457, 419, 56, 4],
[1.8, 1350, 3039, 538, 9],
[2, 1305, 6016, 1376, 20],
[2.1, 10417, 37455, 9008, 214],
[2.3, 607, 843, 127, 4],
[2.2, 19988, 64955, 19989, 1361],
[1.9, 10991, 20933, 4728, 128],
[2.8, 140, 67, 13, 0],
[2.4, 7640, 13102, 2814, 62],
[2.3, 789, 684, 85, 1],
[2, 3846, 7638, 1514, 34],
[2.1, 17969, 44568, 9793, 207],
[1.8, 491, 691, 93, 1],
[2, 721, 4240, 1000, 38],
[1.8, 4354, 10152, 2216, 33],
[2.7, 154, 94, 15, 0],
[2.3, 2027, 2349, 319, 3],
[2, 24009, 58514, 7193, 93],
[2.7, 545, 613, 82, 1],
[2.4, 727, 2551, 495, 12],
[1.2, 62, 503, 167, 0],
[2.1, 1055, 938, 121, 1],
[2.1, 6556, 14116, 2387, 46],
[2.5, 1362, 1047, 153, 6],
[2.5, 204, 179, 33, 2],
[2.2, 1050, 1137, 150, 2],
[1.9, 689, 1553, 459, 8],
[2.1, 3939, 7011, 1303, 26],
[2.3, 435, 355, 64, 1],
[1.8, 28126, 85907, 20619, 283],
[2.2, 2710, 4094, 746, 12],
[2.1, 2971, 17185, 4432, 94],
[1.8, 1102, 2812, 758, 21],
[1.9, 321, 1498, 622, 22],
[1.7, 16541, 40909, 9685, 136],
[1.8, 9458, 27921, 5259, 71],
[2, 1127, 3412, 1037, 22],
[2, 865, 971, 210, 4],
[2.1, 3263, 3787, 415, 7],
[2.2, 6259, 18383, 5835, 139],
[2.9, 509, 313, 25, 1],
[2.6, 776, 649, 83, 1],
[2.1, 1399, 1175, 157, 0],
[1.9, 670, 742, 105, 2],
[2.2, 1771, 4843, 929, 17],
[2.1, 6622, 19945, 5965, 178],
[2, 5119, 29138, 8377, 302],
[2.2, 2989, 2870, 466, 9],
[2.7, 1798, 1674, 178, 4],
[2.3, 342, 252, 30, 0],
[2.6, 1066, 1155, 159, 4],
[2.5, 736, 1055, 219, 5],
[2.4, 448, 332, 43, 1],
[1.7, 1383, 2941, 548, 6],
[2, 1399, 5945, 1488, 21],
[2.1, 11608, 37683, 9117, 209],
[2.3, 621, 808, 140, 4],
[2.3, 22060, 63727, 19016, 1283],
[1.9, 11706, 19807, 4506, 113],
[2.7, 114, 47, 5, 1],
[2.4, 7653, 12582, 2778, 69],
[2.4, 743, 632, 88, 2],
[2.1, 3893, 6910, 1400, 28],
[2, 18867, 42918, 9022, 178],
[1.7, 537, 697, 90, 0],
[2, 804, 4118, 970, 40],
[1.8, 4528, 10189, 1993, 37],
[2.8, 119, 45, 8, 1],
[2.3, 1895, 2097, 276, 5],
[2, 24613, 55066, 6799, 90],
[2.7, 557, 580, 79, 2],
[2.3, 755, 2468, 514, 9],
[1.4, 80, 457, 142, 1],
[2.2, 1076, 960, 163, 4],
[2.1, 6695, 13557, 2476, 44],
[2.6, 1273, 949, 140, 6],
[2.7, 180, 149, 28, 1],
[2.2, 1031, 1126, 160, 4],
[1.9, 715, 1480, 381, 9],
[2.1, 4040, 7413, 1390, 24],
[2.3, 397, 359, 48, 0],
[1.8, 28902, 88322, 21971, 311],
[2.2, 2610, 3810, 681, 20],
[2.1, 3112, 17860, 4817, 122],
[1.8, 1059, 2758, 761, 12],
[1.8, 366, 1487, 710, 31],
[1.8, 17020, 42754, 10229, 151],
[1.8, 9748, 28641, 5690, 67],
[2, 1079, 3558, 1037, 29],
[2, 807, 1059, 212, 3],
[2.1, 2866, 3334, 389, 7],
[2.2, 6484, 18913, 6135, 157],
[2.8, 435, 248, 31, 0],
[2.6, 738, 588, 82, 1],
[2.2, 1274, 964, 137, 3],
[1.9, 774, 805, 124, 2],
[2.1, 1520, 4406, 1182, 84],
[2.1, 6827, 20409, 6259, 200],
[1.9, 5556, 30001, 8656, 329],
[2.2, 2804, 2688, 440, 8],
[2.7, 1551, 1481, 167, 3],
[2.5, 301, 202, 21, 1],
[2.6, 1048, 928, 98, 2],
[2.5, 738, 1000, 212, 4],
[2.6, 431, 287, 37, 2],
[1.8, 1477, 3179, 597, 10],
[2, 1552, 6078, 1469, 23],
[2.1, 12329, 38967, 9520, 280],
[2.3, 581, 778, 109, 2],
[2.3, 23170, 66092, 20647, 1509],
[1.9, 12478, 20458, 4789, 146],
[2.8, 118, 45, 13, 0],
[2.4, 7683, 12192, 2632, 63],
[2.4, 690, 536, 67, 2],
[2.1, 3942, 7256, 1360, 25],
[2, 19746, 43385, 9133, 205],
[1.7, 487, 715, 101, 3],
[2, 814, 4215, 1052, 34],
[1.8, 4795, 10168, 2123, 43],
[2.8, 125, 67, 7, 0],
[2.3, 1847, 1926, 244, 4],
[2, 24679, 56823, 7163, 108],
[2.7, 486, 546, 77, 2],
[2.3, 788, 2530, 423, 14],
[1.4, 81, 445, 120, 0],
[2.1, 9153, 24979, 8388, 260],
[2.1, 7040, 13758, 2678, 50],
[2.2, 1103, 934, 154, 2],
[2.6, 1181, 927, 138, 3],
[2.6, 156, 106, 15, 0],
[2.3, 933, 1039, 145, 7],
[1.9, 696, 1453, 365, 13],
[2.2, 4265, 7666, 1495, 27],
[2.4, 413, 370, 32, 2],
[1.9, 26435, 89292, 21939, 306],
[2.3, 2540, 3443, 579, 13],
[2.1, 2869, 18052, 4484, 139],
[1.7, 976, 2784, 728, 25],
[2, 403, 1603, 836, 30],
[1.8, 16387, 42479, 9671, 163],
[1.9, 9449, 28902, 5687, 72],
[2.1, 1120, 3590, 976, 20],
[2, 800, 1053, 188, 3],
[2.1, 2515, 3064, 439, 24],
[2.3, 6523, 19485, 6428, 173],
[2.8, 414, 252, 31, 2],
[2.7, 725, 585, 54, 1],
[2.2, 1123, 962, 109, 2],
[2.1, 695, 753, 109, 1],
[2.2, 1718, 5430, 1576, 144],
[2.2, 6174, 20443, 6320, 182],
[2, 4925, 29244, 8289, 329],
[2.3, 2197, 1680, 175, 4],
[2.8, 1489, 1338, 164, 4],
[2.4, 295, 184, 30, 1],
[2.6, 936, 826, 97, 7],
[2.6, 675, 875, 179, 6],
[2.6, 335, 247, 30, 1],
[1.8, 1559, 3452, 645, 12],
[2.1, 1506, 5963, 1371, 26],
[2.1, 11183, 38429, 9154, 226],
[2.4, 616, 708, 114, 2],
[2.3, 20267, 67272, 21456, 1639],
[1.9, 11475, 21013, 4575, 104],
[2.7, 109, 41, 3, 0],
[2.4, 7312, 11726, 2338, 70],
[2.4, 627, 485, 57, 3],
[2.1, 3983, 6897, 1312, 31],
[2.1, 18952, 43429, 9174, 247],
[1.9, 493, 707, 73, 3],
[2.1, 794, 4372, 1016, 33],
[1.8, 4323, 9361, 1948, 42],
[2.8, 92, 58, 5, 1],
[2.4, 1575, 1844, 249, 8],
[2.1, 24387, 58981, 7507, 94],
[2.7, 493, 512, 86, 7],
[2.4, 824, 2522, 445, 7],
[1.4, 90, 498, 127, 0],
[2.1, 9083, 24925, 7804, 213],
[2.2, 7071, 13986, 2782, 62],
[2.2, 1007, 971, 146, 4],
[2.5, 1129, 790, 125, 2],
[2.8, 109, 73, 11, 0],
[2.3, 1022, 1069, 126, 2],
[2.3, 1022, 1069, 126, 2],
[2.3, 4704, 8114, 1583, 31],
[2.4, 509, 348, 57,
|
0],
[2, 32111, 94493, 22337, 337],
[2.3, 2596, 3347, 507, 7],
[2.3, 3342, 18794, 4543, 145],
[1.8, 1131, 3033, 829, 23],
[2.1, 404, 1702, 870, 24],
[1.9, 17485, 45804, 10011, 179],
[1.9, 10676, 30509, 6237, 77],
[2.2, 1260, 3696, 1009, 27],
[2.1, 887, 1012, 189,
|
8],
[2.2, 2439, 2629, 344, 17],
[2.4, 7147, 19999, 6524, 168],
[3, 423, 201, 37, 0],
[2.8, 699, 495, 75, 2],
[2.2, 1057, 797, 101, 1],
[2.1, 713, 775, 127, 4],
[2.3, 1833, 6753, 2452, 215],
[2.3, 6848, 21354, 6662, 209],
[2.1, 5585, 30307, 8499, 357],
[2.3, 2054, 1393, 185, 5],
[2.9, 1510, 1188, 184, 4],
[2.4, 352, 183, 23, 0],
[2.7, 955, 848, 92, 8],
[2.6, 629, 865, 156, 6],
[2.6, 343, 205, 28, 2],
[1.9, 1759, 3762, 714, 18],
[2.2, 1570, 6077, 1348, 29],
[2.3, 12434, 39524, 8915, 223],
[2.3, 585, 734, 118, 4],
[2.4, 23527, 69233, 22802, 1884],
[2, 13285, 22176, 5035, 104],
[3, 87, 35, 11, 1],
[2.5, 7445, 11108, 2138, 68],
[2.5, 685, 431, 62, 0],
[2.2, 4022, 6802, 1309, 21],
[2.1, 20683, 44131, 9398, 279],
[2.3, 883, 4412, 1014, 29],
[1.8, 5390, 10882, 2104, 55],
[2.8, 107, 63, 9, 0],
[2.4, 1689, 1847, 227, 2],
[2.1, 26279, 64120, 7978, 112],
[2.8, 554, 566, 82, 4],
[2.5, 873, 2442, 462, 6],
[2.2, 10153, 25792, 7837, 194],
[2.3, 7381, 14437, 3060, 81],
[2.2, 1016, 943, 152, 6],
[2.6, 1124, 712, 104, 2
|
buxx/synergine
|
tests/src/event/LonelinessSuicideAction.py
|
Python
|
apache-2.0
| 402 | 0.002488 |
from synergine.synergy.event.Action import Action
from tests.src.event.LonelinessSuicideEvent import LonelinessSuicideEvent
from tests.src.event.TooMuchBeansAction import TooMuchBeansAction
class LonelinessSuicideAction(Action):
_listen = LonelinessSuicideEvent
_depend =
|
[TooMuchBeansAction]
def run(self,
|
obj, context, synergy_manager):
obj.get_collection().remove_object(obj)
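# Editor's note: a minimal, hypothetical companion action (not part of the
# original tests), patterned on the class above. The class name is an
# assumption, and _depend is read here as "run after the listed actions".
#
# class LonelinessCensusAction(Action):
#     _listen = LonelinessSuicideEvent
#     _depend = [LonelinessSuicideAction]
#
#     def run(self, obj, context, synergy_manager):
#         # observe the object; its removal is handled by the depended-on
#         # LonelinessSuicideAction
#         pass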
|