| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
version = (2, 0, 0)
VERSION = "%d.%d.%d" % version
| KlausPopp/Moddy | src/moddy/version.py | Python | lgpl-3.0 | 51 | 0 |
#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'
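# A minimal usage sketch (illustrative, not part of the original dn.py): each
# setter appends to an internal list, so assigning the same attribute twice
# accumulates values instead of replacing them. All names and values below are
# hypothetical examples.
if __name__ == '__main__':
    entry = DN('cn=John Doe,ou=people,dc=example,dc=com')
    entry.cn = 'John Doe'              # appended to the internal list
    entry.givenName = 'John'
    entry.sn = 'Doe'
    entry.displayName = 'John Doe'
    entry.mail = 'john@example.com'
    entry.mail = 'jdoe@example.com'    # second value accumulates
    print(entry)                       # DN<dn=..., cn=['John Doe'], ...>
    print(entry.csv())                 # one CSV row; multi-valued cells joined by ' ::: '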
| faskiri/barry2gugl | dn.py | Python | apache-2.0 | 3,915 | 0.02069 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('carts', '0006_auto_20150930_1739'),
('carts', '0005_auto_20151022_2158'),
]
operations = [
]
| abhijitbangera/ecommerce | src/carts/migrations/0007_merge.py | Python | mit | 293 | 0 |
"""
A small Sphinx extension that adds Domain objects (eg. Python Classes & Methods) to the TOC Tree.
It dynamically adds them to the already rendered ``app.env.tocs`` dict on the Sphinx environment.
Traditionally this only contains Section's,
we then nest our Domain references inside the already existing Sections.
"""
from docutils import nodes
from sphinx import addnodes
import sphinx.util.logging
LOGGER = sphinx.util.logging.getLogger(__name__)
def _build_toc_node(docname, anchor="anchor", text="test text", bullet=False):
"""
Create the node structure that Sphinx expects for TOC Tree entries.
The ``bullet`` argument wraps it in a ``nodes.bullet_list``,
which is how you nest TOC Tree entries.
"""
reference = nodes.reference(
"",
"",
internal=True,
refuri=docname,
anchorname="#" + anchor,
*[nodes.Text(text, text)]
)
para = addnodes.compact_paragraph("", "", reference)
ret_list = nodes.list_item("", para)
return nodes.bullet_list("", ret_list) if bullet else ret_list
def _traverse_parent(node, objtypes):
"""
Traverse up the node's parents until you hit the ``objtypes`` referenced.
Can either be a single type,
or a tuple of types.
"""
curr_node = node.parent
while curr_node is not None:
if isinstance(curr_node, objtypes):
return curr_node
curr_node = curr_node.parent
return None
def _find_toc_node(toc, ref_id, objtype):
"""
Find the actual TOC node for a ref_id.
Depends on the object type:
* Section - First section (refuri) or 2nd+ level section (anchorname)
* Desc - Just use the anchor name
"""
for check_node in toc.traverse(nodes.reference):
if objtype == nodes.section and (
check_node.attributes["refuri"] == ref_id
or check_node.attributes["anchorname"] == "#" + ref_id
):
return check_node
if (
objtype == addnodes.desc
and check_node.attributes["anchorname"] == "#" + ref_id
):
return check_node
return None
def _get_toc_reference(node, toc, docname):
"""
    Logic that maps a specific node to its part of the toctree.
    It takes a specific incoming ``node``,
    and returns the actual TOC Tree node for that reference.
"""
if isinstance(node, nodes.section) and isinstance(node.parent, nodes.document):
# Top Level Section header
ref_id = docname
toc_reference = _find_toc_node(toc, ref_id, nodes.section)
elif isinstance(node, nodes.section):
# Nested Section header
ref_id = node.attributes["ids"][0]
toc_reference = _find_toc_node(toc, ref_id, nodes.section)
else:
# Desc node
try:
ref_id = node.children[0].attributes["ids"][0]
toc_reference = _find_toc_node(toc, ref_id, addnodes.desc)
except (KeyError, IndexError):
LOGGER.warning("Invalid desc node", exc_info=True)
toc_reference = None
return toc_reference
def add_domain_to_toctree(app, doctree, docname):
"""
Add domain objects to the toctree dynamically.
This should be attached to the ``doctree-resolved`` event.
This works by:
* Finding each domain node (addnodes.desc)
    * Figuring out its parent that will be in the toctree
      (nodes.section, or a previously added addnodes.desc)
    * Finding that parent in the TOC Tree based on its ID
    * Taking that element in the TOC Tree,
      and finding its parent that is a TOC Listing (nodes.bullet_list)
    * Adding the new TOC element for our specific node as a child of that nodes.bullet_list
    * This checks the bullet_list's last child,
      and verifies that it is also a nodes.bullet_list,
      effectively nesting the new entry under that element
"""
toc = app.env.tocs[docname]
for desc_node in doctree.traverse(addnodes.desc):
try:
ref_id = desc_node.children[0].attributes["ids"][0]
except (KeyError, IndexError):
LOGGER.warning("Invalid desc node", exc_info=True)
continue
try:
# Python domain object
ref_text = desc_node[0].attributes["fullname"].split(".")[-1].split("(")[0]
except (KeyError, IndexError):
# TODO[eric]: Support other Domains and ways of accessing this data
# Use `astext` for other types of domain objects
ref_text = desc_node[0].astext().split(".")[-1].split("(")[0]
# This is the actual object that will exist in the TOC Tree
# Sections by default, and other Desc nodes that we've previously placed.
parent_node = _traverse_parent(
node=desc_node, objtypes=(addnodes.desc, nodes.section)
)
if parent_node:
toc_reference = _get_toc_reference(parent_node, toc, docname)
if toc_reference:
# Get the last child of our parent's bullet list, this is where "we" live.
toc_insertion_point = _traverse_parent(
toc_reference, nodes.bullet_list
)[-1]
                # Ensure we add another bullet list so that we nest inside the parent,
                # not next to it
if toc_insertion_point and isinstance(
toc_insertion_point[0], nodes.bullet_list
):
new_insert = toc_insertion_point[0]
to_add = _build_toc_node(docname, anchor=ref_id, text=ref_text)
new_insert.append(to_add)
else:
to_add = _build_toc_node(
docname, anchor=ref_id, text=ref_text, bullet=True
)
toc_insertion_point.append(to_add)
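# A minimal wiring sketch (hypothetical, not part of this module): the
# docstring of ``add_domain_to_toctree`` above notes it should be attached to
# the ``doctree-resolved`` event, which an extension's ``setup()`` hook can do.
def setup(app):
    app.connect("doctree-resolved", add_domain_to_toctree)
    return {"parallel_read_safe": True}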
| rtfd/sphinx-autoapi | autoapi/toctree.py | Python | mit | 5,871 | 0.001533 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
import os
from oslo.config import cfg
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
try:
import rbd
except ImportError:
rbd = None
__imagebackend_opts = [
cfg.StrOpt('libvirt_images_type',
default='default',
help='VM Images format. Acceptable values are: raw, qcow2, lvm,'
'rbd, default. If default is specified,'
' then use_cow_images flag is used instead of this one.'),
cfg.StrOpt('libvirt_images_volume_group',
help='LVM Volume Group that is used for VM images, when you'
' specify libvirt_images_type=lvm.'),
cfg.BoolOpt('libvirt_sparse_logical_volumes',
default=False,
help='Create sparse logical volumes (with virtualsize)'
' if this flag is set to True.'),
cfg.IntOpt('libvirt_lvm_snapshot_size',
default=1000,
help='The amount of storage (in megabytes) to allocate for LVM'
' snapshot copy-on-write blocks.'),
cfg.StrOpt('libvirt_images_rbd_pool',
default='rbd',
help='the RADOS pool in which rbd volumes are stored'),
cfg.StrOpt('libvirt_images_rbd_ceph_conf',
default='', # default determined by librados
help='path to the ceph configuration file to use'),
]
CONF = cfg.CONF
CONF.register_opts(__imagebackend_opts)
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
CONF.import_opt('preallocate_images', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
class Image(object):
__metaclass__ = abc.ABCMeta
def __init__(self, source_type, driver_format, is_block_dev=False):
"""Image initialization.
:source_type: block or file
:driver_format: raw or qcow2
:is_block_dev:
"""
self.source_type = source_type
self.driver_format = driver_format
self.is_block_dev = is_block_dev
self.preallocate = False
# NOTE(mikal): We need a lock directory which is shared along with
# instance files, to cover the scenario where multiple compute nodes
# are trying to create a base file at the same time
self.lock_path = os.path.join(CONF.instances_path, 'locks')
@abc.abstractmethod
def create_image(self, prepare_template, base, size, *args, **kwargs):
"""Create image from template.
Contains specific behavior for each image type.
:prepare_template: function, that creates template.
Should accept `target` argument.
:base: Template name
:size: Size of created image in bytes
"""
pass
def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
extra_specs, hypervisor_version):
"""Get `LibvirtConfigGuestDisk` filled for this image.
:disk_dev: Disk bus device name
:disk_bus: Disk bus type
:device_type: Device type for this image.
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
"""
info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
info.source_device = device_type
info.target_bus = disk_bus
info.target_dev = disk_dev
info.driver_cache = cache_mode
info.driver_format = self.driver_format
driver_name = libvirt_utils.pick_disk_driver_name(hypervisor_version,
self.is_block_dev)
info.driver_name = driver_name
info.source_path = self.path
tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec',
'disk_write_bytes_sec', 'disk_write_iops_sec',
'disk_total_bytes_sec', 'disk_total_iops_sec']
# Note(yaguang): Currently, the only tuning available is Block I/O
# throttling for qemu.
if self.source_type in ['file', 'block']:
for key, value in extra_specs.iteritems():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in tune_items:
setattr(info, scope[1], value)
return info
def check_image_exists(self):
return os.path.exists(self.path)
def cache(self, fetch_func, filename, size=None, *args, **kwargs):
"""Creates image from template.
        Ensures that the template and image do not already exist.
Ensures that base directory exists.
Synchronizes on template fetching.
:fetch_func: Function that creates the base image
Should accept `target` argument.
:filename: Name of the file in the image directory
:size: Size of created image in bytes (optional)
"""
@utils.synchronized(filename, external=True, lock_path=self.lock_path)
def call_if_not_exists(target, *args, **kwargs):
if not os.path.exists(target):
fetch_func(target=target, *args, **kwargs)
elif CONF.libvirt_images_type == "lvm" and \
'ephemeral_size' in kwargs:
fetch_func(target=target, *args, **kwargs)
base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if not os.path.exists(base_dir):
fileutils.ensure_tree(base_dir)
base = os.path.join(base_dir, filename)
if not self.check_image_exists() or not os.path.exists(base):
self.create_image(call_if_not_exists, base, size,
*args, **kwargs)
if (size and self.preallocate and self._can_fallocate() and
os.access(self.path, os.W_OK)):
utils.execute('fallocate', '-n', '-l', size, self.path)
def _can_fallocate(self):
"""Check once per class, whether fallocate(1) is available,
and that the instances directory supports fallocate(2).
"""
can_fallocate = getattr(self.__class__, 'can_fallocate', None)
if can_fallocate is None:
_out, err = utils.trycmd('fallocate', '-n', '-l', '1',
self.path + '.fallocate_test')
fileutils.delete_if_exists(self.path + '.fallocate_test')
can_fallocate = not err
self.__class__.can_fallocate = can_fallocate
if not can_fallocate:
LOG.error('Unable to preallocate_images=%s at path: %s' %
(CONF.preallocate_images, self.path))
return can_fallocate
@staticmethod
def verify_base_size(base, size, base_size=0):
"""Check that the base image is not larger than size.
Since images can't be generally shrunk, enforce this
constraint taking account of virtual image size.
"""
# Note(pbrady): The size and min_disk parameters of a glance
# image are checked against the instance size before the image
# is even downloaded from glance, but currently min_disk is
# adjustable and doesn't currently account for virtual disk size,
# so we need this extra check here.
# NOTE(cfb): Having a flavor that sets the root size to 0 and having
# nova effectively ignore that size and use the size of the
# image is considered a feature at this time, not a bug.
if size is None:
return
if size and not base_size:
base_size = disk.get_disk_size(base)
if size < base_size:
msg = _('%(base)s virtual size %(base_size)s '
'larger than flavor root disk size %(size)s')
LOG.error(msg % {'base': base,
'base_size': base_size,
'size': size})
raise exception.InstanceTypeDiskTooSmall()
def snapshot_create(self):
raise NotImplementedError()
def snapshot_extract(self, target, out_format):
raise NotImplementedError()
def snapshot_delete(self):
raise NotImplementedError()
class Raw(Image):
def __init__(self, instance=None, disk_name=None, path=None,
snapshot_name=None):
super(Raw, self).__init__("file", "raw", is_block_dev=False)
self.path = (path or
os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
self.snapshot_name = snapshot_name
self.preallocate = CONF.preallocate_images != 'none'
self.correct_format()
def correct_format(self):
if os.path.exists(self.path):
data = images.qemu_img_info(self.path)
self.driver_format = data.file_format or 'raw'
def create_image(self, prepare_template, base, size, *args, **kwargs):
@utils.synchronized(base, external=True, lock_path=self.lock_path)
def copy_raw_image(base, target, size):
libvirt_utils.copy_image(base, target)
if size:
# class Raw is misnamed, format may not be 'raw' in all cases
use_cow = self.driver_format == 'qcow2'
disk.extend(target, size, use_cow=use_cow)
generating = 'image_id' not in kwargs
if generating:
#Generating image in place
prepare_template(target=self.path, *args, **kwargs)
else:
prepare_template(target=base, max_size=size, *args, **kwargs)
self.verify_base_size(base, size)
if not os.path.exists(self.path):
with fileutils.remove_path_on_error(self.path):
copy_raw_image(base, self.path, size)
self.correct_format()
def snapshot_create(self):
pass
def snapshot_extract(self, target, out_format):
images.convert_image(self.path, target, out_format)
def snapshot_delete(self):
pass
class Qcow2(Image):
def __init__(self, instance=None, disk_name=None, path=None,
snapshot_name=None):
super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)
self.path = (path or
os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
self.snapshot_name = snapshot_name
self.preallocate = CONF.preallocate_images != 'none'
def create_image(self, prepare_template, base, size, *args, **kwargs):
@utils.synchronized(base, external=True, lock_path=self.lock_path)
def copy_qcow2_image(base, target, size):
# TODO(pbrady): Consider copying the cow image here
# with preallocation=metadata set for performance reasons.
# This would be keyed on a 'preallocate_images' setting.
libvirt_utils.create_cow_image(base, target)
if size:
disk.extend(target, size, use_cow=True)
# Download the unmodified base image unless we already have a copy.
if not os.path.exists(base):
prepare_template(target=base, max_size=size, *args, **kwargs)
else:
self.verify_base_size(base, size)
legacy_backing_size = None
legacy_base = base
# Determine whether an existing qcow2 disk uses a legacy backing by
# actually looking at the image itself and parsing the output of the
# backing file it expects to be using.
if os.path.exists(self.path):
backing_path = libvirt_utils.get_disk_backing_file(self.path)
if backing_path is not None:
backing_file = os.path.basename(backing_path)
backing_parts = backing_file.rpartition('_')
if backing_file != backing_parts[-1] and \
backing_parts[-1].isdigit():
legacy_backing_size = int(backing_parts[-1])
legacy_base += '_%d' % legacy_backing_size
legacy_backing_size *= 1024 * 1024 * 1024
# Create the legacy backing file if necessary.
if legacy_backing_size:
if not os.path.exists(legacy_base):
with fileutils.remove_path_on_error(legacy_base):
libvirt_utils.copy_image(base, legacy_base)
disk.extend(legacy_base, legacy_backing_size, use_cow=True)
if not os.path.exists(self.path):
with fileutils.remove_path_on_error(self.path):
copy_qcow2_image(base, self.path, size)
def snapshot_create(self):
libvirt_utils.create_snapshot(self.path, self.snapshot_name)
def snapshot_extract(self, target, out_format):
libvirt_utils.extract_snapshot(self.path, 'qcow2',
self.snapshot_name, target,
out_format)
def snapshot_delete(self):
libvirt_utils.delete_snapshot(self.path, self.snapshot_name)
class Lvm(Image):
@staticmethod
def escape(filename):
return filename.replace('_', '__')
def __init__(self, instance=None, disk_name=None, path=None,
snapshot_name=None):
super(Lvm, self).__init__("block", "raw", is_block_dev=True)
if path:
info = libvirt_utils.logical_volume_info(path)
self.vg = info['VG']
self.lv = info['LV']
self.path = path
else:
if not CONF.libvirt_images_volume_group:
raise RuntimeError(_('You should specify'
' libvirt_images_volume_group'
' flag to use LVM images.'))
self.vg = CONF.libvirt_images_volume_group
self.lv = '%s_%s' % (self.escape(instance['name']),
self.escape(disk_name))
self.path = os.path.join('/dev', self.vg, self.lv)
# TODO(pbrady): possibly deprecate libvirt_sparse_logical_volumes
# for the more general preallocate_images
self.sparse = CONF.libvirt_sparse_logical_volumes
self.preallocate = not self.sparse
if snapshot_name:
self.snapshot_name = snapshot_name
self.snapshot_path = os.path.join('/dev', self.vg,
self.snapshot_name)
def _can_fallocate(self):
return False
def create_image(self, prepare_template, base, size, *args, **kwargs):
@utils.synchronized(base, external=True, lock_path=self.lock_path)
def create_lvm_image(base, size):
base_size = disk.get_disk_size(base)
self.verify_base_size(base, size, base_size=base_size)
resize = size > base_size
size = size if resize else base_size
libvirt_utils.create_lvm_image(self.vg, self.lv,
size, sparse=self.sparse)
images.convert_image(base, self.path, 'raw', run_as_root=True)
if resize:
disk.resize2fs(self.path, run_as_root=True)
generated = 'ephemeral_size' in kwargs
#Generate images with specified size right on volume
if generated and size:
libvirt_utils.create_lvm_image(self.vg, self.lv,
size, sparse=self.sparse)
with self.remove_volume_on_error(self.path):
prepare_template(target=self.path, *args, **kwargs)
else:
prepare_template(target=base, max_size=size, *args, **kwargs)
with self.remove_volume_on_error(self.path):
create_lvm_image(base, size)
@contextlib.contextmanager
def remove_volume_on_error(self, path):
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
libvirt_utils.remove_logical_volumes(path)
def snapshot_create(self):
size = CONF.libvirt_lvm_snapshot_size
cmd = ('lvcreate', '-L', size, '-s', '--name', self.snapshot_name,
self.path)
libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
def snapshot_extract(self, target, out_format):
images.convert_image(self.snapshot_path, target, out_format,
run_as_root=True)
def snapshot_delete(self):
# NOTE (rmk): Snapshot volumes are automatically zeroed by LVM
cmd = ('lvremove', '-f', self.snapshot_path)
libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
class Rbd(Image):
def __init__(self, instance=None, disk_name=None, path=None,
snapshot_name=None, **kwargs):
super(Rbd, self).__init__("block", "rbd", is_block_dev=True)
if path:
try:
self.rbd_name = path.split('/')[1]
except IndexError:
raise exception.InvalidDevicePath(path=path)
else:
self.rbd_name = '%s_%s' % (instance['name'], disk_name)
self.snapshot_name = snapshot_name
if not CONF.libvirt_images_rbd_pool:
raise RuntimeError(_('You should specify'
' libvirt_images_rbd_pool'
' flag to use rbd images.'))
self.pool = CONF.libvirt_images_rbd_pool
self.ceph_conf = CONF.libvirt_images_rbd_ceph_conf
self.rbd = kwargs.get('rbd', rbd)
def _supports_layering(self):
return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')
def _ceph_args(self):
args = []
args.extend(['--id', CONF.rbd_user])
args.extend(['--conf', self.ceph_conf])
return args
def _get_mon_addrs(self):
args = ['ceph', 'mon', 'dump', '--format=json'] + self._ceph_args()
out, _ = utils.execute(*args)
lines = out.split('\n')
if lines[0].startswith('dumped monmap epoch'):
lines = lines[1:]
monmap = jsonutils.loads('\n'.join(lines))
addrs = [mon['addr'] for mon in monmap['mons']]
hosts = []
ports = []
for addr in addrs:
host_port = addr[:addr.rindex('/')]
host, port = host_port.rsplit(':', 1)
hosts.append(host.strip('[]'))
ports.append(port)
return hosts, ports
def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
extra_specs):
"""Get `LibvirtConfigGuestDisk` filled for this image.
:disk_dev: Disk bus device name
:disk_bus: Disk bus type
:device_type: Device type for this image.
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
"""
info = vconfig.LibvirtConfigGuestDisk()
hosts, ports = self._get_mon_addrs()
info.device_type = device_type
info.driver_format = 'raw'
info.driver_cache = cache_mode
info.target_bus = disk_bus
info.target_dev = disk_dev
info.source_type = 'network'
info.source_protocol = 'rbd'
info.source_name = '%s/%s' % (self.pool, self.rbd_name)
info.source_hosts = hosts
info.source_ports = ports
auth_enabled = (CONF.rbd_user is not None)
if CONF.rbd_secret_uuid:
info.auth_secret_uuid = CONF.rbd_secret_uuid
auth_enabled = True # Force authentication locally
if CONF.rbd_user:
info.auth_username = CONF.rbd_user
if auth_enabled:
info.auth_secret_type = 'ceph'
info.auth_secret_uuid = CONF.rbd_secret_uuid
return info
def _can_fallocate(self):
return False
def check_image_exists(self):
rbd_volumes = libvirt_utils.list_rbd_volumes(self.pool)
for vol in rbd_volumes:
if vol.startswith(self.rbd_name):
return True
return False
def create_image(self, prepare_template, base, size, *args, **kwargs):
if self.rbd is None:
raise RuntimeError(_('rbd python libraries not found'))
old_format = True
features = 0
if self._supports_layering():
old_format = False
features = self.rbd.RBD_FEATURE_LAYERING
if not os.path.exists(base):
prepare_template(target=base, max_size=size, *args, **kwargs)
else:
self.verify_base_size(base, size)
# keep using the command line import instead of librbd since it
# detects zeroes to preserve sparseness in the image
args = ['--pool', self.pool, base, self.rbd_name]
if self._supports_layering():
args += ['--new-format']
args += self._ceph_args()
libvirt_utils.import_rbd_image(*args)
def snapshot_create(self):
pass
def snapshot_extract(self, target, out_format):
snap = 'rbd:%s/%s' % (self.pool, self.rbd_name)
images.convert_image(snap, target, out_format)
def snapshot_delete(self):
pass
class Backend(object):
def __init__(self, use_cow):
self.BACKEND = {
'raw': Raw,
'qcow2': Qcow2,
'lvm': Lvm,
'rbd': Rbd,
'default': Qcow2 if use_cow else Raw
}
def backend(self, image_type=None):
if not image_type:
image_type = CONF.libvirt_images_type
image = self.BACKEND.get(image_type)
if not image:
raise RuntimeError(_('Unknown image_type=%s') % image_type)
return image
def image(self, instance, disk_name, image_type=None):
"""Constructs image for selected backend
:instance: Instance name.
:name: Image name.
:image_type: Image type.
Optional, is CONF.libvirt_images_type by default.
"""
backend = self.backend(image_type)
return backend(instance=instance, disk_name=disk_name)
def snapshot(self, disk_path, snapshot_name, image_type=None):
"""Returns snapshot for given image
:path: path to image
:snapshot_name: snapshot name
:image_type: type of image
"""
backend = self.backend(image_type)
return backend(path=disk_path, snapshot_name=snapshot_name)
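# Usage sketch (illustrative, not part of the original file): the libvirt
# driver selects an image backend class by type and builds one object per
# guest disk, then populates it via cache()/create_image().
if __name__ == '__main__':
    backend = Backend(use_cow=True)
    print(backend.backend('qcow2'))     # -> Qcow2; 'default' also maps to Qcow2 here
    # A per-disk object would then be created roughly as (names hypothetical):
    #   disk_image = backend.image(instance, 'disk')
    #   disk_image.cache(fetch_func=fetch_image, filename=base_name, size=size_bytes)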
| TieWei/nova | nova/virt/libvirt/imagebackend.py | Python | apache-2.0 | 23,315 | 0.000815 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_huff_darklighter.iff"
result.attribute_template_id = 9
result.stfName("theme_park_name","base_npc_theme_park")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| anhstudios/swganh | data/scripts/templates/object/mobile/shared_huff_darklighter.py | Python | mit | 452 | 0.04646 |
floor = 0
with open('../input.txt', 'r') as fp:
while True:
buffer = fp.read(1024)
if buffer is None or len(buffer) <= 0:
break
for c in buffer:
if c == '(':
floor += 1
elif c == ')':
floor -= 1
print floor
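# Equivalent one-pass formulation (a sketch, assuming the whole input file
# fits in memory): the final floor is simply count('(') minus count(')').
#   with open('../input.txt', 'r') as fp:
#       data = fp.read()
#   print data.count('(') - data.count(')')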
| tosmun/AdventOfCode | solutions/day1/p1/main.py | Python | apache-2.0 | 233 | 0.042918 |
# repo.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.exc import InvalidGitRepositoryError, NoSuchPathError
from git.cmd import Git
from git.util import Actor
from git.refs import *
from git.index import IndexFile
from git.objects import *
from git.config import GitConfigParser
from git.remote import (
Remote,
digest_process_messages,
finalize_process,
add_progress
)
from git.db import (
GitCmdObjectDB,
GitDB
)
from gitdb.util import (
join,
isfile,
hex_to_bin
)
from fun import (
rev_parse,
is_git_dir,
find_git_dir,
touch
)
import os
import sys
import re
DefaultDBType = GitDB
if sys.version_info[1] < 5: # python 2.4 compatibility
DefaultDBType = GitCmdObjectDB
# END handle python 2.4
__all__ = ('Repo', )
class Repo(object):
"""Represents a git repository and allows you to query references,
gather commit information, generate diffs, create and clone repositories query
the log.
The following attributes are worth using:
'working_dir' is the working directory of the git command, wich is the working tree
directory if available or the .git directory in case of bare repositories
'working_tree_dir' is the working tree directory, but will raise AssertionError
if we are a bare repository.
'git_dir' is the .git repository directoy, which is always set."""
DAEMON_EXPORT_FILE = 'git-daemon-export-ok'
__slots__ = ( "working_dir", "_working_tree_dir", "git_dir", "_bare", "git", "odb" )
# precompiled regex
re_whitespace = re.compile(r'\s+')
re_hexsha_only = re.compile('^[0-9A-Fa-f]{40}$')
re_hexsha_shortened = re.compile('^[0-9A-Fa-f]{4,40}$')
re_author_committer_start = re.compile(r'^(author|committer)')
re_tab_full_line = re.compile(r'^\t(.*)$')
# invariants
# represents the configuration level of a configuration file
config_level = ("system", "global", "repository")
def __init__(self, path=None, odbt = DefaultDBType):
"""Create a new Repo instance
:param path: is the path to either the root git directory or the bare git repo::
repo = Repo("/Users/mtrier/Development/git-python")
repo = Repo("/Users/mtrier/Development/git-python.git")
repo = Repo("~/Development/git-python.git")
repo = Repo("$REPOSITORIES/Development/git-python.git")
:param odbt: Object DataBase type - a type which is constructed by providing
the directory containing the database objects, i.e. .git/objects. It will
be used to access all object data
:raise InvalidGitRepositoryError:
:raise NoSuchPathError:
:return: git.Repo """
epath = os.path.abspath(os.path.expandvars(os.path.expanduser(path or os.getcwd())))
if not os.path.exists(epath):
raise NoSuchPathError(epath)
self.working_dir = None
self._working_tree_dir = None
self.git_dir = None
curpath = epath
# walk up the path to find the .git dir
while curpath:
if is_git_dir(curpath):
self.git_dir = curpath
self._working_tree_dir = os.path.dirname(curpath)
break
gitpath = find_git_dir(join(curpath, '.git'))
if gitpath is not None:
self.git_dir = gitpath
self._working_tree_dir = curpath
break
curpath, dummy = os.path.split(curpath)
if not dummy:
break
# END while curpath
if self.git_dir is None:
raise InvalidGitRepositoryError(epath)
self._bare = False
try:
self._bare = self.config_reader("repository").getboolean('core','bare')
except Exception:
# lets not assume the option exists, although it should
pass
# adjust the wd in case we are actually bare - we didn't know that
# in the first place
if self._bare:
self._working_tree_dir = None
# END working dir handling
self.working_dir = self._working_tree_dir or self.git_dir
self.git = Git(self.working_dir)
# special handling, in special times
args = [join(self.git_dir, 'objects')]
if issubclass(odbt, GitCmdObjectDB):
args.append(self.git)
self.odb = odbt(*args)
def __eq__(self, rhs):
if isinstance(rhs, Repo):
return self.git_dir == rhs.git_dir
return False
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __hash__(self):
return hash(self.git_dir)
def __repr__(self):
return "%s(%r)" % (type(self).__name__, self.git_dir)
# Description property
def _get_description(self):
filename = join(self.git_dir, 'description')
return file(filename).read().rstrip()
def _set_description(self, descr):
filename = join(self.git_dir, 'description')
file(filename, 'w').write(descr+'\n')
description = property(_get_description, _set_description,
doc="the project's description")
del _get_description
del _set_description
@property
def working_tree_dir(self):
""":return: The working tree directory of our git repository
:raise AssertionError: If we are a bare repository"""
if self._working_tree_dir is None:
raise AssertionError( "Repository at %r is bare and does not have a working tree directory" % self.git_dir )
return self._working_tree_dir
@property
def bare(self):
""":return: True if the repository is bare"""
return self._bare
@property
def heads(self):
"""A list of ``Head`` objects representing the branch heads in
this repo
:return: ``git.IterableList(Head, ...)``"""
return Head.list_items(self)
@property
def references(self):
"""A list of Reference objects representing tags, heads and remote references.
:return: IterableList(Reference, ...)"""
return Reference.list_items(self)
# alias for references
refs = references
# alias for heads
branches = heads
@property
def index(self):
""":return: IndexFile representing this repository's index."""
return IndexFile(self)
@property
def head(self):
""":return: HEAD Object pointing to the current head reference"""
return HEAD(self,'HEAD')
@property
def remotes(self):
"""A list of Remote objects allowing to access and manipulate remotes
:return: ``git.IterableList(Remote, ...)``"""
return Remote.list_items(self)
def remote(self, name='origin'):
""":return: Remote with the specified name
:raise ValueError: if no remote with such a name exists"""
return Remote(self, name)
#{ Submodules
@property
def submodules(self):
"""
:return: git.IterableList(Submodule, ...) of direct submodules
available from the current head"""
return Submodule.list_items(self)
def submodule(self, name):
""" :return: Submodule with the given name
:raise ValueError: If no such submodule exists"""
try:
return self.submodules[name]
except IndexError:
raise ValueError("Didn't find submodule named %r" % name)
# END exception handling
def create_submodule(self, *args, **kwargs):
"""Create a new submodule
:note: See the documentation of Submodule.add for a description of the
applicable parameters
:return: created submodules"""
return Submodule.add(self, *args, **kwargs)
def iter_submodules(self, *args, **kwargs):
"""An iterator yielding Submodule instances, see Traversable interface
for a description of args and kwargs
:return: Iterator"""
return RootModule(self).traverse(*args, **kwargs)
def submodule_update(self, *args, **kwargs):
"""Update the submodules, keeping the repository consistent as it will
take the previous state into consideration. For more information, please
see the documentation of RootModule.update"""
return RootModule(self).update(*args, **kwargs)
#}END submodules
@property
def tags(self):
"""A list of ``Tag`` objects that are available in this repo
:return: ``git.IterableList(TagReference, ...)`` """
return TagReference.list_items(self)
def tag(self,path):
""":return: TagReference Object, reference pointing to a Commit or Tag
:param path: path to the tag reference, i.e. 0.1.5 or tags/0.1.5 """
return TagReference(self, path)
def create_head(self, path, commit='HEAD', force=False, logmsg=None ):
"""Create a new head within the repository.
For more documentation, please see the Head.create method.
:return: newly created Head Reference"""
return Head.create(self, path, commit, force, logmsg)
def delete_head(self, *heads, **kwargs):
"""Delete the given heads
:param kwargs: Additional keyword arguments to be passed to git-branch"""
return Head.delete(self, *heads, **kwargs)
def create_tag(self, path, ref='HEAD', message=None, force=False, **kwargs):
"""Create a new tag reference.
For more documentation, please see the TagReference.create method.
:return: TagReference object """
return TagReference.create(self, path, ref, message, force, **kwargs)
def delete_tag(self, *tags):
"""Delete the given tag references"""
return TagReference.delete(self, *tags)
def create_remote(self, name, url, **kwargs):
"""Create a new remote.
For more information, please see the documentation of the Remote.create
methods
:return: Remote reference"""
return Remote.create(self, name, url, **kwargs)
def delete_remote(self, remote):
"""Delete the given remote."""
return Remote.remove(self, remote)
def _get_config_path(self, config_level ):
        # we do not support an absolute path of the gitconfig on windows,
# use the global config instead
if sys.platform == "win32" and config_level == "system":
config_level = "global"
if config_level == "system":
return "/etc/gitconfig"
elif config_level == "global":
return os.path.normpath(os.path.expanduser("~/.gitconfig"))
elif config_level == "repository":
return join(self.git_dir, "config")
raise ValueError( "Invalid configuration level: %r" % config_level )
def config_reader(self, config_level=None):
"""
:return:
GitConfigParser allowing to read the full git configuration, but not to write it
The configuration will include values from the system, user and repository
configuration files.
:param config_level:
For possible values, see config_writer method
            If None, all applicable levels will be used. Specify a level in case
            you know which exact file you wish to read, to prevent reading multiple files
            for instance
        :note: On windows, system configuration cannot currently be read as the path is
            unknown; instead the global path will be used."""
files = None
if config_level is None:
files = [ self._get_config_path(f) for f in self.config_level ]
else:
files = [ self._get_config_path(config_level) ]
return GitConfigParser(files, read_only=True)
def config_writer(self, config_level="repository"):
"""
:return:
GitConfigParser allowing to write values of the specified configuration file level.
            Config writers should be retrieved, used to change the configuration, and written
            right away as they will lock the configuration file in question and prevent others
            from writing it.
        :param config_level:
            One of the following values
                system = system wide configuration file
                global = user level configuration file
                repository = configuration file for this repository only"""
return GitConfigParser(self._get_config_path(config_level), read_only = False)
def commit(self, rev=None):
"""The Commit object for the specified revision
:param rev: revision specifier, see git-rev-parse for viable options.
:return: ``git.Commit``"""
if rev is None:
return self.head.commit
else:
return self.rev_parse(str(rev)+"^0")
def iter_trees(self, *args, **kwargs):
""":return: Iterator yielding Tree objects
:note: Takes all arguments known to iter_commits method"""
return ( c.tree for c in self.iter_commits(*args, **kwargs) )
def tree(self, rev=None):
"""The Tree object for the given treeish revision
Examples::
repo.tree(repo.heads[0])
:param rev: is a revision pointing to a Treeish ( being a commit or tree )
:return: ``git.Tree``
:note:
If you need a non-root level tree, find it by iterating the root tree. Otherwise
it cannot know about its path relative to the repository root and subsequent
operations might have unexpected results."""
if rev is None:
return self.head.commit.tree
else:
return self.rev_parse(str(rev)+"^{tree}")
def iter_commits(self, rev=None, paths='', **kwargs):
"""A list of Commit objects representing the history of a given ref/commit
:parm rev:
revision specifier, see git-rev-parse for viable options.
If None, the active branch will be used.
:parm paths:
is an optional path or a list of paths to limit the returned commits to
Commits that do not contain that path or the paths will not be returned.
:parm kwargs:
Arguments to be passed to git-rev-list - common ones are
max_count and skip
:note: to receive only commits between two named revisions, use the
"revA..revB" revision specifier
:return ``git.Commit[]``"""
if rev is None:
rev = self.head.commit
return Commit.iter_items(self, rev, paths, **kwargs)
def _get_daemon_export(self):
filename = join(self.git_dir, self.DAEMON_EXPORT_FILE)
return os.path.exists(filename)
def _set_daemon_export(self, value):
filename = join(self.git_dir, self.DAEMON_EXPORT_FILE)
fileexists = os.path.exists(filename)
if value and not fileexists:
touch(filename)
elif not value and fileexists:
os.unlink(filename)
daemon_export = property(_get_daemon_export, _set_daemon_export,
doc="If True, git-daemon may export this repository")
del _get_daemon_export
del _set_daemon_export
def _get_alternates(self):
"""The list of alternates for this repo from which objects can be retrieved
:return: list of strings being pathnames of alternates"""
alternates_path = join(self.git_dir, 'objects', 'info', 'alternates')
if os.path.exists(alternates_path):
try:
f = open(alternates_path)
alts = f.read()
finally:
f.close()
return alts.strip().splitlines()
else:
return list()
def _set_alternates(self, alts):
"""Sets the alternates
:parm alts:
is the array of string paths representing the alternates at which
git should look for objects, i.e. /home/user/repo/.git/objects
:raise NoSuchPathError:
:note:
            The method does not check for the existence of the paths in alts
as the caller is responsible."""
alternates_path = join(self.git_dir, 'objects', 'info', 'alternates')
if not alts:
if isfile(alternates_path):
os.remove(alternates_path)
else:
try:
f = open(alternates_path, 'w')
f.write("\n".join(alts))
finally:
f.close()
# END file handling
# END alts handling
alternates = property(_get_alternates, _set_alternates, doc="Retrieve a list of alternates paths or set a list paths to be used as alternates")
def is_dirty(self, index=True, working_tree=True, untracked_files=False):
"""
:return:
            ``True`` if the repository is considered dirty. By default it will react
like a git-status without untracked files, hence it is dirty if the
index or the working copy have changes."""
if self._bare:
# Bare repositories with no associated working directory are
            # always considered to be clean.
return False
# start from the one which is fastest to evaluate
default_args = ('--abbrev=40', '--full-index', '--raw')
if index:
# diff index against HEAD
if isfile(self.index.path) and self.head.is_valid() and \
len(self.git.diff('HEAD', '--cached', *default_args)):
return True
# END index handling
if working_tree:
# diff index against working tree
if len(self.git.diff(*default_args)):
return True
# END working tree handling
if untracked_files:
if len(self.untracked_files):
return True
# END untracked files
return False
@property
def untracked_files(self):
"""
:return:
list(str,...)
Files currently untracked as they have not been staged yet. Paths
are relative to the current working directory of the git command.
:note:
ignored files will not appear here, i.e. files mentioned in .gitignore"""
        # make sure we get all files, not only untracked directories
proc = self.git.status(porcelain=True,
untracked_files=True,
as_process=True)
        # Untracked files prefix in porcelain mode
prefix = "?? "
untracked_files = list()
for line in proc.stdout:
if not line.startswith(prefix):
continue
filename = line[len(prefix):].rstrip('\n')
# Special characters are escaped
if filename[0] == filename[-1] == '"':
filename = filename[1:-1].decode('string_escape')
untracked_files.append(filename)
return untracked_files
@property
def active_branch(self):
"""The name of the currently active branch.
:return: Head to the active branch"""
return self.head.reference
def blame(self, rev, file):
"""The blame information for the given file at the given revision.
:parm rev: revision specifier, see git-rev-parse for viable options.
:return:
list: [git.Commit, list: [<line>]]
A list of tuples associating a Commit object with a list of lines that
changed within the given commit. The Commit objects will be given in order
of appearance."""
data = self.git.blame(rev, '--', file, p=True)
commits = dict()
blames = list()
info = None
for line in data.splitlines(False):
parts = self.re_whitespace.split(line, 1)
firstpart = parts[0]
if self.re_hexsha_only.search(firstpart):
# handles
# 634396b2f541a9f2d58b00be1a07f0c358b999b3 1 1 7 - indicates blame-data start
# 634396b2f541a9f2d58b00be1a07f0c358b999b3 2 2 - indicates another line of blame with the same data
digits = parts[-1].split(" ")
if len(digits) == 3:
info = {'id': firstpart}
blames.append([None, []])
elif info['id'] != firstpart:
info = {'id': firstpart}
blames.append([commits.get(firstpart), []])
# END blame data initialization
else:
m = self.re_author_committer_start.search(firstpart)
if m:
# handles:
# author Tom Preston-Werner
# author-mail <tom@mojombo.com>
# author-time 1192271832
# author-tz -0700
# committer Tom Preston-Werner
# committer-mail <tom@mojombo.com>
# committer-time 1192271832
# committer-tz -0700 - IGNORED BY US
role = m.group(0)
if firstpart.endswith('-mail'):
info["%s_email" % role] = parts[-1]
elif firstpart.endswith('-time'):
info["%s_date" % role] = int(parts[-1])
elif role == firstpart:
info[role] = parts[-1]
# END distinguish mail,time,name
else:
# handle
# filename lib/grit.rb
# summary add Blob
# <and rest>
if firstpart.startswith('filename'):
info['filename'] = parts[-1]
elif firstpart.startswith('summary'):
info['summary'] = parts[-1]
elif firstpart == '':
if info:
sha = info['id']
c = commits.get(sha)
if c is None:
c = Commit( self, hex_to_bin(sha),
author=Actor._from_string(info['author'] + ' ' + info['author_email']),
authored_date=info['author_date'],
committer=Actor._from_string(info['committer'] + ' ' + info['committer_email']),
committed_date=info['committer_date'],
message=info['summary'])
commits[sha] = c
# END if commit objects needs initial creation
m = self.re_tab_full_line.search(line)
text, = m.groups()
blames[-1][0] = c
blames[-1][1].append( text )
info = {'id': sha}
# END if we collected commit info
# END distinguish filename,summary,rest
# END distinguish author|committer vs filename,summary,rest
# END distinguish hexsha vs other information
return blames
@classmethod
def init(cls, path=None, mkdir=True, **kwargs):
"""Initialize a git repository at the given path if specified
:param path:
is the full path to the repo (traditionally ends with /<name>.git)
or None in which case the repository will be created in the current
working directory
:parm mkdir:
if specified will create the repository directory if it doesn't
already exists. Creates the directory with a mode=0755.
Only effective if a path is explicitly given
:parm kwargs:
keyword arguments serving as additional options to the git-init command
:return: ``git.Repo`` (the newly created repo)"""
if mkdir and path and not os.path.exists(path):
os.makedirs(path, 0755)
# git command automatically chdir into the directory
git = Git(path)
output = git.init(**kwargs)
return Repo(path)
@classmethod
def _clone(cls, git, url, path, odb_default_type, progress, **kwargs):
# special handling for windows for path at which the clone should be
# created.
        # tilde '~' will be expanded to the HOME no matter where the ~ occurs. Hence
# we at least give a proper error instead of letting git fail
prev_cwd = None
prev_path = None
odbt = kwargs.pop('odbt', odb_default_type)
if os.name == 'nt':
if '~' in path:
raise OSError("Git cannot handle the ~ character in path %r correctly" % path)
# on windows, git will think paths like c: are relative and prepend the
# current working dir ( before it fails ). We temporarily adjust the working
# dir to make this actually work
match = re.match("(\w:[/\\\])(.*)", path)
if match:
prev_cwd = os.getcwd()
prev_path = path
drive, rest_of_path = match.groups()
os.chdir(drive)
path = rest_of_path
kwargs['with_keep_cwd'] = True
# END cwd preparation
# END windows handling
try:
proc = git.clone(url, path, with_extended_output=True, as_process=True, v=True, **add_progress(kwargs, git, progress))
if progress:
digest_process_messages(proc.stderr, progress)
#END handle progress
finalize_process(proc)
finally:
if prev_cwd is not None:
os.chdir(prev_cwd)
path = prev_path
# END reset previous working dir
# END bad windows handling
# our git command could have a different working dir than our actual
# environment, hence we prepend its working dir if required
if not os.path.isabs(path) and git.working_dir:
path = join(git._working_dir, path)
# adjust remotes - there may be operating systems which use backslashes,
# These might be given as initial paths, but when handling the config file
        # that contains the remote from which we were cloned, git stops liking it
# as it will escape the backslashes. Hence we undo the escaping just to be
# sure
repo = cls(os.path.abspath(path), odbt = odbt)
if repo.remotes:
repo.remotes[0].config_writer.set_value('url', repo.remotes[0].url.replace("\\\\", "\\").replace("\\", "/"))
# END handle remote repo
return repo
def clone(self, path, progress=None, **kwargs):
"""Create a clone from this repository.
:param path:
is the full path of the new repo (traditionally ends with ./<name>.git).
:param progress: See 'git.remote.Remote.push'.
:param kwargs:
odbt = ObjectDatabase Type, allowing to determine the object database
implementation used by the returned Repo instance
All remaining keyword arguments are given to the git-clone command
:return: ``git.Repo`` (the newly cloned repo)"""
return self._clone(self.git, self.git_dir, path, type(self.odb), progress, **kwargs)
@classmethod
def clone_from(cls, url, to_path, progress=None, **kwargs):
"""Create a clone from the given URL
:param url: valid git url, see http://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
:param to_path: Path to which the repository should be cloned to
:param progress: See 'git.remote.Remote.push'.
:param kwargs: see the ``clone`` method
:return: Repo instance pointing to the cloned directory"""
return cls._clone(Git(os.getcwd()), url, to_path, GitCmdObjectDB, progress, **kwargs)
def archive(self, ostream, treeish=None, prefix=None, **kwargs):
"""Archive the tree at the given revision.
:parm ostream: file compatible stream object to which the archive will be written
:parm treeish: is the treeish name/id, defaults to active branch
:parm prefix: is the optional prefix to prepend to each filename in the archive
:parm kwargs:
Additional arguments passed to git-archive
NOTE: Use the 'format' argument to define the kind of format. Use
specialized ostreams to write any format supported by python
:raise GitCommandError: in case something went wrong
:return: self"""
if treeish is None:
treeish = self.head.commit
if prefix and 'prefix' not in kwargs:
kwargs['prefix'] = prefix
kwargs['output_stream'] = ostream
self.git.archive(treeish, **kwargs)
return self
rev_parse = rev_parse
def __repr__(self):
return '<git.Repo "%s">' % self.git_dir
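# Usage sketch (illustrative, not part of this module), following the
# constructor docstring above; all paths and ref names are examples:
#   repo = Repo("/Users/mtrier/Development/git-python")
#   assert not repo.bare
#   dirty = repo.is_dirty(untracked_files=True)
#   heads = repo.heads                                   # IterableList of Head objects
#   last_ten = list(repo.iter_commits('master', max_count=10))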
| dbaxa/GitPython | git/repo/base.py | Python | bsd-3-clause | 30,180 | 0.00666 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .health_evaluation import HealthEvaluation
class ApplicationsHealthEvaluation(HealthEvaluation):
"""Represents health evaluation for applications, containing health
evaluations for each unhealthy application that impacted current aggregated
health state.
:param aggregated_health_state: Possible values include: 'Invalid', 'Ok',
'Warning', 'Error', 'Unknown'
:type aggregated_health_state: str or :class:`enum
<azure.servicefabric.models.enum>`
:param description: Description of the health evaluation, which represents
a summary of the evaluation process.
:type description: str
:param kind: Polymorphic Discriminator
:type kind: str
:param max_percent_unhealthy_applications: Maximum allowed percentage of
unhealthy applications from the ClusterHealthPolicy.
:type max_percent_unhealthy_applications: int
:param total_count: Total number of applications from the health store.
:type total_count: long
:param unhealthy_evaluations:
:type unhealthy_evaluations: list of :class:`HealthEvaluationWrapper
<azure.servicefabric.models.HealthEvaluationWrapper>`
"""
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
'kind': {'key': 'Kind', 'type': 'str'},
'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'},
'total_count': {'key': 'TotalCount', 'type': 'long'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
}
def __init__(self, aggregated_health_state=None, description=None, max_percent_unhealthy_applications=None, total_count=None, unhealthy_evaluations=None):
super(ApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description)
self.max_percent_unhealthy_applications = max_percent_unhealthy_applications
self.total_count = total_count
self.unhealthy_evaluations = unhealthy_evaluations
self.kind = 'Applications'
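# Usage sketch (illustrative, not from the generated file): the model is a
# plain data holder; msrest uses _attribute_map to translate these Python
# attribute names into the wire-format keys shown above. Values are examples.
#   evaluation = ApplicationsHealthEvaluation(
#       aggregated_health_state='Warning',
#       max_percent_unhealthy_applications=0,
#       total_count=10,
#   )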
| AutorestCI/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/applications_health_evaluation.py | Python | mit | 2,699 | 0.002223 |
#!/usr/bin/env python
## \file adjoint.py
# \brief python package for running adjoint problems
# \author T. Lukaczyk, F. Palacios
# \version 5.0.0 "Raven"
#
# SU2 Original Developers: Dr. Francisco D. Palacios.
# Dr. Thomas D. Economon.
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
# Prof. Edwin van der Weide's group at the University of Twente.
# Prof. Vincent Terrapon's group at the University of Liege.
#
# Copyright (C) 2012-2017 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
import os, sys, shutil, copy
from .. import io as su2io
from .. import mesh as su2mesh
def adaptation ( config , kind='' ):
# local copy
konfig = copy.deepcopy(config)
# check kind
if kind: konfig['KIND_ADAPT'] = kind
kind = konfig.get('KIND_ADAPT','NONE')
if kind == 'NONE':
return {}
# check adapted?
# get adaptation function
adapt_function = su2mesh.adapt.name_map[kind]
# setup problem
suffix = 'adapt'
meshname_orig = konfig['MESH_FILENAME']
meshname_new = su2io.add_suffix( konfig['MESH_FILENAME'], suffix )
konfig['MESH_OUT_FILENAME'] = meshname_new
# Run Adaptation
info = adapt_function(konfig)
# update super config
config['MESH_FILENAME'] = meshname_new
config['KIND_ADAPT'] = kind
# files out
files = { 'MESH' : meshname_new }
# info out
append_nestdict( info, { 'FILES' : files } )
return info
| pawhewitt/Dev | SU2_PY/SU2/run/adaptation.py | Python | lgpl-2.1 | 2,501 | 0.015194 |
#! /usr/bin/env python
import sys
from ete2 import Tree
import random
def get_json(node):
# Read ETE tag for duplication or speciation events
if not hasattr(node, 'evoltype'):
dup = random.sample(['N', 'Y'], 1)[0]
elif node.evoltype == "S":
dup = "N"
elif node.evoltype == "D":
dup = "Y"
node.name = node.name.replace("'", '')
json = {"name": node.name,
# "display_label": node.name,
# "duplication": dup,
# "branch_length": str(node.dist),
# "common_name": node.name,
# "seq_length": 0,
"type": "node" if node.children else "leaf",
# "uniprot_name": "Unknown",
}
if node.children:
json["children"] = []
for ch in node.children:
json["children"].append(get_json(ch))
return json
if __name__ == '__main__':
if len(sys.argv) > 1:
t = Tree(sys.argv[1])
else:
# create a random example tree
t = Tree()
t.populate(100, random_branches=True)
# TreeWidget seems to fail with simple quotes
print str(get_json(t)).replace("'", '"')
|
Phelimb/atlas
|
scripts/newick2json.py
|
Python
|
mit
| 1,160 | 0 |
""" Python Character Mapping Codec iso8859_13 generated from 'MAPPINGS/ISO8859/8859-13.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-13',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u201d' # 0xA1 -> RIGHT DOUBLE QUOTATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\u201c' # 0xB4 -> LEFT DOUBLE QUOTATION MARK
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xe6' # 0xBF -> LATIN SMALL LETTER AE
'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
'\u2019' # 0xFF -> RIGHT SINGLE QUOTATION MARK
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
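# Quick round-trip sketch (illustrative; uses only the tables defined above):
#
#   text, consumed = codecs.charmap_decode(b'\xe0\xf0', 'strict', decoding_table)
#   # text == '\u0105\u0161'  (a with ogonek, s with caron), consumed == 2
#   raw, _ = codecs.charmap_encode(text, 'strict', encoding_table)
#   # raw == b'\xe0\xf0'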
|
Hasimir/brython
|
www/src/Lib/encodings/iso8859_13.py
|
Python
|
bsd-3-clause
| 13,578 | 0.020916 |
from dependencies.dependency import ModuleSecurityInfo#, #allow_module
from dependencies.dependency import DateTime
from dependencies.dependency import DisplayList
from dependencies.dependency import getToolByName
from dependencies.dependency import TranslationServiceTool
from lims.browser import BrowserView
from lims import bikaMessageFactory as _
from lims.utils import t
from lims import interfaces
from lims import logger
from dependencies.dependency import IFileNameNormalizer
from dependencies.dependency import IIDNormalizer
from dependencies.dependency import getUtility
from dependencies.dependency import providedBy
import copy,re,urllib
from dependencies import transaction
class IDServerUnavailable(Exception):
pass
def idserver_generate_id(context, prefix, batch_size = None):
""" Generate a new id using external ID server.
"""
plone = context.portal_url.getPortalObject()
url = context.bika_setup.getIDServerURL()
try:
if batch_size:
# GET
f = urllib.urlopen('%s/%s/%s?%s' % (
url,
plone.getId(),
prefix,
urllib.urlencode({'batch_size': batch_size}))
)
else:
f = urllib.urlopen('%s/%s/%s'%(url, plone.getId(), prefix))
new_id = f.read()
f.close()
except:
from sys import exc_info
info = exc_info()
import zLOG; zLOG.LOG('INFO', 0, '', 'generate_id raised exception: %s, %s \n ID server URL: %s' % (info[0], info[1], url))
raise IDServerUnavailable(_('ID Server unavailable'))
return new_id
def generateUniqueId(context):
""" Generate pretty content IDs.
- context is used to find portal_type; in case there is no
prefix specified for the type, the normalized portal_type is
used as a prefix instead.
"""
fn_normalize = getUtility(IFileNameNormalizer).normalize
id_normalize = getUtility(IIDNormalizer).normalize
prefixes = context.bika_setup.getPrefixes()
year = context.bika_setup.getYearInPrefix() and \
DateTime().strftime("%Y")[2:] or ''
separator = '-'
for e in prefixes:
if 'separator' not in e:
e['separator'] = ''
if e['portal_type'] == context.portal_type:
separator = e['separator']
# Analysis Request IDs
if context.portal_type == "AnalysisRequest":
sample = context.getSample()
s_prefix = fn_normalize(sample.getSampleType().getPrefix())
sample_padding = context.bika_setup.getSampleIDPadding()
ar_padding = context.bika_setup.getARIDPadding()
sample_id = sample.getId()
sample_number = sample_id.split(s_prefix)[1]
ar_number = sample.getLastARNumber()
ar_number = ar_number and ar_number + 1 or 1
return fn_normalize(
("%s%s" + separator + "R%s") % (s_prefix,
str(sample_number).zfill(sample_padding),
str(ar_number).zfill(ar_padding))
)
# Sample Partition IDs
if context.portal_type == "SamplePartition":
# We do not use prefixes. There are actually codes that require the 'P'.
# matches = [p for p in prefixes if p['portal_type'] == 'SamplePartition']
# prefix = matches and matches[0]['prefix'] or 'samplepartition'
# padding = int(matches and matches[0]['padding'] or '0')
# at this time the part exists, so +1 would be 1 too many
partnr = str(len(context.aq_parent.objectValues('SamplePartition')))
# parent id is normalized already
return ("%s" + separator + "P%s") % (context.aq_parent.id, partnr)
if context.bika_setup.getExternalIDServer():
# if using external server
for d in prefixes:
# Sample ID comes from SampleType
if context.portal_type == "Sample":
prefix = context.getSampleType().getPrefix()
padding = context.bika_setup.getSampleIDPadding()
new_id = str(idserver_generate_id(context, "%s%s-" % (prefix, year)))
if padding:
new_id = new_id.zfill(int(padding))
return ('%s%s' + separator + '%s') % (prefix, year, new_id)
elif d['portal_type'] == context.portal_type:
prefix = d['prefix']
padding = d['padding']
new_id = str(idserver_generate_id(context, "%s%s-" % (prefix, year)))
if padding:
new_id = new_id.zfill(int(padding))
return ('%s%s' + separator + '%s') % (prefix, year, new_id)
# no prefix; use portal_type
# year is not inserted here
        # portal_type is normalized to lowercase
npt = id_normalize(context.portal_type)
new_id = str(idserver_generate_id(context, npt + "-"))
return ('%s' + separator + '%s') % (npt, new_id)
else:
# No external id-server.
def next_id(prefix):
# normalize before anything
prefix = fn_normalize(prefix)
plone = context.portal_url.getPortalObject()
# grab the first catalog we are indexed in.
at = getToolByName(plone, 'archetype_tool')
if context.portal_type in at.catalog_map:
catalog_name = at.catalog_map[context.portal_type][0]
else:
catalog_name = 'portal_catalog'
catalog = getToolByName(plone, catalog_name)
# get all IDS that start with prefix
# this must specifically exclude AR IDs (two -'s)
rr = re.compile("^"+prefix+separator+"[\d+]+$")
ids = [int(i.split(prefix+separator)[1]) \
for i in catalog.Indexes['id'].uniqueValues() \
if rr.match(i)]
#plone_tool = getToolByName(context, 'plone_utils')
#if not plone_tool.isIDAutoGenerated(l.id):
ids.sort()
_id = ids and ids[-1] or 0
new_id = _id + 1
return str(new_id)
for d in prefixes:
if context.portal_type == "Sample":
# Special case for Sample IDs
prefix = fn_normalize(context.getSampleType().getPrefix())
padding = context.bika_setup.getSampleIDPadding()
sequence_start = context.bika_setup.getSampleIDSequenceStart()
new_id = next_id(prefix+year)
# If sequence_start is greater than new_id. Set
# sequence_start as new_id. (Jira LIMS-280)
if sequence_start > int(new_id):
new_id = str(sequence_start)
if padding:
new_id = new_id.zfill(int(padding))
return ('%s%s' + separator + '%s') % (prefix, year, new_id)
elif d['portal_type'] == context.portal_type:
prefix = d['prefix']
padding = d['padding']
sequence_start = d.get("sequence_start", None)
new_id = next_id(prefix+year)
# Jira-tracker LIMS-280
if sequence_start and int(sequence_start) > int(new_id):
new_id = str(sequence_start)
if padding:
new_id = new_id.zfill(int(padding))
return ('%s%s' + separator + '%s') % (prefix, year, new_id)
# no prefix; use portal_type
# no year inserted here
# use "IID" normalizer, because we want portal_type to be lowercased.
prefix = id_normalize(context.portal_type);
new_id = next_id(prefix)
return ('%s' + separator + '%s') % (prefix, new_id)
def renameAfterCreation(obj):
# Can't rename without a subtransaction commit when using portal_factory
transaction.savepoint(optimistic=True)
# The id returned should be normalized already
new_id = generateUniqueId(obj)
obj.aq_inner.aq_parent.manage_renameObject(obj.id, new_id)
return new_id
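# Illustrative call-site sketch (assumes a freshly created Bika/Plone content
# object inside portal_factory; 'obj' below is hypothetical):
#
#   new_id = renameAfterCreation(obj)
#   # commits a savepoint, computes the prefixed ID via generateUniqueId(obj)
#   # and renames the object inside its parent folder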
|
yasir1brahim/OLiMS
|
lims/idserver.py
|
Python
|
agpl-3.0
| 8,055 | 0.003228 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11b1 on 2017-06-23 09:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gwasdb', '0006_auto_20170623_0933'),
]
operations = [
migrations.AddField(
model_name='study',
name='n_hits_thr',
field=models.IntegerField(blank=True, null=True),
),
]
|
1001genomes/AraGWAS
|
aragwas_server/gwasdb/migrations/0007_study_n_hits_thr.py
|
Python
|
mit
| 462 | 0 |
import codecs
import os
from setuptools import setup, find_packages
def read(filename):
filepath = os.path.join(os.path.dirname(__file__), filename)
return codecs.open(filepath, encoding='utf-8').read()
setup(
name='lemon-filebrowser',
version='0.1.2',
license='ISC',
description="Fork of Patrick Kranzlmueller's django-filebrowser app.",
url='https://github.com/trilan/lemon-filebrowser',
author='Trilan Team',
author_email='dev@lemon.io',
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
],
)
|
trilan/lemon-filebrowser
|
setup.py
|
Python
|
bsd-3-clause
| 998 | 0 |
from astrodata.ReductionObjects import PrimitiveSet
class OBSERVEDPrimitives(PrimitiveSet):
astrotype = "OBSERVED"
def init(self, rc):
print "OBSERVEDPrimitives.init(rc)"
return
def typeSpecificPrimitive(self, rc):
print "OBSERVEDPrimitives::typeSpecificPrimitive()"
def mark(self, rc):
for ad in rc.get_inputs_as_astrodata():
if ad.is_type("MARKED"):
print "OBSERVEDPrimitives::mark(%s) already marked" % ad.filename
else:
ad.phu_set_key_value("S_MARKED", "TRUE")
rc.report_output(ad)
yield rc
def unmark(self, rc):
for ad in rc.get_inputs_as_astrodata():
if ad.is_type("UNMARKED"):
print "OBSERVEDPrimitives::unmark(%s) not marked" % ad.filename
else:
ad.phu_set_key_value("S_MARKED", None)
rc.report_output(ad)
yield rc
|
pyrrho314/recipesystem
|
trunk/astrodata/samples/astrodata_Sample/RECIPES_Sample/primitives/primitives_OBSERVED.py
|
Python
|
mpl-2.0
| 968 | 0.007231 |
import clr
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
def tolist(obj1):
if hasattr(obj1,"__iter__"): return obj1
else: return [obj1]
elements = UnwrapElement(tolist(IN[0]))
out1 = []
cutters = []
cutU = InstanceVoidCutUtils
for i in xrange(len(elements)):
try:
if cutU.CanBeCutWithVoid(elements[i]):
cut1 = cutU.GetCuttingVoidInstances(elements[i])
if cut1.Count == 0:
out1.append(False)
cutters.append([])
else:
out1.append(True)
cut1 = [doc.GetElement(id).ToDSType(True) for id in cut1]
cutters.append(cut1)
else:
out1.append(False)
cutters.append([])
except:
out1.append(False)
cutters.append([])
OUT = out1, cutters
|
dimven/SpringNodes
|
py/Element.IsCut.py
|
Python
|
mit
| 932 | 0.032189 |
#!/usr/bin/python
# Copyright (c) 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jinja2
from jinja2.loaders import TemplateNotFound
from jinja2.utils import open_if_exists
import os
def get_dist_templates_path():
return os.path.join(os.path.dirname(__file__), 'dist-templates')
class RenderspecLoader(jinja2.BaseLoader):
"""A special template loader which allows rendering supplied .spec template
with distro specific blocks maintained as part of renderspec.
'.spec' returns the spec template (which you need to supply during init)
while other strings map to corresponding child templates included
in renderspec which simply extend the '.spec' template.
"""
base_ref = '.spec'
template_postfix = '.spec.j2'
def __init__(self, template_fn, encoding='utf-8'):
self.base_fn = template_fn
self.encoding = encoding
self.disttemp_path = get_dist_templates_path()
def get_source(self, environment, template):
if template == self.base_ref:
fn = self.base_fn
else:
fn = os.path.join(self.disttemp_path,
template + self.template_postfix)
f = open_if_exists(fn)
if not f:
            raise TemplateNotFound(template)
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = os.path.getmtime(self.base_fn)
def uptodate():
try:
return os.path.getmtime(self.base_fn) == mtime
except OSError:
return False
return contents, fn, uptodate
def list_templates(self):
found = set([self.base_ref])
walk_dir = os.walk(self.disttemp_path)
for _, _, filenames in walk_dir:
for fn in filenames:
if fn.endswith(self.template_postfix):
template = fn[:-len(self.template_postfix)]
found.add(template)
return sorted(found)
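# Usage sketch (illustrative; 'example.spec.j2' is a made-up template file):
#
#   env = jinja2.Environment(loader=RenderspecLoader('example.spec.j2'))
#   base = env.get_template('.spec')           # the supplied base template
#   print(env.loader.list_templates())         # '.spec' plus bundled dist templates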
|
openstack/renderspec
|
renderspec/distloader.py
|
Python
|
apache-2.0
| 2,522 | 0 |
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
# ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
__all__ = ['Parser', 'ParserError']
from error import MarkedYAMLError
from tokens import *
from events import *
from scanner import *
class ParserError(MarkedYAMLError):
pass
class Parser(object):
    # Since writing a recursive descent parser is a straightforward task, we
# do not give many comments here.
DEFAULT_TAGS = {
u'!': u'!',
u'!!': u'tag:yaml.org,2002:',
}
def __init__(self):
self.current_event = None
self.yaml_version = None
self.tag_handles = {}
self.states = []
self.marks = []
self.state = self.parse_stream_start
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def check_event(self, *choices):
# Check the type of the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
if self.current_event is not None:
if not choices:
return True
for choice in choices:
if isinstance(self.current_event, choice):
return True
return False
def peek_event(self):
# Get the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
return self.current_event
def get_event(self):
# Get the next event and proceed further.
if self.current_event is None:
if self.state:
self.current_event = self.state()
value = self.current_event
self.current_event = None
return value
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
def parse_stream_start(self):
# Parse the stream start.
token = self.get_token()
event = StreamStartEvent(token.start_mark, token.end_mark,
encoding=token.encoding)
# Prepare the next state.
self.state = self.parse_implicit_document_start
return event
def parse_implicit_document_start(self):
# Parse an implicit document.
if not self.check_token(DirectiveToken, DocumentStartToken,
StreamEndToken):
self.tag_handles = self.DEFAULT_TAGS
token = self.peek_token()
start_mark = end_mark = token.start_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=False)
# Prepare the next state.
self.states.append(self.parse_document_end)
self.state = self.parse_block_node
return event
else:
return self.parse_document_start()
def parse_document_start(self):
# Parse any extra document end indicators.
while self.check_token(DocumentEndToken):
self.get_token()
# Parse an explicit document.
if not self.check_token(StreamEndToken):
token = self.peek_token()
start_mark = token.start_mark
version, tags = self.process_directives()
if not self.check_token(DocumentStartToken):
raise ParserError(None, None,
"expected '<document start>', but found %r"
% self.peek_token().id,
self.peek_token().start_mark)
token = self.get_token()
end_mark = token.end_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=True, version=version, tags=tags)
self.states.append(self.parse_document_end)
self.state = self.parse_document_content
else:
# Parse the end of the stream.
token = self.get_token()
event = StreamEndEvent(token.start_mark, token.end_mark)
assert not self.states
assert not self.marks
self.state = None
return event
def parse_document_end(self):
# Parse the document end.
token = self.peek_token()
start_mark = end_mark = token.start_mark
explicit = False
if self.check_token(DocumentEndToken):
token = self.get_token()
end_mark = token.end_mark
explicit = True
event = DocumentEndEvent(start_mark, end_mark,
explicit=explicit)
# Prepare the next state.
self.state = self.parse_document_start
return event
def parse_document_content(self):
if self.check_token(DirectiveToken,
DocumentStartToken, DocumentEndToken, StreamEndToken):
event = self.process_empty_scalar(self.peek_token().start_mark)
self.state = self.states.pop()
return event
else:
return self.parse_block_node()
def process_directives(self):
self.yaml_version = None
self.tag_handles = {}
while self.check_token(DirectiveToken):
token = self.get_token()
if token.name == u'YAML':
if self.yaml_version is not None:
raise ParserError(None, None,
"found duplicate YAML directive", token.start_mark)
major, minor = token.value
if major != 1:
raise ParserError(None, None,
"found incompatible YAML document (version 1.* is required)",
token.start_mark)
self.yaml_version = token.value
elif token.name == u'TAG':
handle, prefix = token.value
if handle in self.tag_handles:
raise ParserError(None, None,
"duplicate tag handle %r" % handle.encode('utf-8'),
token.start_mark)
self.tag_handles[handle] = prefix
if self.tag_handles:
value = self.yaml_version, self.tag_handles.copy()
else:
value = self.yaml_version, None
for key in self.DEFAULT_TAGS:
if key not in self.tag_handles:
self.tag_handles[key] = self.DEFAULT_TAGS[key]
return value
# block_node_or_indentless_sequence ::= ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
def parse_block_node(self):
return self.parse_node(block=True)
def parse_flow_node(self):
return self.parse_node()
def parse_block_node_or_indentless_sequence(self):
return self.parse_node(block=True, indentless_sequence=True)
def parse_node(self, block=False, indentless_sequence=False):
if self.check_token(AliasToken):
token = self.get_token()
event = AliasEvent(token.value, token.start_mark, token.end_mark)
self.state = self.states.pop()
else:
anchor = None
tag = None
start_mark = end_mark = tag_mark = None
if self.check_token(AnchorToken):
token = self.get_token()
start_mark = token.start_mark
end_mark = token.end_mark
anchor = token.value
if self.check_token(TagToken):
token = self.get_token()
tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
elif self.check_token(TagToken):
token = self.get_token()
start_mark = tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
if self.check_token(AnchorToken):
token = self.get_token()
end_mark = token.end_mark
anchor = token.value
if tag is not None:
handle, suffix = tag
if handle is not None:
if handle not in self.tag_handles:
raise ParserError("while parsing a node", start_mark,
"found undefined tag handle %r" % handle.encode('utf-8'),
tag_mark)
tag = self.tag_handles[handle]+suffix
else:
tag = suffix
#if tag == u'!':
# raise ParserError("while parsing a node", start_mark,
# "found non-specific tag '!'", tag_mark,
# "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
if start_mark is None:
start_mark = end_mark = self.peek_token().start_mark
event = None
implicit = (tag is None or tag == u'!')
if indentless_sequence and self.check_token(BlockEntryToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark)
self.state = self.parse_indentless_sequence_entry
else:
if self.check_token(ScalarToken):
token = self.get_token()
end_mark = token.end_mark
if (token.plain and tag is None) or tag == u'!':
implicit = (True, False)
elif tag is None:
implicit = (False, True)
else:
implicit = (False, False)
event = ScalarEvent(anchor, tag, implicit, token.value,
start_mark, end_mark, style=token.style)
self.state = self.states.pop()
elif self.check_token(FlowSequenceStartToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_sequence_first_entry
elif self.check_token(FlowMappingStartToken):
end_mark = self.peek_token().end_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_mapping_first_key
elif block and self.check_token(BlockSequenceStartToken):
end_mark = self.peek_token().start_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_sequence_first_entry
elif block and self.check_token(BlockMappingStartToken):
end_mark = self.peek_token().start_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_mapping_first_key
elif anchor is not None or tag is not None:
# Empty scalars are allowed even if a tag or an anchor is
# specified.
event = ScalarEvent(anchor, tag, (implicit, False), u'',
start_mark, end_mark)
self.state = self.states.pop()
else:
if block:
node = 'block'
else:
node = 'flow'
token = self.peek_token()
raise ParserError("while parsing a %s node" % node, start_mark,
"expected the node content, but found %r" % token.id,
token.start_mark)
return event
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
def parse_block_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_sequence_entry()
def parse_block_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken, BlockEndToken):
self.states.append(self.parse_block_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_block_sequence_entry
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block collection", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
def parse_indentless_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken,
KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_indentless_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_indentless_sequence_entry
return self.process_empty_scalar(token.end_mark)
token = self.peek_token()
event = SequenceEndEvent(token.start_mark, token.start_mark)
self.state = self.states.pop()
return event
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
def parse_block_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_mapping_key()
def parse_block_mapping_key(self):
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_value)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_value
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block mapping", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_block_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_key)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_block_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# Note that while production rules for both flow_sequence_entry and
# flow_mapping_entry are equal, their interpretations are different.
# For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
    # generates an inline mapping (set syntax).
def parse_flow_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_sequence_entry(first=True)
def parse_flow_sequence_entry(self, first=False):
if not self.check_token(FlowSequenceEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow sequence", self.marks[-1],
"expected ',' or ']', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.peek_token()
event = MappingStartEvent(None, None, True,
token.start_mark, token.end_mark,
flow_style=True)
self.state = self.parse_flow_sequence_entry_mapping_key
return event
elif not self.check_token(FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry)
return self.parse_flow_node()
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_sequence_entry_mapping_key(self):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_value
return self.process_empty_scalar(token.end_mark)
def parse_flow_sequence_entry_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_end)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_end
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_sequence_entry_mapping_end
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_sequence_entry_mapping_end(self):
self.state = self.parse_flow_sequence_entry
token = self.peek_token()
return MappingEndEvent(token.start_mark, token.start_mark)
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
def parse_flow_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_mapping_key(first=True)
def parse_flow_mapping_key(self, first=False):
if not self.check_token(FlowMappingEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow mapping", self.marks[-1],
"expected ',' or '}', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_value
return self.process_empty_scalar(token.end_mark)
elif not self.check_token(FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_empty_value)
return self.parse_flow_node()
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_key)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_mapping_empty_value(self):
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(self.peek_token().start_mark)
def process_empty_scalar(self, mark):
return ScalarEvent(None, None, (True, False), u'', mark, mark)
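# Compositional sketch (not part of this module): in PyYAML-style packages this
# Parser is mixed into a Loader together with the sibling Reader/Scanner/
# Composer/Constructor/Resolver classes from the same package, e.g.:
#
#   class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
#       def __init__(self, stream):
#           Reader.__init__(self, stream)
#           Scanner.__init__(self)
#           Parser.__init__(self)
#           Composer.__init__(self)
#           Constructor.__init__(self)
#           Resolver.__init__(self)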
|
croxis/SpaceDrive
|
spacedrive/renderpipeline/rplibs/yaml/yaml_py2/parser.py
|
Python
|
mit
| 26,131 | 0.002334 |
"""Scans and sanitzes HAR files containing sensitive information."""
# Copyright 2017, Google Inc.
# Authors: Garrett Anderson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime
import json
import urllib2
from flask import Flask, url_for, request, Response, render_template_string
import decorators
from harsanitizer import Har, HarSanitizer
# Config local/remote file locations
CURRENT_DIR = os.path.abspath("./")
# Load/sanity check config.json
try:
with open("./config.json", "r") as config:
STATIC_FOLDER = json.load(config)["static_folder"]
except IOError:
raise IOError(
"'config.json' not found in '{}'. Please ensure that script is "
"being run from root './har-sanitizer/' directory.".format(CURRENT_DIR))
except KeyError:
raise KeyError("'STATIC_FOLDER' key not found in config.json")
WORDLIST_PATH = "{}/wordlist.json".format(STATIC_FOLDER)
MIMETYPES_PATH = "{}/mimetypesScrubList.json".format(STATIC_FOLDER)
# Local STATIC_FOLDER and template config
if STATIC_FOLDER[:4] != "http":
INDEX_PATH = "{}/templates/localhost/index.html".format(STATIC_FOLDER)
# Remote STATIC_FOLDER and template config
else:
INDEX_PATH = "{}/templates/remotehost/index.html".format(STATIC_FOLDER)
# Serialize utility
def json_serial(obj):
"""JSON serializer for datetime.datetime not serializable by default json code."""
if isinstance(obj, datetime.datetime):
serial = obj.isoformat()
return serial
raise TypeError("Object not of type datetime.datetime")
app = Flask(__name__)
@app.route("/")
def index():
if STATIC_FOLDER[:4] == "http":
index_html_str = urllib2.urlopen(INDEX_PATH).read()
else:
with open(INDEX_PATH, "r") as index_file:
index_html_str = index_file.read()
return render_template_string(index_html_str, static_files=STATIC_FOLDER)
@app.route("/get_wordlist", methods=["GET"])
def get_wordlist():
"""Returns default HarSanitizer wordlist."""
hs = HarSanitizer()
try:
if WORDLIST_PATH[:4] == "http":
wordlist_json = json.loads(urllib2.urlopen(WORDLIST_PATH).read())
wordlist = hs.load_wordlist(wordlist=wordlist_json)
else:
wordlist = hs.load_wordlist(wordlist_path=WORDLIST_PATH)
except Exception:
message = {"message": "Error: {} not found.".format(WORDLIST_PATH)}
data = json.dumps(message, default=json_serial)
return Response(data, 500, mimetype="application/json")
data = json.dumps(wordlist, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/default_mimetype_scrublist", methods=["GET"])
def get_mimetype_scrublist():
"""Returns default HarSanitizer mimeTypes scrub list."""
hs = HarSanitizer()
try:
if MIMETYPES_PATH[:4] == "http":
mimetype_scrub_list = json.loads(urllib2.urlopen(MIMETYPES_PATH).read())
else:
with open(MIMETYPES_PATH, "r") as mimetypes_file:
mimetype_scrub_list = json.load(mimetypes_file)
except Exception:
message = {"message": "Error: {} not found.".format(MIMETYPES_PATH)}
data = json.dumps(message, default=json_serial)
return Response(data, 500, mimetype="application/json")
data = json.dumps(mimetype_scrub_list, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/cookies", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def req_cookie_names():
"""Returns all cookie names found in POSTed Har (json)."""
data = request.json
hs = HarSanitizer()
har = Har(har=data)
cookies = hs.get_hartype_names(har, "cookies").keys()
data = json.dumps(cookies, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/headers", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def req_header_names():
"""Returns all header names found in POSTed Har (json)."""
data = request.json
hs = HarSanitizer()
har = Har(har=data)
headers = hs.get_hartype_names(har, "headers").keys()
data = json.dumps(headers, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/params", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def req_urlparams():
"""Returns all URL Query and POSTData Parameter names found in POSTed Har (json)."""
data = request.json
hs = HarSanitizer()
cond_table = {}
har = Har(har=data)
url_pattern = hs.gen_hartype_names_pattern(har, "queryString")
postdata_pattern = hs.gen_hartype_names_pattern(har, "params")
cond_table.update(url_pattern)
cond_table.update(postdata_pattern)
iter_har_dict = hs.iter_eval_exec(my_iter=har.har_dict, cond_table=cond_table)
har = hs.har
urlparams = har.category["queryString"].keys()
if isinstance(har.category["params"].keys(), list):
postdata_params = har.category["params"].keys()
params = urlparams + postdata_params
else:
params = urlparams
data = json.dumps(params, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/mimetypes", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def req_mimetypes():
"""Returns all content mimeTypes found in POSTed Har (json)."""
data = request.json
hs = HarSanitizer()
har = Har(har=data)
mimetypes = hs.get_mimetypes(har).keys()
data = json.dumps(mimetypes, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/scrub_har", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def scrub():
"""Scrubs data["har"] with optional wordlists,
content types, and scrub_all type bools.
"""
hs = HarSanitizer()
hs_kwargs = {}
data = request.json
har = Har(har=data["har"])
if "wordlist" in data.keys():
hs_kwargs["wordlist"] = data["wordlist"]
if "content_list" in data.keys():
hs_kwargs["content_list"] = data["content_list"]
if "all_cookies" in data.keys():
hs_kwargs["all_cookies"] = data["all_cookies"]
if "all_headers" in data.keys():
hs_kwargs["all_headers"] = data["all_headers"]
if "all_params" in data.keys():
hs_kwargs["all_params"] = data["all_params"]
if "all_content_mimetypes" in data.keys():
hs_kwargs["all_content_mimetypes"] = data["all_content_mimetypes"]
sanitized_har = hs.scrub(har, **hs_kwargs)
data = json.dumps(sanitized_har.har_dict, indent=2, separators=(",", ": "))
return Response(data, 200, mimetype="text/plain")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080, debug=False)
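# Example client sketch (illustrative; 'dump.har', host and port are assumptions):
#
#   import json, requests
#   with open('dump.har') as f:
#       har = json.load(f)
#   resp = requests.post('http://localhost:8080/scrub_har',
#                        json={'har': har, 'all_cookies': True})
#   print(resp.text)   # scrubbed HAR, pretty-printed JSON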
|
google/har-sanitizer
|
harsanitizer/harsan_api.py
|
Python
|
apache-2.0
| 7,146 | 0.012175 |
import unittest
import re
import requests
from .timeschedules_parser import FSM
class TestTimeschedulesParser(unittest.TestCase):
def _construct_fsm(self, string):
fsm = FSM([])
def stub(*args, **kwargs):
return string
fsm.next_string = stub
return fsm
def test_next_schedule(self):
self.assertEqual([], self._construct_fsm("ARRARR").next_schedule())
self.assertEqual(
[[2190, 2240], [5070, 5120], [7950, 8000]],
self._construct_fsm("MWF12:30PM-1:20PM").next_schedule())
self.assertEqual(
[[3510, 3590], [6390, 6470]],
self._construct_fsm("TTh10:30AM-11:50AM").next_schedule())
self.assertEqual(
[[3510, 3590]],
self._construct_fsm("Tue10:30AM-11:50AM").next_schedule())
self.assertEqual(
[[3510, 3590]],
self._construct_fsm("U10:30AM-11:50AM").next_schedule())
self.assertEqual(
[[1890, 2360], [3330, 3800], [4770, 5240], [6210, 6680],
[7650, 8120]],
self._construct_fsm("M-F7:30AM-3:20PM").next_schedule())
self.assertEqual(
[[9420, 9600]],
self._construct_fsm("Sat1:00PM-4:00PM").next_schedule())
self.assertEqual(
[[2190, 2240], [5070, 5120], [6510, 6560], [7950, 8000]],
self._construct_fsm("MWHF12:30PM-1:20PM").next_schedule())
self.assertEqual(
[[2190, 2240], [5070, 5120], [6510, 6560], [7950, 8000]],
self._construct_fsm("MWRF12:30PM-1:20PM").next_schedule())
self.assertEqual(
[[2190, 2240], [5070, 5120], [6510, 6560], [7950, 8000]],
self._construct_fsm("MWTHF12:30PM-1:20PM").next_schedule())
self.assertEqual(
[[6660, 6830]],
self._construct_fsm("Thu3:00PM-5:50PM").next_schedule())
if __name__ == '__main__':
unittest.main()
|
kelly-shen/canigraduate.uchicago.edu
|
backend/uchicago/src/timeschedules_parser_test.py
|
Python
|
mit
| 1,945 | 0 |
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Access Control FireRole."""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
"""These functions are for realizing a firewall like role definition for extending
webaccess to connect user to roles using every infos about users.
"""
import re
import cPickle
from zlib import compress, decompress
import sys
import time
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.access_control_config import InvenioWebAccessFireroleError
from invenio.dbquery import run_sql, blob_to_string
from invenio.config import CFG_CERN_SITE
from invenio.access_control_config import CFG_ACC_EMPTY_ROLE_DEFINITION_SRC, \
CFG_ACC_EMPTY_ROLE_DEFINITION_SER, CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
# INTERFACE
def compile_role_definition(firerole_def_src):
""" Given a text in which every row contains a rule it returns the compiled
object definition.
Rules have the following syntax:
allow|deny [not] field {list of one or more (double)quoted string or regexp}
or allow|deny any
Every row may contain a # sign followed by a comment which are discarded.
Field could be any key contained in a user_info dictionary. If the key does
not exist in the dictionary, the rule is skipped.
The first rule which matches return.
"""
line = 0
ret = []
default_allow_p = False
if not firerole_def_src or not firerole_def_src.strip():
firerole_def_src = CFG_ACC_EMPTY_ROLE_DEFINITION_SRC
for row in firerole_def_src.split('\n'):
line += 1
row = row.strip()
if not row:
continue
clean_row = _no_comment_re.sub('', row)
if clean_row:
g = _any_rule_re.match(clean_row)
if g:
default_allow_p = g.group('command').lower() == 'allow'
break
g = _rule_re.match(clean_row)
if g:
allow_p = g.group('command').lower() == 'allow'
not_p = g.group('not') != None
field = g.group('field').lower()
# Renaming groups to group
for alias_item in _aliasTable:
if field in alias_item:
field = alias_item[0]
break
if field.startswith('precached_'):
raise InvenioWebAccessFireroleError("Error while compiling rule %s (line %s): %s is a reserved key and can not be used in FireRole rules!" % (row, line, field))
expressions = g.group('expression')+g.group('more_expressions')
expressions_list = []
for expr in _expressions_re.finditer(expressions):
expr = expr.group()
if field in ('from', 'until'):
try:
expressions_list.append((False, time.mktime(time.strptime(expr[1:-1], '%Y-%m-%d'))))
except Exception, msg:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid date with format YYYY-MM-DD because %s!" % (row, line, expr, msg))
elif expr[0] == '/':
try:
expressions_list.append((True, re.compile(expr[1:-1], re.I)))
except Exception, msg:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid re because %s!" % (row, line, expr, msg))
else:
if field == 'remote_ip' and '/' in expr[1:-1]:
try:
expressions_list.append((False, _ip_matcher_builder(expr[1:-1])))
except Exception, msg:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid ip group because %s!" % (row, line, expr, msg))
else:
expressions_list.append((False, expr[1:-1]))
expressions_list = tuple(expressions_list)
if field in ('from', 'until'):
if len(expressions_list) != 1:
raise InvenioWebAccessFireroleError("Error when compiling rule %s (line %s): exactly one date is expected when using 'from' or 'until', but %s were found" % (row, line, len(expressions_list)))
if not_p:
raise InvenioWebAccessFireroleError("Error when compiling rule %s (line %s): 'not' is not allowed when using 'from' or 'until'" % (row, line))
ret.append((allow_p, not_p, field, expressions_list))
else:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): not a valid rule!" % (row, line))
return (default_allow_p, tuple(ret))
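# Illustrative example (sketch; the email and group names below are made up):
#
#   src = ('allow email "jane.doe@cern.ch"\n'
#          'allow group "library-patrons"\n'
#          'deny any')
#   definition = compile_role_definition(src)
#   # later: acc_firerole_check_user(collect_user_info(req), definition)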
def repair_role_definitions():
""" Try to rebuild compiled serialized definitions from their respectives
sources. This is needed in case Python break back compatibility.
"""
definitions = run_sql("SELECT id, firerole_def_src FROM accROLE")
for role_id, firerole_def_src in definitions:
run_sql("UPDATE accROLE SET firerole_def_ser=%s WHERE id=%s", (serialize(compile_role_definition(firerole_def_src)), role_id))
def store_role_definition(role_id, firerole_def_ser, firerole_def_src):
""" Store a compiled serialized definition and its source in the database
    alongside the role to which it belongs.
@param role_id: the role_id
@param firerole_def_ser: the serialized compiled definition
@param firerole_def_src: the sources from which the definition was taken
"""
run_sql("UPDATE accROLE SET firerole_def_ser=%s, firerole_def_src=%s WHERE id=%s", (firerole_def_ser, firerole_def_src, role_id))
def load_role_definition(role_id):
""" Load the definition corresponding to a role. If the compiled definition
    is corrupted, it tries to repair the definitions from their sources and tries
    again to return the definition.
@param role_id:
@return: a deserialized compiled role definition
"""
res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1)
if res:
try:
return deserialize(res[0][0])
except Exception:
## Something bad might have happened? (Update of Python?)
repair_role_definitions()
res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1)
if res:
return deserialize(res[0][0])
return CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
def acc_firerole_extract_emails(firerole_def_obj):
"""
Best effort function to extract all the possible email addresses
authorized by the given firerole.
"""
authorized_emails = set()
try:
default_allow_p, rules = firerole_def_obj
for (allow_p, not_p, field, expressions_list) in rules: # for every rule
if not_p:
continue
if field == 'group':
for reg_p, expr in expressions_list:
if reg_p:
continue
if CFG_CERN_SITE and expr.endswith(' [CERN]'):
                        authorized_emails.add(expr[:-len(' [CERN]')].lower().strip() + '@cern.ch')
emails = run_sql("SELECT user.email FROM usergroup JOIN user_usergroup ON usergroup.id=user_usergroup.id_usergroup JOIN user ON user.id=user_usergroup.id_user WHERE usergroup.name=%s", (expr, ))
for email in emails:
authorized_emails.add(email[0].lower().strip())
elif field == 'email':
for reg_p, expr in expressions_list:
if reg_p:
continue
authorized_emails.add(expr.lower().strip())
elif field == 'uid':
for reg_p, expr in expressions_list:
if reg_p:
continue
email = run_sql("SELECT email FROM user WHERE id=%s", (expr, ))
if email:
authorized_emails.add(email[0][0].lower().strip())
return authorized_emails
except Exception, msg:
raise InvenioWebAccessFireroleError, msg
def acc_firerole_check_user(user_info, firerole_def_obj):
""" Given a user_info dictionary, it matches the rules inside the deserializez
compiled definition in order to discover if the current user match the roles
corresponding to this definition.
@param user_info: a dict produced by collect_user_info which contains every
info about a user
@param firerole_def_obj: a compiled deserialized definition produced by
compile_role_defintion
    @return: True if the user matches the definition, False otherwise.
"""
try:
default_allow_p, rules = firerole_def_obj
for (allow_p, not_p, field, expressions_list) in rules: # for every rule
group_p = field == 'group' # Is it related to group?
ip_p = field == 'remote_ip' # Is it related to Ips?
until_p = field == 'until' # Is it related to dates?
from_p = field == 'from' # Idem.
next_expr_p = False # Silly flag to break 2 for cycles
if not user_info.has_key(field) and not from_p and not until_p:
continue
for reg_p, expr in expressions_list: # For every element in the rule
if group_p: # Special case: groups
if reg_p: # When it is a regexp
for group in user_info[field]: # iterate over every group
if expr.match(group): # if it matches
if not_p: # if must not match
next_expr_p = True # let's skip to next expr
break
else: # Ok!
return allow_p
if next_expr_p:
break # I said: let's skip to next rule ;-)
elif expr.lower() in [group.lower() for group in user_info[field]]: # Simple expression then just check for expr in groups
if not_p: # If expr is in groups then if must not match
break # let's skip to next expr
else: # Ok!
return allow_p
elif reg_p: # Not a group, then easier. If it's a regexp
if expr.match(user_info[field]): # if it matches
if not_p: # If must not match
break # Let's skip to next expr
else:
return allow_p # Ok!
elif ip_p and type(expr) == type(()): # If it's just a simple expression but an IP!
if _ipmatch(user_info['remote_ip'], expr): # Then if Ip matches
if not_p: # If must not match
break # let's skip to next expr
else:
return allow_p # ok!
elif until_p:
if time.time() <= expr:
if allow_p:
break
else:
return False
elif allow_p:
return False
else:
break
elif from_p:
if time.time() >= expr:
if allow_p:
break
else:
return False
elif allow_p:
return False
else:
break
elif expr.lower() == user_info[field].lower(): # Finally the easiest one!!
if not_p: # ...
break
else: # ...
return allow_p # ...
if not_p and not next_expr_p: # Nothing has matched and we got not
return allow_p # Then the whole rule matched!
except Exception, msg:
raise InvenioWebAccessFireroleError, msg
    return default_allow_p # By default we allow ;-) it's an Open Access project
def serialize(firerole_def_obj):
""" Serialize and compress a definition."""
if firerole_def_obj == CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ:
return CFG_ACC_EMPTY_ROLE_DEFINITION_SER
elif firerole_def_obj:
return compress(cPickle.dumps(firerole_def_obj, -1))
else:
return CFG_ACC_EMPTY_ROLE_DEFINITION_SER
def deserialize(firerole_def_ser):
""" Deserialize and decompress a definition."""
if firerole_def_ser:
return cPickle.loads(decompress(blob_to_string(firerole_def_ser)))
else:
return CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
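# Illustrative round trip (not executed): serialize() and deserialize() are inverses,
# so, for example:
#   ser = serialize(compile_role_definition("allow any"))
#   deserialize(ser)   # -> the same compiled definition object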
# IMPLEMENTATION
# Comment finder
_no_comment_re = re.compile(r'[\s]*(?<!\\)#.*')
# Rule dissector
_rule_re = re.compile(r'(?P<command>allow|deny)[\s]+(?:(?P<not>not)[\s]+)?(?P<field>[\w]+)[\s]+(?P<expression>(?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/)(?P<more_expressions>([\s]*,[\s]*((?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/))*)(?:[\s]*(?<!\\).*)?', re.I)
_any_rule_re = re.compile(r'(?P<command>allow|deny)[\s]+(any|all)[\s]*', re.I)
# Sub expression finder
_expressions_re = re.compile(r'(?<!\\)\'.+?(?<!\\)\'|(?<!\\)\".+?(?<!\\)\"|(?<!\\)\/.+?(?<!\\)\/')
def _mkip (ip):
""" Compute a numerical value for a dotted IP """
num = 0L
for i in map (int, ip.split ('.')):
num = (num << 8) + i
return num
_full = 2L ** 32 - 1
_aliasTable = (('group', 'groups'), )
def _ip_matcher_builder(group):
""" Compile a string "ip/bitmask" (i.e. 127.0.0.0/24)
@param group: a classical "ip/bitmask" string
@return: a tuple containing the gip and mask in a binary version.
"""
gip, gmk = group.split('/')
gip = _mkip(gip)
gmk = int(gmk)
mask = (_full - (2L ** (32 - gmk) - 1))
if not (gip & mask == gip):
raise InvenioWebAccessFireroleError, "Netmask does not match IP (%Lx %Lx)" % (gip, mask)
return (gip, mask)
def _ipmatch(ip, ip_matcher):
""" Check if an ip matches an ip_group.
@param ip: the ip to check
    @param ip_matcher: a compiled ip_group produced by _ip_matcher_builder
@return: True if ip matches, False otherwise
"""
return _mkip(ip) & ip_matcher[1] == ip_matcher[0]
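# Illustrative usage (not executed at import time):
#   matcher = _ip_matcher_builder('192.168.1.0/24')   # -> (numeric ip, numeric mask)
#   _ipmatch('192.168.1.42', matcher)                  # -> True
#   _ipmatch('10.0.0.1', matcher)                      # -> False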
|
pombredanne/invenio
|
modules/webaccess/lib/access_control_firerole.py
|
Python
|
gpl-2.0
| 15,550 | 0.007203 |
from sense_hat import SenseHat
sense = SenseHat()
X = [255, 0, 0] # Red
O = [255, 255, 255] # White
up_arrow = [
O, O, O, X, X, O, O, O,
O, O, X, X, X, X, O, O,
O, X, X, X, X, X, X, O,
O, O, O, X, X, O, O, O,
O, O, O, X, X, O, O, O,
O, O, O, X, X, O, O, O,
O, O, O, X, X, O, O, O,
O, O, O, X, X, O, O, O
]
sense.set_pixels(up_arrow)
|
seattleacademy/fall27
|
arrowup.py
|
Python
|
mit
| 336 | 0.029762 |
"""
Copyright (C) 2014 Maruf Maniruzzaman
Website: http://cosmosframework.com
Author: Maruf Maniruzzaman
License :: OSI Approved :: MIT License
"""
|
kuasha/cosmos
|
cosmos/schema/object.py
|
Python
|
mit
| 154 | 0.006494 |
import gizeh
surface = gizeh.Surface(width=320, height=260)
circle = gizeh.circle (r=40, # radius, in pixels
xy= [156, 200], # coordinates of the center
fill= (1,0,0)) # 'red' in RGB coordinates
circle.draw( surface ) # draw the circle on the surface
surface.get_npimage() # export as a numpy array (we will use that)
surface.write_to_png("my_drawing.png") # export as a PNG
|
mmenz/michaelmenz
|
spare-parts/animatedgifs.py
|
Python
|
apache-2.0
| 420 | 0.033333 |
# -*- coding=utf-8 -*-
import sys
import time
import logging
import os
sys.path.append(os.getcwd())
logging.basicConfig()
from hammer.sqlhelper import SqlHelper
db_config = {
'host': 'localhost',
'port': 3306,
'user': 'root',
'password': '123456',
'db': 'test',
}
def test_create_table():
command = '''
CREATE TABLE `test_test` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(10) DEFAULT NULL,
`age` int(11) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=34 DEFAULT CHARSET=utf8;
'''
sql.execute(command, True)
def test_select():
command = ''''''
def test_insert():
datas = [
{
'name': "a'b'c",
'age': 1,
'date': None,
},
{
'name': 'a"b"c',
'age': 1,
'date': None,
},
{
'name': 'a"b";\'c',
'age': 1,
'date': None,
},
{
'name': "a\"blll\";\'c",
'age': 1,
'date': '2018',
},
]
sql.insert_datas(datas, table_name = 'test')
def test_update():
datas = [
{
'id': 1,
'name': "a'b'c",
'age': 2,
'date': None,
},
{
'id': 2,
'name': 'a"b"c',
'age': 2,
'date': None,
},
{
'id': 3,
'name': 'a"b";\'c',
'age': 2,
'date': None,
},
{
'id': 4,
'name': "a\"blll\";\'c",
'age': 2,
'date': '2018-01-02',
},
]
sql.update_datas(datas, table_name = 'test')
def test_is_exists():
print(sql.is_exists('testdfads'))
def test_check_table_exists():
print(sql.check_table_exists('test', db_name = 'tesdt'))
if __name__ == '__main__':
sql = SqlHelper(**db_config)
# test_insert()
# test_update()
# test_is_exists()
# test_check_table_exists()
datas = []
for i in range(1, 3):
data = {
'id': i,
'name': "vvv",
'age': None,
'date': None,
}
datas.append(data)
print(datas)
print(len(datas))
start = time.time()
# sql.insert_datas(datas, table_name = 'test')
sql.update_datas(datas, table_name = 'test', update_keys = ['name', 'age'])
print(time.time() - start)
|
awolfly9/hammer
|
test/test.py
|
Python
|
mit
| 2,505 | 0.004391 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SettingList(ListResource):
def __init__(self, version):
"""
Initialize the SettingList
:param Version version: Version that contains the resource
:returns: twilio.rest.insights.v1.setting.SettingList
:rtype: twilio.rest.insights.v1.setting.SettingList
"""
super(SettingList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self):
"""
Constructs a SettingContext
:returns: twilio.rest.insights.v1.setting.SettingContext
:rtype: twilio.rest.insights.v1.setting.SettingContext
"""
return SettingContext(self._version, )
def __call__(self):
"""
Constructs a SettingContext
:returns: twilio.rest.insights.v1.setting.SettingContext
:rtype: twilio.rest.insights.v1.setting.SettingContext
"""
return SettingContext(self._version, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Insights.V1.SettingList>'
class SettingPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the SettingPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.insights.v1.setting.SettingPage
:rtype: twilio.rest.insights.v1.setting.SettingPage
"""
super(SettingPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SettingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.insights.v1.setting.SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
return SettingInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Insights.V1.SettingPage>'
class SettingContext(InstanceContext):
def __init__(self, version):
"""
Initialize the SettingContext
:param Version version: Version that contains the resource
:returns: twilio.rest.insights.v1.setting.SettingContext
:rtype: twilio.rest.insights.v1.setting.SettingContext
"""
super(SettingContext, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Voice/Settings'.format(**self._solution)
def fetch(self, subaccount_sid=values.unset):
"""
Fetch the SettingInstance
:param unicode subaccount_sid: The subaccount_sid
:returns: The fetched SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
data = values.of({'SubaccountSid': subaccount_sid, })
payload = self._version.fetch(method='GET', uri=self._uri, params=data, )
return SettingInstance(self._version, payload, )
def update(self, advanced_features=values.unset, voice_trace=values.unset,
subaccount_sid=values.unset):
"""
Update the SettingInstance
:param bool advanced_features: The advanced_features
:param bool voice_trace: The voice_trace
:param unicode subaccount_sid: The subaccount_sid
:returns: The updated SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
data = values.of({
'AdvancedFeatures': advanced_features,
'VoiceTrace': voice_trace,
'SubaccountSid': subaccount_sid,
})
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return SettingInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Insights.V1.SettingContext {}>'.format(context)
class SettingInstance(InstanceResource):
def __init__(self, version, payload):
"""
Initialize the SettingInstance
:returns: twilio.rest.insights.v1.setting.SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
super(SettingInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'advanced_features': payload.get('advanced_features'),
'voice_trace': payload.get('voice_trace'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SettingContext for this SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingContext
"""
if self._context is None:
self._context = SettingContext(self._version, )
return self._context
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def advanced_features(self):
"""
:returns: The advanced_features
:rtype: bool
"""
return self._properties['advanced_features']
@property
def voice_trace(self):
"""
:returns: The voice_trace
:rtype: bool
"""
return self._properties['voice_trace']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
def fetch(self, subaccount_sid=values.unset):
"""
Fetch the SettingInstance
:param unicode subaccount_sid: The subaccount_sid
:returns: The fetched SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
return self._proxy.fetch(subaccount_sid=subaccount_sid, )
def update(self, advanced_features=values.unset, voice_trace=values.unset,
subaccount_sid=values.unset):
"""
Update the SettingInstance
:param bool advanced_features: The advanced_features
:param bool voice_trace: The voice_trace
:param unicode subaccount_sid: The subaccount_sid
:returns: The updated SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
return self._proxy.update(
advanced_features=advanced_features,
voice_trace=voice_trace,
subaccount_sid=subaccount_sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Insights.V1.SettingInstance {}>'.format(context)
|
twilio/twilio-python
|
twilio/rest/insights/v1/setting.py
|
Python
|
mit
| 7,805 | 0.000769 |
#!Measurement
# all of this is configuration info that can be used in the script.
# you refer to these values using mx.<group>.<attribute>
# e.g
# mx.baseline.counts is 180
# mx.multicollect.detector is H1
'''
baseline:
after: true
before: false
counts: 180
detector: H1
mass: 34.2
settling_time: 15
default_fits: nominal
equilibration:
eqtime: 1.0
inlet: R
inlet_delay: 3
outlet: O
use_extraction_eqtime: true
multicollect:
counts: 400
detector: H1
isotope: Ar40
peakcenter:
after: true
before: false
detector: H1
detectors:
- H1
- AX
- CDD
isotope: Ar40
peakhop:
hops_name: ''
use_peak_hop: false
'''
# entry point for the script
def main():
# print a message to the user
info('unknown measurement script')
# activate the following detectors. measurements will be plotted and save for these detectors
activate_detectors('H2', 'H1', 'AX', 'L1', 'L2', 'CDD')
# position the magnet with Ar40 on H1
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
# choose where to get the equilibration duration from
# sniff the gas during equilibration
if mx.equilibration.use_extraction_eqtime:
eqt = eqtime
else:
eqt = mx.equilibration.eqtime
'''
Equilibrate is non-blocking so use a sniff or sleep as a placeholder
e.g sniff(<equilibration_time>) or sleep(<equilibration_time>)
'''
# start the equilibration thread
equilibrate(eqtime=eqt, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet,
delay=mx.equilibration.inlet_delay)
# set time zero after equilibrate returns i.e after the ion pump valve closes
set_time_zero()
# record/plot the equilibration
sniff(eqt)
# set the default fits
set_fits()
set_baseline_fits()
# multicollect on active detectors for 400
multicollect(ncounts=mx.multicollect.counts)
if mx.baseline.after:
# do a baseline measurement
baselines(ncounts=mx.baseline.counts, mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
if mx.peakcenter.after:
# do a peak center scan and update the mftable with new peak centers
activate_detectors(*mx.peakcenter.detectors, **{'peak_center': True})
peak_center(detector=mx.peakcenter.detector, isotope=mx.peakcenter.isotope)
# print a message to the user
info('finished measure script')
|
USGSDenverPychron/pychron
|
docs/user_guide/operation/scripts/examples/basic.py
|
Python
|
apache-2.0
| 2,487 | 0.002413 |
"""Support for VOC."""
from homeassistant.components.binary_sensor import DEVICE_CLASSES, BinarySensorEntity
from . import DATA_KEY, VolvoEntity
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Volvo sensors."""
if discovery_info is None:
return
async_add_entities([VolvoSensor(hass.data[DATA_KEY], *discovery_info)])
class VolvoSensor(VolvoEntity, BinarySensorEntity):
"""Representation of a Volvo sensor."""
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self.instrument.is_on
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
if self.instrument.device_class in DEVICE_CLASSES:
return self.instrument.device_class
return None
|
tboyce021/home-assistant
|
homeassistant/components/volvooncall/binary_sensor.py
|
Python
|
apache-2.0
| 856 | 0.002336 |
from crystal_filter_middleware.handlers import CrystalBaseHandler
from swift.common.swob import HTTPMethodNotAllowed
from swift.common.wsgi import make_subrequest
from swift.common.utils import public
import operator
import json
import copy
import urllib
import os
import re
mappings = {'>': operator.gt, '>=': operator.ge,
'==': operator.eq, '<=': operator.le, '<': operator.lt,
'!=': operator.ne, "OR": operator.or_, "AND": operator.and_}
class CrystalProxyHandler(CrystalBaseHandler):
def __init__(self, request, conf, app, logger):
super(CrystalProxyHandler, self).__init__(request, conf,
app, logger)
self.etag = None
self.filter_exec_list = None
def _get_dynamic_filters(self):
# Dynamic binding of policies: using a Lua script that executes
# a hgetall on the first matching key of a list and also returns
# the global filters
lua_sha = self.conf.get('LUA_get_pipeline_sha')
args = (self.account.replace('AUTH_', ''), '' if self.container is None else self.container)
redis_list = self.redis.evalsha(lua_sha, 0, *args)
index = redis_list.index("@@@@") # Separator between pipeline and global filters
self.filter_list = dict(zip(redis_list[0:index:2], redis_list[1:index:2]))
self.global_filters = dict(zip(redis_list[index+1::2], redis_list[index+2::2]))
self.proxy_filter_exec_list = {}
self.object_filter_exec_list = {}
if self.global_filters or self.filter_list:
self.proxy_filter_exec_list = self._build_filter_execution_list('proxy')
self.object_filter_exec_list = self._build_filter_execution_list('object')
def _parse_vaco(self):
return self.request.split_path(2, 4, rest_with_last=True)
def handle_request(self):
if self.is_crystal_valid_request and hasattr(self, self.request.method):
try:
self._get_dynamic_filters()
handler = getattr(self, self.request.method)
getattr(handler, 'publicly_accessible')
except AttributeError:
return HTTPMethodNotAllowed(request=self.request)
return handler()
else:
self.logger.info('Request disabled for Crystal')
return self.request.get_response(self.app)
def _check_conditions(self, filter_metadata):
"""
        This method checks the object_tag, object_type and object_size parameters
        introduced by the dashboard to run the filter.
"""
if not filter_metadata['object_type'] and \
not filter_metadata['object_tag'] and \
not filter_metadata['object_size']:
return True
metadata = {}
if self.method == 'put':
for key in self.request.headers.keys():
metadata[key.lower()] = self.request.headers.get(key)
else:
sub_req = make_subrequest(self.request.environ, method='HEAD',
path=self.request.path_info,
headers=self.request.headers,
swift_source='Crystal Filter Middleware')
resp = sub_req.get_response(self.app)
metadata = resp.headers
correct_type = True
correct_size = True
correct_tags = True
try:
if filter_metadata['object_type']:
object_name = filter_metadata['object_name']
filename = self.request.environ['PATH_INFO']
pattern = re.compile(object_name)
if not pattern.search(filename):
correct_type = False
if filter_metadata['object_tag']:
tags = filter_metadata['object_tag'].split(',')
tag_checking = list()
for tag in tags:
key, value = tag.split(':')
meta_key = ('X-Object-Meta-'+key).lower()
sysmeta_key = ('X-Object-Sysmeta-Meta-'+key).lower()
correct_tag = (meta_key in metadata and
metadata[meta_key] == value) or \
(sysmeta_key in metadata and
metadata[sysmeta_key] == value)
tag_checking.append(correct_tag)
correct_tags = all(tag_checking)
if filter_metadata['object_size']:
object_size = filter_metadata['object_size']
op = mappings[object_size[0]]
                obj_length = int(object_size[1])
                correct_size = op(int(metadata['Content-Length']),
                                  obj_length)
except Exception as e:
self.logger.error(str(e))
return False
return correct_type and correct_size and correct_tags
def _parse_filter_metadata(self, filter_metadata):
"""
This method parses the filter metadata
"""
filter_name = filter_metadata['filter_name']
language = filter_metadata["language"]
params = filter_metadata["params"]
filter_type = filter_metadata["filter_type"]
filter_main = filter_metadata["main"]
filter_dep = filter_metadata["dependencies"]
filter_size = filter_metadata["content_length"]
reverse = filter_metadata["reverse"]
filter_data = {'name': filter_name,
'language': language,
'params': self._parse_csv_params(params),
'reverse': reverse,
'type': filter_type,
'main': filter_main,
'dependencies': filter_dep,
'size': filter_size}
return filter_data
def _build_filter_execution_list(self, server):
"""
This method builds the filter execution list (ordered).
"""
filter_execution_list = {}
''' Parse global filters '''
for _, filter_metadata in self.global_filters.items():
filter_metadata = json.loads(filter_metadata)
if self.method in filter_metadata and filter_metadata[self.method] \
and filter_metadata['execution_server'] == server \
and self._check_conditions(filter_metadata):
filter_data = self._parse_filter_metadata(filter_metadata)
order = filter_metadata["execution_order"]
filter_execution_list[int(order)] = filter_data
''' Parse Project specific filters'''
for _, filter_metadata in self.filter_list.items():
filter_metadata = json.loads(filter_metadata)
if self.method in filter_metadata and filter_metadata[self.method] \
and filter_metadata['execution_server'] == server \
and self._check_conditions(filter_metadata):
filter_data = self._parse_filter_metadata(filter_metadata)
order = filter_metadata["execution_order"]
filter_execution_list[order] = filter_data
return filter_execution_list
def _format_crystal_metadata(self, filter_list):
"""
        This method formats the reverse metadata for each filter: filters that define
        a reverse execution server keep a 'reverse' parameter, the rest are dropped.
        It allows the reverse case of the filters without querying the centralized
        controller.
"""
for key in filter_list.keys():
cfilter = filter_list[key]
if cfilter['reverse'] != 'False':
current_params = cfilter['params']
if current_params:
cfilter['params']['reverse'] = 'True'
else:
cfilter['params'] = {'reverse': 'True'}
cfilter['execution_server'] = cfilter['reverse']
cfilter.pop('reverse')
else:
filter_list.pop(key)
return filter_list
def _set_crystal_metadata(self):
"""
This method generates the metadata that will be stored alongside the
object in the PUT requests. It allows the reverse case of the filters
without querying the centralized controller.
"""
filter_exec_list = {}
for key in sorted(self.proxy_filter_exec_list.keys()):
filter_exec_list[len(filter_exec_list)] = self.proxy_filter_exec_list[key]
for key in sorted(self.object_filter_exec_list.keys()):
filter_exec_list[len(filter_exec_list)] = self.object_filter_exec_list[key]
filter_list = copy.deepcopy(filter_exec_list)
crystal_md = self._format_crystal_metadata(filter_list)
if crystal_md:
self.request.headers['X-Object-Sysmeta-Crystal'] = crystal_md
def _save_size_and_etag(self):
"""
Save original object Size and Etag
"""
etag = self.request.headers.get('ETag', None)
if etag:
self.request.headers['X-Object-Sysmeta-Etag'] = etag
self.request.headers['X-Backend-Container-Update-Override-Etag'] = etag
size = self.request.headers.get('Content-Length')
self.request.headers['X-Object-Sysmeta-Size'] = size
self.request.headers['X-Backend-Container-Update-Override-Size'] = size
def _recover_size_and_etag(self, response):
"""
Recovers the original Object Size and Etag
"""
if 'X-Object-Sysmeta-Size' in response.headers and self.obj:
size = response.headers.pop('X-Object-Sysmeta-Size')
response.headers['Content-Length'] = size
if 'X-Object-Sysmeta-Etag' in response.headers and self.obj:
etag = response.headers.pop('X-Object-Sysmeta-Etag')
response.headers['etag'] = etag
if 'Transfer-Encoding' in response.headers and self.obj:
response.headers.pop('Transfer-Encoding')
def _parse_csv_params(self, csv_params):
"""
        Parses comma-separated parameters such as "a=1,b=2" into a dictionary
"""
params_dict = dict()
params = [x.strip() for x in csv_params.split('=')]
for index in range(len(params)):
if len(params) > index + 1:
if index == 0:
params_dict[params[index]] = params[index + 1].rsplit(',', 1)[0].strip()
elif index < len(params):
params_dict[params[index].rsplit(',', 1)[1].strip()] = params[index + 1].rsplit(',', 1)[0].strip()
else:
params_dict[params[index].rsplit(',', 1)[1].strip()] = params[index + 1]
return params_dict
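    # Illustrative behaviour of the parser above:
    #   self._parse_csv_params('a=1, b=2')  ->  {'a': '1', 'b': '2'}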
def _parse_headers_params(self):
"""
Extract parameters from headers
"""
parameters = dict()
for param in self.request.headers:
if param.lower().startswith('x-crystal-parameter'):
keyvalue = self.request.headers[param]
keyvalue = urllib.unquote(keyvalue)
[key, value] = keyvalue.split(':')
parameters[key] = value
return parameters
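    # Illustrative header consumed above (hypothetical parameter name/value):
    #   X-Crystal-Parameter: compression_level:5   ->  {'compression_level': '5'}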
@public
def GET(self):
"""Handler for HTTP GET requests."""
return self.GETorHEAD()
@public
def HEAD(self):
"""Handler for HTTP HEAD requests."""
return self.GETorHEAD()
@public
def POST(self):
"""Handler for HTTP POST requests."""
return self.POSTorDELETE()
@public
def DELETE(self):
"""Handler for HTTP DELETE requests."""
return self.POSTorDELETE()
def GETorHEAD(self):
"""
Handle HTTP GET or HEAD requests.
"""
if self.proxy_filter_exec_list:
self.logger.info('There are Filters to execute')
self.logger.info(str(self.proxy_filter_exec_list))
self._build_pipeline(self.proxy_filter_exec_list)
else:
self.logger.info('No Filters to execute')
if self.object_filter_exec_list:
object_server_filters = json.dumps(self.object_filter_exec_list)
self.request.headers['crystal.filters'] = object_server_filters
response = self.request.get_response(self.app)
self._recover_size_and_etag(response)
return response
@public
def PUT(self):
"""
Handle HTTP PUT requests.
"""
if self.proxy_filter_exec_list:
self.logger.info('There are Filters to execute')
self.logger.info(str(self.proxy_filter_exec_list))
self._set_crystal_metadata()
self._save_size_and_etag()
self._build_pipeline(self.proxy_filter_exec_list)
else:
self.logger.info('No filters to execute')
if self.object_filter_exec_list:
object_server_filters = json.dumps(self.object_filter_exec_list)
self.request.headers['crystal.filters'] = object_server_filters
return self.request.get_response(self.app)
@public
def POSTorDELETE(self):
"""
Handle HTTP POST or DELETE requests.
"""
if self.proxy_filter_exec_list:
self.logger.info('There are Filters to execute')
self.logger.info(str(self.proxy_filter_exec_list))
self._build_pipeline(self.proxy_filter_exec_list)
else:
self.logger.info('No filters to execute')
if self.object_filter_exec_list:
object_server_filters = json.dumps(self.object_filter_exec_list)
self.request.headers['crystal.filters'] = object_server_filters
return self.request.get_response(self.app)
|
Crystal-SDS/filter-middleware
|
crystal_filter_middleware/handlers/proxy.py
|
Python
|
gpl-3.0
| 13,744 | 0.001237 |
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_classifier.common import constants
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm
Base = declarative_base()
# Stolen from neutron/db/model_base.py
class HasTenant(object):
"""Tenant mixin, add to subclasses that have a tenant."""
tenant_id = sa.Column(sa.String(255), index=True)
# Stolen from neutron/db/model_base.py
class HasId(object):
"""id mixin, add to subclasses that have an id."""
id = sa.Column(sa.String(36),
primary_key=True,
default=uuidutils.generate_uuid)
class Classifier(Base, HasId):
__tablename__ = 'classifiers'
classifier_type = sa.Column(sa.String)
__mapper_args__ = {'polymorphic_on': classifier_type}
class ClassifierGroup(Base, HasTenant, HasId):
__tablename__ = 'classifier_groups'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
classifier_chain = orm.relationship(
'ClassifierChainEntry',
backref=orm.backref('classifier_chains', cascade='all, delete'),
order_by='ClassifierChainEntry.sequence',
collection_class=ordering_list('sequence', count_from=1))
service = sa.Column(sa.Enum(*constants.NEUTRON_SERVICES), index=True)
class ClassifierChainEntry(Base, HasId):
__tablename__ = 'classifier_chains'
classifier_group_id = sa.Column(sa.String(36),
sa.ForeignKey('classifier_groups.id',
ondelete="CASCADE"))
classifier_id = sa.Column(sa.String(36),
sa.ForeignKey('classifiers.id',
ondelete="CASCADE"))
classifier = orm.relationship(Classifier)
sequence = sa.Column(sa.Integer)
classifier_group = orm.relationship(ClassifierGroup)
class IpClassifier(Classifier):
__tablename__ = 'ip_classifiers'
__mapper_args__ = {'polymorphic_identity': 'ipclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
source_ip_prefix = sa.Column(sa.String(255))
destination_ip_prefix = sa.Column(sa.String(255))
class Ipv4Classifier(Classifier):
__tablename__ = 'ipv4_classifiers'
__mapper_args__ = {'polymorphic_identity': 'ipv4classifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
dscp_tag = sa.Column(sa.String(255))
protocol = sa.Enum(*constants.PROTOCOLS)
dscp_mask = sa.Column(sa.String(255))
class Ipv6Classifier(Classifier):
__tablename__ = 'ipv6_classifiers'
__mapper_args__ = {'polymorphic_identity': 'ipv6classifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
next_header = sa.Enum(*constants.PROTOCOLS)
traffic_class = sa.Column(sa.String(255))
flow_label = sa.Column(sa.String(255))
class TransportClassifier(Classifier):
__tablename__ = 'transport_classifiers'
__mapper_args__ = {'polymorphic_identity': 'transportclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
source_port_range_max = sa.Column(sa.Integer)
source_port_range_min = sa.Column(sa.Integer)
destination_port_range_max = sa.Column(sa.Integer)
destination_port_range_min = sa.Column(sa.Integer)
class EthernetClassifier(Classifier):
__tablename__ = 'ethernet_classifiers'
__mapper_args__ = {'polymorphic_identity': 'ethernetclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
ethertype = sa.Column(sa.String(255))
source_mac = sa.Column(sa.String(255))
destination_mac = sa.Column(sa.String(255))
class VlanClassifier(Classifier):
__tablename__ = 'vlan_classifiers'
__mapper_args__ = {'polymorphic_identity': 'vlanclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
vlan_priority = sa.Column(sa.Integer)
class EncapsulationClassifier(Classifier):
__tablename__ = 'encapsulation_classifiers'
__mapper_args__ = {'polymorphic_identity': 'encapsulationclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
encapsulation_type = sa.Column(sa.Enum(*constants.ENCAPSULATION_TYPES))
encapsulation_id = sa.Column(sa.String(255))
class NeutronPortClassifier(Classifier):
__tablename__ = 'neutron_port_classifiers'
__mapper_args__ = {'polymorphic_identity': 'neutronportclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
logical_source_port = sa.Column(sa.String(255))
logical_destination_port = sa.Column(sa.String(255))
|
sc68cal/neutron-classifier
|
neutron_classifier/db/models.py
|
Python
|
apache-2.0
| 5,550 | 0 |
# coding=utf-8
"""
Collector that reports amavis metrics as reported by amavisd-agent
#### Dependencies
* amavisd-agent must be present in PATH
"""
import os
import subprocess
import re
import diamond.collector
import diamond.convertor
from diamond.collector import str_to_bool
class AmavisCollector(diamond.collector.Collector):
# From the source of amavisd-agent and it seems like the three interesting
# formats are these: ("x y/h", "xMB yMB/h", "x s y s/msg"),
# so this, ugly as it is to hardcode it this way, it should be right.
#
# The other option would be to directly read and decode amavis' berkeley
# db, and I don't even want to get there
matchers = [
re.compile(r'^\s*(?P<name>[\w]+)\s+(?P<time>[\d]+) s\s+'
r'(?P<frequency>[\d.]+) s/msg\s+\([\w]+\)\s*$'),
re.compile(r'^\s*(?P<name>[\w.-]+)\s+(?P<count>[\d]+)\s+'
r'(?P<frequency>[\d.]+)/h\s+(?P<percentage>[\d.]+) %'
r'\s\([\w]+\)\s*$'),
re.compile(r'^\s*(?P<name>[\w.-]+)\s+(?P<size>[\d]+)MB\s+'
r'(?P<frequency>[\d.]+)MB/h\s+(?P<percentage>[\d.]+) %'
r'\s\([\w]+\)\s*$'),
]
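    # Illustrative amavisd-agent output lines (values are made up) of the shapes the
    # matchers above capture:
    #   TimeElapsedTotal     1234 s     2.50 s/msg  (total)
    #   InMsgs                152       12.6/h      100.0 % (InMsgs)
    #   InMsgsSize             48MB      4.0MB/h    100.0 % (InMsgsSize)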
def get_default_config_help(self):
config_help = super(AmavisCollector, self).get_default_config_help()
config_help.update({
'amavisd_exe': 'The path to amavisd-agent',
'use_sudo': 'Call amavisd-agent using sudo',
'sudo_exe': 'The path to sudo',
'sudo_user': 'The user to use if using sudo',
})
return config_help
def get_default_config(self):
config = super(AmavisCollector, self).get_default_config()
config.update({
'path': 'amavis',
'amavisd_exe': '/usr/sbin/amavisd-agent',
'use_sudo': False,
'sudo_exe': '/usr/bin/sudo',
'sudo_user': 'amavis',
})
return config
def collect(self):
"""
Collect memory stats
"""
try:
if str_to_bool(self.config['use_sudo']):
# Use -u instead of --user as the former is more portable. Not
# all versions of sudo support the long form --user.
cmdline = [
self.config['sudo_exe'], '-u', self.config['sudo_user'],
'--', self.config['amavisd_exe'], '-c', '1'
]
else:
cmdline = [self.config['amavisd_exe'], '-c', '1']
agent = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
agent_out = agent.communicate()[0]
lines = agent_out.strip().split(os.linesep)
for line in lines:
for rex in self.matchers:
res = rex.match(line)
if res:
groups = res.groupdict()
name = groups['name']
for metric, value in groups.items():
if metric == 'name':
continue
mtype = 'GAUGE'
precision = 2
if metric in ('count', 'time'):
mtype = 'COUNTER'
precision = 0
self.publish("{}.{}".format(name, metric),
value, metric_type=mtype,
precision=precision)
except OSError as err:
self.log.error("Could not run %s: %s",
self.config['amavisd_exe'],
err)
return None
return True
|
MichaelDoyle/Diamond
|
src/collectors/amavis/amavis.py
|
Python
|
mit
| 3,732 | 0 |
# AtHomePowerlineServer - networked server for CM11/CM11A/XTB-232 X10 controllers
# Copyright (C) 2014, 2015 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
from datetime import datetime, timedelta
from astral import LocationInfo
from astral.sun import sun
from astral.geocoder import database, lookup
from Configuration import Configuration
def get_astral_data(for_datetime):
'''
Returns the sunrise and sunset times for the given date.
Uses the Astral package to compute sunrise/sunset for the
configured city.
Reference https://astral.readthedocs.io/en/latest/index.html
:param for_datetime: The date for the astral data
:return: Returns a dict containing the keys sunrise and sunset.
The values are datetime objects.
'''
city = None
# Either city/name or latitude and longitude are required
if Configuration.City() != "":
db = database()
try:
city = lookup(Configuration.City(), db)
# Overrides
if Configuration.Latitude() != "":
city.latitude = float(Configuration.Latitude())
if Configuration.Longitude() != "":
city.longitude = float(Configuration.Longitude())
except KeyError:
pass
if city is None:
# Default if no city is configured
city = LocationInfo()
# We expect latitude and longitude to be configured to override city
if Configuration.Latitude() != "" and Configuration.Longitude() != "":
city.latitude = float(Configuration.Latitude())
city.longitude = float(Configuration.Longitude())
else:
raise ValueError("Latitude and longitude are required")
# region is not used
# city.region = ""
# Local timezone
city.timezone = datetime.now().astimezone().tzinfo
return sun(city.observer, date=for_datetime, tzinfo=city.timezone)
def get_sun_data(for_datetime):
'''
Returns the sunrise and sunset times for the given date.
Uses the Astral package to compute sunrise/sunset for the
configured city.
Reference https://pythonhosted.org/astral/module.html
    :param for_datetime: the date for the sun data
:return: Returns a dict containing the keys sunrise and sunset.
'''
sun_data = get_astral_data(for_datetime)
sun_data_response = {}
sun_data_response["sunrise"] = sun_data["sunrise"].isoformat()
sun_data_response["sunset"] = sun_data["sunset"].isoformat()
return sun_data_response
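# Illustrative result (actual values depend on the configured location and date):
#   get_sun_data(datetime(2021, 6, 21))
#   -> {'sunrise': '2021-06-21T05:26:00-05:00', 'sunset': '2021-06-21T20:36:00-05:00'}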
def round_to_minute(time_to_round):
round_adj = 0
rounded = datetime(time_to_round.year, time_to_round.month, time_to_round.day,
hour=time_to_round.hour, minute=time_to_round.minute, second=0, microsecond=0,
tzinfo=time_to_round.tzinfo)
if time_to_round.second >= 30:
round_adj = timedelta(minutes=1)
rounded = rounded + round_adj
return rounded
def get_sunrise(for_datetime):
"""
Return the sunrise time for a given date/time
"""
sun_data = get_astral_data(for_datetime)
# Returns a datetime instance in local time
return round_to_minute(sun_data["sunrise"])
def get_sunset(for_datetime):
"""
Return the sunset time for a given date/time
"""
sun_data = get_astral_data(for_datetime)
# Returns a datetime instance in local time
return round_to_minute(sun_data["sunset"])
|
dhocker/athomepowerlineserver
|
helpers/sun_data.py
|
Python
|
gpl-3.0
| 3,610 | 0.000831 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import time
import logging
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
_logger = logging.getLogger(__name__)
DATE_RANGE_FUNCTION = {
'minutes': lambda interval: timedelta(minutes=interval),
'hour': lambda interval: timedelta(hours=interval),
'day': lambda interval: timedelta(days=interval),
    'month': lambda interval: timedelta(days=30 * interval),  # timedelta has no 'months' argument; approximate a month as 30 days
False: lambda interval: timedelta(0),
}
def get_datetime(date_str):
'''Return a datetime from a date string or a datetime string'''
# complete date time if date_str contains only a date
if ' ' not in date_str:
date_str = date_str + " 00:00:00"
return datetime.strptime(date_str, DEFAULT_SERVER_DATETIME_FORMAT)
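# Illustrative behaviour of get_datetime():
#   get_datetime('2014-12-02')          -> datetime(2014, 12, 2, 0, 0)
#   get_datetime('2014-12-02 08:30:00') -> datetime(2014, 12, 2, 8, 30)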
class base_action_rule(osv.osv):
""" Base Action Rules """
_name = 'base.action.rule'
_description = 'Action Rules'
_order = 'sequence'
_columns = {
'name': fields.char('Rule Name', required=True),
'model_id': fields.many2one('ir.model', 'Related Document Model',
required=True, domain=[('osv_memory', '=', False)]),
'model': fields.related('model_id', 'model', type="char", string='Model'),
'create_date': fields.datetime('Create Date', readonly=1),
'active': fields.boolean('Active',
help="When unchecked, the rule is hidden and will not be executed."),
'sequence': fields.integer('Sequence',
help="Gives the sequence order when displaying a list of rules."),
'kind': fields.selection(
[('on_create', 'On Creation'),
('on_write', 'On Update'),
('on_create_or_write', 'On Creation & Update'),
('on_time', 'Based on Timed Condition')],
string='When to Run'),
'trg_date_id': fields.many2one('ir.model.fields', string='Trigger Date',
help="When should the condition be triggered. If present, will be checked by the scheduler. If empty, will be checked at creation and update.",
domain="[('model_id', '=', model_id), ('ttype', 'in', ('date', 'datetime'))]"),
'trg_date_range': fields.integer('Delay after trigger date',
help="Delay after the trigger date." \
"You can put a negative number if you need a delay before the" \
"trigger date, like sending a reminder 15 minutes before a meeting."),
'trg_date_range_type': fields.selection([('minutes', 'Minutes'), ('hour', 'Hours'),
('day', 'Days'), ('month', 'Months')], 'Delay type'),
'trg_date_calendar_id': fields.many2one(
'resource.calendar', 'Use Calendar',
help='When calculating a day-based timed condition, it is possible to use a calendar to compute the date based on working days.',
ondelete='set null',
),
'act_user_id': fields.many2one('res.users', 'Set Responsible'),
'act_followers': fields.many2many("res.partner", string="Add Followers"),
'server_action_ids': fields.many2many('ir.actions.server', string='Server Actions',
domain="[('model_id', '=', model_id)]",
help="Examples: email reminders, call object service, etc."),
'filter_pre_id': fields.many2one('ir.filters', string='Before Update Filter',
ondelete='restrict',
domain="[('model_id', '=', model_id.model)]",
help="If present, this condition must be satisfied before the update of the record."),
'filter_id': fields.many2one('ir.filters', string='Filter',
ondelete='restrict',
domain="[('model_id', '=', model_id.model)]",
help="If present, this condition must be satisfied before executing the action rule."),
'last_run': fields.datetime('Last Run', readonly=1, copy=False),
}
_defaults = {
'active': True,
'trg_date_range_type': 'day',
}
def onchange_kind(self, cr, uid, ids, kind, context=None):
clear_fields = []
if kind in ['on_create', 'on_create_or_write']:
clear_fields = ['filter_pre_id', 'trg_date_id', 'trg_date_range', 'trg_date_range_type']
elif kind in ['on_write', 'on_create_or_write']:
clear_fields = ['trg_date_id', 'trg_date_range', 'trg_date_range_type']
elif kind == 'on_time':
clear_fields = ['filter_pre_id']
return {'value': dict.fromkeys(clear_fields, False)}
def _filter(self, cr, uid, action, action_filter, record_ids, context=None):
""" filter the list record_ids that satisfy the action filter """
if record_ids and action_filter:
assert action.model == action_filter.model_id, "Filter model different from action rule model"
model = self.pool[action_filter.model_id]
domain = [('id', 'in', record_ids)] + eval(action_filter.domain)
ctx = dict(context or {})
ctx.update(eval(action_filter.context))
record_ids = model.search(cr, uid, domain, context=ctx)
return record_ids
def _process(self, cr, uid, action, record_ids, context=None):
""" process the given action on the records """
model = self.pool[action.model_id.model]
# modify records
values = {}
if 'date_action_last' in model._all_columns:
values['date_action_last'] = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if action.act_user_id and 'user_id' in model._all_columns:
values['user_id'] = action.act_user_id.id
if values:
model.write(cr, uid, record_ids, values, context=context)
if action.act_followers and hasattr(model, 'message_subscribe'):
follower_ids = map(int, action.act_followers)
model.message_subscribe(cr, uid, record_ids, follower_ids, context=context)
# execute server actions
if action.server_action_ids:
server_action_ids = map(int, action.server_action_ids)
for record in model.browse(cr, uid, record_ids, context):
action_server_obj = self.pool.get('ir.actions.server')
ctx = dict(context, active_model=model._name, active_ids=[record.id], active_id=record.id)
action_server_obj.run(cr, uid, server_action_ids, context=ctx)
return True
def _register_hook(self, cr, ids=None):
""" Wrap the methods `create` and `write` of the models specified by
the rules given by `ids` (or all existing rules if `ids` is `None`.)
"""
updated = False
if ids is None:
ids = self.search(cr, SUPERUSER_ID, [])
for action_rule in self.browse(cr, SUPERUSER_ID, ids):
model = action_rule.model_id.model
model_obj = self.pool[model]
if not hasattr(model_obj, 'base_action_ruled'):
# monkey-patch methods create and write
def create(self, cr, uid, vals, context=None, **kwargs):
# avoid loops or cascading actions
if context and context.get('action'):
return create.origin(self, cr, uid, vals, context=context)
# call original method with a modified context
context = dict(context or {}, action=True)
new_id = create.origin(self, cr, uid, vals, context=context, **kwargs)
# as it is a new record, we do not consider the actions that have a prefilter
action_model = self.pool.get('base.action.rule')
action_dom = [('model', '=', self._name),
('kind', 'in', ['on_create', 'on_create_or_write'])]
action_ids = action_model.search(cr, uid, action_dom, context=context)
# check postconditions, and execute actions on the records that satisfy them
for action in action_model.browse(cr, uid, action_ids, context=context):
if action_model._filter(cr, uid, action, action.filter_id, [new_id], context=context):
action_model._process(cr, uid, action, [new_id], context=context)
return new_id
def write(self, cr, uid, ids, vals, context=None, **kwargs):
# avoid loops or cascading actions
if context and context.get('action'):
return write.origin(self, cr, uid, ids, vals, context=context)
# modify context
context = dict(context or {}, action=True)
ids = [ids] if isinstance(ids, (int, long, str)) else ids
# retrieve the action rules to possibly execute
action_model = self.pool.get('base.action.rule')
action_dom = [('model', '=', self._name),
('kind', 'in', ['on_write', 'on_create_or_write'])]
action_ids = action_model.search(cr, uid, action_dom, context=context)
actions = action_model.browse(cr, uid, action_ids, context=context)
# check preconditions
pre_ids = {}
for action in actions:
pre_ids[action] = action_model._filter(cr, uid, action, action.filter_pre_id, ids, context=context)
# call original method
write.origin(self, cr, uid, ids, vals, context=context, **kwargs)
# check postconditions, and execute actions on the records that satisfy them
for action in actions:
post_ids = action_model._filter(cr, uid, action, action.filter_id, pre_ids[action], context=context)
if post_ids:
action_model._process(cr, uid, action, post_ids, context=context)
return True
model_obj._patch_method('create', create)
model_obj._patch_method('write', write)
model_obj.base_action_ruled = True
updated = True
return updated
def create(self, cr, uid, vals, context=None):
res_id = super(base_action_rule, self).create(cr, uid, vals, context=context)
if self._register_hook(cr, [res_id]):
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
super(base_action_rule, self).write(cr, uid, ids, vals, context=context)
if self._register_hook(cr, ids):
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return True
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
data = {'model': False, 'filter_pre_id': False, 'filter_id': False}
if model_id:
model = self.pool.get('ir.model').browse(cr, uid, model_id, context=context)
data.update({'model': model.model})
return {'value': data}
def _check_delay(self, cr, uid, action, record, record_dt, context=None):
if action.trg_date_calendar_id and action.trg_date_range_type == 'day':
start_dt = get_datetime(record_dt)
action_dt = self.pool['resource.calendar'].schedule_days_get_date(
cr, uid, action.trg_date_calendar_id.id, action.trg_date_range,
day_date=start_dt, compute_leaves=True, context=context
)
else:
delay = DATE_RANGE_FUNCTION[action.trg_date_range_type](action.trg_date_range)
action_dt = get_datetime(record_dt) + delay
return action_dt
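    # Illustrative: with trg_date_range=15, trg_date_range_type='minutes' and no
    # calendar configured, a record date of '2014-12-02 08:00:00' gives an
    # action_dt of 2014-12-02 08:15:00.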
def _check(self, cr, uid, automatic=False, use_new_cursor=False, context=None):
""" This Function is called by scheduler. """
context = context or {}
# retrieve all the action rules to run based on a timed condition
action_dom = [('kind', '=', 'on_time')]
action_ids = self.search(cr, uid, action_dom, context=context)
for action in self.browse(cr, uid, action_ids, context=context):
now = datetime.now()
if action.last_run:
last_run = get_datetime(action.last_run)
else:
last_run = datetime.utcfromtimestamp(0)
# retrieve all the records that satisfy the action's condition
model = self.pool[action.model_id.model]
domain = []
ctx = dict(context)
if action.filter_id:
domain = eval(action.filter_id.domain)
ctx.update(eval(action.filter_id.context))
if 'lang' not in ctx:
# Filters might be language-sensitive, attempt to reuse creator lang
# as we are usually running this as super-user in background
[filter_meta] = action.filter_id.get_metadata()
user_id = filter_meta['write_uid'] and filter_meta['write_uid'][0] or \
filter_meta['create_uid'][0]
ctx['lang'] = self.pool['res.users'].browse(cr, uid, user_id).lang
record_ids = model.search(cr, uid, domain, context=ctx)
# determine when action should occur for the records
date_field = action.trg_date_id.name
if date_field == 'date_action_last' and 'create_date' in model._all_columns:
get_record_dt = lambda record: record[date_field] or record.create_date
else:
get_record_dt = lambda record: record[date_field]
# process action on the records that should be executed
for record in model.browse(cr, uid, record_ids, context=context):
record_dt = get_record_dt(record)
if not record_dt:
continue
action_dt = self._check_delay(cr, uid, action, record, record_dt, context=context)
if last_run <= action_dt < now:
try:
context = dict(context or {}, action=True)
self._process(cr, uid, action, [record.id], context=context)
except Exception:
import traceback
_logger.error(traceback.format_exc())
action.write({'last_run': now.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
if automatic:
# auto-commit for batch processing
cr.commit()
|
andrius-preimantas/odoo
|
addons/base_action_rule/base_action_rule.py
|
Python
|
agpl-3.0
| 15,745 | 0.005017 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Message'
db.create_table('firstclass_message', (
('key', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True)),
('data', self.gf('django.db.models.fields.TextField')(default='{}')),
))
db.send_create_signal('firstclass', ['Message'])
def backwards(self, orm):
# Deleting model 'Message'
db.delete_table('firstclass_message')
models = {
'firstclass.message': {
'Meta': {'object_name': 'Message'},
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'})
}
}
complete_apps = ['firstclass']
|
bennylope/django-firstclass
|
firstclass/south_migrations/0001_initial.py
|
Python
|
mit
| 1,013 | 0.00691 |
from flask import Flask
from flaskext.cache import Cache
from flaskext.mongokit import BSONObjectIdConverter
from werkzeug.routing import BaseConverter
import settings
app = Flask(__name__)
app.config.from_object('woerterbuch.settings')
app.secret_key = settings.SECRET_KEY
## Hook up custom URL converters.
class RegexConverter(BaseConverter):
"""Regex-powered url converter."""
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
app.url_map.converters['regex'] = RegexConverter
app.url_map.converters['ObjectId'] = BSONObjectIdConverter
# Caching
cache = Cache(app)
# Templates
import woerterbuch.context_processors
# Views
import woerterbuch.views
|
fwenzel/strassendeutsch
|
woerterbuch/__init__.py
|
Python
|
gpl-3.0
| 739 | 0.005413 |
#
# Copyright (c) 2012 Will Page <compenguy@gmail.com>
# Derivative of ftpupload.py, credit to Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
# $Id: rsyncupload.py 2766 2014-12-02 02:45:36Z tkeffer $
#
"""For uploading files to a remove server via Rsync"""
import os
import errno
import sys
import subprocess
import syslog
import time
class RsyncUpload(object):
"""Uploads a directory and all its descendants to a remote server.
Keeps track of what files have changed, and only updates changed files."""
def __init__(self, local_root, remote_root,
server, user=None, delete=False, port=None,
ssh_options=None, compress=False, log_success=True):
"""Initialize an instance of RsyncUpload.
After initializing, call method run() to perform the upload.
server: The remote server to which the files are to be uploaded.
        user: The user name that is to be used. [Optional]
        delete: delete remote files that don't match local files. Use
with caution. [Optional. Default is False.]
"""
self.local_root = os.path.normpath(local_root)
self.remote_root = os.path.normpath(remote_root)
self.server = server
self.user = user
self.delete = delete
self.port = port
self.ssh_options = ssh_options
self.compress = compress
self.log_success = log_success
def run(self):
"""Perform the actual upload."""
t1 = time.time()
# If the source path ends with a slash, rsync interprets
# that as a request to copy all the directory's *contents*,
# whereas if it doesn't, it copies the entire directory.
# We want the former, so make it end with a slash.
if self.local_root.endswith(os.sep):
rsynclocalspec = self.local_root
else:
rsynclocalspec = self.local_root + os.sep
if self.user is not None and len(self.user.strip()) > 0:
rsyncremotespec = "%s@%s:%s" % (self.user, self.server, self.remote_root)
else:
rsyncremotespec = "%s:%s" % (self.server, self.remote_root)
if self.port is not None and len(self.port.strip()) > 0:
rsyncsshstring = "ssh -p %s" % (self.port,)
else:
rsyncsshstring = "ssh"
if self.ssh_options is not None and len(self.ssh_options.strip()) > 0:
rsyncsshstring = rsyncsshstring + " " + self.ssh_options
cmd = ['rsync']
# archive means:
# recursive, copy symlinks as symlinks, preserve permissions,
# preserve modification times, preserve group and owner,
# preserve device files and special files, but not ACLs,
# no hardlinks, and no extended attributes
cmd.extend(["--archive"])
# provide some stats on the transfer
cmd.extend(["--stats"])
# Remove files remotely when they're removed locally
if self.delete:
cmd.extend(["--delete"])
if self.compress:
cmd.extend(["--compress"])
cmd.extend(["-e %s" % rsyncsshstring])
cmd.extend([rsynclocalspec])
cmd.extend([rsyncremotespec])
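        # Illustrative only -- the host, port and paths below are hypothetical,
        # not taken from any real configuration.  At this point cmd is an
        # argument list such as:
        #   ['rsync', '--archive', '--stats', '--delete',
        #    '-e ssh -p 2222', '/home/weewx/public_html/',
        #    'web@example.com:/var/www/html']
        # Note that the ssh command is passed as a single "-e ..." element.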
try:
rsynccmd = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = rsynccmd.communicate()[0]
stroutput = stdout.encode("utf-8").strip()
except OSError, e:
if e.errno == errno.ENOENT:
syslog.syslog(syslog.LOG_ERR, "rsyncupload: rsync does not appear to be installed on this system. (errno %d, \"%s\")" % (e.errno, e.strerror))
raise
# we have some output from rsync so generate an appropriate message
if stroutput.find('rsync error:') < 0:
# no rsync error message so parse rsync --stats results
rsyncinfo = {}
for line in iter(stroutput.splitlines()):
if line.find(':') >= 0:
(n,v) = line.split(':', 1)
rsyncinfo[n.strip()] = v.strip()
# get number of files and bytes transferred and produce an
# appropriate message
try:
if 'Number of regular files transferred' in rsyncinfo:
N = rsyncinfo['Number of regular files transferred']
else:
N = rsyncinfo['Number of files transferred']
Nbytes = rsyncinfo['Total transferred file size']
if N is not None and Nbytes is not None:
rsync_message = "rsync'd %d files (%s) in %%0.2f seconds" % (int(N), Nbytes)
else:
rsync_message = "rsync executed in %0.2f seconds"
except:
rsync_message = "rsync executed in %0.2f seconds"
else:
# suspect we have an rsync error so tidy stroutput
# and display a message
stroutput = stroutput.replace("\n", ". ")
stroutput = stroutput.replace("\r", "")
syslog.syslog(syslog.LOG_ERR, "rsyncupload: [%s] reported errors: %s" % (cmd, stroutput))
rsync_message = "rsync executed in %0.2f seconds"
        t2 = time.time()
if self.log_success:
syslog.syslog(syslog.LOG_INFO, "rsyncupload: " + rsync_message % (t2-t1))
if __name__ == '__main__':
import weewx
import configobj
weewx.debug = 1
syslog.openlog('rsyncupload', syslog.LOG_PID|syslog.LOG_CONS)
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
if len(sys.argv) < 2 :
print """Usage: rsyncupload.py path-to-configuration-file [path-to-be-rsync'd]"""
sys.exit(weewx.CMD_ERROR)
try :
config_dict = configobj.ConfigObj(sys.argv[1], file_error=True)
except IOError:
print "Unable to open configuration file ", sys.argv[1]
raise
if len(sys.argv) == 2:
try:
rsync_dir = os.path.join(config_dict['WEEWX_ROOT'],
config_dict['StdReport']['HTML_ROOT'])
except KeyError:
print "No HTML_ROOT in configuration dictionary."
sys.exit(1)
else:
rsync_dir = sys.argv[2]
rsync_upload = RsyncUpload(
rsync_dir,
**config_dict['StdReport']['RSYNC'])
rsync_upload.run()
|
tony-rasskazov/meteo
|
weewx/bin/weeutil/rsyncupload.py
|
Python
|
mit
| 6,592 | 0.005765 |
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import configparser
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, RegexHandler, ConversationHandler
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from copy import deepcopy
import logging
import logging.handlers
from decisionTreeSupport import init, convert, getClassName
import xml.etree.ElementTree as ET
tree = ET.parse('config.xml.localSafeCopy')
root = tree.getroot()
Telegram_BOTID = root.find('telegramBotID').text
AdminPassword = root.find('adminPassword').text
datasets = {}
for ds in root.findall('dataset'):
name = ds.get('name')
datasets[name] = {}
datasets[name]['dataset_name'] = ds.find('filename').text
datasets[name]['class_column'] = int(ds.find('classColumn').text)
datasets[name]['data_columns'] = [int(x) for x in ds.find('dataColumns').text.split(',')]
if ds.find('successorOf') is not None:
datasets[name]['successorOf'] = ds.find('successorOf').text
datasets[name]['previousExitClass'] = ds.find('previousExitClass').text
del tree, root
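# Illustrative only: based on the find()/get() calls above, the XML file read
# here is assumed to look roughly like the following (all values are made up):
#
#   <config>
#       <telegramBotID>123456789:ABCdefGhi...</telegramBotID>
#       <adminPassword>hunter2</adminPassword>
#       <dataset name="Animals">
#           <filename>animals.csv</filename>
#           <classColumn>0</classColumn>
#           <dataColumns>1,2,3,4</dataColumns>
#       </dataset>
#       <dataset name="Dogs">
#           <filename>dogs.csv</filename>
#           <classColumn>0</classColumn>
#           <dataColumns>1,2,3</dataColumns>
#           <successorOf>Animals</successorOf>
#           <previousExitClass>dog</previousExitClass>
#       </dataset>
#   </config>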
CHOOSINGTREE, INTERACT = range(2)
LOG_FILENAME = 'logs.log'
treeData = {}
availableClassifierName = []
logging.basicConfig(filename=LOG_FILENAME, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.addHandler(logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=20000000, backupCount=5))
from botFunctions import *
def start(bot, update):
user = update.message.from_user
logger.debug("User %s typed /start." % user.name)
message = "Ciao, e benvenuto!"
message += "\nSono ancora in sviluppo, ecco la lista dei comandi attualmente disponibili:" \
"\n/exploretree Inizia ad esplorare gli alberi" \
"\n/help mostra la lista dei comandi disponibili"
bot.send_message(chat_id=update.message.chat_id, text=message)
def startInteraction(bot, update, chat_data):
user = update.message.from_user
logger.debug("User %s is starting the interaction." % user.name)
    chat_data.clear()
reply_keyboard = []
for k in availableClassifierName:
if 'isSuccessors' not in treeData[k]:
reply_keyboard.append([k])
reply_keyboard.append(['/cancel'])
update.message.reply_text('Ciao, scegli cosa vuoi che indovini.\n\n /cancel se vuoi terminare! ',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
return INTERACT
def interactionManager(bot, update, chat_data):
chose = update.message.text
if chose in treeData:
chat_data['chose'] = chose
return interact(bot, update, chat_data, chose)
elif 'chose' in chat_data:
return interact(bot, update, chat_data, chat_data['chose'])
else:
bot.send_message(chat_id=update.message.chat_id, text="Scusa, ma non credo di disporre di questo dato...")
return startInteraction(bot, update, chat_data)
def interact(bot, update, chat_data, chose):
# Retrieve the data dictionary for tree interactionManager
if chose in chat_data:
data = chat_data[chose]
else:
data = deepcopy(treeData[chose])
chat_data[chose] = data
chat_data['step'] = 1 # 1 = ask question, 0 = process answer
if 'conversationHistory' not in chat_data:
chat_data['conversationHistory'] = {}
dt = treeData['dt' + chose]
while not data['__stop']:
toAsk = data['toAsk']
if data['step'] == 1:
if 'isSuccessors' in data and toAsk['feature'] in chat_data['conversationHistory']:
chat_data['step'] = 0
update.message.text = str(chat_data['conversationHistory'][toAsk['feature']])
if 'valueRange' in toAsk:
# IF the feature has numeric value within an interval:
if chat_data['step']:
question = data['questions'][toAsk['feature']] + "Range: " + str(toAsk['valueRange'])
update.message.reply_text(question, reply_markup=ReplyKeyboardRemove())
chat_data['step'] = 0
return INTERACT
else:
user_value_for_feature = convert(update.message.text.strip())
if toAsk['valueRange'][0] <= user_value_for_feature <= toAsk['valueRange'][1]:
chat_data['conversationHistory'][toAsk['feature']] = user_value_for_feature
data['step'] = 0
data['s'][toAsk['feature']] = user_value_for_feature
data = dt.classify_by_asking_questions(data['actualNode'], data)
chat_data['step'] = 1
else:
question = data['questions'][toAsk['feature']] + "Range: " + str(toAsk['valueRange'])
update.message.reply_text(question, reply_markup=ReplyKeyboardRemove())
return INTERACT
elif 'possibleAnswer' in toAsk:
# If the features has a symbolic value
if chat_data['step']:
if 'featuresHumanization' in data and toAsk['feature'] in data['featuresHumanization']:
reply_keyboard = [[str(x) for x in data['featuresHumanization'][toAsk['feature']]]]
else:
reply_keyboard = [[str(x) for x in toAsk['possibleAnswer']]]
update.message.reply_text(data['questions'][toAsk['feature']],
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
chat_data['step'] = 0
return INTERACT
else:
if 'featuresHumanization' in data and toAsk['feature'] in data['featuresHumanization']:
user_value_for_feature = convert(
data['featuresHumanization'][toAsk['feature']][update.message.text.strip()])
else:
user_value_for_feature = convert(update.message.text.strip())
if user_value_for_feature in toAsk['possibleAnswer']:
chat_data['conversationHistory'][toAsk['feature']] = user_value_for_feature
data['step'] = 0
data['toAsk']['givenAnswer'] = user_value_for_feature
data = dt.classify_by_asking_questions(data['actualNode'], data)
chat_data['step'] = 1
else:
if 'featuresHumanization' in data and toAsk['feature'] in data['featuresHumanization']:
reply_keyboard = [[str(x) for x in data['featuresHumanization'][toAsk['feature']]]]
else:
reply_keyboard = [[str(x) for x in toAsk['possibleAnswer']]]
update.message.reply_text("Valore non valido!\n" + data['questions'][toAsk['feature']],
reply_markup=ReplyKeyboardMarkup(reply_keyboard,
one_time_keyboard=True))
return INTERACT
else:
logger.critical("Sono finito in uno stato morto...")
logger.critical("Albero: " + chat_data[chose])
logger.critical("Conversation Detal: \n" + str(chat_data['conversationHistory']))
del chat_data[chose], data, chat_data['chose'], chat_data['conversationHistory']
update.message.reply_text(
"Perdona, mi sono rotto un braccio! devo scappare in ospedale :("
"\nTi lascio con mio fratello, ma devi ricominciare.",
reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
# update.message.reply_text("Ottimo! Ho trovato qualcosa!\n")
message = ""
classification = data['a']
del classification['solution_path']
which_classes = list(classification.keys())
which_classes = sorted(which_classes, key=lambda x: classification[x], reverse=True)
if classification[which_classes[0]] < 1:
message += "\nEcco la probabilità delle risposte, io sceglierei la prima ;)\n"
message += "\n " + str.ljust("Classe", 30) + "Probabilità"
message += "\n ---------- -----------"
for which_class in which_classes:
            if which_class != 'solution_path' and classification[which_class] > 0:
message += "\n " + str.ljust(getClassName(which_class), 30) + str(
round(classification[which_class], 2))
else:
if 'singleAnswer' in data['interaction']:
message += data['interaction']['singleAnswer'] + '\n'
else:
message += "\n\nSai cosa?, sono quasi sicuro che la risposta corretta sia "
if str(which_classes[0][5:]) in data['classHumanization']:
message += getClassName(data['classHumanization'][str(which_classes[0][5:])])
else:
message += getClassName(str(which_classes[0]))
    # handling of connections among trees
if 'hasSuccessors' in data:
update.message.reply_text("Credo di essere sulla buona strada...\n")
chat_data['chose'] = data['successorsMap'][getClassName(which_classes[0])]
del data, chat_data[chose]
return interact(bot, update, chat_data, chat_data['chose'])
logger.debug("Conversation with :" + update.message.from_user.name)
logger.debug(str(chat_data['conversationHistory']))
message += "\nCosa vuoi fare?"
reply_keyboard = [['Ricomincia', 'Esci'], ] # ['Valuta la classificazione']]
update.message.reply_text(message, reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
del chat_data[chose], data, chat_data['chose'], chat_data['conversationHistory']
return CHOOSINGTREE
def main():
for name, v in datasets.items():
logging.info("Start training tree " + name)
print("Start training tree " + name)
data = init(v['dataset_name'], v['class_column'], v['data_columns'])
treeData['dt' + name] = deepcopy(data['dt'])
del data['dt']
treeData[name] = deepcopy(data)
# data['actualNode'].display_decision_tree(" ")
del data
logging.info("End training tree " + name)
print("End training tree " + name)
# computing connection among trees
for name, v in datasets.items():
if 'successorOf' in v:
treeData[name]['isSuccessors'] = True
treeData[v['successorOf']]['hasSuccessors'] = True
if 'successorsMap' in treeData[v['successorOf']]:
treeData[v['successorOf']]['successorsMap'][v['previousExitClass']] = name
else:
treeData[v['successorOf']]['successorsMap'] = {v['previousExitClass']: name}
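    # Illustrative only (dataset names and classes are made up): if a dataset
    # "Dogs" declares successorOf "Animals" with previousExitClass "dog", the
    # loop above sets treeData['Dogs']['isSuccessors'] = True and
    # treeData['Animals']['successorsMap'] == {'dog': 'Dogs'}.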
for k in treeData.keys():
if not k.startswith('dt'):
availableClassifierName.append(k)
logging.info("Bot Starting...!")
updater = Updater(token=Telegram_BOTID)
dispatcher = updater.dispatcher
startHandler = CommandHandler(command='start', callback=start)
helpHandler = CommandHandler(command='help', callback=help)
settingsHandler = CommandHandler(command='settings', callback=settings)
adminIdentify = CommandHandler(command=AdminPassword, callback=imAdmin, pass_chat_data=True)
serverInfo = CommandHandler(command='getIP', callback=getServerInfo, pass_chat_data=True)
conv_handler = ConversationHandler(
entry_points=[CommandHandler('exploretree', startInteraction, pass_chat_data=True)],
states={
INTERACT: [ # RegexHandler('^(Animals)$', interactionManager, pass_chat_data=True),
MessageHandler(Filters.text, interactionManager, pass_chat_data=True)],
CHOOSINGTREE: [RegexHandler('^(Ricomincia)$', startInteraction, pass_chat_data=True),
RegexHandler('^(Esci)$', cancel, pass_chat_data=True),
RegexHandler('^(Valuta la classificazione)$', tbd, pass_chat_data=True)]
},
fallbacks=[CommandHandler('cancel', cancel, pass_chat_data=True),
MessageHandler(Filters.command, unknown)]
)
# echoHandler = MessageHandler(Filters.text, echo)
unknownCommandHandler = MessageHandler(Filters.command, unknown)
dispatcher.add_handler(adminIdentify)
dispatcher.add_handler(serverInfo)
dispatcher.add_handler(startHandler)
dispatcher.add_handler(helpHandler)
dispatcher.add_handler(settingsHandler)
dispatcher.add_handler(conv_handler)
# dispatcher.add_handler(echoHandler)
dispatcher.add_handler(unknownCommandHandler)
dispatcher.add_error_handler(error)
updater.start_polling()
logging.info("Bot Started!")
print("Bot Started correctly!")
updater.idle()
if __name__ == '__main__':
main()
|
giuva90/TreeBot
|
bot.py
|
Python
|
gpl-3.0
| 11,462 | 0.022596 |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
COHORTE configuration file parser: converts a parsed configuration file to
beans
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python standard library
import collections
import logging
import uuid
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Instantiate, \
Requires
# COHORTE constants
import cohorte
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"
# Version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Component to be instantiated
Component = collections.namedtuple(
'Component', ('factory', 'name', 'properties'))
# Bundle to be installed
Bundle = collections.namedtuple(
'Bundle', ('name', 'filename', 'properties', 'version', 'optional'))
# Simplest configuration possible
BootConfiguration = collections.namedtuple(
'BootConfiguration', ('bundles', 'composition', 'properties',
'environment', 'boot_args'))
# Boot configuration + Isolate basic description
Isolate = collections.namedtuple(
'Isolate', BootConfiguration._fields + ('name', 'kind', 'node',
'level', 'sublevel'))
def _recursive_namedtuple_convert(data):
"""
Recursively converts the named tuples in the given object to dictionaries
:param data: An object in a named tuple or its children
:return: The converted object
"""
if isinstance(data, list):
# List
return [_recursive_namedtuple_convert(item) for item in data]
elif hasattr(data, '_asdict'):
# Named tuple
dict_value = dict(data._asdict())
for key, value in dict_value.items():
dict_value[key] = _recursive_namedtuple_convert(value)
return dict_value
else:
# Standard object
return data
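# Illustrative only: converting one of the named tuples defined above yields
# plain dictionaries, recursively ('my.bundle' and the version are made up):
#   _recursive_namedtuple_convert([Bundle('my.bundle', None, {}, '1.0', False)])
#   == [{'name': 'my.bundle', 'filename': None, 'properties': {},
#        'version': '1.0', 'optional': False}]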
# ------------------------------------------------------------------------------
@ComponentFactory('cohorte-config-parser-factory')
@Provides(cohorte.SERVICE_CONFIGURATION_READER)
@Requires('_reader', cohorte.SERVICE_FILE_READER)
@Instantiate('cohorte-config-parser')
class BootConfigParser(object):
"""
Boot configuration parser
"""
def __init__(self):
"""
Sets up the members
"""
# File reader
self._reader = None
# Loaded isolates configurations
self._isolates = None
@staticmethod
def _parse_bundle(json_object):
"""
Reads the given JSON object and returns its Bundle representation
:param json_object: A parsed JSON object
:return: A Bundle object
:raise KeyError: A mandatory parameter is missing
"""
# Use a copy of the properties
properties = {}
json_properties = json_object.get('properties')
if json_properties:
properties.update(json_properties)
return Bundle(name=json_object['name'],
filename=json_object.get('file'),
properties=properties,
version=json_object.get('version'),
optional=json_object.get('optional', False))
def _parse_bundles(self, bundles):
"""
Parses the bundles in the given list. Returns an empty list if the
given one is None or empty.
:param bundles: A list of bundles representations
:return: A list of Bundle objects
:raise KeyError: A mandatory parameter is missing
"""
if not bundles:
return []
return [self._parse_bundle(bundle) for bundle in bundles]
@staticmethod
def _parse_component(json_object):
"""
Reads the given JSON object and returns its Component representation
:param json_object: A parsed JSON object
:return: A Component object
:raise KeyError: A mandatory parameter is missing
"""
# Mandatory values
factory = json_object['factory']
# Computed name (if needed)
name = json_object.get('name', factory + '-instance')
# Use a copy of the properties
properties = {}
json_properties = json_object.get('properties')
if json_properties:
properties.update(json_properties)
return Component(factory=factory, name=name, properties=properties)
def _parse_components(self, components):
"""
Parses the components in the given list. Returns an empty list if the
given one is None or empty.
:param components: A list of components representations
:return: A list of Component objects
:raise KeyError: A mandatory parameter is missing
"""
if not components:
return []
return [self._parse_component(component) for component in components]
def _parse_isolate(self, json_object):
"""
Reads the given JSON object and returns its Isolate representation
:param json_object: A parsed JSON object
:return: An Isolate object
:raise KeyError: A mandatory parameter is missing
"""
# Reuse the boot parser
boot_config = self.load_boot_dict(json_object)
        return Isolate(name=json_object['name'],
                       kind=json_object['kind'],
                       node=json_object.get('node'),
                       level=json_object['level'],
                       sublevel=json_object['sublevel'],
                       # Reuse boot configuration values
                       **boot_config._asdict())
def _prepare_configuration(self, uid, name, kind,
bundles=None, composition=None,
base_configuration=None):
"""
Prepares and returns a configuration dictionary to be stored in the
configuration broker, to start an isolate of the given kind.
:param uid: The isolate UID
:param name: The isolate name
:param kind: The kind of isolate to boot
:param bundles: Extra bundles to install
:param composition: Extra components to instantiate
:param base_configuration: Base configuration (to override)
:return: A configuration dictionary
(updated base_configuration if given)
        :raise IOError: Unknown/inaccessible kind of isolate
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
if isinstance(base_configuration, dict):
configuration = base_configuration
else:
configuration = {}
# Set up isolate properties
configuration['uid'] = uid \
or configuration.get('custom_uid') or str(uuid.uuid4())
configuration['name'] = name
configuration['kind'] = kind
# Boot configuration for this kind
new_boot = configuration.setdefault('boot', {})
new_boot.update(_recursive_namedtuple_convert(self.load_boot(kind)))
# Add bundles (or an empty list)
if bundles:
new_bundles = configuration.setdefault('bundles', [])
new_bundles.extend(_recursive_namedtuple_convert(
[self.normalize_bundle(bundle) for bundle in bundles]))
# Add components (or an empty list)
if composition:
new_compo = configuration.setdefault('composition', [])
new_compo.extend(_recursive_namedtuple_convert(composition))
# Return the configuration dictionary
return configuration
@staticmethod
def normalize_bundle(bundle):
"""
Make a Bundle object from the given Bundle-like object attributes,
using default values when necessary.
:param bundle: A Bundle-like object
:return: A Bundle object
:raise AttributeError: A mandatory attribute is missing
:raise ValueError: Invalid attribute value
"""
if isinstance(bundle, Bundle):
# Already a bundle
return bundle
# Bundle name is mandatory
name = bundle.name
if not name:
raise ValueError("A bundle must have a name: {0}".format(bundle))
# Get the filename
for fileattr in ('filename', 'file'):
filename = getattr(bundle, fileattr, None)
if filename:
break
# Normalize bundle properties
properties = getattr(bundle, 'properties', {})
if not isinstance(properties, dict):
properties = {}
# Normalize bundle version
version = getattr(bundle, 'version', None)
if version is not None:
version = str(version)
return Bundle(name, filename, properties, version,
getattr(bundle, 'optional', False))
def load_boot(self, kind):
"""
Loads the boot configuration for the given kind of isolate, or returns
the one in the cache.
:param kind: The kind of isolate to boot
:return: The loaded BootConfiguration object
        :raise IOError: Unknown/inaccessible kind of isolate
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
# Prepare & store the bean representation
return self.load_boot_dict(self.load_conf_raw('boot', kind))
def load_conf_raw(self, level, kind):
"""
Loads the boot configuration for the given kind of isolate, or returns
the one in the cache.
:param level: The level of configuration (boot, java, python)
:param kind: The kind of isolate to boot
:return: The loaded BootConfiguration object
        :raise IOError: Unknown/inaccessible kind of isolate
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
# Load the boot file
return self.read('{0}-{1}.js'.format(level, kind))
def load_boot_dict(self, dict_config):
"""
Parses a boot configuration from the given dictionary
:param dict_config: A configuration dictionary
:return: The parsed BootConfiguration object
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
# Use a copy of environment
environment = {}
json_env = dict_config.get('environment')
if json_env:
environment.update(json_env)
# Parse the properties
properties = {}
dict_properties = dict_config.get('properties')
if dict_properties:
properties.update(dict_properties)
# Prepare the bean representation
bundles = self._parse_bundles(dict_config.get('bundles'))
composition = self._parse_components(dict_config.get('composition'))
return BootConfiguration(bundles=bundles,
composition=composition,
boot_args=dict_config.get('boot_args'),
environment=environment,
properties=properties)
def prepare_isolate(self, uid, name, kind, level, sublevel,
bundles=None, composition=None):
"""
Prepares and returns a configuration dictionary to be stored in the
configuration broker, to start an isolate of the given kind.
:param uid: The isolate UID
:param name: The isolate name
:param kind: The kind of isolate to boot (pelix, osgi, ...)
:param level: The level of configuration (boot, java, python, ...)
:param sublevel: Category of configuration (monitor, isolate, ...)
:param bundles: Extra bundles to install
:param composition: Extra components to instantiate
:return: A configuration dictionary
        :raise IOError: Unknown/inaccessible kind of isolate
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
# Load the isolate model file
configuration = self.load_conf_raw(level, sublevel)
try:
# Try to load the isolate-specific configuration
# without logging "file not found" errors
isolate_conf = self.read(name + ".js", False)
except IOError:
# Ignore I/O errors (file not found)
# Propagate ValueError (parsing errors)
pass
else:
# Merge the configurations: this method considers that the first
# parameter has priority on the second
configuration = self._reader.merge_object(isolate_conf,
configuration)
# Extend with the boot configuration
return self._prepare_configuration(uid, name, kind,
bundles, composition, configuration)
def read(self, filename, reader_log_error=True):
"""
Reads the content of the given file, without parsing it.
:param filename: A configuration file name
:param reader_log_error: If True, the reader will log I/O errors
:return: The dictionary read from the file
"""
return self._reader.load_file(filename, 'conf',
log_error=reader_log_error)
|
ahmadshahwan/cohorte-runtime
|
python/cohorte/config/parser.py
|
Python
|
apache-2.0
| 14,427 | 0 |
# -*- coding: utf-8 -*-
# usage:
# > python seoreporter/__init__.py [type] [format] [run_id]
# example:
# > python seoreporter/__init__.py build junit d09b8571-5c8a-42ff-8ab7-c38f4f8871c4
# todo
# output valid jUnit XML output
# output html files in a folder
# output html pages that show the data
# output json
import yaml
import time
import datetime
import os
import MySQLdb
start = None
def report(db, report_type, report_format, run_id):
global start
report_data = []
start = time.time()
# print [report_type, report_format, run_id]
if report_type == 'build':
report_data = build_report(db, run_id)
elif report_type == 'status_code':
report_data = status_code_report(db, run_id)
elif report_type == 'all':
report_data = all_report(db, run_id)
else:
raise Exception('Report type not supported')
if report_format == 'junit':
return junit_format(report_type, report_data, run_id)
elif report_format == 'csv':
return csv_format(report_type, report_data, run_id)
elif report_format == 'xls':
return xls_format(report_type, report_data, run_id)
elif report_format == 'sql':
return sql_format(report_type, report_data, run_id)
elif report_format == 'html_files':
return html_files_format(report_type, report_data, run_id)
else:
raise Exception('Report format not supported')
def fetch_latest_run_id(db):
run_id = None
c = db.cursor()
c.execute('SELECT run_id FROM crawl_urls ORDER BY timestamp DESC LIMIT 1')
result = c.fetchone()
if result:
run_id = result[0]
return run_id
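# Illustrative usage only -- the connection credentials are hypothetical:
#   db = MySQLdb.connect(host='localhost', user='seo', passwd='secret', db='seosuite')
#   run_id = fetch_latest_run_id(db)
#   print report(db, 'build', 'junit', run_id)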
def all_report(db, run_id):
c = db.cursor(MySQLdb.cursors.DictCursor)
c.execute('''SELECT
id, run_id, level, content_hash, address, domain, path, external,
status_code, status, body, size, address_length, encoding, content_type,
response_time, redirect_uri, canonical, title_1, title_length_1,
title_occurences_1, meta_description_1, meta_description_length_1,
meta_description_occurrences_1, h1_1, h1_length_1, h1_2, h1_length_2,
h1_count, meta_robots, rel_next, rel_prev, lint_critical, lint_error,
lint_warn, lint_info, lint_results, timestamp
FROM crawl_urls WHERE run_id = %s ORDER BY timestamp DESC''', [run_id])
return [{
'name': 'all',
'fields': [
'id', 'run_id', 'level', 'content_hash', 'address', 'domain', 'path', 'external',
'status_code', 'status', 'body', 'size', 'address_length', 'encoding', 'content_type',
'response_time', 'redirect_uri', 'canonical', 'title_1', 'title_length_1',
'title_occurences_1', 'meta_description_1', 'meta_description_length_1',
'meta_description_occurrences_1', 'h1_1', 'h1_length_1', 'h1_2', 'h1_length_2',
'h1_count', 'meta_robots', 'rel_next', 'rel_prev', 'lint_critical', 'lint_error',
'lint_warn', 'lint_info', 'lint_results', 'timestamp',
],
'values': c.fetchall(),
}]
def status_code_report(db, run_id):
output = []
# c = db.cursor()
c = db.cursor(MySQLdb.cursors.DictCursor)
# 500 errors
# TODO add other error codes
c.execute('''SELECT address, timestamp, status_code FROM crawl_urls
WHERE run_id = %s AND external = 0 AND (status_code LIKE %s OR status_code = 0)
ORDER BY timestamp ASC''', (run_id, '5%',))
output.append({
'name': '5xx or 0 status codes',
'fields': ['address', 'timestamp', 'status_code'],
'values': c.fetchall(),
})
# 404s
c.execute('''SELECT address, timestamp, status_code FROM crawl_urls
WHERE run_id = %s AND external = 0 AND status_code LIKE %s
ORDER BY timestamp ASC''', (run_id, '4%',))
output.append({
'name': '4xx status codes',
'fields': ['address', 'timestamp', 'status_code'],
'values': c.fetchall(),
})
return output
def build_report(db, run_id):
output = []
# c = db.cursor()
c = db.cursor(MySQLdb.cursors.DictCursor)
# 500 errors
# TODO add other error codes
c.execute('''SELECT address, timestamp, status_code FROM crawl_urls
WHERE run_id = %s AND external = 0 AND (status_code LIKE %s OR status_code = 0)
ORDER BY timestamp ASC''', (run_id, '5%',))
output.append({
'name': '5xx or 0 status codes',
'fields': ['address', 'timestamp', 'status_code'],
'values': c.fetchall(),
})
# 404s
c.execute('''SELECT address, timestamp, status_code FROM crawl_urls
WHERE run_id = %s AND external = 0 AND status_code LIKE %s
ORDER BY timestamp ASC''', (run_id, '4%',))
output.append({
'name': '4xx status codes',
'fields': ['address', 'timestamp', 'status_code'],
'values': c.fetchall(),
})
# missing canonicals
c.execute('''SELECT address, timestamp FROM crawl_urls
WHERE run_id = %s AND external = 0 AND content_type = 'text/html' AND canonical IS NULL
ORDER BY timestamp ASC''', (run_id,))
output.append({
'name': 'missing canonical',
'fields': ['address', 'timestamp'],
'values': c.fetchall(),
})
# missing titles
c.execute('''SELECT address, timestamp FROM crawl_urls
WHERE run_id = %s AND external = 0 AND content_type = 'text/html' AND title_1 IS NULL
ORDER BY timestamp ASC''', (run_id,))
output.append({
'name': 'missing title',
'fields': ['address', 'timestamp'],
'values': c.fetchall(),
})
# missing meta descriptions
c.execute('''SELECT address, timestamp FROM crawl_urls
WHERE run_id = %s AND external = 0 AND content_type = 'text/html' AND meta_description_1 IS NULL
ORDER BY timestamp ASC''', (run_id,))
output.append({
'name': 'missing meta_description',
'fields': ['address', 'timestamp'],
'values': c.fetchall(),
})
# lint level critical
c.execute('''SELECT address, timestamp, lint_critical FROM crawl_urls
WHERE run_id = %s AND external = 0 AND lint_critical > 0
ORDER BY timestamp ASC''', (run_id,))
output.append({
'name': 'lint level critical',
'fields': ['address', 'timestamp', 'lint_critical'],
'values': c.fetchall(),
})
# lint level error
c.execute('''SELECT address, timestamp, lint_error FROM crawl_urls
WHERE run_id = %s AND external = 0 AND lint_error > 0
ORDER BY timestamp ASC''', (run_id,))
output.append({
'name': 'lint level error',
'fields': ['address', 'timestamp', 'lint_error'],
'values': c.fetchall(),
})
return output
# junit schema:
# https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model\
# /src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd
def junit_format(report_type, tests, run_id):
global start
errors = 0
output = ''
def junit_row(values):
o = ''
for v in values:
o += '\t\t<error type="addresses">%s</error>\n' % str(v['address'])
return o
def junit_row_flat(values):
o = ''
# for v in values:
o += '\t\t<error type="addresses">%s</error>\n' % (", ".join([v['address'] for v in values]))
return o
for test in tests:
# header
output += '\t<testcase name="%s">\n' % (test['name'])
# values
if test['values'] and len(test['values']) > 0:
errors += len(test['values'])
# put everything in one element because jenkins ignores > 1
output += junit_row_flat(test['values'])
# footer
output += '\t</testcase>\n'
header = '''<?xml version="1.0" encoding="UTF-8"?>
<testsuite
name="seoreporter-%s"
tests="%s"
timestamp="%s"
time="%s"
errors="%s"
failures=""
id="%s"
package="seoreporter"
skipped="0">\n''' % (
report_type,
len(tests),
datetime.datetime.utcnow(),
time.time() - start,
errors,
run_id
)
return header + output + '</testsuite>'
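# Illustrative only: for a 'build' report the returned string resembles the
# following (URLs, counts and timings are made up; the id echoes the run_id):
#   <?xml version="1.0" encoding="UTF-8"?>
#   <testsuite name="seoreporter-build" tests="7" ... errors="2"
#              id="d09b8571-5c8a-42ff-8ab7-c38f4f8871c4" package="seoreporter" skipped="0">
#       <testcase name="4xx status codes">
#           <error type="addresses">http://example.com/a, http://example.com/b</error>
#       </testcase>
#   </testsuite>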
def xls_format(report_type, tests, run_id):
output = '''<?xml version="1.0"?>
<Workbook xmlns="urn:schemas-microsoft-com:office:spreadsheet"
xmlns:o="urn:schemas-microsoft-com:office:office"
xmlns:x="urn:schemas-microsoft-com:office:excel"
xmlns:ss="urn:schemas-microsoft-com:office:spreadsheet"
xmlns:html="http://www.w3.org/TR/REC-html40">
<DocumentProperties xmlns="urn:schemas-microsoft-com:office:office">
<Version>14.0</Version>
</DocumentProperties>
<OfficeDocumentSettings xmlns="urn:schemas-microsoft-com:office:office">
<AllowPNG/>
</OfficeDocumentSettings>
<ExcelWorkbook xmlns="urn:schemas-microsoft-com:office:excel">
<WindowHeight>8000</WindowHeight>
<WindowWidth>15000</WindowWidth>
<WindowTopX>120</WindowTopX>
<WindowTopY>140</WindowTopY>
<ProtectStructure>False</ProtectStructure>
<ProtectWindows>False</ProtectWindows>
</ExcelWorkbook>
<Styles>
<Style ss:ID="Default" ss:Name="Normal">
<Alignment ss:Vertical="Bottom"/>
<Borders/>
<Font ss:FontName="Calibri" x:Family="Swiss" ss:Size="12" ss:Color="#000000"/>
<Interior/>
<NumberFormat/>
<Protection/>
</Style>
</Styles>'''
def xls_row(values):
o = ' <Row>\n'
for v in values:
o += ' <Cell>\n <Data ss:Type="String">' + str(v) + '</Data>\n </Cell>\n'
return o + ' </Row>\n'
for test in tests:
if test['values'] and len(test['values']) > 0:
# header
output += '\n <Worksheet ss:Name="%s"><Table ss:ExpandedColumnCount="%s" x:FullColumns="1" x:FullRows="1" ss:DefaultColumnWidth="150" ss:DefaultRowHeight="15">\n' % (test['name'].replace('_', ' ').title(), 2 + len(test['values'][0].keys()))
output += xls_row(['Run Id'] + [o.replace('_', ' ').title() for o in test['values'][0].keys()])
# values
for row in test['values']:
output += xls_row([run_id] + [str(v) for v in row.values()])
# footer
output += ''' </Table>
<WorksheetOptions xmlns="urn:schemas-microsoft-com:office:excel">
<PageLayoutZoom>0</PageLayoutZoom>
<ProtectObjects>False</ProtectObjects>
<ProtectScenarios>False</ProtectScenarios>
</WorksheetOptions>
</Worksheet>'''
output += '\n</Workbook>'
return output
def csv_format(report_type, tests, run_id):
output = ""
def csv_row(values):
return ",".join(values) + '\n'
for test in tests:
if test['values'] and len(test['values']) > 0:
# header
output += csv_row(['Run Id', 'Name'] + [o.replace('_', ' ').title() for o in test['values'][0].keys()])
# values
for row in test['values']:
output += csv_row([run_id, test['name'].replace('_', ' ').title()] + [str(v) for v in row.values()])
return output
def html_files_format(report_type, tests, run_id):
def file_row(path, content, folder_name="_seoreporter_html_", ext="html"):
path_name = path[:50].replace('http://', '').replace('https://', '').replace('/', '__')
file_name = folder_name + "/" + path_name + '.' + ext
if not os.path.exists(folder_name):
os.mkdir(folder_name)
with open(file_name, "w") as text_file:
text_file.write(content)
# print tests
for test in tests:
if test['values'] and len(test['values']) > 0:
for row in test['values']:
# print row
if row['content_type'] == 'text/html':
file_row(row['address'], row['body'])
return 'Done.'
def sql_format(report_type, tests, run_id):
output = ''
fields = [] # track all the column names used to build the CREATE TABLE
def sql_row(values, fields):
o = "INSERT INTO `seoreport` ("
# columns
o += ",".join(["`%s`" % (v) for v in fields])
o += ") VALUES ("
# values
o += ",".join([("'%s'" % (v) if v != 'NULL' else 'NULL') for v in values])
return o + ');\n'
for test in tests:
if test['values'] and len(test['values']) > 0:
fields += [o for o in test['values'][0].keys()]
for row in test['values']:
output += sql_row(
[run_id, test['name'].replace('_', ' ').title()] + [MySQLdb.escape_string(str(v)) for v in row.values()],
['run_id', 'report_type'] + [o for o in test['values'][0].keys()]
)
header = '''DROP TABLE IF EXISTS `seoreport`;
CREATE TABLE `seoreport` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`run_id` varchar(36) NOT NULL DEFAULT '',
`report_type` varchar(36) NOT NULL DEFAULT '',
'''
for v in set(fields): # dedupe them
header += ' `%s` varchar(2048) NOT NULL DEFAULT \'\',\n' % (v)
header += '''
PRIMARY KEY (`id`),
KEY `run_id` (`run_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n'''
return header + output
|
saymedia/seosuite
|
seoreporter/__init__.py
|
Python
|
mit
| 13,442 | 0.002232 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Thinkopen Brasil
# Copyright (C) Thinkopen Solutions Brasil (<http://www.tkobr.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'tko_point_of_sale_discount_cards',
'version': '0.032',
'description': 'This module applies selected discount on total',
'category': 'Customizations',
'sequence': 150,
'complexity': 'pos_customization',
'author': 'ThinkOpen Solutions Brasil',
'website': 'http://www.tkobr.com',
'images': ['images/oerp61.jpeg',
],
'depends': [
'point_of_sale',
'tko_point_of_sale_discount_on_order',
],
'data': [
'security/ir.model.access.csv',
'point_of_sale_view.xml',
'static/src/xml/pos.xml',
],
'qweb' : ['static/src/xml/discount.xml',],
'init': [],
'demo': [],
'update': [],
'test': [], #YAML files with tests
'installable': True,
'application': False,
'auto_install': False, #If it's True, the modules will be auto-installed when all dependencies are installed
'certificate': '',
}
|
bmya/tkobr-addons
|
tko_point_of_sale_discount_cards/__openerp__.py
|
Python
|
agpl-3.0
| 2,070 | 0.004348 |
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from robj.http import HTTPClient
from robj_test import robjhelp as testsuite
class HTTPClientTest(testsuite.TestCase):
def setUp(self):
testsuite.TestCase.setUp(self)
self.client = HTTPClient(self.server.geturi('/api/'), maxClients=1)
def testGET(self):
req = self.client.do_GET('/')
req.wait()
self.failUnlessEqual(self.getXML('/api'), req.response.content.read())
def testGETError(self):
req = self.client.do_GET('/foobar')
req.wait()
self.failUnlessEqual(req.response.status, 404)
def testPOST(self):
employee1 = self.getArchiveContents('employee1.xml')
req = self.client.do_POST('/employees', employee1)
req.wait()
clientEmployee = req.response.content.read()
self.failUnlessEqual(clientEmployee, self.getXML('/api/employees/0'))
def testPOSTError(self):
raise testsuite.SkipTestException, ('disable until automated build '
'failures can be diagnosed')
employee1 = self.getArchiveContents('employee1.xml')
req = self.client.do_POST('/', employee1)
req.wait()
self.failUnlessEqual(req.response.status, 501)
def testPUT(self):
# First post some data so that we can then update it.
employee1 = self.getArchiveContents('employee1.xml')
req = self.client.do_POST('/employees', employee1)
req.wait()
xml = req.response.content.read()
# Change the employees name from Fred to Bob.
xml2 = xml.replace('Fred', 'Bob')
req2 = self.client.do_PUT('/employees/0', xml2)
req2.wait()
respxml = req2.response.content.read()
self.failUnlessEqual(xml2, respxml)
self.failUnlessEqual(respxml, self.getXML('/api/employees/0'))
def testPUTError(self):
raise testsuite.SkipTestException, ('disable until automated build '
'failures can be diagnosed')
req = self.client.do_GET('/')
req.wait()
xml = req.response.content.read()
xml2 = xml.replace('1.0', '2.0')
req2 = self.client.do_PUT('/', xml2)
req2.wait()
self.failUnlessEqual(req2.response.status, 501)
def testDELETE(self):
# First post some data so that we can then update it.
employee1 = self.getArchiveContents('employee1.xml')
req = self.client.do_POST('/employees', employee1)
req.wait()
req2 = self.client.do_DELETE('/employees/0')
req2.wait()
self.failUnlessEqual(req2.response.status, 200)
req3 = self.client.do_DELETE('/employees/0')
req3.wait()
self.failUnlessEqual(req3.response.status, 404)
def testDELETEError(self):
req = self.client.do_DELETE('/')
req.wait()
self.failUnlessEqual(req.response.status, 501)
|
sassoftware/robj
|
robj_test/robjtest/httptest.py
|
Python
|
apache-2.0
| 3,456 | 0.000579 |
# Copyright 2017 DataCentred Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sentinel.tests.functional import base
from sentinel.tests.functional import client_fixtures as fixtures
COMPUTE_CREATE_START_QUERY = [
{'field': 'event_type', 'op': 'eq', 'value': 'compute.instance.create.start'}
]
class MeteringV2MetersTestCase(base.BaseTestCase):
def test_meters_by_type(self):
grant = self.useFixture(fixtures.UserProjectGrant(self.sentinel))
client = base.FederatedUserClient(grant.user.entity, grant.project.entity)
server = self.useFixture(fixtures.Server(client))
samples = self.sentinel.metering.samples.list(meter_name='vcpus')
resources = [s.resource_id for s in samples]
self.assertIn(server.entity.id, resources)
#events = self.sentinel.metering.events.list(q=COMPUTE_CREATE_START_QUERY)
#instances = [t['value'] for e in events for t in e['traits'] if t['name'] == 'instance_id']
#self.assertIn(server.entity.id, instances)
# vi: ts=4 et:
|
spjmurray/openstack-sentinel
|
sentinel/tests/functional/metering/v2/test_meters.py
|
Python
|
apache-2.0
| 1,567 | 0.005105 |
"""Auto-generated file, do not edit by hand. 878 metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_878 = PhoneMetadata(id='001', country_code=878, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{11}', possible_number_pattern='\\d{12}', example_number='101234567890'),
fixed_line=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA', example_number='101234567890'),
mobile=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA', example_number='101234567890'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='10\\d{10}', possible_number_pattern='\\d{12}', example_number='101234567890'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_code=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{5})(\\d{5})', format='\\1 \\2 \\3')])
|
ayushgoel/FixGoogleContacts
|
phonenumbers/data/region_878.py
|
Python
|
mit
| 1,847 | 0.009746 |
from fprops import *
from pylab import *
import sys
#P = fluid('water','helmholtz');
#P = fluid('ammonia','pengrob');
P = fluid('carbondioxide','pengrob');
print "SOLVING TRIPLE POINT..."
print "Fluid: %s\nData source: %s" %(P.name, P.source)
try:
p_t, rhof_t, rhog_t = P.triple_point()
except RuntimeError,e:
print "failed to solve triple point"
sys.exit(1)
pmax = 100e6
Tmin = P.T_t
if Tmin == 0:
Tmin = 0.4 * P.T_c
Tmax = 2 * P.T_c
vmin = 1./rhof_t
vmax = 2./rhog_t
TT = linspace(Tmin, Tmax, 100);
vv = logspace(log10(vmin),log10(vmax), 100);
goodT = []
goodv = []
badT = []
badv = []
for T in TT:
sys.stderr.write("+++ T = %f\r" % (T))
for v in vv:
rho = 1./v
S = P.set_Trho(T,rho)
p = S.p
if p > pmax:
continue
h = S.h
#print " p = %f bar, h = %f kJ/kg" % (p/1e5,h/1e3)
if(h > 8000e3):
continue
try:
S = P.set_ph(p,h)
T1 = S.T
rho1 = S.rho
except ValueError,e:
print "ERROR %s at p = %f, h = %f (T = %.12e, rho = %.12e)" % (str(e),p, h,T,rho)
badT.append(T); badv.append(v)
continue
if isnan(T1) or isnan(rho1):
print "ERROR at T1 = %f, rho1 = %f (T = %.12e, rho = %.12e)" % (T1, rho1,T,rho)
badT.append(T); badv.append(v)
else:
goodT.append(T); goodv.append(v)
#print " +++ GOOD RESULT T1 = %f, rho1 = %f" % (T1, rho1)
figure()
print "i \tbad T \tbad v"
for i in range(len(badT)):
print "%d\t%e\t%e" % (i,badT[i], badv[i])
print "TOTAL %d BAD POINTS" % (len(badT))
print "AXIS =",axis()
semilogx(badv, badT, 'rx')
axis([vmin,vmax,Tmin,Tmax])
print "AXIS =",axis()
hold(1)
semilogx(goodv, goodT, 'g.')
# plot saturation curves
TTs = linspace(P.T_t, P.T_c, 300)
TT1 = []
vf1 = []
vg1 = []
for T in TTs:
try:
S = P.set_Tx(T,0)
p = S.p
rhof = S.rho
S = P.set_Tx(T,1)
rhog = S.rho
except:
continue;
TT1.append(T)
vf1.append(1./rhof)
vg1.append(1./rhog)
semilogx(vf1,TT1,"b-")
semilogx(vg1,TT1,"b-")
axis([vmin,vmax,Tmin,Tmax])
title("convergence of (p,h) solver for %s" % P.name)
xlabel("specific volume")
ylabel("temperature")
show()
ion()
|
georgyberdyshev/ascend
|
models/johnpye/fprops/python/solve_ph_array.py
|
Python
|
gpl-2.0
| 2,054 | 0.041383 |
# vim: ts=4:sw=4:expandtab
# -*- coding: UTF-8 -*-
# BleachBit
# Copyright (C) 2008-2017 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module Windows
"""
from __future__ import absolute_import, print_function
from tests import common
from bleachbit.FileUtilities import extended_path, extended_path_undo
from bleachbit.Windows import *
from bleachbit import logger, FSE
import sys
import tempfile
import unittest
import platform
from decimal import Decimal
if 'win32' == sys.platform:
import _winreg
from win32com.shell import shell
def put_files_into_recycle_bin():
"""Put a file and a folder into the recycle bin"""
# make a file and move it to the recycle bin
import tempfile
tests = ('regular', u'unicode-emdash-u\u2014', 'long' + 'x' * 100)
for test in tests:
(fd, filename) = tempfile.mkstemp(
prefix='bleachbit-recycle-file', suffix=test)
os.close(fd)
move_to_recycle_bin(filename)
# make a folder and move it to the recycle bin
dirname = tempfile.mkdtemp(prefix='bleachbit-recycle-folder')
common.touch_file(os.path.join(dirname, 'file'))
move_to_recycle_bin(dirname)
@unittest.skipUnless('win32' == sys.platform, 'not running on windows')
class WindowsTestCase(common.BleachbitTestCase):
"""Test case for module Windows"""
def test_get_recycle_bin(self):
"""Unit test for get_recycle_bin"""
for f in get_recycle_bin():
self.assert_(os.path.exists(extended_path(f)), f)
if not common.destructive_tests('get_recycle_bin'):
return
put_files_into_recycle_bin()
# clear recycle bin
counter = 0
for f in get_recycle_bin():
counter += 1
FileUtilities.delete(f)
self.assert_(counter >= 3, 'deleted %d' % counter)
# now it should be empty
for f in get_recycle_bin():
self.fail('recycle bin should be empty, but it is not')
def test_delete_locked_file(self):
"""Unit test for delete_locked_file"""
tests = ('regular', u'unicode-emdash-u\u2014', 'long' + 'x' * 100)
for test in tests:
(fd, pathname) = tempfile.mkstemp(
prefix='bleachbit-delete-locked-file', suffix=test)
os.close(fd)
self.assert_(os.path.exists(pathname))
try:
delete_locked_file(pathname)
except pywintypes.error as e:
if 5 == e.winerror and not shell.IsUserAnAdmin():
pass
else:
raise
self.assert_(os.path.exists(pathname))
logger.info('reboot Windows and check the three files are deleted')
def test_delete_registry_key(self):
"""Unit test for delete_registry_key"""
# (return value, key, really_delete)
tests = ((False, 'HKCU\\Software\\BleachBit\\DoesNotExist', False, ),
(False, 'HKCU\\Software\\BleachBit\\DoesNotExist', True, ),
(True, 'HKCU\\Software\\BleachBit\\DeleteThisKey', False, ),
(True, 'HKCU\\Software\\BleachBit\\DeleteThisKey', True, ), )
# create a nested key
key = 'Software\\BleachBit\\DeleteThisKey'
subkey = key + '\\AndThisKey'
hkey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, subkey)
hkey.Close()
# test
for test in tests:
rc = test[0]
key = test[1]
really_delete = test[2]
return_value = delete_registry_key(key, really_delete)
self.assertEqual(rc, return_value)
if really_delete:
self.assertFalse(detect_registry_key(key))
# Test Unicode key. In BleachBit 0.7.3 this scenario would lead to
# the error (bug 537109)
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position
# 11: ordinal not in range(128)
key = r'Software\\BleachBit\\DeleteThisKey'
hkey = _winreg.CreateKey(
_winreg.HKEY_CURRENT_USER, key + r'\\AndThisKey-Ö')
hkey.Close()
return_value = delete_registry_key(u'HKCU\\' + key, True)
self.assertTrue(return_value)
return_value = delete_registry_key(u'HKCU\\' + key, True)
self.assertFalse(return_value)
def test_delete_registry_value(self):
"""Unit test for delete_registry_value"""
#
# test: value does exist
#
# create a name-value pair
key = 'Software\\BleachBit'
hkey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, key)
value_name = 'delete_this_value_name'
_winreg.SetValueEx(
hkey, value_name, 0, _winreg.REG_SZ, 'delete this value')
hkey.Close()
# delete and confirm
self.assertTrue(
delete_registry_value('HKCU\\' + key, value_name, False))
self.assertTrue(
delete_registry_value('HKCU\\' + key, value_name, True))
self.assertFalse(
delete_registry_value('HKCU\\' + key, value_name, False))
self.assertFalse(
delete_registry_value('HKCU\\' + key, value_name, True))
#
# test: value does not exist
#
self.assertFalse(delete_registry_value(
'HKCU\\' + key, 'doesnotexist', False))
self.assertFalse(delete_registry_value(
'HKCU\\' + key, 'doesnotexist', True))
self.assertFalse(delete_registry_value(
'HKCU\\doesnotexist', value_name, False))
self.assertFalse(delete_registry_value(
'HKCU\\doesnotexist', value_name, True))
def test_detect_registry_key(self):
"""Test for detect_registry_key()"""
self.assert_(detect_registry_key('HKCU\\Software\\Microsoft\\'))
self.assert_(not detect_registry_key('HKCU\\Software\\DoesNotExist'))
def test_get_autostart_path(self):
"""Unit test for get_autostart_path"""
pathname = get_autostart_path()
dirname = os.path.dirname(pathname)
self.assert_(os.path.exists(dirname),
'startup directory does not exist: %s' % dirname)
def test_get_known_folder_path(self):
"""Unit test for get_known_folder_path"""
version = platform.uname()[3][0:3]
ret = get_known_folder_path('LocalAppDataLow')
self.assertNotEqual(ret, '')
if version <= '6.0':
# Before Vista
self.assertEqual(ret, None)
return
# Vista or later
self.assertNotEqual(ret, None)
self.assert_(os.path.exists(ret))
def test_get_fixed_drives(self):
"""Unit test for get_fixed_drives"""
drives = []
for drive in get_fixed_drives():
drives.append(drive)
self.assertEqual(drive, drive.upper())
self.assert_("C:\\" in drives)
def test_get_windows_version(self):
"""Unit test for get_windows_version"""
v = get_windows_version()
self.assert_(v >= 5.1)
self.assert_(v > 5)
self.assert_(isinstance(v, Decimal))
def test_empty_recycle_bin(self):
"""Unit test for empty_recycle_bin"""
# check the function basically works
for drive in get_fixed_drives():
ret = empty_recycle_bin(drive, really_delete=False)
self.assert_(isinstance(ret, (int, long)))
if not common.destructive_tests('recycle bin'):
return
# check it deletes files for fixed drives
put_files_into_recycle_bin()
for drive in get_fixed_drives():
ret = empty_recycle_bin(drive, really_delete=True)
self.assert_(isinstance(ret, (int, long)))
# check it deletes files for all drives
put_files_into_recycle_bin()
ret = empty_recycle_bin(None, really_delete=True)
self.assert_(isinstance(ret, (int, long)))
        # Repeat for two reasons:
# 1. Trying to empty an empty recycling bin can cause
# a 'catastrophic failure' error (handled in the function)
# 2. It should show zero bytes were deleted
for drive in get_fixed_drives():
ret = empty_recycle_bin(drive, really_delete=True)
self.assertEqual(ret, 0)
def test_file_wipe(self):
"""Unit test for file_wipe
There are more tests in testwipe.py
"""
from bleachbit.WindowsWipe import file_wipe
dirname = tempfile.mkdtemp(prefix='bleachbit-file-wipe')
filenames = ('short', 'long' + 'x' * 250, u'utf8-ɡælɪk')
for filename in filenames:
longname = os.path.join(dirname, filename)
logger.debug('file_wipe(%s)', longname)
def _write_file(longname, contents):
self.write_file(longname, contents)
import win32api
shortname = extended_path_undo(
win32api.GetShortPathName(extended_path(longname)))
self.assertExists(shortname)
return shortname
def _test_wipe(contents):
shortname = _write_file(longname, contents)
logger.debug('test_file_wipe(): filename length={}, shortname length ={}, contents length={}'.format(
len(longname), len(shortname), len(contents)))
if shell.IsUserAnAdmin():
# wiping requires admin privileges
file_wipe(shortname)
file_wipe(longname)
else:
with self.assertRaises(pywintypes.error):
file_wipe(shortname)
file_wipe(longname)
self.assertExists(shortname)
os.remove(extended_path(shortname))
self.assertNotExists(shortname)
# A small file that fits in MFT
_test_wipe('')
# requires wiping of extents
_test_wipe('secret' * 100000)
import shutil
shutil.rmtree(dirname, True)
if shell.IsUserAnAdmin():
logger.warning('You should also run test_file_wipe() without admin privileges.')
else:
logger.warning('You should also run test_file_wipe() with admin privileges.')
def test_is_process_running(self):
# winlogon.exe runs on Windows XP and Windows 7
# explorer.exe does not run on Appveyor
tests = ((True, 'winlogon.exe'),
(True, 'WinLogOn.exe'),
(False, 'doesnotexist.exe'))
for test in tests:
self.assertEqual(test[0],
is_process_running(test[1]),
'Expecting is_process_running(%s) = %s' %
(test[1], test[0]))
def test_setup_environment(self):
"""Unit test for setup_environment"""
setup_environment()
envs = ['commonappdata', 'documents', 'music', 'pictures', 'video',
'localappdata']
version = platform.uname()[3][0:3]
if version >= '6.0':
envs.append('localappdatalow')
for env in envs:
self.assert_(os.path.exists(os.environ[env].decode('utf8')))
def test_split_registry_key(self):
"""Unit test for split_registry_key"""
tests = (('HKCU\\Software', _winreg.HKEY_CURRENT_USER, 'Software'),
('HKLM\\SOFTWARE', _winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE'),
('HKU\\.DEFAULT', _winreg.HKEY_USERS, '.DEFAULT'))
for (input_key, expected_hive, expected_key) in tests:
(hive, key) = split_registry_key(input_key)
self.assertEqual(expected_hive, hive)
self.assertEqual(expected_key, key)
def test_start_with_computer(self):
"""Unit test for start_with_computer*"""
b = start_with_computer_check()
self.assert_(isinstance(b, bool))
# opposite setting
start_with_computer(not b)
two_b = start_with_computer_check()
self.assert_(isinstance(two_b, bool))
self.assertEqual(b, not two_b)
# original setting
start_with_computer(b)
three_b = start_with_computer_check()
self.assert_(isinstance(b, bool))
self.assertEqual(b, three_b)
def test_parse_windows_build(self):
"""Unit test for parse_windows_build"""
tests = (('5.1.2600', Decimal('5.1')),
('5.1', Decimal('5.1')),
('10.0.10240', 10),
('10.0', 10))
for test in tests:
self.assertEqual(parse_windows_build(test[0]), test[1])
# test for crash
parse_windows_build()
parse_windows_build(platform.version())
parse_windows_build(platform.uname()[3])
def test_path_on_network(self):
"""Unit test for path_on_network"""
self.assertFalse(path_on_network('c:\\bleachbit.exe'))
self.assertFalse(path_on_network('a:\\bleachbit.exe'))
self.assertTrue(path_on_network('\\\\Server\\Folder\\bleachbit.exe'))
def test_shell_change_notify(self):
"""Unit test for shell_change_notify"""
ret = shell_change_notify()
self.assertEqual(ret, 0)
|
brahmastra2016/bleachbit
|
tests/TestWindows.py
|
Python
|
gpl-3.0
| 13,915 | 0.000216 |
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1006230072.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
biomodels/MODEL1006230072
|
MODEL1006230072/model.py
|
Python
|
cc0-1.0
| 427 | 0.009368 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of hr_timesheet_no_closed_project_task,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# hr_timesheet_invoice_hide_to_invoice is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# hr_timesheet_invoice_hide_to_invoice is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with hr_timesheet_no_closed_project_task.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
PROJECT_SELECTION = [('template', 'Template'),
('draft', 'New'),
('open', 'In Progress'),
('cancelled', 'Cancelled'),
('pending', 'Pending'),
('close', 'Closed')]
class ProjectTask(models.Model):
_inherit = 'project.task'
stage_closed = fields.Boolean(related='stage_id.closed', string='Closed',
readonly=True)
project_state = fields.Selection(PROJECT_SELECTION,
related='project_id.state',
string='Project State',
readonly=True)
|
acsone/acsone-addons
|
hr_timesheet_no_closed_project_task/models/project_task.py
|
Python
|
agpl-3.0
| 1,843 | 0 |
#!/usr/bin/env python
# encoding: utf-8
from django.contrib.auth.decorators import (
permission_required as original_permission_required,
login_required as original_login_required,)
from keyauth.decorators import key_required as original_key_required
from functools import wraps
class DjangoToYardDecorator(object):
'''
Adapt django's decorators to yard resources
'''
def __init__(self, func):
self.original_decorator = func
def __call__(self, *args, **kwargs):
def decorator(func):
@wraps(func)
def wrapper(klass, request, *rargs, **rkwargs):
def func_wrapper(request, *a, **k):
return func(klass, request, *rargs, **rkwargs)
original_decorator = self.original_decorator(*args, **kwargs)
return original_decorator(func_wrapper)(
request, *rargs, **rkwargs)
return wrapper
return decorator
def login_required(*args, **kwargs):
'''
Check if user is authenticated
'''
return DjangoToYardDecorator( original_login_required )(*args, **kwargs)
def permission_required(*args, **kwargs):
'''
Check if user has permissions
'''
return DjangoToYardDecorator(original_permission_required)(*args, **kwargs)
def key_required(*args, **kwargs):
'''
Check key for access
'''
return DjangoToYardDecorator( original_key_required )(*args, **kwargs)
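# Illustrative sketch (added comment, not part of the upstream module): how
# one of the adapted decorators might be applied to a yard resource method.
# The import path, resource class and method signature below are assumptions
# made purely for this example.
#
#     from yard import resources
#
#     class Books(resources.Resource):
#         @login_required()
#         def index(self, request, params):
#             # reached only for authenticated users; otherwise the wrapped
#             # django decorator produces the usual redirect/response
#             ...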
|
laginha/yard
|
src/yard/resources/decorators/adapted.py
|
Python
|
mit
| 1,471 | 0.004759 |
import os
import errno
class LockError (Exception):
pass
class LockIsLocked (LockError):
pass
class LockIsUnlocked (LockError):
pass
class Lock (object):
def __init__ (self, path):
self.path = path
self.locked = False
def acquire (self):
if self.locked:
            raise LockIsLocked()
try:
os.mkdir(self.path)
self.locked = True
except OSError, detail:
if detail.errno == errno.EEXIST:
raise LockIsLocked()
else:
raise
def release (self):
if not self.locked:
raise LockIsUnlocked()
try:
os.rmdir(self.path)
self.locked = False
except OSError, detail:
if detail.errno == errno.ENOENT:
raise LockIsUnlocked()
else:
raise
def __del__ (self):
if self.locked:
self.release()
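# Minimal usage sketch appended for illustration (not part of the original
# module). The lock path below is an arbitrary example value.
if __name__ == '__main__':
    lock = Lock('/tmp/gitblogger-example.lock')
    try:
        lock.acquire()
    except LockIsLocked:
        print 'another process already holds the lock'
    else:
        try:
            print 'lock acquired, doing work'
        finally:
            lock.release()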
|
larsks/gitblogger
|
gitblogger/lock.py
|
Python
|
gpl-3.0
| 966 | 0.009317 |
# Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Kyle Mestery, Cisco Systems, Inc.
# @author: Dave Tucker, Hewlett-Packard Development Company L.P.
import time
from oslo.config import cfg
import requests
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log
from neutron.plugins.common import constants
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
ODL_NETWORK = 'network'
ODL_NETWORKS = 'networks'
ODL_SUBNET = 'subnet'
ODL_SUBNETS = 'subnets'
ODL_PORT = 'port'
ODL_PORTS = 'ports'
not_found_exception_map = {ODL_NETWORKS: n_exc.NetworkNotFound,
ODL_SUBNETS: n_exc.SubnetNotFound,
ODL_PORTS: n_exc.PortNotFound}
odl_opts = [
cfg.StrOpt('url',
help=_("HTTP URL of OpenDaylight REST interface.")),
cfg.StrOpt('username',
help=_("HTTP username for authentication")),
cfg.StrOpt('password', secret=True,
help=_("HTTP password for authentication")),
cfg.IntOpt('timeout', default=10,
help=_("HTTP timeout in seconds.")),
cfg.IntOpt('session_timeout', default=30,
help=_("Tomcat session timeout in minutes.")),
]
cfg.CONF.register_opts(odl_opts, "ml2_odl")
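# Illustrative configuration sketch (comment only; the values shown are
# assumptions, only the option names come from odl_opts above): these options
# are read from an "[ml2_odl]" section of the neutron configuration, e.g.
#
#     [ml2_odl]
#     url = http://192.168.1.10:8080/controller/nb/v2/neutron
#     username = admin
#     password = admin
#     timeout = 10
#     session_timeout = 30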
def try_del(d, keys):
"""Ignore key errors when deleting from a dictionary."""
for key in keys:
try:
del d[key]
except KeyError:
pass
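# Illustrative example of try_del (comment only, added for clarity): keys
# that are present are removed, keys that are absent are silently ignored.
#
#     d = {'id': 1, 'status': 'ACTIVE'}
#     try_del(d, ['status', 'subnets'])
#     # d == {'id': 1}; the missing 'subnets' key raises no error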
class JsessionId(requests.auth.AuthBase):
"""Attaches the JSESSIONID and JSESSIONIDSSO cookies to an HTTP Request.
If the cookies are not available or when the session expires, a new
    set of cookies is obtained.
"""
def __init__(self, url, username, password):
"""Initialization function for JsessionId."""
        # NOTE(kmestery) The 'limit' parameter is intended to limit how much
# data is returned from ODL. This is not implemented in the Hydrogen
# release of OpenDaylight, but will be implemented in the Helium
# timeframe. Hydrogen will silently ignore this value.
self.url = str(url) + '/' + ODL_NETWORKS + '?limit=1'
self.username = username
self.password = password
self.auth_cookies = None
self.last_request = None
self.expired = None
self.session_timeout = cfg.CONF.ml2_odl.session_timeout * 60
self.session_deadline = 0
    def obtain_auth_cookies(self):
        """Make a REST call to obtain cookies for ODL authentication."""
r = requests.get(self.url, auth=(self.username, self.password))
r.raise_for_status()
jsessionid = r.cookies.get('JSESSIONID')
jsessionidsso = r.cookies.get('JSESSIONIDSSO')
if jsessionid and jsessionidsso:
self.auth_cookies = dict(JSESSIONID=jsessionid,
JSESSIONIDSSO=jsessionidsso)
def __call__(self, r):
"""Verify timestamp for Tomcat session timeout."""
if time.time() > self.session_deadline:
self.obtain_auth_cookies()
self.session_deadline = time.time() + self.session_timeout
r.prepare_cookies(self.auth_cookies)
return r
class OpenDaylightMechanismDriver(api.MechanismDriver):
"""Mechanism Driver for OpenDaylight.
This driver was a port from the Tail-F NCS MechanismDriver. The API
exposed by ODL is slightly different from the API exposed by NCS,
but the general concepts are the same.
"""
auth = None
out_of_sync = True
def initialize(self):
self.url = cfg.CONF.ml2_odl.url
self.timeout = cfg.CONF.ml2_odl.timeout
self.username = cfg.CONF.ml2_odl.username
self.password = cfg.CONF.ml2_odl.password
self.auth = JsessionId(self.url, self.username, self.password)
self.vif_type = portbindings.VIF_TYPE_OVS
self.vif_details = {portbindings.CAP_PORT_FILTER: True}
# Postcommit hooks are used to trigger synchronization.
def create_network_postcommit(self, context):
self.synchronize('create', ODL_NETWORKS, context)
def update_network_postcommit(self, context):
self.synchronize('update', ODL_NETWORKS, context)
def delete_network_postcommit(self, context):
self.synchronize('delete', ODL_NETWORKS, context)
def create_subnet_postcommit(self, context):
self.synchronize('create', ODL_SUBNETS, context)
def update_subnet_postcommit(self, context):
self.synchronize('update', ODL_SUBNETS, context)
def delete_subnet_postcommit(self, context):
self.synchronize('delete', ODL_SUBNETS, context)
def create_port_postcommit(self, context):
self.synchronize('create', ODL_PORTS, context)
def update_port_postcommit(self, context):
self.synchronize('update', ODL_PORTS, context)
def delete_port_postcommit(self, context):
self.synchronize('delete', ODL_PORTS, context)
def synchronize(self, operation, object_type, context):
"""Synchronize ODL with Neutron following a configuration change."""
if self.out_of_sync:
self.sync_full(context)
else:
self.sync_object(operation, object_type, context)
def filter_create_network_attributes(self, network, context, dbcontext):
"""Filter out network attributes not required for a create."""
try_del(network, ['status', 'subnets'])
def filter_create_subnet_attributes(self, subnet, context, dbcontext):
"""Filter out subnet attributes not required for a create."""
pass
def filter_create_port_attributes(self, port, context, dbcontext):
"""Filter out port attributes not required for a create."""
self.add_security_groups(context, dbcontext, port)
# TODO(kmestery): Converting to uppercase due to ODL bug
# https://bugs.opendaylight.org/show_bug.cgi?id=477
port['mac_address'] = port['mac_address'].upper()
try_del(port, ['status'])
def sync_resources(self, resource_name, collection_name, resources,
context, dbcontext, attr_filter):
"""Sync objects from Neutron over to OpenDaylight.
This will handle syncing networks, subnets, and ports from Neutron to
OpenDaylight. It also filters out the requisite items which are not
valid for create API operations.
"""
to_be_synced = []
for resource in resources:
try:
urlpath = collection_name + '/' + resource['id']
self.sendjson('get', urlpath, None)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
attr_filter(resource, context, dbcontext)
to_be_synced.append(resource)
key = resource_name if len(to_be_synced) == 1 else collection_name
# 400 errors are returned if an object exists, which we ignore.
self.sendjson('post', collection_name, {key: to_be_synced}, [400])
@utils.synchronized('odl-sync-full')
def sync_full(self, context):
"""Resync the entire database to ODL.
Transition to the in-sync state on success.
Note: we only allow a single thead in here at a time.
"""
if not self.out_of_sync:
return
dbcontext = context._plugin_context
networks = context._plugin.get_networks(dbcontext)
subnets = context._plugin.get_subnets(dbcontext)
ports = context._plugin.get_ports(dbcontext)
self.sync_resources(ODL_NETWORK, ODL_NETWORKS, networks,
context, dbcontext,
self.filter_create_network_attributes)
self.sync_resources(ODL_SUBNET, ODL_SUBNETS, subnets,
context, dbcontext,
self.filter_create_subnet_attributes)
self.sync_resources(ODL_PORT, ODL_PORTS, ports,
context, dbcontext,
self.filter_create_port_attributes)
self.out_of_sync = False
def filter_update_network_attributes(self, network, context, dbcontext):
"""Filter out network attributes for an update operation."""
try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
def filter_update_subnet_attributes(self, subnet, context, dbcontext):
"""Filter out subnet attributes for an update operation."""
try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
'allocation_pools', 'tenant_id'])
def filter_update_port_attributes(self, port, context, dbcontext):
"""Filter out port attributes for an update operation."""
self.add_security_groups(context, dbcontext, port)
try_del(port, ['network_id', 'id', 'status', 'mac_address',
'tenant_id', 'fixed_ips'])
create_object_map = {ODL_NETWORKS: filter_create_network_attributes,
ODL_SUBNETS: filter_create_subnet_attributes,
ODL_PORTS: filter_create_port_attributes}
update_object_map = {ODL_NETWORKS: filter_update_network_attributes,
ODL_SUBNETS: filter_update_subnet_attributes,
ODL_PORTS: filter_update_port_attributes}
def sync_single_resource(self, operation, object_type, obj_id,
context, attr_filter_create, attr_filter_update):
"""Sync over a single resource from Neutron to OpenDaylight.
Handle syncing a single operation over to OpenDaylight, and correctly
filter attributes out which are not required for the requisite
operation (create or update) being handled.
"""
dbcontext = context._plugin_context
if operation == 'create':
urlpath = object_type
method = 'post'
else:
urlpath = object_type + '/' + obj_id
method = 'put'
try:
obj_getter = getattr(context._plugin, 'get_%s' % object_type[:-1])
resource = obj_getter(dbcontext, obj_id)
except not_found_exception_map[object_type]:
LOG.debug(_('%(object_type)s not found (%(obj_id)s)'),
{'object_type': object_type.capitalize(),
'obj_id': obj_id})
else:
if operation == 'create':
attr_filter_create(self, resource, context, dbcontext)
elif operation == 'update':
attr_filter_update(self, resource, context, dbcontext)
try:
# 400 errors are returned if an object exists, which we ignore.
self.sendjson(method, urlpath, {object_type[:-1]: resource},
[400])
except Exception:
with excutils.save_and_reraise_exception():
self.out_of_sync = True
def sync_object(self, operation, object_type, context):
"""Synchronize the single modified record to ODL."""
obj_id = context.current['id']
self.sync_single_resource(operation, object_type, obj_id, context,
self.create_object_map[object_type],
self.update_object_map[object_type])
def add_security_groups(self, context, dbcontext, port):
"""Populate the 'security_groups' field with entire records."""
groups = [context._plugin.get_security_group(dbcontext, sg)
for sg in port['security_groups']]
port['security_groups'] = groups
def sendjson(self, method, urlpath, obj, ignorecodes=[]):
"""Send json to the OpenDaylight controller."""
headers = {'Content-Type': 'application/json'}
data = jsonutils.dumps(obj, indent=2) if obj else None
if self.url:
url = '/'.join([self.url, urlpath])
LOG.debug(_('ODL-----> sending URL (%s) <-----ODL') % url)
LOG.debug(_('ODL-----> sending JSON (%s) <-----ODL') % obj)
r = requests.request(method, url=url,
headers=headers, data=data,
auth=self.auth, timeout=self.timeout)
# ignorecodes contains a list of HTTP error codes to ignore.
if r.status_code in ignorecodes:
return
r.raise_for_status()
def bind_port(self, context):
LOG.debug(_("Attempting to bind port %(port)s on "
"network %(network)s"),
{'port': context.current['id'],
'network': context.network.current['id']})
for segment in context.network.network_segments:
if self.check_segment(segment):
context.set_binding(segment[api.ID],
self.vif_type,
self.vif_details)
LOG.debug(_("Bound using segment: %s"), segment)
return
else:
LOG.debug(_("Refusing to bind port for segment ID %(id)s, "
"segment %(seg)s, phys net %(physnet)s, and "
"network type %(nettype)s"),
{'id': segment[api.ID],
'seg': segment[api.SEGMENTATION_ID],
'physnet': segment[api.PHYSICAL_NETWORK],
'nettype': segment[api.NETWORK_TYPE]})
def validate_port_binding(self, context):
if self.check_segment(context.bound_segment):
LOG.debug(_('Binding valid.'))
return True
LOG.warning(_("Binding invalid for port: %s"), context.current)
def unbind_port(self, context):
LOG.debug(_("Unbinding port %(port)s on "
"network %(network)s"),
{'port': context.current['id'],
'network': context.network.current['id']})
def check_segment(self, segment):
"""Verify a segment is valid for the OpenDaylight MechanismDriver.
Verify the requested segment is supported by ODL and return True or
False to indicate this to callers.
"""
network_type = segment[api.NETWORK_TYPE]
return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,
constants.TYPE_VXLAN]
|
Juniper/contrail-dev-neutron
|
neutron/plugins/ml2/drivers/mechanism_odl.py
|
Python
|
apache-2.0
| 15,181 | 0 |
# -*- coding: utf-8 -*-
#
# Cheroke-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import validations
from util import *
from consts import *
URL_APPLY = '/plugin/error_redir/apply'
REDIRECTION_TYPE = [
('0', N_('Internal')),
('1', N_('External'))
]
VALIDATIONS = [
('new_redir', validations.is_path)
]
NOTE_ERROR = N_('HTTP Error to match.')
NOTE_REDIR = N_('Target to access whenever the HTTP Error occurs.')
NOTE_TYPE = N_('Whether the redirection should be Internal or External.')
def commit():
# New entry
key = CTK.post.pop('key')
new_error = CTK.post.pop('new_error')
new_redir = CTK.post.pop('new_redir')
new_type = CTK.post.pop('new_type')
if key and new_error and new_redir and new_type:
CTK.cfg['%s!%s!url' %(key, new_error)] = new_redir
CTK.cfg['%s!%s!show'%(key, new_error)] = new_type
return CTK.cfg_reply_ajax_ok()
# Modification
return CTK.cfg_apply_post()
def sorting_func (x,y):
if x == y == 'default':
return 0
if x == 'default':
return 1
if y == 'default':
return -1
return cmp(int(x), int(y))
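# Illustrative note (added comment): sorting_func orders the numeric error
# codes ascending and always places the special 'default' entry last, e.g.
#
#     sorted(['500', 'default', '404'], cmp=sorting_func)
#     # -> ['404', '500', 'default']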
class Content (CTK.Container):
def __init__ (self, refreshable, key, url_apply, **kwargs):
CTK.Container.__init__ (self, **kwargs)
# List
entries = CTK.cfg.keys(key)
entries.sort (sorting_func)
if entries:
table = CTK.Table({'id': 'error-redirection'})
table.set_header(1)
table += [CTK.RawHTML(x) for x in ('Error', 'Redirection', 'Type', '')]
for i in entries:
show = CTK.ComboCfg ('%s!%s!show'%(key,i), trans_options(REDIRECTION_TYPE))
redir = CTK.TextCfg ('%s!%s!url'%(key,i), False)
rm = CTK.ImageStock('del')
table += [CTK.RawHTML(i), redir, show, rm]
rm.bind ('click', CTK.JS.Ajax (url_apply,
data = {"%s!%s"%(key,i): ''},
complete = refreshable.JS_to_refresh()))
submit = CTK.Submitter (url_apply)
submit += table
self += submit
# Add new
redir_codes = [('default', _('Default Error'))]
redir_codes += filter (lambda x: not x[0] in entries, ERROR_CODES)
table = CTK.PropsTable()
table.Add (_('Error'), CTK.ComboCfg('new_error', redir_codes, {'class':'noauto'}), _(NOTE_ERROR))
table.Add (_('Redirection'), CTK.TextCfg ('new_redir', False, {'class':'noauto'}), _(NOTE_REDIR))
table.Add (_('Type'), CTK.ComboCfg('new_type', trans_options(REDIRECTION_TYPE), {'class':'noauto'}), _(NOTE_TYPE))
submit = CTK.Submitter(url_apply)
dialog = CTK.Dialog({'title': _('Add New Custom Error'), 'width': 540})
dialog.AddButton (_("Close"), 'close')
dialog.AddButton (_("Add"), submit.JS_to_submit())
submit += table
submit += CTK.HiddenField ({'name': 'key', 'value': key})
submit.bind ('submit_success', refreshable.JS_to_refresh())
submit.bind ('submit_success', dialog.JS_to_close())
dialog += submit
self += dialog
add_new = CTK.Button(_('Add New'))
add_new.bind ('click', dialog.JS_to_show())
self += add_new
class Plugin_error_redir (CTK.Plugin):
def __init__ (self, key, vsrv_num):
CTK.Plugin.__init__ (self, key)
url_apply = '%s/%s' %(URL_APPLY, vsrv_num)
# Content
refresh = CTK.Refreshable ({'id': 'plugin_error'})
refresh.register (lambda: Content(refresh, key, url_apply).Render())
self += CTK.Indenter (refresh)
# Validation, and Public URLs
CTK.publish ('^%s/[\d]+'%(URL_APPLY), commit, method="POST", validation=VALIDATIONS)
|
lmcro/webserver
|
admin/plugins/error_redir.py
|
Python
|
gpl-2.0
| 4,572 | 0.012905 |
"""
Bootstrapping script that creates a basic Pimlico setup, either for an existing config file or for a new project.
Distribute this with your Pimlico project code. You don't need to distribute Pimlico itself
with your project, since it can be downloaded later. Just distribute a directory tree containing your config files,
your own code and this Python script, which will fetch everything else it needs.
Another use is to get a whole new project up and running. Use the `newproject.py` script for that purpose, which
calls this script.
"""
from __future__ import print_function
import os
import sys
from io import open
# Provide simple Py2-3 compatibility without requiring other libraries
PY3 = sys.version_info[0] == 3
if PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
import tarfile
import json
RAW_URL = "https://raw.githubusercontent.com/markgw/pimlico/"
DOWNLOAD_URL = "https://github.com/markgw/pimlico/archive/"
GIT_URL = "https://github.com/markgw/pimlico.git"
GITHUB_API = "https://api.github.com"
def lookup_pimlico_versions():
# Use Github API to find all tagged releases
tag_api_url = "%s/repos/markgw/pimlico/tags" % GITHUB_API
try:
tag_response = urlopen(tag_api_url).read().decode("utf-8")
except Exception as e:
print("Could not fetch Pimlico release tags from {}: {}".format(tag_api_url, e))
sys.exit(1)
tag_data = json.loads(tag_response)
return [tag["name"] for tag in reversed(tag_data)]
def lookup_bleeding_edge(branch_url):
release_url = "{}admin/release.txt".format(branch_url)
try:
release_data = urlopen(release_url).read().decode("utf-8")
except Exception as e:
print("Could not fetch Pimlico release from {}: {}".format(release_url, e))
sys.exit(1)
return release_data.splitlines()[-1].lstrip("v")
def find_config_value(config_path, key, start_in_pipeline=False):
with open(config_path, "r", encoding="utf-8") as f:
in_pipeline = start_in_pipeline
for line in f:
line = line.strip("\n ")
if in_pipeline and line:
# Look for the required key in the pipeline section
line_key, __, line_value = line.partition("=")
if line_key.strip() == key:
return line_value.strip()
elif line.startswith("["):
# Section heading
# Start looking for keys if we're in the pipeline section
in_pipeline = line.strip("[]") == "pipeline"
elif line.upper().startswith("%% INCLUDE"):
# Found include directive: follow into the included file
filename = line[10:].strip()
# Get filename relative to current config file
filename = os.path.join(os.path.dirname(config_path), filename)
found_value = find_config_value(filename, key, start_in_pipeline=in_pipeline)
if found_value is not None:
return found_value
# Didn't find the key anywhere
return
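# Illustrative sketch of the lookup above (comment only; the config contents
# shown are an assumed example). Given a file containing:
#
#     [pipeline]
#     name=my_pipeline
#     release=0.9.1
#
# find_config_value(path, "release") returns "0.9.1", following any
# "%% INCLUDE other.conf" directives it meets along the way.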
def extract(tar_path):
extract_path = os.path.dirname(tar_path)
with tarfile.open(tar_path, "r:gz") as tar:
for item in tar:
tar.extract(item, extract_path)
def tar_dirname(tar_path):
with tarfile.open(tar_path, "r:gz") as tar:
# Expect first member to be a directory
member = tar.next()
if not member.isdir():
raise ValueError("downloaded tar file was expected to contain a directory, but didn't")
return member.name
def symlink(source, link_name):
"""
Symlink creator that works on Windows.
"""
os_symlink = getattr(os, "symlink", None)
if callable(os_symlink):
os_symlink(source, link_name)
else:
import ctypes
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
flags = 1 if os.path.isdir(source) else 0
if csl(link_name, source, flags) == 0:
raise ctypes.WinError()
def bootstrap(config_file, git=False):
current_dir = os.path.abspath(os.path.dirname(__file__))
branch_name = git if type(git) is str else "master"
branch_url = "{}{}/".format(RAW_URL, branch_name)
if os.path.exists(os.path.join(current_dir, "pimlico")):
print("Pimlico source directory already exists: delete it if you want to fetch again")
sys.exit(1)
# Check the config file to find the version of Pimlico we need
version = find_config_value(config_file, "release")
if version is None:
print("Could not find Pimlico release in config file %s" % config_file)
sys.exit(1)
major_version = int(version.partition(".")[0])
print("Config file requires Pimlico version {}".format(version))
available_releases = lookup_pimlico_versions()
bleeding_edge = lookup_bleeding_edge(branch_url)
tags = available_releases
# If the bleeding edge version is compatible (same major version) just use that
if int(bleeding_edge.lstrip("v").partition(".")[0]) == major_version:
print("Bleeding edge ({}) is compatible".format(bleeding_edge))
fetch_release = "master"
else:
if git:
print("Error: tried to clone the Git repo instead of fetching a release, but config file is not " \
"compatible with latest Pimlico version")
sys.exit(1)
# Find the latest release that has the same major version
compatible_tags = [t for t in tags if int(t.lstrip("v").partition(".")[0]) == major_version]
fetch_release = compatible_tags[-1]
print("Fetching latest release of major version {}, which is {}".format(major_version, fetch_release))
if git:
# Clone the latest version of the code from the Git repository
# Allow the git kwarg to name a branch to clone
if type(git) is str:
args = "--branch {} ".format(git)
else:
args = ""
print("Cloning git repository ({})".format("{} branch".format(git) if type(git) is str else "master"))
import subprocess
subprocess.check_call("git clone {}{}".format(args, GIT_URL), shell=True)
else:
archive_url = "%s%s.tar.gz" % (DOWNLOAD_URL, fetch_release)
print("Downloading Pimlico source code from {}".format(archive_url))
tar_download_path = os.path.join(current_dir, "archive.tar.gz")
with open(tar_download_path, "wb") as archive_file:
archive_file.write(urlopen(archive_url).read())
print("Extracting source code")
extracted_dirname = tar_dirname(tar_download_path)
extract(tar_download_path)
# Extracted source code: remove the archive
os.remove(tar_download_path)
os.rename(os.path.join(current_dir, extracted_dirname), os.path.join(current_dir, "pimlico"))
print("Pimlico source (%s) is now available in directory pimlico/" % fetch_release)
# Create symlink to pimlico.sh, so it's easier to run
print("Creating symlink pimlico.sh for running Pimlico")
symlink(os.path.join("pimlico", "bin", "pimlico.sh"), "pimlico.sh")
if __name__ == "__main__":
args = sys.argv[1:]
if "--git" in args:
args.remove("--git")
git = True
else:
git = False
if len(args) == 0:
print("Usage:")
print(" python bootstrap.py [--git] <config_file>")
print()
print("Specify a Pimlico config file to set up Pimlico for")
print("If you want to start a new project, with an empty config file, use the newproject.py script")
print()
print("If you specify --git, Pimlico will be cloned as a Git repository, rather ")
print("than downloaded from a release. This only works on Linux and requires that Git is ")
print("installed. Most of the time, you don't want to do this: it's only for Pimlico development")
sys.exit(1)
else:
config_file = os.path.abspath(args[0])
bootstrap(config_file, git=git)
|
markgw/pimlico
|
admin/bootstrap.py
|
Python
|
gpl-3.0
| 8,150 | 0.003067 |
"""setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
from distutils.sysconfig import get_python_version, get_python_lib
except ImportError:
from sysconfig import get_python_version
from distutils.sysconfig import get_python_lib
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.extension import Library
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
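# Illustrative examples of strip_module (comment only):
#
#     strip_module('foomodule.so')  -> 'foo'
#     strip_module('bar.pyd')       -> 'bar'
#     strip_module('baz')           -> 'baz'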
def write_stub(resource, pyfile):
f = open(pyfile,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __loader__, __file__",
" import sys, pkg_resources, imp",
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% resource,
" __loader__ = None; del __bootstrap__, __loader__",
" imp.load_dynamic(__name__,__file__)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
# stub __init__.py for packages distributed without one
NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename+'.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(get_python_lib()))
old, self.distribution.data_files = self.distribution.data_files,[]
for item in old:
if isinstance(item,tuple) and len(item)==2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized==site_packages or normalized.startswith(
site_packages+os.sep
):
item = realpath[len(site_packages)+1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self,cmdname,**kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname,self.bdist_dir)
kw.setdefault('skip_build',self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root; instcmd.root = None
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p,ext_name) in enumerate(ext_outputs):
filename,ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep,'/')
to_compile.extend(self.make_init_files())
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root,'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution,'dist_files',[]).append(
('bdist_egg',get_python_version(),self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base,dirs,files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base,name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution,'zip_safe',None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def make_init_files(self):
"""Create missing package __init__ files"""
init_files = []
for base,dirs,files in walk_egg(self.bdist_dir):
if base==self.bdist_dir:
# don't put an __init__ in the root
continue
for name in files:
if name.endswith('.py'):
if '__init__.py' not in files:
pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.')
if self.distribution.has_contents_for(pkg):
log.warn("Creating missing __init__.py for %s",pkg)
filename = os.path.join(base,'__init__.py')
if not self.dry_run:
f = open(filename,'w'); f.write(NS_PKG_STUB)
f.close()
init_files.append(filename)
break
else:
# not a package, don't traverse to subdirectories
dirs[:] = []
return init_files
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation',{}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info,'')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir:''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base]+filename)
for filename in dirs:
paths[os.path.join(base,filename)] = paths[base]+filename+'/'
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext,Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir,filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base,dirs,files = walker.next()
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base,dirs,files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag,fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)):
return flag
if not can_scan(): return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag,fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe)<>flag:
os.unlink(fn)
elif safe is not None and bool(safe)==flag:
f=open(fn,'wt'); f.write('\n'); f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base,name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
f = open(filename,'rb'); f.read(8) # skip magic & date
try:
code = marshal.load(f); f.close()
except ValueError:
f.seek(0); f.read(12) # skip magic & date & file size; file size added in Python 3.3
code = marshal.load(f); f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
if '__name__' in symbols and '__main__' in symbols and '.' not in module:
if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5
log.warn("%s: top-level module may be 'python -m' script", module)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names: yield name
for const in code.co_consts:
if isinstance(const,basestring):
yield const
elif isinstance(const,CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if sys.version_info > (3, 3):
return False # Can't scan recent formats
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
mode='w'
):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir)+1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'" % p)
if compress is None:
compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits
compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
#
|
mayankcu/Django-social
|
venv/Lib/site-packages/distribute-0.6.28-py2.7.egg/setuptools/command/bdist_egg.py
|
Python
|
bsd-3-clause
| 18,594 | 0.006023 |
import sys
from tempfile import NamedTemporaryFile, TemporaryFile, mktemp
import os
from numpy import memmap
from numpy import arange, allclose, asarray
from numpy.testing import *
class TestMemmap(TestCase):
def setUp(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.shape = (3,4)
self.dtype = 'float32'
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
def tearDown(self):
self.tmpfp.close()
def test_roundtrip(self):
# Write data to file
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp # Test __del__ machinery, which handles cleanup
# Read data back from file
newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
def test_open_with_filename(self):
tmpname = mktemp('','mmap')
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp
os.unlink(tmpname)
def test_unnamed_file(self):
f = TemporaryFile()
fp = memmap(f, dtype=self.dtype, shape=self.shape)
del fp
f.close()
def test_attributes(self):
offset = 1
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
self.assertEqual(offset, fp.offset)
self.assertEqual(mode, fp.mode)
del fp
def test_filename(self):
tmpname = mktemp('','mmap')
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
abspath = os.path.abspath(tmpname)
fp[:] = self.data[:]
self.assertEqual(abspath, fp.filename)
b = fp[:1]
self.assertEqual(abspath, b.filename)
del b
del fp
os.unlink(tmpname)
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
self.assertEqual(fp.filename, self.tmpfp.name)
@dec.knownfailureif(sys.platform=='gnu0', "This test is known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
assert_equal(fp[0], self.data[0])
fp.flush()
def test_del(self):
# Make sure a view does not delete the underlying mmap
fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp_base[0] = 5
fp_view = fp_base[0:1]
assert_equal(fp_view[0], 5)
del fp_view
# Should still be able to access and assign values after
# deleting the view
assert_equal(fp_base[0], 5)
fp_base[0] = 6
assert_equal(fp_base[0], 6)
def test_arithmetic_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = (fp + 10)
if isinstance(tmp, memmap):
assert tmp._mmap is not fp._mmap
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[[(1, 2), (2, 3)]]
if isinstance(tmp, memmap):
assert tmp._mmap is not fp._mmap
def test_slicing_keeps_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
assert fp[:2, :2]._mmap is fp._mmap
def test_view(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
new1 = fp.view()
new2 = new1.view()
assert(new1.base is fp)
assert(new2.base is fp)
new_array = asarray(fp)
assert(new_array.base is fp)
if __name__ == "__main__":
run_module_suite()
|
mbalasso/mynumpy
|
numpy/core/tests/test_memmap.py
|
Python
|
bsd-3-clause
| 4,069 | 0.002949 |
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
srid = 27700
council_id = 'W06000021'
districts_name = 'polling_district'
stations_name = 'polling_station.shp'
elections = [
'local.monmouthshire.2017-05-04',
'parl.2017-06-08'
]
def district_record_to_dict(self, record):
return {
'internal_council_id': str(record[1]).strip(),
'name': str(record[1]).strip(),
'polling_station_id': record[3]
}
def station_record_to_dict(self, record):
station = {
'internal_council_id': record[0],
'postcode' : '',
'address' : "%s\n%s" % (record[2].strip(), record[4].strip()),
}
if str(record[1]).strip() == '10033354925':
"""
There is a dodgy point in this file.
It has too many digits for a UK national grid reference.
Joe queried, Monmouthshire provided this corrected point by email
"""
station['location'] = Point(335973, 206322, srid=27700)
return station
|
chris48s/UK-Polling-Stations
|
polling_stations/apps/data_collection/management/commands/import_monmouthshire.py
|
Python
|
bsd-3-clause
| 1,244 | 0.004019 |
import numpy as np, time, itertools
from collections import OrderedDict
from .misc_utils import *
from . import distributions
concat = np.concatenate
import theano.tensor as T, theano
from importlib import import_module
import scipy.optimize
from .keras_theano_setup import floatX, FNOPTS
from keras.layers.core import Layer
from .filters import *
from .filtered_env import *
import random
import copy
import opensim as osim
from osim.env import *
# ================================================================
# Make agent
# ================================================================
def get_agent_cls(name):
p, m = name.rsplit('.', 1)
mod = import_module(p)
constructor = getattr(mod, m)
return constructor
# ================================================================
# Stats
# ================================================================
def add_episode_stats(stats, paths):
reward_key = "reward_raw" if "reward_raw" in paths[0] else "reward"
episoderewards = np.array([path[reward_key].sum() for path in paths])
pathlengths = np.array([pathlength(path) for path in paths])
stats["EpisodeRewards"] = episoderewards
stats["EpisodeLengths"] = pathlengths
stats["NumEpBatch"] = len(episoderewards)
stats["EpRewMean"] = episoderewards.mean()
stats["EpRewSEM"] = episoderewards.std()/np.sqrt(len(paths))
stats["EpRewMax"] = episoderewards.max()
stats["EpRewMin"] = episoderewards.min()
stats["EpLenMean"] = pathlengths.mean()
stats["EpLenMax"] = pathlengths.max()
stats["EpLenMin"] = pathlengths.min()
stats["RewPerStep"] = episoderewards.sum()/pathlengths.sum()
def add_prefixed_stats(stats, prefix, d):
for (k,v) in d.items():
stats[prefix+"_"+k] = v
# ================================================================
# Policy Gradients
# ================================================================
def compute_advantage(vf, paths, gamma, lam):
# Compute return, baseline, advantage
for path in paths:
path["return"] = discount(path["reward"], gamma)
b = path["baseline"] = vf.predict(path)
b1 = np.append(b, 0 if path["terminated"] else b[-1])
deltas = path["reward"] + gamma*b1[1:] - b1[:-1]
path["advantage"] = discount(deltas, gamma * lam)
alladv = np.concatenate([path["advantage"] for path in paths])
# Standardize advantage
std = alladv.std()
mean = alladv.mean()
for path in paths:
path["advantage"] = (path["advantage"] - mean) / std
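# Worked example for the advantage computation above (illustrative comment,
# assuming the usual cumulative-discount behaviour of discount()): with
# gamma=1, lam=1, a terminated path with rewards [1, 1] and baseline
# predictions [0.5, 0.5]:
#
#     b1        = [0.5, 0.5, 0]
#     deltas    = [1 + 0.5 - 0.5, 1 + 0 - 0.5] = [1.0, 0.5]
#     advantage = discount(deltas, 1.0)        = [1.5, 0.5]
#
# which equals return - baseline ([2.0, 1.0] - [0.5, 0.5]) before the
# per-batch standardisation applied at the end of compute_advantage.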
PG_OPTIONS = [
("timestep_limit", int, 0, "maximum length of trajectories"),
("n_iter", int, 200, "number of batch"),
("parallel", int, 0, "collect trajectories in parallel"),
("timesteps_per_batch", int, 10000, ""),
("gamma", float, 0.99, "discount"),
("lam", float, 1.0, "lambda parameter from generalized advantage estimation"),
]
def run_policy_gradient_algorithm(env, agent, usercfg=None, callback=None):
cfg = update_default_config(PG_OPTIONS, usercfg)
cfg.update(usercfg)
print("policy gradient config", cfg)
# if cfg["parallel"]:
# raise NotImplementedError
tstart = time.time()
seed_iter = itertools.count()
for _ in range(cfg["n_iter"]):
# Rollouts ========
paths = get_paths(env, agent, cfg, seed_iter)
paths_subsampled = paths #subsample_paths(paths)
compute_advantage(agent.baseline, paths_subsampled, gamma=cfg["gamma"], lam=cfg["lam"])
# VF Update ========
vf_stats = agent.baseline.fit(paths_subsampled)
# Pol Update ========
pol_stats = agent.updater(paths_subsampled)
# Stats ========
stats = OrderedDict()
add_episode_stats(stats, paths)
add_prefixed_stats(stats, "vf", vf_stats)
add_prefixed_stats(stats, "pol", pol_stats)
stats["TimeElapsed"] = time.time() - tstart
if callback: callback(stats)
def run_policy_gradient_algorithm_hardmining(env, agent, usercfg=None, callback=None, seed_iter=None):
cfg = update_default_config(PG_OPTIONS, usercfg)
cfg.update(usercfg)
print("policy gradient config", cfg)
# if cfg["parallel"]:
# raise NotImplementedError
tstart = time.time()
if seed_iter is None:
seed_iter = itertools.count()
for _ in range(cfg["n_iter"]):
# Rollouts ========
paths = get_paths(env, agent, cfg, seed_iter)
paths_subsampled = paths #subsample_paths(paths)
compute_advantage(agent.baseline, paths_subsampled, gamma=cfg["gamma"], lam=cfg["lam"])
# VF Update ========
vf_stats = agent.baseline.fit(paths_subsampled)
# Pol Update ========
pol_stats = agent.updater(paths_subsampled)
# Stats ========
stats = OrderedDict()
add_episode_stats(stats, paths)
add_prefixed_stats(stats, "vf", vf_stats)
add_prefixed_stats(stats, "pol", pol_stats)
stats["TimeElapsed"] = time.time() - tstart
if callback: callback(stats)
# def subsample_paths(gpaths):
# paths = copy.deepcopy(gpaths)
# for i in range(len(paths)):
# plen = paths[i]['action'].shape[0]
# rno = random.sample(range(plen), 2*plen/3)
# for j in paths[i].keys():
# paths[i][j] = np.delete(paths[i][j], rno, axis=0)
# return paths
def parallel_rollout_worker((agent, ts_limit, ts_batch, iffilter, seed)):
try:
        # print("Parallel rollout has been called")
return do_rollouts_serial(agent, ts_limit, ts_batch, iffilter, seed)
except Exception, e:
print("Exception in rollout worker: %s" % e)
import traceback; traceback.print_exc()
raise
def get_paths(env, agent, cfg, seed_iter):
paths = []
if cfg["parallel"]:
start_time = time.time()
from multiprocessing import Pool
# from pathos.multiprocessing import ProcessPool as Pool
num_processes = int(cfg["parallel"])
pool = Pool(processes=num_processes)
# very simple scheme, split work evenly among pool workers (queue would be better)
try:
def callback(result):
print("Length of paths: ", len(result), type(result))
paths.extend([path for paths_list in result for path in paths_list])
args_list = [(agent,
cfg['timestep_limit'],
cfg['timesteps_per_batch'] / num_processes,
cfg['filter'], next(seed_iter)
) for _ in range(num_processes)]
print(args_list)
result = pool.map_async(parallel_rollout_worker, args_list, callback=callback)
# result = pool.map(parallel_rollout_worker, args_list)
result.wait()#1e5)
if not paths:
# print("Paths is still empty")
# raise Exception
result.get()
except KeyboardInterrupt:
pool.terminate()
raise
except Exception:
pool.terminate()
raise
else:
pool.close()
finally:
pool.join()
print("Time elapsed (%d workers): %.2f" % (num_processes, time.time() - start_time))
else:
paths = do_rollouts_serial(agent, cfg["timestep_limit"], cfg["timesteps_per_batch"], cfg["filter"], next(seed_iter))
return paths
def rollout(env, agent, timestep_limit, seed):
"""
Simulate the env and agent for timestep_limit steps
"""
ob = env._reset(difficulty = 2, seed = seed)
terminated = False
data = defaultdict(list)
for _ in range(timestep_limit):
ob = agent.obfilt(ob)
data["observation"].append(ob)
action, agentinfo = agent.act(ob)
data["action"].append(action)
for (k,v) in agentinfo.items():
data[k].append(v)
ob,rew,done,envinfo = env.step(action)
data["reward"].append(rew)
rew = agent.rewfilt(rew)
for (k,v) in envinfo.items():
data[k].append(v)
if done:
terminated = True
break
data = {k:np.array(v) for (k,v) in data.items()}
data["terminated"] = terminated
return data
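# Note added for clarity (not in the original source): the dict returned by
# rollout() maps each per-step quantity to a numpy array of equal length,
# e.g. for a 3-step episode roughly
#
#     {"observation": (3, obs_dim) array,
#      "action":      (3, act_dim) array,
#      "reward":      (3,) array,
#      "prob":        (3, prob_dim) array,   # from agentinfo
#      "terminated":  True or False}
#
# plus any extra keys supplied by envinfo.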
def do_rollouts_serial(agent, timestep_limit, n_timesteps, iffilter, seed):
env = RunEnv(False)
if iffilter==2:
ofd = FeatureInducer(env.observation_space)
env = FilteredEnv(env, ob_filter=ofd)
elif iffilter==1:
ofd = ConcatPrevious(env.observation_space)
env = FilteredEnv(env, ob_filter=ofd)
paths = []
timesteps_sofar = 0
while True:
np.random.seed(seed)
path = rollout(env, agent, timestep_limit, seed)
paths.append(path)
timesteps_sofar += pathlength(path)
if timesteps_sofar > n_timesteps:
break
print("Length of paths: ", len(paths))
env.close()
return paths
def pathlength(path):
return len(path["action"])
def animate_rollout(env, agent, n_timesteps,delay=.01):
total_reward = 0.
ob = env.reset()
print("Applying filter on Environment")
ofd = ConcatPrevious(env.observation_space)
# ob = ofd(ob)
# env.render()
# ob = np.array(ob)
for i in range(n_timesteps):
ob = ofd(ob)
ob = agent.obfilt(ob)
a, _info = agent.act(ob)
ob, _rew, done, _info = env.step(a)
# _rew = agent.rewfilt(_rew)
total_reward += _rew
# env.render()
ob = np.array(ob)
if done:
print(("terminated after %s timesteps"%i))
break
time.sleep(delay)
print(a.tolist())
print("Total episode reward = {}".format(total_reward))
# ================================================================
# Stochastic policies
# ================================================================
class StochPolicy(object):
@property
def probtype(self):
raise NotImplementedError
@property
def trainable_variables(self):
raise NotImplementedError
@property
def input(self):
raise NotImplementedError
def get_output(self):
raise NotImplementedError
def act(self, ob, stochastic=True):
prob = self._act_prob(ob[None])
if stochastic:
return self.probtype.sample(prob)[0], {"prob" : prob[0]}
else:
return self.probtype.maxprob(prob)[0], {"prob" : prob[0]}
def finalize(self):
self._act_prob = theano.function([self.input], self.get_output(), **FNOPTS)
class ProbType(object):
def sampled_variable(self):
raise NotImplementedError
def prob_variable(self):
raise NotImplementedError
def likelihood(self, a, prob):
raise NotImplementedError
def loglikelihood(self, a, prob):
raise NotImplementedError
def kl(self, prob0, prob1):
raise NotImplementedError
def entropy(self, prob):
raise NotImplementedError
def maxprob(self, prob):
raise NotImplementedError
class StochPolicyKeras(StochPolicy, EzPickle):
def __init__(self, net, probtype):
EzPickle.__init__(self, net, probtype)
self._net = net
self._probtype = probtype
self.finalize()
@property
def probtype(self):
return self._probtype
@property
def net(self):
return self._net
@property
def trainable_variables(self):
return self._net.trainable_weights
@property
def variables(self):
return self._net.get_params()[0]
@property
def input(self):
return self._net.input
def get_output(self):
return self._net.output
def get_updates(self):
self._net.output #pylint: disable=W0104
return self._net.updates
def get_flat(self):
return flatten(self.net.get_weights())
def set_from_flat(self, th):
weights = self.net.get_weights()
self._weight_shapes = [weight.shape for weight in weights]
self.net.set_weights(unflatten(th, self._weight_shapes))
class Categorical(ProbType):
def __init__(self, n):
self.n = n
def sampled_variable(self):
return T.ivector('a')
def prob_variable(self):
return T.matrix('prob')
def likelihood(self, a, prob):
return prob[T.arange(prob.shape[0]), a]
def loglikelihood(self, a, prob):
return T.log(self.likelihood(a, prob))
def kl(self, prob0, prob1):
return (prob0 * T.log(prob0/prob1)).sum(axis=1)
def entropy(self, prob0):
return - (prob0 * T.log(prob0)).sum(axis=1)
def sample(self, prob):
return distributions.categorical_sample(prob)
def maxprob(self, prob):
return prob.argmax(axis=1)
class CategoricalOneHot(ProbType):
def __init__(self, n):
self.n = n
def sampled_variable(self):
return T.matrix('a')
def prob_variable(self):
return T.matrix('prob')
def likelihood(self, a, prob):
return (a * prob).sum(axis=1)
def loglikelihood(self, a, prob):
return T.log(self.likelihood(a, prob))
def kl(self, prob0, prob1):
return (prob0 * T.log(prob0/prob1)).sum(axis=1)
def entropy(self, prob0):
return - (prob0 * T.log(prob0)).sum(axis=1)
def sample(self, prob):
assert prob.ndim == 2
inds = distributions.categorical_sample(prob)
out = np.zeros_like(prob)
out[np.arange(prob.shape[0]), inds] = 1
return out
def maxprob(self, prob):
out = np.zeros_like(prob)
        out[np.arange(prob.shape[0]), prob.argmax(axis=1)] = 1
        return out
class DiagGauss(ProbType):
def __init__(self, d):
self.d = d
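        # distribution parameters are laid out as [mean_1..mean_d, std_1..std_d] along the last axis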
def sampled_variable(self):
return T.matrix('a')
def prob_variable(self):
return T.matrix('prob')
def loglikelihood(self, a, prob):
mean0 = prob[:,:self.d]
std0 = prob[:, self.d:]
# exp[ -(a - mu)^2/(2*sigma^2) ] / sqrt(2*pi*sigma^2)
return - 0.5 * T.square((a - mean0) / std0).sum(axis=1) - 0.5 * T.log(2.0 * np.pi) * self.d - T.log(std0).sum(axis=1)
def likelihood(self, a, prob):
return T.exp(self.loglikelihood(a, prob))
def kl(self, prob0, prob1):
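        # closed-form KL between diagonal Gaussians:
        # sum_i [ log(std1_i/std0_i) + (std0_i^2 + (mean0_i - mean1_i)^2) / (2*std1_i^2) ] - d/2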
mean0 = prob0[:, :self.d]
std0 = prob0[:, self.d:]
mean1 = prob1[:, :self.d]
std1 = prob1[:, self.d:]
return T.log(std1 / std0).sum(axis=1) + ((T.square(std0) + T.square(mean0 - mean1)) / (2.0 * T.square(std1))).sum(axis=1) - 0.5 * self.d
def entropy(self, prob):
std_nd = prob[:, self.d:]
return T.log(std_nd).sum(axis=1) + .5 * np.log(2 * np.pi * np.e) * self.d
def sample(self, prob):
mean_nd = prob[:, :self.d]
std_nd = prob[:, self.d:]
return np.random.randn(prob.shape[0], self.d).astype(floatX) * std_nd + mean_nd
def maxprob(self, prob):
return prob[:, :self.d]
def test_probtypes():
theano.config.floatX = 'float64'
np.random.seed(0)
prob_diag_gauss = np.array([-.2, .3, .4, -.5, 1.1, 1.5, .1, 1.9])
diag_gauss = DiagGauss(prob_diag_gauss.size // 2)
yield validate_probtype, diag_gauss, prob_diag_gauss
prob_categorical = np.array([.2, .3, .5])
categorical = Categorical(prob_categorical.size)
yield validate_probtype, categorical, prob_categorical
def validate_probtype(probtype, prob):
N = 100000
# Check to see if mean negative log likelihood == differential entropy
Mval = np.repeat(prob[None, :], N, axis=0)
M = probtype.prob_variable()
X = probtype.sampled_variable()
calcloglik = theano.function([X, M], T.log(probtype.likelihood(X, M)), allow_input_downcast=True)
calcent = theano.function([M], probtype.entropy(M), allow_input_downcast=True)
Xval = probtype.sample(Mval)
logliks = calcloglik(Xval, Mval)
entval_ll = - logliks.mean()
entval_ll_stderr = logliks.std() / np.sqrt(N)
entval = calcent(Mval).mean()
print(entval, entval_ll, entval_ll_stderr)
assert np.abs(entval - entval_ll) < 3 * entval_ll_stderr # within 3 sigmas
# Check to see if kldiv[p,q] = - ent[p] - E_p[log q]
M2 = probtype.prob_variable()
q = prob + np.random.randn(prob.size) * 0.1
Mval2 = np.repeat(q[None, :], N, axis=0)
calckl = theano.function([M, M2], probtype.kl(M, M2), allow_input_downcast=True)
klval = calckl(Mval, Mval2).mean()
logliks = calcloglik(Xval, Mval2)
klval_ll = - entval - logliks.mean()
klval_ll_stderr = logliks.std() / np.sqrt(N)
print(klval, klval_ll, klval_ll_stderr)
assert np.abs(klval - klval_ll) < 3 * klval_ll_stderr # within 3 sigmas
# ================================================================
# Value functions
# ================================================================
class Baseline(object):
def fit(self, paths):
raise NotImplementedError
def predict(self, path):
raise NotImplementedError
class TimeDependentBaseline(Baseline):
def __init__(self):
self.baseline = None
def fit(self, paths):
rets = [path["return"] for path in paths]
maxlen = max(len(ret) for ret in rets)
retsum = np.zeros(maxlen)
retcount = np.zeros(maxlen)
for ret in rets:
retsum[:len(ret)] += ret
retcount[:len(ret)] += 1
retmean = retsum / retcount
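        # keep only the leading timesteps covered by enough (>4) paths; searchsorted on the negated, ascending counts finds that cutoff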
i_depletion = np.searchsorted(-retcount, -4)
self.baseline = retmean[:i_depletion]
pred = concat([self.predict(path) for path in paths])
return {"EV" : explained_variance(pred, concat(rets))}
def predict(self, path):
if self.baseline is None:
return np.zeros(pathlength(path))
else:
lenpath = pathlength(path)
lenbase = len(self.baseline)
if lenpath > lenbase:
return concat([self.baseline, self.baseline[-1] + np.zeros(lenpath-lenbase)])
else:
return self.baseline[:lenpath]
class NnRegression(EzPickle):
def __init__(self, net, mixfrac=1.0, maxiter=25):
EzPickle.__init__(self, net, mixfrac, maxiter)
self.net = net
self.mixfrac = mixfrac
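        # mixfrac < 1 fits the net to a blend of the new targets and its old predictions, limiting how far the value function moves per update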
x_nx = net.input
self.predict = theano.function([x_nx], net.output, **FNOPTS)
ypred_ny = net.output
ytarg_ny = T.matrix("ytarg")
var_list = net.trainable_weights
l2 = 1e-3 * T.add(*[T.square(v).sum() for v in var_list])
N = x_nx.shape[0]
mse = T.sum(T.square(ytarg_ny - ypred_ny))/N
symb_args = [x_nx, ytarg_ny]
loss = mse + l2
self.opt = LbfgsOptimizer(loss, var_list, symb_args, maxiter=maxiter, extra_losses={"mse":mse, "l2":l2})
def fit(self, x_nx, ytarg_ny):
nY = ytarg_ny.shape[1]
ypredold_ny = self.predict(x_nx)
out = self.opt.update(x_nx, ytarg_ny*self.mixfrac + ypredold_ny*(1-self.mixfrac))
yprednew_ny = self.predict(x_nx)
out["PredStdevBefore"] = ypredold_ny.std()
out["PredStdevAfter"] = yprednew_ny.std()
out["TargStdev"] = ytarg_ny.std()
if nY==1:
out["EV_before"] = explained_variance_2d(ypredold_ny, ytarg_ny)[0]
out["EV_after"] = explained_variance_2d(yprednew_ny, ytarg_ny)[0]
else:
out["EV_avg"] = explained_variance(yprednew_ny.ravel(), ytarg_ny.ravel())
return out
class NnVf(object):
def __init__(self, net, timestep_limit, regression_params):
self.reg = NnRegression(net, **regression_params)
self.timestep_limit = timestep_limit
def predict(self, path):
ob_no = self.preproc(path["observation"])
return self.reg.predict(ob_no)[:,0]
def fit(self, paths):
ob_no = concat([self.preproc(path["observation"]) for path in paths], axis=0)
vtarg_n1 = concat([path["return"] for path in paths]).reshape(-1,1)
return self.reg.fit(ob_no, vtarg_n1)
def preproc(self, ob_no):
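        # append the timestep index, normalized by the episode length, so the value function can be time-dependent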
return concat([ob_no, np.arange(len(ob_no)).reshape(-1,1) / float(self.timestep_limit)], axis=1)
class NnCpd(EzPickle):
def __init__(self, net, probtype, maxiter=25):
EzPickle.__init__(self, net, probtype, maxiter)
self.net = net
x_nx = net.input
prob = net.output
a = probtype.sampled_variable()
var_list = net.trainable_weights
loglik = probtype.loglikelihood(a, prob)
self.loglikelihood = theano.function([a, x_nx], loglik, **FNOPTS)
loss = - loglik.mean()
symb_args = [x_nx, a]
self.opt = LbfgsOptimizer(loss, var_list, symb_args, maxiter=maxiter)
def fit(self, x_nx, a):
return self.opt.update(x_nx, a)
class SetFromFlat(object):
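    # compiles a theano function that scatters one flat parameter vector back into the individual shared variables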
def __init__(self, var_list):
theta = T.vector()
start = 0
updates = []
for v in var_list:
shape = v.shape
size = T.prod(shape)
updates.append((v, theta[start:start+size].reshape(shape)))
start += size
self.op = theano.function([theta],[], updates=updates,**FNOPTS)
def __call__(self, theta):
self.op(theta.astype(floatX))
class GetFlat(object):
def __init__(self, var_list):
self.op = theano.function([], T.concatenate([v.flatten() for v in var_list]),**FNOPTS)
def __call__(self):
return self.op() #pylint: disable=E1101
class EzFlat(object):
def __init__(self, var_list):
self.gf = GetFlat(var_list)
self.sff = SetFromFlat(var_list)
def set_params_flat(self, theta):
self.sff(theta)
def get_params_flat(self):
return self.gf()
class LbfgsOptimizer(EzFlat):
def __init__(self, loss, params, symb_args, extra_losses=None, maxiter=25):
EzFlat.__init__(self, params)
self.all_losses = OrderedDict()
self.all_losses["loss"] = loss
if extra_losses is not None:
self.all_losses.update(extra_losses)
self.f_lossgrad = theano.function(list(symb_args), [loss, flatgrad(loss, params)],**FNOPTS)
self.f_losses = theano.function(symb_args, list(self.all_losses.values()),**FNOPTS)
self.maxiter=maxiter
def update(self, *args):
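        # run scipy's L-BFGS on the flat parameter vector; lossandgrad writes candidate params into the model, then returns the loss and flattened gradient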
thprev = self.get_params_flat()
def lossandgrad(th):
self.set_params_flat(th)
l,g = self.f_lossgrad(*args)
g = g.astype('float64')
return (l,g)
losses_before = self.f_losses(*args)
theta, _, opt_info = scipy.optimize.fmin_l_bfgs_b(lossandgrad, thprev, maxiter=self.maxiter)
del opt_info['grad']
print(opt_info)
self.set_params_flat(theta)
losses_after = self.f_losses(*args)
info = OrderedDict()
for (name,lossbefore, lossafter) in zip(list(self.all_losses.keys()), losses_before, losses_after):
info[name+"_before"] = lossbefore
info[name+"_after"] = lossafter
return info
def numel(x):
return T.prod(x.shape)
def flatgrad(loss, var_list):
grads = T.grad(loss, var_list)
return T.concatenate([g.flatten() for g in grads])
# ================================================================
# Keras
# ================================================================
class ConcatFixedStd(Layer):
input_ndim = 2
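    # appends a trainable, input-independent std (exp of a learned log-std vector) to the mean output,
    # producing the [mean, std] layout that DiagGauss expects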
def __init__(self, **kwargs):
Layer.__init__(self, **kwargs)
def build(self, input_shape):
input_dim = input_shape[1]
self.logstd = theano.shared(np.zeros(input_dim,floatX), name='{}_logstd'.format(self.name))
self.trainable_weights = [self.logstd]
super(ConcatFixedStd, self).build(input_shape)
    def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[1] * 2)
def call(self, x, mask=None):
Mean = x
Std = T.repeat(T.exp(self.logstd)[None, :], Mean.shape[0], axis=0)
return T.concatenate([Mean, Std], axis=1)
# ================================================================
# Video monitoring
# ================================================================
def VIDEO_NEVER(_):
return False
def VIDEO_ALWAYS(_):
return True
|
abhinavagarwalla/modular_rl
|
modular_rl/core.py
|
Python
|
mit
| 24,088 | 0.008261 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import story
from telemetry import page as page_module
from telemetry import value
from telemetry.value import improvement_direction
from telemetry.value import none_values
from telemetry.value import scalar
class TestBase(unittest.TestCase):
def setUp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page_module.Page('http://www.bar.com/', story_set, story_set.base_dir,
name='http://www.bar.com/'))
story_set.AddStory(
page_module.Page('http://www.baz.com/', story_set, story_set.base_dir,
name='http://www.baz.com/'))
story_set.AddStory(
page_module.Page('http://www.foo.com/', story_set, story_set.base_dir,
name='http://www.foo.com/'))
self.story_set = story_set
@property
def pages(self):
return self.story_set.stories
class ValueTest(TestBase):
def testRepr(self):
page0 = self.pages[0]
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True,
description='desc', tir_label='my_ir',
improvement_direction=improvement_direction.DOWN)
expected = ('ScalarValue(http://www.bar.com/, x, unit, 3, important=True, '
'description=desc, tir_label=my_ir, '
'improvement_direction=down, grouping_keys={}')
self.assertEquals(expected, str(v))
def testBuildbotValueType(self):
page0 = self.pages[0]
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True,
improvement_direction=improvement_direction.DOWN)
self.assertEquals('default', v.GetBuildbotDataType(
value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
self.assertEquals([3], v.GetBuildbotValue())
self.assertEquals(('x', page0.name),
v.GetChartAndTraceNameForPerPageResult())
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=False,
improvement_direction=improvement_direction.DOWN)
self.assertEquals(
'unimportant',
v.GetBuildbotDataType(value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
def testScalarSamePageMerging(self):
page0 = self.pages[0]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1,
description='important metric',
improvement_direction=improvement_direction.UP)
v1 = scalar.ScalarValue(page0, 'x', 'unit', 2,
description='important metric',
improvement_direction=improvement_direction.UP)
self.assertTrue(v1.IsMergableWith(v0))
vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals('important metric', vM.description)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
self.assertEquals(improvement_direction.UP, vM.improvement_direction)
def testScalarDifferentPageMerging(self):
page0 = self.pages[0]
page1 = self.pages[1]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1,
description='important metric',
improvement_direction=improvement_direction.UP)
v1 = scalar.ScalarValue(page1, 'x', 'unit', 2,
description='important metric',
improvement_direction=improvement_direction.UP)
vM = scalar.ScalarValue.MergeLikeValuesFromDifferentPages([v0, v1])
self.assertEquals(None, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals('important metric', vM.description)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
self.assertEquals(improvement_direction.UP, vM.improvement_direction)
def testScalarWithNoneValueMerging(self):
page0 = self.pages[0]
v0 = scalar.ScalarValue(
page0, 'x', 'unit', 1, improvement_direction=improvement_direction.DOWN)
v1 = scalar.ScalarValue(page0, 'x', 'unit', None, none_value_reason='n',
improvement_direction=improvement_direction.DOWN)
self.assertTrue(v1.IsMergableWith(v0))
vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
self.assertEquals(None, vM.values)
expected_none_value_reason = (
'Merging values containing a None value results in a None value. '
'None values: [ScalarValue(http://www.bar.com/, x, unit, None, '
'important=True, description=None, tir_label=None, '
'improvement_direction=down, grouping_keys={}]')
self.assertEquals(expected_none_value_reason, vM.none_value_reason)
def testScalarWithNoneValueMustHaveNoneReason(self):
page0 = self.pages[0]
self.assertRaises(none_values.NoneValueMissingReason,
lambda: scalar.ScalarValue(
page0, 'x', 'unit', None,
improvement_direction=improvement_direction.UP))
def testScalarWithNoneReasonMustHaveNoneValue(self):
page0 = self.pages[0]
self.assertRaises(none_values.ValueMustHaveNoneValue,
lambda: scalar.ScalarValue(
page0, 'x', 'unit', 1, none_value_reason='n',
improvement_direction=improvement_direction.UP))
def testAsDict(self):
v = scalar.ScalarValue(None, 'x', 'unit', 42, important=False,
improvement_direction=improvement_direction.DOWN)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {'value': 42})
def testNoneValueAsDict(self):
v = scalar.ScalarValue(None, 'x', 'unit', None, important=False,
none_value_reason='n',
improvement_direction=improvement_direction.DOWN)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {'value': None, 'none_value_reason': 'n'})
def testFromDictInt(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42,
'improvement_direction': improvement_direction.DOWN,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, 42)
self.assertEquals(v.improvement_direction, improvement_direction.DOWN)
def testFromDictFloat(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42.4,
'improvement_direction': improvement_direction.UP,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, 42.4)
def testFromDictWithoutImprovementDirection(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertIsNone(v.improvement_direction)
def testFromDictNoneValue(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': None,
'none_value_reason': 'n',
'improvement_direction': improvement_direction.UP,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, None)
self.assertEquals(v.none_value_reason, 'n')
|
catapult-project/catapult-csm
|
telemetry/telemetry/value/scalar_unittest.py
|
Python
|
bsd-3-clause
| 7,682 | 0.002343 |
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
import numpy as np
def predictClothesGeneral(temp):
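    # data.txt is assumed to hold colon-separated records with the temperature in field 1 and the
    # pants/top class labels in fields 3 and 4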
dataFile = open("data.txt")
data = dataFile.read()
data = data.split("\n")
X = []
Y = []
Y2 = []
for i in range(0,len(data) - 1):
X.append([float(data[i].split(":")[1])])
Y.append(int(data[i].split(":")[3]))
Y2.append(int(data[i].split(":")[4]))
clf = RandomForestClassifier(n_estimators=25)
clf2 = RandomForestClassifier(n_estimators=25)
clf.fit(X,Y)
clf2.fit(X,Y2)
pants = clf.predict([[temp]])
tops = clf2.predict([[temp]])
s = "I recommend you wear a pair of "
if pants == 1:
s = s + "jeans"
else:
s = s + "khaki shorts"
s = s + " and a "
if tops == 1:
s = s + "shirt, its a nice day out!"
elif tops == 2:
s = s + "sweat shirt."
else:
s = s + "jacket, it will be chilly today."
return s
def predictFromFileGeneral(fileName):
fi = open(fileName)
data = fi.read().split("\n")
for i in range(0,len(data) - 1):
data2 = data[i].split(":")
print "At " + data2[1].split(",")[0] + " degrees... " + predictClothesGeneral(float(data2[1].split(",")[0]))
def addToKnownList(shirt, temp):
dataFile = open("userAdded.txt", 'a')
dataFile.write(str(shirt + ":" + str(temp)) + '\n')
def predictClothesData(temp):
dataFile = open("userAdded.txt")
data = dataFile.read()
data = data.split("\n")
X = []
Y = []
for i in range(0,len(data) - 1):
X.append([float(data[i].split(":")[1])])
Y.append(data[i].split(":")[0])
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X,Y)
predict = clf.predict([[temp]])
return predict
def predictFromFileData(fileName):
fi = open(fileName)
data = fi.read().split("\n")
for i in range(0,len(data) - 1):
data2 = data[i].split(":")
print "At " + data2[1].split(",")[0] + " degrees... I would recommend a " + predictClothesData(float(data2[1].split(",")[0]))[0]
|
epaglier/Project-JARVIS
|
jarvis-features/Weather AI/weatherai.py
|
Python
|
gpl-3.0
| 2,382 | 0.011755 |
# -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Defines a crystal lattice class.
"""
import numpy as np
import scipy.linalg as la
from fsc.export import export
# TODO: move to a separate module # pylint: disable=fixme,useless-suppression
@export
class Lattice:
"""
Defines a periodic lattice.
"""
def __init__(self, matrix):
self.matrix = np.array(matrix)
def __array__(self):
return self.matrix
@property
def reciprocal_lattice(self):
return type(self)(matrix=2 * np.pi * la.inv(self.matrix).T)
def get_cartesian_coords(self, fractional_coords):
return np.dot(fractional_coords, self.matrix)
def get_fractional_coords(self, cartesian_coords):
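        # rows of `matrix` are the lattice vectors, so cartesian = fractional . matrix; solving the transposed system inverts that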
return la.solve(self.matrix.T, np.array(cartesian_coords).T).T
def get_cartesian_distance(self, fractional_coord_1, fractional_coord_2):
return la.norm(
self.get_cartesian_coords(fractional_coord_1) -
self.get_cartesian_coords(fractional_coord_2)
)
def get_reciprocal_cartesian_distance( # pylint: disable=invalid-name
self, reciprocal_fractional_coord_1, reciprocal_fractional_coord_2
):
return self.reciprocal_lattice.get_cartesian_distance(
reciprocal_fractional_coord_1, reciprocal_fractional_coord_2
)
|
Z2PackDev/bands_inspect
|
bands_inspect/lattice.py
|
Python
|
apache-2.0
| 1,413 | 0 |
"""
Base/mixin classes for the spatial backend database operations and the
`SpatialRefSys` model the backend.
"""
import re
from django.contrib.gis import gdal
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class BaseSpatialOperations(object):
"""
This module holds the base `BaseSpatialBackend` object, which is
instantiated by each spatial database backend with the features
it has.
"""
distance_functions = {}
geometry_functions = {}
geometry_operators = {}
geography_operators = {}
geography_functions = {}
gis_terms = set()
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
area = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
bounding_circle = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
collect = False
extent = False
extent3d = False
make_line = False
unionagg = False
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
def convert_geom(self, geom_val, geom_field):
raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Returns the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_placeholder() method')
def get_expression_column(self, evaluator):
"""
Helper method to return the quoted column string from the evaluator
for its expression.
"""
for expr, col_tup in evaluator.cols:
if expr is evaluator.expression:
return '%s.%s' % tuple(map(self.quote_name, col_tup))
raise Exception("Could not find the column for the expression.")
# Spatial SQL Construction
def spatial_aggregate_sql(self, agg):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_lookup_sql(self, lvalue, lookup_type, value, field):
raise NotImplementedError('subclasses of BaseSpatialOperations must a provide spatial_lookup_sql() method')
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError('subclasses of BaseSpatialOperations must a provide geometry_columns() method')
def spatial_ref_sys(self):
raise NotImplementedError('subclasses of BaseSpatialOperations must a provide spatial_ref_sys() method')
@python_2_unicode_compatible
class SpatialRefSysMixin(object):
"""
The SpatialRefSysMixin is a class used by the database-dependent
    SpatialRefSys objects to reduce redundant code.
"""
# For pulling out the spheroid from the spatial reference string. This
# regular expression is used only if the user does not have GDAL installed.
# TODO: Flattening not used in all ellipsoids, could also be a minor axis,
# or 'b' parameter.
spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
# For pulling out the units on platforms w/o GDAL installed.
# TODO: Figure out how to pull out angular units of projected coordinate system and
# fix for LOCAL_CS types. GDAL should be highly recommended for performing
# distance queries.
units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$')
@property
def srs(self):
"""
Returns a GDAL SpatialReference object, if GDAL is installed.
"""
if gdal.HAS_GDAL:
# TODO: Is caching really necessary here? Is complexity worth it?
if hasattr(self, '_srs'):
# Returning a clone of the cached SpatialReference object.
return self._srs.clone()
else:
# Attempting to cache a SpatialReference object.
# Trying to get from WKT first.
try:
self._srs = gdal.SpatialReference(self.wkt)
return self.srs
except Exception as msg:
pass
try:
self._srs = gdal.SpatialReference(self.proj4text)
return self.srs
except Exception as msg:
pass
raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
else:
raise Exception('GDAL is not installed.')
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening).
"""
if gdal.HAS_GDAL:
return self.srs.ellipsoid
else:
m = self.spheroid_regex.match(self.wkt)
if m:
return (float(m.group('major')), float(m.group('flattening')))
else:
return None
@property
def name(self):
"Returns the projection name."
return self.srs.name
@property
def spheroid(self):
"Returns the spheroid name for this spatial reference."
return self.srs['spheroid']
@property
def datum(self):
"Returns the datum for this spatial reference."
return self.srs['datum']
@property
def projected(self):
"Is this Spatial Reference projected?"
if gdal.HAS_GDAL:
return self.srs.projected
else:
return self.wkt.startswith('PROJCS')
@property
def local(self):
"Is this Spatial Reference local?"
if gdal.HAS_GDAL:
return self.srs.local
else:
return self.wkt.startswith('LOCAL_CS')
@property
def geographic(self):
"Is this Spatial Reference geographic?"
if gdal.HAS_GDAL:
return self.srs.geographic
else:
return self.wkt.startswith('GEOGCS')
@property
def linear_name(self):
"Returns the linear units name."
if gdal.HAS_GDAL:
return self.srs.linear_name
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def linear_units(self):
"Returns the linear units."
if gdal.HAS_GDAL:
return self.srs.linear_units
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def angular_name(self):
"Returns the name of the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_name
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def angular_units(self):
"Returns the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_units
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def units(self):
"Returns a tuple of the units and the name."
if self.projected or self.local:
return (self.linear_units, self.linear_name)
elif self.geographic:
return (self.angular_units, self.angular_name)
else:
return (None, None)
@classmethod
def get_units(cls, wkt):
"""
Class method used by GeometryField on initialization to
        retrieve the units on the given WKT, without having to use
any of the database fields.
"""
if gdal.HAS_GDAL:
return gdal.SpatialReference(wkt).units
else:
m = cls.units_regex.match(wkt)
return m.group('unit'), m.group('unit_name')
@classmethod
def get_spheroid(cls, wkt, string=True):
"""
Class method used by GeometryField on initialization to
retrieve the `SPHEROID[..]` parameters from the given WKT.
"""
if gdal.HAS_GDAL:
srs = gdal.SpatialReference(wkt)
sphere_params = srs.ellipsoid
sphere_name = srs['spheroid']
else:
m = cls.spheroid_regex.match(wkt)
if m:
sphere_params = (float(m.group('major')), float(m.group('flattening')))
sphere_name = m.group('name')
else:
return None
if not string:
return sphere_name, sphere_params
else:
# `string` parameter used to place in format acceptable by PostGIS
if len(sphere_params) == 3:
radius, flattening = sphere_params[0], sphere_params[2]
else:
radius, flattening = sphere_params
return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
def __str__(self):
"""
Returns the string representation. If GDAL is installed,
it will be 'pretty' OGC WKT.
"""
try:
return six.text_type(self.srs)
except Exception:
return six.text_type(self.wkt)
|
ericholscher/django
|
django/contrib/gis/db/backends/base.py
|
Python
|
bsd-3-clause
| 11,584 | 0.001468 |
# Given a linked list, remove the nth node from the end of list and return its head.
# For example,
# Given linked list: 1->2->3->4->5, and n = 2.
# After removing the second node from the end, the linked list becomes 1->2->3->5.
# Note:
# Given n will always be valid.
# Try to do this in one pass.
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param {ListNode} head
# @param {integer} n
# @return {ListNode}
def removeNthFromEnd(self, head, n):
# no extra space
fast = slow = head
for _ in range(n):
fast = fast.next
if not fast:
return head.next
while fast.next:
fast, slow = fast.next, slow.next
slow.next = slow.next.next
return head
# extra space O(n)
# if not head:
# return head
# i = 0
# dic = {}
# node = head;
# while node:
# dic[i] = node
# node = node.next
# i += 1
# p = i - n
# if p is 0:
# head = head.next
# else:
# dic[p-1].next = dic.get(p+1, None)
# return head
def printList(self, head):
print "==========="
cur = head
while cur:
print cur.val
cur = cur.next
print "==========="
s = Solution()
n0 = ListNode(1)
n1 = ListNode(2)
n2 = ListNode(3)
n3 = ListNode(4)
n4 = ListNode(5)
# n5 = ListNode(6)
# n6 = ListNode(7)
n0.next = n1
n1.next = n2
n2.next = n3
n3.next = n4
# n4.next = n5
# n5.next = n6
# n0 = s.removeNthFromEnd(n0, 3)
# n0 = s.removeNthFromEnd(None, 0)
s.printList(n0)
|
abawchen/leetcode
|
solutions/019_remove_nth_node_from_end_of_list.py
|
Python
|
mit
| 1,751 | 0.003427 |
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1ImageSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'_from': 'V1ObjectReference',
'paths': 'list[V1ImageSourcePath]',
'pull_secret': 'V1LocalObjectReference'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'_from': 'from',
'paths': 'paths',
'pull_secret': 'pullSecret'
}
def __init__(self, _from=None, paths=None, pull_secret=None):
"""
V1ImageSource - a model defined in Swagger
"""
self.__from = _from
self._paths = paths
self._pull_secret = pull_secret
@property
def _from(self):
"""
Gets the _from of this V1ImageSource.
From is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.
:return: The _from of this V1ImageSource.
:rtype: V1ObjectReference
"""
return self.__from
@_from.setter
def _from(self, _from):
"""
Sets the _from of this V1ImageSource.
From is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.
:param _from: The _from of this V1ImageSource.
:type: V1ObjectReference
"""
self.__from = _from
@property
def paths(self):
"""
Gets the paths of this V1ImageSource.
Paths is a list of source and destination paths to copy from the image.
:return: The paths of this V1ImageSource.
:rtype: list[V1ImageSourcePath]
"""
return self._paths
@paths.setter
def paths(self, paths):
"""
Sets the paths of this V1ImageSource.
Paths is a list of source and destination paths to copy from the image.
:param paths: The paths of this V1ImageSource.
:type: list[V1ImageSourcePath]
"""
self._paths = paths
@property
def pull_secret(self):
"""
Gets the pull_secret of this V1ImageSource.
PullSecret is a reference to a secret to be used to pull the image from a registry If the image is pulled from the OpenShift registry, this field does not need to be set.
:return: The pull_secret of this V1ImageSource.
:rtype: V1LocalObjectReference
"""
return self._pull_secret
@pull_secret.setter
def pull_secret(self, pull_secret):
"""
Sets the pull_secret of this V1ImageSource.
PullSecret is a reference to a secret to be used to pull the image from a registry If the image is pulled from the OpenShift registry, this field does not need to be set.
:param pull_secret: The pull_secret of this V1ImageSource.
:type: V1LocalObjectReference
"""
self._pull_secret = pull_secret
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1ImageSource.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
detiber/lib_openshift
|
lib_openshift/models/v1_image_source.py
|
Python
|
apache-2.0
| 5,181 | 0.001544 |
"""Auto-generated file, do not edit by hand. NZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_NZ = PhoneMetadata(id='NZ', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[14]\\d{2,3}', possible_length=(3, 4)),
emergency=PhoneNumberDesc(national_number_pattern='111', example_number='111', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='111|4098', example_number='111', possible_length=(3, 4)),
sms_services=PhoneNumberDesc(national_number_pattern='4098', example_number='4098', possible_length=(4,)),
short_data=True)
|
gencer/python-phonenumbers
|
python/phonenumbers/shortdata/region_NZ.py
|
Python
|
apache-2.0
| 673 | 0.008915 |
from django.conf.urls import url
from .views import ZadanieCreateView, ZadanieDetailView, ZadanieUpdateView
urlpatterns = [
url(r'^dodaj/(?P<dopuszczenie_id>[0-9]+)/$', ZadanieCreateView.as_view(), name='create'),
url(r'(?P<pk>[0-9]+)/detail/$', ZadanieDetailView.as_view(), name='detail'),
url(r'(?P<pk>[0-9]+)/update/$', ZadanieUpdateView.as_view(), name='update'),
]
|
szymanskirafal/ab
|
zadania/urls.py
|
Python
|
mit
| 388 | 0.010309 |
import multiprocessing
import os
from datetime import timedelta
import easy_env
from dotenv import find_dotenv, load_dotenv
load_dotenv(find_dotenv())
PORT = os.environ.get('PORT', '8000')
WEB_WORKERS = easy_env.get_int('WEB_WORKERS', multiprocessing.cpu_count())
bind = ":" + PORT
workers = WEB_WORKERS
timeout = timedelta(minutes=30).seconds
accesslog = '-'
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s'
|
vaniakosmos/memes-reposter
|
server.py
|
Python
|
mit
| 432 | 0 |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import time
from openerp import pooler
from openerp.osv import fields, osv
from openerp.tools.translate import _
class is_api(osv.osv):
_name = 'is_api'
_description = u'Fonctions générales'
def get_usagers_structure(self, cr, uid, structure_id, context=None):
""" Retourner la liste des usagers appartenants à la structure passée en paramètre
"""
usager_line_obj = self.pool.get('ove.usager.structure')
line_ids = usager_line_obj.search(cr, uid, [('structure_id','=',structure_id)], context=context)
print 'line_ids *****', line_ids
usagers = list(set([line['usager_id'][0] for line in usager_line_obj.read(cr, uid, line_ids, ['usager_id'], context=context)]))
return usagers
def get_usager_groups(self, cr, uid, usager_id, context=None):
""" Retourner les groupes associés à l'usager passé en paramètre
"""
group_obj = self.pool.get('ove.groupe')
group_ids = group_obj.search(cr, uid, [('usager_id','=', usager_id)], context=context)
groups = []
for group in group_obj.read(cr, uid, group_ids, ['id', 'code'], context=context):
groups.append({'id':group['id'], 'code':group['code']})
newlist = sorted(groups, key=lambda k: k['code'])
return newlist
def get_users_usager(self, cr, uid, structure_lines, context=None):
""" Retourner les utilisateurs liés aux groupes de l'usager à partir des structures qui leurs appartient
"""
users = {'group_1':[], 'group_2':[], 'group_3':[], 'group_4':[], 'group_5':[],
'group_6':[], 'group_7':[], 'group_8':[], 'group_9':[], 'group_10':[]
}
if not structure_lines:
return users
for line in structure_lines:
if line.structure_id.users_line:
for user_line in line.structure_id.users_line:
if user_line.group_1 and line.group_1:
users['group_1'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_2 and line.group_2:
users['group_2'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_3 and line.group_3:
users['group_3'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_4 and line.group_4:
users['group_4'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_5 and line.group_5:
users['group_5'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_6 and line.group_6:
users['group_6'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_7 and line.group_7:
users['group_7'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_8 and line.group_8:
users['group_8'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_9 and line.group_9:
users['group_9'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
""" Eliminer les doublons des listes """
users.update({'group_1': list(set(users['group_1']))})
users.update({'group_2': list(set(users['group_2']))})
users.update({'group_3': list(set(users['group_3']))})
users.update({'group_4': list(set(users['group_4']))})
users.update({'group_5': list(set(users['group_5']))})
users.update({'group_6': list(set(users['group_6']))})
users.update({'group_7': list(set(users['group_7']))})
users.update({'group_8': list(set(users['group_8']))})
users.update({'group_9': list(set(users['group_9']))})
users.update({'group_10': list(set(users['group_10']))})
return users
def create_group(self, cr, uid, code_groupe, prefix, name_group, users, usager_id, context=None):
""" Création d'un groupe OVE
"""
vals = {
'code': code_groupe,
'name': prefix + ' - ' + name_group,
'user_ids': [[6, 0, users]],
'usager_id': usager_id,
}
return self.pool.get('ove.groupe').create(cr, uid, vals, context=context)
def associate_groupe_usager(self, cr, uid, usager_id, group_id, group_usager, context=None):
""" Associer un groupe au groupe correspondant de l'usager
"""
usager_obj = self.pool.get('is.usager')
if group_usager == 'G1':
usager_obj.write(cr, uid, usager_id, {'group_1': group_id}, context=context)
if group_usager == 'G2':
usager_obj.write(cr, uid, usager_id, {'group_2': group_id}, context=context)
if group_usager == 'G3':
usager_obj.write(cr, uid, usager_id, {'group_3': group_id}, context=context)
if group_usager == 'G4':
usager_obj.write(cr, uid, usager_id, {'group_4': group_id}, context=context)
if group_usager == 'G5':
usager_obj.write(cr, uid, usager_id, {'group_5': group_id}, context=context)
if group_usager == 'G6':
usager_obj.write(cr, uid, usager_id, {'group_6': group_id}, context=context)
if group_usager == 'G7':
usager_obj.write(cr, uid, usager_id, {'group_7': group_id}, context=context)
if group_usager == 'G8':
usager_obj.write(cr, uid, usager_id, {'group_8': group_id}, context=context)
if group_usager == 'G9':
usager_obj.write(cr, uid, usager_id, {'group_9': group_id}, context=context)
if group_usager == 'G10':
usager_obj.write(cr, uid, usager_id, {'group_10': group_id}, context=context)
return True
def create_ove_groups(self, cr, uid, prefix, users, usager_id, context=None):
""" Création de l'ensemble des groupes pour chaque usager ou structure
"""
group_id = self.create_group(cr, uid, 'G1', prefix, 'Groupe Impression', users['group_1'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G1', context)
group_id = self.create_group(cr, uid, 'G2', prefix, 'Groupe Donnée Administrative', users['group_2'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G2', context)
group_id = self.create_group(cr, uid, 'G3', prefix, 'Groupe Donnée Administrative Modification', users['group_3'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G3', context)
group_id = self.create_group(cr, uid, 'G4', prefix, 'Groupe Donnée Institutionnelle', users['group_4'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G4', context)
group_id = self.create_group(cr, uid, 'G5', prefix, 'Groupe Donnée Institutionnelle Modification', users['group_5'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G5', context)
group_id = self.create_group(cr, uid, 'G6', prefix, 'Groupe Donnée Institutionnelle Validation', users['group_6'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G6', context)
group_id = self.create_group(cr, uid, 'G7', prefix, 'Groupe Donnée métier', users['group_7'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G7', context)
group_id = self.create_group(cr, uid, 'G8', prefix, 'Groupe Donnée métier Modification', users['group_8'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G8', context)
group_id = self.create_group(cr, uid, 'G9', prefix, 'Groupe Donnée métier Validation', users['group_9'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G9', context)
group_id = self.create_group(cr, uid, 'G10', prefix, 'Groupe Structure', users['group_10'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G10', context)
return True
def update_usager_groupes(self, cr, uid, usager_id, users, context=None):
""" Mettre à jour les groupes de l'usager courant
"""
groups = self.get_usager_groups(cr, uid, usager_id, context=context)
for group in groups:
if group['code'] == 'G1':
self.update_ove_group(cr, uid, group['id'], users['group_1'], context)
if group['code'] == 'G2':
self.update_ove_group(cr, uid, group['id'], users['group_2'], context)
if group['code'] == 'G3':
self.update_ove_group(cr, uid, group['id'], users['group_3'], context)
if group['code'] == 'G4':
self.update_ove_group(cr, uid, group['id'], users['group_4'], context)
if group['code'] == 'G5':
self.update_ove_group(cr, uid, group['id'], users['group_5'], context)
if group['code'] == 'G6':
self.update_ove_group(cr, uid, group['id'], users['group_6'], context)
if group['code'] == 'G7':
self.update_ove_group(cr, uid, group['id'], users['group_7'], context)
if group['code'] == 'G8':
self.update_ove_group(cr, uid, group['id'], users['group_8'], context)
if group['code'] == 'G9':
self.update_ove_group(cr, uid, group['id'], users['group_9'], context)
if group['code'] == 'G10':
self.update_ove_group(cr, uid, group['id'], users['group_10'], context)
return True
def update_ove_group(self, cr, uid, group_id, users, context=None):
""" Mettre à jour d'un groupe d'un usager
"""
vals = {
'user_ids': [[6, 0, users]],
}
return self.pool.get('ove.groupe').write(cr, uid, group_id, vals, context=context)
def get_missed_ove_group(self, cr, uid, usager_groups, context=None):
""" Chercher le groupe manquant dans la liste des groupes d'un usager
"""
groups = ['G1', 'G2', 'G3', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10']
exist_groups = []
missed_groups = []
for group in usager_groups:
exist_groups.append(group['code'])
for group in groups:
if group not in exist_groups:
missed_groups.append(group)
else:
continue
return missed_groups
def create_missed_ove_group(self, cr, uid, group, usager_id, prefix, context=None):
""" Créer les groupes manquant de l'usager passé en paramètre
"""
if group == 'G1':
self.create_group(cr, uid, 'G1', prefix, 'Groupe Impression', [], usager_id, context=context)
if group == 'G2':
self.create_group(cr, uid, 'G2', prefix, 'Groupe Donnée Administrative', [], usager_id, context=context)
if group == 'G3':
self.create_group(cr, uid, 'G3', prefix, 'Groupe Donnée Administrative Modification', [], usager_id, context=context)
if group == 'G4':
self.create_group(cr, uid, 'G4', prefix, 'Groupe Donnée Institutionnelle', [], usager_id, context=context)
if group == 'G5':
self.create_group(cr, uid, 'G5', prefix, 'Groupe Donnée Institutionnelle Modification', [], usager_id, context=context)
if group == 'G6':
self.create_group(cr, uid, 'G6', prefix, 'Groupe Donnée Institutionnelle Validation', [], usager_id, context=context)
if group == 'G7':
self.create_group(cr, uid, 'G7', prefix, 'Groupe Donnée métier', [], usager_id, context=context)
if group == 'G8':
self.create_group(cr, uid, 'G8', prefix, 'Groupe Donnée métier Modification', [], usager_id, context=context)
if group == 'G9':
self.create_group(cr, uid, 'G9', prefix, 'Groupe Donnée métier Validation', [], usager_id, context=context)
if group == 'G10':
self.create_group(cr, uid, 'G10', prefix, 'Groupe Structure', [], usager_id, context=context)
return True
|
tonygalmiche/ove_structure
|
is_api.py
|
Python
|
mit
| 13,118 | 0.008564 |
#!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Copyright 2007 Seth Vidal
import sys
import os
sys.path.insert(0,'/usr/share/yum-cli/')
import yum
import yum.Errors
from utils import YumUtilBase
from yum import _
import logging
import rpmUtils
plugin_autodebuginfo_package_name = "yum-plugin-auto-update-debug-info"
class DebugInfoInstall(YumUtilBase):
NAME = 'debuginfo-install'
VERSION = '1.0'
USAGE = """
debuginfo-install: Install debuginfo packages and their dependencies based on
the name of the non-debug package
debuginfo-install [options] package1 [package2] [package..]"""
def __init__(self):
YumUtilBase.__init__(self,
DebugInfoInstall.NAME,
DebugInfoInstall.VERSION,
DebugInfoInstall.USAGE)
self.logger = logging.getLogger("yum.verbose.cli.debuginfoinstall")
self.optparser = self.getOptionParser()
opts = self.optparser
# Add util commandline options to the yum-cli ones
if hasattr(self, 'getOptionGroup'):
opts = self.getOptionGroup()
opts.add_option("", "--no-debuginfo-plugin",
action="store_true",
help="Turn off automatic installation/update of the yum debuginfo plugin")
self.main()
def doUtilConfigSetup(self, *args, **kwargs):
""" We override this to get our extra option out. """
opts = YumUtilBase.doUtilConfigSetup(self, *args, **kwargs)
self.no_debuginfo_plugin = opts.no_debuginfo_plugin
return opts
def main(self):
# Parse the commandline option and setup the basics.
opts = self.doUtilConfigSetup()
# Check if there is anything to do.
if len(self.cmds) < 1:
print self.optparser.format_help()
sys.exit(0)
if os.geteuid() != 0:
print >> sys.stderr, "You must be root to run this command."
sys.exit(1)
try:
self.doLock()
except yum.Errors.LockError, e:
self.logger.critical("Another application is holding the yum lock, cannot continue")
sys.exit(1)
# enable the -debuginfo repos for enabled primary repos
repos = {}
for repo in self.repos.listEnabled():
repos[repo.id] = repo
for repoid in repos:
di = '%s-debuginfo' % repoid
if di in repos:
continue
repo = repos[repoid]
for r in self.repos.findRepos(di):
self.logger.log(yum.logginglevels.INFO_2,
_('enabling %s') % r.id)
r.enable()
# Note: This is shared with auto-update-debuginfo
for opt in ['repo_gpgcheck', 'gpgcheck', 'cost',
'skip_if_unavailable']:
if hasattr(r, opt):
setattr(r, opt, getattr(repo, opt))
# Setup yum (Ts, RPM db, Repo & Sack)
self.doUtilYumSetup()
self.debugInfo_main()
if hasattr(self, 'doUtilBuildTransaction'):
errc = self.doUtilBuildTransaction()
if errc:
sys.exit(errc)
else:
try:
self.buildTransaction()
except yum.Errors.YumBaseError, e:
self.logger.critical("Error building transaction: %s" % e)
sys.exit(1)
if len(self.tsInfo) < 1:
print 'No debuginfo packages available to install'
self.doUnlock()
sys.exit()
sys.exit(self.doUtilTransaction())
def di_try_install(self, po):
if po.name.endswith('-debuginfo'): # Wildcard matches produce this
return
di_name = '%s-debuginfo' % po.name
if self.pkgSack.searchNevra(name=di_name, arch=po.arch):
test_name = di_name
ver, rel = po.version, po.release
else:
srpm_data = rpmUtils.miscutils.splitFilename(po.sourcerpm) # take the srpmname
srpm_name, ver, rel = srpm_data[0], srpm_data[1], srpm_data[2]
test_name = '%s-debuginfo' % srpm_name
self.install(name=test_name, arch=po.arch, version=ver, release=rel)
def debugInfo_main(self):
"""for each package specified, walk the package's list of deps and install
all the -debuginfo pkgs that match it and its debuginfo"""
# for each pkg
# add that debuginfo to the ts
# look through that pkgs' deps
# add all the debuginfos for the pkgs providing those deps
for pkgglob in self.cmds:
e, m, u = self.rpmdb.matchPackageNames([pkgglob])
for po in e + m:
try:
self.di_try_install(po)
except yum.Errors.InstallError, e:
self.logger.critical('Could not find debuginfo for main pkg: %s' % po)
# do each of its deps
for (n,f,v) in po.requires:
if n.startswith('rpmlib'):
continue
if n.find('.so') != -1:
for pkgtup in self.rpmdb.whatProvides(n,f,v):
deppo = self.rpmdb.searchPkgTuple(pkgtup)[0]
try:
self.di_try_install(deppo)
except yum.Errors.InstallError, e:
self.logger.critical('Could not find debuginfo pkg for dependency package %s' % deppo)
# This is kinda hacky, accessing the option from the plugins code
# but I'm not sure of a better way of doing it
if not self.no_debuginfo_plugin and self.tsInfo:
try:
self.install(pattern=plugin_autodebuginfo_package_name)
except yum.Errors.InstallError, e:
self.logger.critical('Could not find auto debuginfo plugin')
if __name__ == '__main__':
import locale
# This test needs to be before locale.getpreferredencoding() as that
# does setlocale(LC_CTYPE, "")
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error, ex:
# default to C locale if we get a failure.
print >> sys.stderr, 'Failed to set locale, defaulting to C'
os.environ['LC_ALL'] = 'C'
locale.setlocale(locale.LC_ALL, 'C')
if True: # not sys.stdout.isatty():
import codecs
sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
sys.stdout.errors = 'replace'
util = DebugInfoInstall()
|
bbradbury/yum-utils
|
debuginfo-install.py
|
Python
|
gpl-2.0
| 7,411 | 0.004723 |
import datetime as dt
import numpy as np
import pandas as pd
# QSTK Imports
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.qsdateutil as du
def get_orders_list(s_file_path):
l_columns = ["year", "month", "day", "sym", "type", "num"]
df_orders_list = pd.read_csv(s_file_path, sep=',', header=None)
df_orders_list = df_orders_list.dropna(axis=1, how='all')
df_orders_list.columns = l_columns
return df_orders_list
def get_orders(df_orders_list):
na_orders_list = df_orders_list.values
l_orders = []
ld_daily_orders = None
for order in na_orders_list:
dt_date = dt.datetime(order[0], order[1], order[2], hour=16)
d_order = {df_orders_list.columns[3]: order[3], \
df_orders_list.columns[4]: order[4], \
df_orders_list.columns[5]: int(order[5])}
if l_orders != [] and dt_date == l_orders[-1][0]:
l_orders[-1][1].append(d_order)
else:
ld_daily_orders = []
ld_daily_orders.append(d_order)
l_orders.append([dt_date, ld_daily_orders])
na_orders = np.array(l_orders)
df_orders = pd.DataFrame(na_orders[:, 1], index=na_orders[:, 0], columns=["ord"])
df_orders = df_orders.sort()
dt_start = df_orders.ix[0].name
dt_end = df_orders.ix[-1].name
ls_symbols = list(set(df_orders_list["sym"]))
    ls_symbols.sort()  # sorting is necessary because set() does not preserve order
return df_orders, dt_start, dt_end, ls_symbols
def get_data(dt_start, dt_end, ls_symbols):
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
ls_keys = ["open", "high", "low", "close", "volume", "actual_close"]
dataobj = da.DataAccess('Yahoo')
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method="ffill")
d_data[s_key] = d_data[s_key].fillna(method="bfill")
d_data[s_key] = d_data[s_key].fillna(1.0)
return d_data
def get_prices(dt_start, dt_end, ls_symbols, s_key="close"):
# close = adjusted close
# actual_close = actual close
d_data = get_data(dt_start, dt_end, ls_symbols)
return d_data[s_key]
def process_daily_orders(dt_date, df_orders, df_prices, df_num, df_val, df_res):
op = 0
daily_orders = list(df_orders.ix[dt_date, "ord"])
for order in daily_orders:
if order["type"] == "Buy":
op = 1
elif order["type"] == "Sell":
op = -1
df_num.ix[dt_date, order["sym"]] += op * order["num"]
df_res.ix[dt_date, "cash"] += -op * order["num"] * df_prices.ix[dt_date, order["sym"]]
def update_port(dt_date, dt_last_orders_date, ls_symbols, df_num, df_res):
for s_symbol in ls_symbols:
df_num.ix[dt_date, s_symbol] = df_num.ix[dt_last_orders_date, s_symbol]
df_res.ix[dt_date, "cash"] = df_res.ix[dt_last_orders_date, "cash"]
def value_port(dt_date, ls_symbols, df_prices, df_num, df_val, df_res):
for s_symbol in ls_symbols:
df_val.ix[dt_date, s_symbol] = df_num.ix[dt_date, s_symbol] * df_prices.ix[dt_date, s_symbol]
df_res.ix[dt_date, "port"] = np.sum(df_val.ix[dt_date, :])
df_res.ix[dt_date, "total"] = df_res.ix[dt_date, "port"] + df_res.ix[dt_date, "cash"]
def process_orders(df_orders, df_prices, cash):
ldt_dates = list(df_prices.index)
ls_symbols = list(df_prices.columns)
df_num = pd.DataFrame(index=ldt_dates, columns=ls_symbols)
df_val = pd.DataFrame(index=ldt_dates, columns=ls_symbols)
df_res = pd.DataFrame(index=ldt_dates, columns=["port", "cash", "total"])
df_num = df_num.fillna(0.0)
df_val = df_val.fillna(0.0)
df_res = df_res.fillna(0.0)
df_res.ix[0, "cash"] = cash
ldt_orders_dates = list(df_orders.index)
iter_orders_dates = iter(ldt_orders_dates)
dt_orders_date = iter_orders_dates.next()
dt_last_orders_date = dt_orders_date
for dt_date in ldt_dates:
update_port(dt_date, dt_last_orders_date, ls_symbols, df_num, df_res)
if dt_date == dt_orders_date:
process_daily_orders(dt_date, df_orders, df_prices, df_num, df_val, df_res)
try:
dt_last_orders_date = dt_orders_date
dt_orders_date = iter_orders_dates.next()
except StopIteration:
pass
value_port(dt_date, ls_symbols, df_prices, df_num, df_val, df_res)
df_port = df_num.join(df_val, lsuffix="_num", rsuffix="_val").join(df_res)
#df_port.to_csv("port.csv")
return df_port
def save_values(df_port, s_out_file_path):
ldt_dates = df_port.index
na_dates = np.array([[dt_date.year, dt_date.month, dt_date.day] for dt_date in ldt_dates])
na_total = np.array(df_port["total"])
na_values = np.insert(arr=na_dates, obj=3, values=na_total, axis=1)
df_values = pd.DataFrame(na_values, columns=["year", "month", "day", "total"])
df_values.to_csv(s_out_file_path, sep=",", header=False, index=False)
if __name__ == '__main__':
print "start market_sim.py"
s_in_file_path = "data\\q1_orders.csv"
s_out_file_path = "data\\q1_values.csv"
s_cash = "100000"
f_cash = float(s_cash)
df_orders_list = get_orders_list(s_in_file_path)
df_orders, dt_start, dt_end, ls_symbols = get_orders(df_orders_list)
df_prices = get_prices(dt_start, dt_end, ls_symbols)
df_port = process_orders(df_orders, df_prices, f_cash)
save_values(df_port, s_out_file_path)
print "end market_sim.py"
|
gdikos/qstk-on-ec2
|
market_sim.py
|
Python
|
mit
| 5,585 | 0.005909 |
from django.contrib import admin
from django.contrib.admin.widgets import AdminFileWidget, AdminURLFieldWidget
from django.contrib.contenttypes.models import ContentType
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from models import (Image, Video, Audio, Flash, Collection, Embed, Document,
CollectionRelation, MediaTemplate)
import settings
from forms import (ImageCreationForm, VideoCreationForm, AudioCreationForm,
FlashCreationForm, DocumentCreationForm, EmbedCreationForm)
# from templatetags.media_widgets import snipshot_url
class AdminImageWidget(AdminFileWidget):
def render(self, name, value, attrs=None):
output = []
if value and hasattr(value, 'instance') and value.instance.thumbnail:
thumbnail = value.instance.thumbnail.url
width = value.instance.thumb_width
height = value.instance.thumb_height
# snipshot = snipshot_url(value.instance)
# crop_tag = '''<br /><a class="link" href="#" onclick="var win = window.open('%s','snipshot', 'height=500,width=800,resizable=yes,scrollbars=yes');win.focus();">Crop image with snipshot</a>''' % snipshot
tag = u'<img src="%s" width="%s" height="%s"/>' % (
thumbnail, width, height)
else:
# crop_tag = u""
tag = _("<strong>No Thumbnail available</strong>")
if value:
output.append(u'<a href="%s" target="_blank">%s</a>' % (
value.url, tag))
# output.append(crop_tag)
return mark_safe(u''.join(output))
class AdminExternalURLWidget(AdminURLFieldWidget):
def render(self, name, value, attrs=None):
output = []
tag = _("<strong>No Thumbnail available</strong>")
if value:
output.append(u'<a href="%s" target="_blank">%s</a>' % (value, tag))
output.append(u'<br /><a href="%s" target="_blank">%s</a>' % (value, value))
return mark_safe(u''.join(output))
class GenericCollectionInlineModelAdmin(admin.options.InlineModelAdmin):
ct_field = 'content_type'
ct_fk_field = 'object_id'
fields = ('content_type', 'object_id', 'position')
extra = 3
def __init__(self, parent_model, admin_site):
super(GenericCollectionInlineModelAdmin, self).__init__(parent_model, admin_site)
ctypes = ContentType.objects.all().order_by('id').values_list('id', 'app_label', 'model')
elements = ["%s: '%s/%s'" % (x, y, z) for x, y, z in ctypes]
self.content_types = "{%s}" % ",".join(elements)
def get_formset(self, request, obj=None):
result = super(GenericCollectionInlineModelAdmin, self).get_formset(request, obj)
result.content_types = self.content_types
result.ct_fk_field = self.ct_fk_field
return result
class GenericCollectionTabularInline(GenericCollectionInlineModelAdmin):
template = 'admin/edit_inlines/gen_coll_tabular.html'
class MediaAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('title', 'caption')}),
(_("Content"), {'fields': (('file', 'external_url'),)}),
(_("Credit"), {'fields': ('author', 'one_off_author', 'reproduction_allowed')}),
(_("Metadata"), {'fields': ('metadata', 'mime_type')}),
(_("Connections"), {'fields': ('public', 'site')}),
# (_("Widget"), {'fields': ('width', 'height')}),
(_("Advanced options"), {
'classes': ('collapse',),
'fields': ('widget_template',)
}),
)
add_fieldsets = (
(None, {'fields': ('title',)}),
(_("Content"), {'fields': ('external_url', 'file', 'caption')}),
(_("Rights"), {'fields': ('public', 'reproduction_allowed')}),
(_("Additional Info"), {
'classes': ('collapse',),
'fields': ('creation_date', 'site')
})
)
list_display = ('title', 'author_name', 'mime_type', 'public', 'creation_date')
list_filter = ('site', 'creation_date', 'public')
list_editable = ('public',)
date_hierarchy = 'creation_date'
search_fields = ('caption', 'file')
raw_id_fields = ('author', )
add_form_template = 'admin/massmedia/content_add_form.html'
def get_fieldsets(self, request, obj=None):
"""
Return add_fieldsets if it is a new object and the form has specified
different fieldsets for creation vs. change. Otherwise punt.
"""
if not obj and hasattr(self, 'add_fieldsets'):
return self.add_fieldsets
return super(MediaAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Return a special add form if specified
"""
defaults = {}
if not obj and hasattr(self, 'add_form'):
defaults = {
'form': self.add_form
}
defaults.update(kwargs)
return super(MediaAdmin, self).get_form(request, obj, **defaults)
class ImageAdmin(MediaAdmin):
list_display = ('render_thumb', 'title', 'creation_date')
list_display_links = ('render_thumb', 'title', )
list_editable = tuple()
add_fieldsets = (
(_("Content"), {'fields': ('external_url', 'file', 'caption')}),
(_("Rights"), {'fields': ('public', 'reproduction_allowed')}),
(_("Additional Info"), {
'classes': ('collapse',),
'fields': ('title', 'creation_date', 'site')
})
)
add_form = ImageCreationForm
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(ImageAdmin, self).get_urls()
my_urls = patterns('',
(r'^(?P<image_id>\d+)/crops/add/$', self.add_crop),
(r'^(?P<image_id>\d+)/crops/(?P<object_id>\d+)/$', self.update_crop),
(r'^(?P<image_id>\d+)/crops/(?P<object_id>\d+)/delete/$', self.delete_crop),
url(r'^close/$', self.close_window, name="imagecustomsize_close"),
)
return my_urls + urls
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'file':
kwargs['widget'] = AdminImageWidget
kwargs.pop('request')
return db_field.formfield(**kwargs)
elif db_field.name == 'external_url':
kwargs['widget'] = AdminExternalURLWidget
kwargs.pop('request')
return db_field.formfield(**kwargs)
return super(ImageAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def add_crop(self, request, image_id):
from massmedia.views import ImageCustomSizeCreate
return ImageCustomSizeCreate.as_view()(request, image_id=image_id)
def delete_crop(self, request, image_id, object_id):
from massmedia.views import ImageCustomSizeDelete
return ImageCustomSizeDelete.as_view()(request, image_id=image_id, object_id=object_id)
def update_crop(self, request, image_id, object_id):
from massmedia.views import ImageCustomSizeUpdate
return ImageCustomSizeUpdate.as_view()(request, image_id=image_id, object_id=object_id)
def close_window(self, request):
from django.views.generic.base import TemplateView
return TemplateView.as_view(template_name='admin/massmedia/imagecustomsize/close_window.html')(request)
class VideoAdmin(MediaAdmin):
list_display = ('title', 'thumb', 'author_name', 'mime_type',
'public', 'creation_date')
fieldsets = (
(None, {'fields': ('title', 'caption')}),
(_("Content"), {'fields': (('file', 'external_url'), 'thumbnail')}),
(_("Credit"), {'fields': ('author', 'one_off_author', 'reproduction_allowed')}),
(_("Metadata"), {'fields': ('metadata', 'mime_type')}),
(_("Connections"), {'fields': ('public', 'site')}),
(_("Widget"), {'fields': ('width', 'height')}),
(_("Advanced options"), {
'classes': ('collapse',),
'fields': ('widget_template',)
}),
)
raw_id_fields = ('thumbnail',)
add_fieldsets = (
(None, {'fields': ('title', )}),
(_("Content"), {'fields': (('external_url', 'file'), 'thumbnail')}),
(_("Rights"), {'fields': ('public', 'reproduction_allowed')}),
(_("Additional Info"), {
'classes': ('collapse',),
'fields': ('creation_date', 'site')
})
)
add_form = VideoCreationForm
class AudioAdmin(MediaAdmin, admin.ModelAdmin):
add_form = AudioCreationForm
class FlashAdmin(MediaAdmin):
add_form = FlashCreationForm
class DocumentAdmin(MediaAdmin):
add_form = DocumentCreationForm
class CollectionInline(GenericCollectionTabularInline):
model = CollectionRelation
template = 'admin/edit_inline/gen_coll_tabular.html'
class CollectionAdmin(admin.ModelAdmin):
fields = ('title', 'caption', 'zip_file', 'external_url', 'public', 'site')
list_display = ('title', 'caption', 'public', 'creation_date')
list_filter = ('site', 'creation_date', 'public')
date_hierarchy = 'creation_date'
search_fields = ('caption', )
inlines = (CollectionInline, )
class Media:
js = (
'js/genericcollections.js',
)
class EmbedAdmin(MediaAdmin):
fieldsets = (
(None, {'fields': ('title', 'caption')}),
(_("Content"), {'fields': (('code', ), )}),
(_("Credit"), {'fields': ('author', 'one_off_author', 'reproduction_allowed')}),
(_("Metadata"), {'fields': ('metadata', 'mime_type')}),
(_("Connections"), {'fields': ('public', 'site')}),
(_("Widget"), {'fields': ('width', 'height')}),
(_("Advanced options"), {
'classes': ('collapse',),
'fields': ('widget_template',)
}),
)
add_fieldsets = (
(_("Content"), {'fields': ('title', 'external_url', 'caption')}),
(_("Additional Info"), {
'classes': ('collapse',),
'fields': ('creation_date', 'site')
})
)
add_form = EmbedCreationForm
list_display = ('title', 'mime_type', 'public', 'creation_date')
list_filter = ('site', 'creation_date', 'public')
list_editable = ('public',)
date_hierarchy = 'creation_date'
search_fields = ('caption', )
admin.site.register(Collection, CollectionAdmin)
admin.site.register(Image, ImageAdmin)
admin.site.register(Video, VideoAdmin)
admin.site.register(Audio, AudioAdmin)
admin.site.register(Flash, FlashAdmin)
admin.site.register(Document, DocumentAdmin)
admin.site.register(Embed, EmbedAdmin)
if not settings.FS_TEMPLATES:
admin.site.register(MediaTemplate)
|
callowayproject/django-massmedia
|
massmedia/admin.py
|
Python
|
apache-2.0
| 10,650 | 0.001784 |
from django.conf import settings
from django.contrib.auth.models import UserManager
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
__all__ = (
'get_user_document',
)
MONGOENGINE_USER_DOCUMENT = getattr(
settings, 'MONGOENGINE_USER_DOCUMENT', 'mongoengine.django.auth.User')
def get_user_document():
"""Get the user document class used for authentication.
This is the class defined in settings.MONGOENGINE_USER_DOCUMENT, which
defaults to `mongoengine.django.auth.User`.
"""
name = MONGOENGINE_USER_DOCUMENT
dot = name.rindex('.')
module = import_module(name[:dot])
return getattr(module, name[dot + 1:])
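# Usage sketch (the custom document path is hypothetical; the query assumes a
# standard MongoEngine Document):
#   # settings.py
#   MONGOENGINE_USER_DOCUMENT = 'myapp.documents.CustomUser'
#   # application code
#   User = get_user_document()
#   user = User.objects(username='alice').first()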
class MongoUserManager(UserManager):
"""A User manager wich allows the use of MongoEngine documents in Django.
To use the manager, you must tell django.contrib.auth to use MongoUser as
the user model. In you settings.py, you need:
INSTALLED_APPS = (
...
'django.contrib.auth',
'mongoengine.django.mongo_auth',
...
)
AUTH_USER_MODEL = 'mongo_auth.MongoUser'
Django will use the model object to access the custom Manager, which will
replace the original queryset with MongoEngine querysets.
By default, mongoengine.django.auth.User will be used to store users. You
can specify another document class in MONGOENGINE_USER_DOCUMENT in your
settings.py.
The User Document class has the same requirements as a standard custom user
model: https://docs.djangoproject.com/en/dev/topics/auth/customizing/
In particular, the User Document class must define USERNAME_FIELD and
REQUIRED_FIELDS.
`AUTH_USER_MODEL` has been added in Django 1.5.
"""
def contribute_to_class(self, model, name):
super(MongoUserManager, self).contribute_to_class(model, name)
self.dj_model = self.model
self.model = get_user_document()
self.dj_model.USERNAME_FIELD = self.model.USERNAME_FIELD
username = models.CharField(_('username'), max_length=30, unique=True)
username.contribute_to_class(self.dj_model, self.dj_model.USERNAME_FIELD)
self.dj_model.REQUIRED_FIELDS = self.model.REQUIRED_FIELDS
for name in self.dj_model.REQUIRED_FIELDS:
field = models.CharField(_(name), max_length=30)
field.contribute_to_class(self.dj_model, name)
def get(self, *args, **kwargs):
try:
return self.get_query_set().get(*args, **kwargs)
except self.model.DoesNotExist:
# ModelBackend expects this exception
raise self.dj_model.DoesNotExist
@property
def db(self):
raise NotImplementedError
def get_empty_query_set(self):
return self.model.objects.none()
def get_query_set(self):
return self.model.objects
class MongoUser(models.Model):
""""Dummy user model for Django.
MongoUser is used to replace Django's UserManager with MongoUserManager.
The actual user document class is mongoengine.django.auth.User or any
other document class specified in MONGOENGINE_USER_DOCUMENT.
To get the user document class, use `get_user_document()`.
"""
objects = MongoUserManager()
|
LethusTI/supportcenter
|
vendor/mongoengine/mongoengine/django/mongo_auth/models.py
|
Python
|
gpl-3.0
| 3,378 | 0.000592 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from .finders import get_finders
class WagtailEmbedsAppConfig(AppConfig):
name = 'wagtail.embeds'
label = 'wagtailembeds'
verbose_name = _("Wagtail embeds")
def ready(self):
# Check configuration on startup
get_finders()
|
mikedingjan/wagtail
|
wagtail/embeds/apps.py
|
Python
|
bsd-3-clause
| 347 | 0 |
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="meta", parent_name="pointcloud", **kwargs):
super(MetaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/pointcloud/_meta.py
|
Python
|
mit
| 438 | 0 |
"""
WSGI config for dqc_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dqc_django.settings")
application = get_wsgi_application()
|
arturcalves/dqc
|
dqc_django/wsgi.py
|
Python
|
mit
| 397 | 0 |
# coding: utf-8
"""
This file contains methods for translation norsource rules into rules we can
convert easily to a Typeraft compatible format.
"""
import re
from norsourceparser.core.config import config
from norsourceparser.core.constants import REDUCED_RULE_POS, REDUCED_RULE_GLOSSES, REDUCED_RULE_MORPHOLOGICAL_BREAKUP, \
REDUCED_RULE_VALENCY, REDUCED_RULE_CITATION_FORM, REDUCED_RULE_CONSTRUCTION_FORM, REDUCED_RULE_PRIORITY_AMBIGUOUS, \
REDUCED_RULE_PRIORITY_MERGE, REDUCED_RULE_PRIORITY_DOMINATE
from norsourceparser.core.util import get_pos, get_inflectional_rules, get_valency, get_dominating_pos_rule, \
get_dominating_gloss_rule
from norsourceparser.core.util import split_lexical_entry, get_gloss
class Rule(object):
def __init__(
self,
rule_id,
value,
priority=REDUCED_RULE_PRIORITY_AMBIGUOUS
):
self.rule_id = rule_id
self.value = value
self.priority = priority
def __unicode__(self):
return u"%d %s (Priority %d)" % (self.rule_id, self.value, self.priority)
def get_rules_from_partial_branch(partial_branch):
"""
This method is the main `entry-point` for inferring rules from a branch.
The method will analyse the branch for POS and GLOSS-tags, and possibly morphological
breakups.
:param partial_branch: A list of branch-entries.
:return: Array of rules
"""
# If we are at the terminal, we do nothing just yet.
if len(partial_branch) < 2:
return
rules = []
second_node = partial_branch[1]
terminal = partial_branch[0]
# With the terminal and second node, we can get information
# from the lexical entry
[stem, pos, gloss] = split_lexical_entry(second_node.name)
pos = get_pos(pos, None) or get_pos(second_node.name, None)
gloss = get_gloss(gloss, None) or get_gloss(second_node.name, None)
    # If we only have the lexical entry so far, warn (in debug mode) when no POS was found
if len(partial_branch) == 2 and config.DEBUG:
if pos is None:
print("UNABLE TO FIND POS FOR RULE: %s" % second_node.name)
if len(partial_branch) == 2:
# If we only have access to the lexical entry, we return what rules
# we can from here.
# Verbs might yield some valency information here
if pos == "V":
rules.extend(get_verb_valency_rule(partial_branch))
rules.extend(parse_lexical_entry(terminal, stem, pos, gloss))
return rules
if 'bli_pass' in partial_branch[1].name:
# We look for the special case of a bli_pass case here
rules.extend(get_bli_passive_rules(partial_branch))
else:
rules.extend(get_gloss_rules_from_partial_branch(partial_branch))
rules.extend(get_dominating_rules(partial_branch))
if pos == "N":
# If the pos is a Noun, we look for the special noun inflectional rules
rules.extend(get_noun_inflectional_rule(partial_branch))
rules.extend(get_complex_rules(partial_branch))
return rules
def parse_lexical_entry(terminal, stem, pos, gloss):
"""
This method helps us to parse a lexical entry.
    To do this, we need the extracted stem, pos and gloss from the rule,
as well as the terminal.
    :param terminal: The terminal node, which will contain the dictionary-form of the
word we are trying to associate rules with.
:param stem: The parsed stem-form of the word.
:param pos: The POS-tag of the word.
:param gloss: Any gloss-tags so far found of the word.
:return: An array of rules.
"""
rules = []
# Here we are parsing the lexical entry of the branch
if pos is not None:
rules.append(Rule(REDUCED_RULE_POS, pos))
# This happens on e.g. punctuations
if stem is not None and pos is None and gloss is None:
rules.append(Rule(REDUCED_RULE_POS, pos))
# We capture morphological breakup and glosses here.
# This information may get overwritten later up the tree/branch. Yet
# we still do this step in case we have some missing information later up the tree.
if pos in ['N', 'V', 'ADJ']:
rules.append(Rule(REDUCED_RULE_CITATION_FORM, stem))
if stem != terminal.name and stem in terminal.name:
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [stem, re.sub("^"+stem, "", terminal.name)]))
# We do this here so we can capture the correct position
if gloss is not None:
rules.append(Rule(REDUCED_RULE_GLOSSES, ["", gloss], REDUCED_RULE_PRIORITY_MERGE))
else:
if stem not in terminal.name:
# We have morphology, but it is non-concatenative
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [terminal.name]))
else:
# We have no morphology at all here, we don't have any inflections here.
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [stem]))
# We do this here so we can capture the correct position
if gloss is not None:
rules.append(Rule(REDUCED_RULE_GLOSSES, [gloss], REDUCED_RULE_PRIORITY_MERGE))
else:
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [terminal.name]))
if gloss is not None:
rules.append(Rule(REDUCED_RULE_GLOSSES, [gloss], REDUCED_RULE_PRIORITY_MERGE))
return rules
def get_noun_inflectional_rule(partial_branch):
"""
This method helps us to parse an inflectional rule for a noun.
The method accepts a partial branch, but only proceeds if the branch is at least
    of length 3. This flexibility means callers do not need to guard their calls
    to this method themselves.
If the POS of the branch is found not to be a noun, we simply return.
:param partial_branch: A partial branch.
:return: An array, potentially filled with rules.
"""
rules = []
if len(partial_branch) < 3:
return rules
# Here we are looking for the inflectional rules for nouns
last_node = partial_branch[-1]
lexical_node = partial_branch[1]
terminal = partial_branch[0]
[stem, pos, _] = split_lexical_entry(lexical_node.name)
pos = get_pos(pos, None) or get_pos(lexical_node.name, None)
if pos != 'N':
return rules
inf_rules = get_inflectional_rules(stem, last_node.name)
if inf_rules is None:
return rules
[current_suffix, suffix, glosses] = inf_rules
if glosses is None and config.DEBUG:
print("NONE GLOSSES", glosses)
if current_suffix is None or suffix is None:
# This happens on the rule pl_ind_n_short_0_irule
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [stem]))
rules.append(Rule(REDUCED_RULE_GLOSSES, [".".join(glosses)], REDUCED_RULE_PRIORITY_MERGE))
else:
if current_suffix == '*':
morphological_breakup = [stem, suffix]
glosses = ["", ".".join(glosses)]
else:
if current_suffix not in suffix:
morphological_breakup = [terminal.name]
glosses = ["".join(glosses)]
else:
morphological_breakup = [stem, re.sub("^"+current_suffix, "", suffix)]
glosses = ["", ".".join(glosses)]
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, morphological_breakup))
rules.append(Rule(REDUCED_RULE_GLOSSES, glosses, REDUCED_RULE_PRIORITY_MERGE))
return rules
def get_gloss_rules_from_partial_branch(partial_tree):
"""
Tries to get rules for something other than a verb, noun or adjective. We do this simply by doing a lookup
in the non-inflectional table. This is of course all encapsulated in the get_gloss method, so we just call that,
fishing for luck.
:param partial_tree:
:return: An array of rules
"""
last_rule = partial_tree[-1].name
lexical_rule = partial_tree[1].name
terminal = partial_tree[0].name
[stem, pos, _] = split_lexical_entry(lexical_rule)
pos = get_pos(pos, None) or get_pos(lexical_rule, None)
maybe_gloss = get_gloss(last_rule)
if maybe_gloss is not None:
if pos in ['N', 'ADJ', 'V']:
if stem != terminal and stem in terminal:
# This means we have some inflectional rule, and should
# add the gloss to the suffix
return [Rule(REDUCED_RULE_GLOSSES, ["", maybe_gloss], REDUCED_RULE_PRIORITY_MERGE)]
return [Rule(REDUCED_RULE_GLOSSES, [maybe_gloss], REDUCED_RULE_PRIORITY_MERGE)]
return []
def get_bli_passive_rules(partial_branch):
"""
This method checks for the special case of bli_passives.
:param partial_branch:
:return: An array of rules
"""
rules = []
if len(partial_branch) == 3:
lexical = partial_branch[1]
if 'bli_pass' in lexical.name:
terminal = partial_branch[0]
inflectional = partial_branch[2]
rules.append(Rule(REDUCED_RULE_POS, 'V'))
gloss_rules = ""
if inflectional.name == 'pres-infl_rule':
gloss_rules = 'PRES'
elif inflectional.name == 'pret-finalstr_infl_rule':
gloss_rules = 'PRET'
elif inflectional.name == 'ppart-finalstr-tt_infl_rule':
gloss_rules = 'PRF.PTCP'
if 'bli' in terminal.name:
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, ['bli', re.sub('^bli', '', terminal.name)]))
rules.append(Rule(REDUCED_RULE_GLOSSES, ['', gloss_rules], REDUCED_RULE_PRIORITY_DOMINATE))
else:
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [terminal.name]))
rules.append(Rule(REDUCED_RULE_GLOSSES, [gloss_rules], REDUCED_RULE_PRIORITY_DOMINATE))
return rules
def get_verb_valency_rule(partial_branch):
"""
This method tries to get a valency rule for a verb.
:param partial_branch:
:return:
"""
valency, lex_corr = get_valency(partial_branch[-1].name)
if valency:
return [Rule(REDUCED_RULE_VALENCY, valency), Rule(REDUCED_RULE_CONSTRUCTION_FORM, lex_corr)]
return []
def get_verb_citform(partial_branch):
lex = partial_branch[1].name
if 'vlxm' in lex:
return [Rule(REDUCED_RULE_CITATION_FORM, lex.split("_")[0])]
return []
def get_dominating_rules(partial_branch):
last_rule = partial_branch[-1].name
lexical_rule = partial_branch[1].name
terminal = partial_branch[0].name
[stem, pos, _] = split_lexical_entry(lexical_rule)
pos = get_pos(pos, None) or get_pos(lexical_rule, None)
pos_rule = get_dominating_pos_rule(last_rule)
if pos_rule:
return [Rule(REDUCED_RULE_POS, pos_rule, REDUCED_RULE_PRIORITY_DOMINATE)]
gloss_rule = get_dominating_gloss_rule(last_rule)
if gloss_rule:
if pos in ['N', 'ADJ', 'V']:
if stem != terminal and stem in terminal:
# This means we have some inflectional rule, and should
# add the gloss to the suffix
return [Rule(REDUCED_RULE_GLOSSES, ["", gloss_rule], REDUCED_RULE_PRIORITY_DOMINATE)]
return [Rule(REDUCED_RULE_GLOSSES, [gloss_rule], REDUCED_RULE_PRIORITY_DOMINATE)]
return []
def get_complex_rules(partial_branch):
"""
Currently we only do a special case here.
:param partial_branch:
:return:
"""
if len(partial_branch) < 4:
return []
terminal = partial_branch[0].name
lexical = partial_branch[1]
potential_pass = partial_branch[2]
inflectional = partial_branch[3]
[stem, pos, _] = split_lexical_entry(lexical.name)
if 'pass' in potential_pass.name and get_gloss(inflectional.name, '') == 'PRF':
if stem != terminal and stem in terminal:
return [Rule(REDUCED_RULE_GLOSSES, ['', 'PASS.PTCP'], REDUCED_RULE_PRIORITY_DOMINATE)]
return [Rule(REDUCED_RULE_GLOSSES, ['PASS.PTCP'], REDUCED_RULE_PRIORITY_DOMINATE)]
return []
|
Typecraft/norsourceparser
|
norsourceparser/core/rules.py
|
Python
|
mit
| 11,982 | 0.002921 |
"""
The huginn_record script is used to record flight data from the simulator
"""
from argparse import ArgumentParser
import json
from twisted.internet import reactor
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
from twisted.internet.task import LoopingCall
from huginn import configuration
from huginn.io import CSVFDMDataWriter
def get_arguments():
parser = ArgumentParser(description="Record the fdm data")
parser.add_argument("--host",
action="store",
default="127.0.0.1",
help="the simulator ip address")
parser.add_argument("--port",
action="store",
default=configuration.WEB_SERVER_PORT,
type=int,
help="the simulator http port")
parser.add_argument("--dt",
default=1.0,
help="How often to request data from the simulator")
parser.add_argument("output", help="the output file")
return parser.parse_args()
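# Command line sketch (the entry point name is assumed from the module name;
# host and dt shown are the defaults declared above):
#   huginn_record --host 127.0.0.1 --dt 1.0 flight_data.csv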
def request_fdm_data(args, csv_telemetry_writer):
agent = Agent(reactor)
url = "http://%s:%d/fdm" % (args.host, args.port)
d = agent.request("GET",
url,
Headers({}),
None)
d.addCallback(process_fdm_data_response, csv_telemetry_writer)
return d
def process_fdm_data_response(response, csv_telemetry_writer):
d = readBody(response)
d.addCallback(save_fdm_data, csv_telemetry_writer)
return d
def save_fdm_data(body, csv_telemetry_writer):
fdm_data = json.loads(body)
csv_telemetry_writer.write_fdm_data(fdm_data)
for variable in ["time", "altitude", "airspeed", "heading"]:
print("%s\t%f" % (variable, fdm_data[variable]))
print ("")
def main():
args = get_arguments()
output_file = open(args.output, "w")
variables = ["time", "dt", "latitude", "longitude", "altitude",
"airspeed", "heading", "x_acceleration", "y_acceleration",
"z_acceleration", "roll_rate", "pitch_rate", "yaw_rate",
"temperature", "static_pressure", "total_pressure",
"roll", "pitch", "thrust", "aileron", "elevator", "rudder",
"throttle"]
csv_telemetry_writer = CSVFDMDataWriter(variables, output_file)
csv_telemetry_writer.write_header()
task = LoopingCall(request_fdm_data, args, csv_telemetry_writer)
task.start(args.dt)
reactor.run() # @UndefinedVariable
output_file.close()
|
pmatigakis/Huginn
|
huginn/cli/huginn_record.py
|
Python
|
bsd-3-clause
| 2,622 | 0.000381 |
from __future__ import unicode_literals
from frappe import _
from frappe.desk.moduleview import add_setup_section
def get_data():
data = [
{
"label": _("Users"),
"icon": "icon-group",
"items": [
{
"type": "doctype",
"name": "User",
"description": _("System and Website Users")
},
{
"type": "doctype",
"name": "Role",
"description": _("User Roles")
}
]
},
{
"label": _("Permissions"),
"icon": "icon-lock",
"items": [
{
"type": "page",
"name": "permission-manager",
"label": _("Role Permissions Manager"),
"icon": "icon-lock",
"description": _("Set Permissions on Document Types and Roles")
},
{
"type": "page",
"name": "user-permissions",
"label": _("User Permissions Manager"),
"icon": "icon-shield",
"description": _("Set Permissions per User")
},
{
"type": "page",
"name": "modules_setup",
"label": _("Show / Hide Modules"),
"icon": "icon-upload",
"description": _("Show or hide modules globally.")
},
{
"type": "report",
"is_query_report": True,
"doctype": "User",
"icon": "icon-eye-open",
"name": "Permitted Documents For User",
"description": _("Check which Documents are readable by a User")
},
{
"type": "report",
"doctype": "DocShare",
"icon": "icon-share",
"name": "Document Share Report",
"description": _("Report of all document shares")
}
]
},
{
"label": _("Settings"),
"icon": "icon-wrench",
"items": [
{
"type": "doctype",
"name": "System Settings",
"label": _("System Settings"),
"description": _("Language, Date and Time settings"),
"hide_count": True
},
{
"type": "doctype",
"name": "Scheduler Log",
"description": _("Log of error on automated events (scheduler).")
},
]
},
{
"label": _("Data"),
"icon": "icon-th",
"items": [
{
"type": "page",
"name": "data-import-tool",
"label": _("Import / Export Data"),
"icon": "icon-upload",
"description": _("Import / Export Data from .csv files.")
},
{
"type": "doctype",
"name": "Naming Series",
"description": _("Set numbering series for transactions."),
"hide_count": True
},
{
"type": "doctype",
"name": "Rename Tool",
"label": _("Bulk Rename"),
"description": _("Rename many items by uploading a .csv file."),
"hide_count": True
},
{
"type": "page",
"name": "backups",
"label": _("Download Backups"),
"description": _("List of backups available for download"),
"icon": "icon-download"
},
]
},
{
"label": _("Email"),
"icon": "icon-envelope",
"items": [
{
"type": "doctype",
"name": "Email Account",
"description": _("Add / Manage Email Accounts.")
},
{
"type": "doctype",
"name": "Email Alert",
"description": _("Setup Email Alert based on various criteria.")
},
{
"type": "doctype",
"name": "Standard Reply",
"description": _("Standard replies to common queries.")
},
]
},
{
"label": _("Printing"),
"icon": "icon-print",
"items": [
{
"type": "page",
"label": "Print Format Builder",
"name": "print-format-builder",
"description": _("Drag and Drop tool to build and customize Print Formats.")
},
{
"type": "doctype",
"name": "Print Settings",
"description": _("Set default format, page size, print style etc.")
},
{
"type": "doctype",
"name": "Print Format",
"description": _("Customized HTML Templates for printing transactions.")
},
]
},
{
"label": _("Workflow"),
"icon": "icon-random",
"items": [
{
"type": "doctype",
"name": "Workflow",
"description": _("Define workflows for forms.")
},
{
"type": "doctype",
"name": "Workflow State",
"description": _("States for workflow (e.g. Draft, Approved, Cancelled).")
},
{
"type": "doctype",
"name": "Workflow Action",
"description": _("Actions for workflow (e.g. Approve, Cancel).")
},
]
},
{
"label": _("Integrations"),
"icon": "icon-star",
"items": [
{
"type": "page",
"name": "applications",
"label": _("Application Installer"),
"description": _("Install Applications."),
"icon": "icon-download"
},
{
"type": "doctype",
"name": "Social Login Keys",
"description": _("Enter keys to enable login via Facebook, Google, GitHub."),
},
{
"type": "doctype",
"name": "Dropbox Backup",
"description": _("Manage cloud backups on Dropbox"),
"hide_count": True
}
]
},
{
"label": _("Customize"),
"icon": "icon-glass",
"items": [
{
"type": "doctype",
"name": "Customize Form",
"description": _("Change field properties (hide, readonly, permission etc.)"),
"hide_count": True
},
{
"type": "doctype",
"name": "Custom Field",
"description": _("Add fields to forms.")
},
{
"type": "doctype",
"name": "Custom Script",
"description": _("Add custom javascript to forms.")
},
{
"type": "doctype",
"name": "DocType",
"description": _("Add custom forms.")
}
]
},
]
add_setup_section(data, "frappe", "website", _("Website"), "icon-globe")
return data
|
sbktechnology/trufil-frappe
|
frappe/config/setup.py
|
Python
|
mit
| 5,480 | 0.042336 |
import logging
from django.conf import settings
from django.db import models
from mkt.site.mail import send_mail
from mkt.site.models import ModelBase
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
from mkt.websites.models import Website
log = logging.getLogger('z.abuse')
class AbuseReport(ModelBase):
# NULL if the reporter is anonymous.
reporter = models.ForeignKey(UserProfile, null=True,
blank=True, related_name='abuse_reported')
ip_address = models.CharField(max_length=255, default='0.0.0.0')
# An abuse report can be for an addon, a user, or a website. Only one of
# these should be null.
addon = models.ForeignKey(Webapp, null=True, related_name='abuse_reports')
user = models.ForeignKey(UserProfile, null=True,
related_name='abuse_reports')
website = models.ForeignKey(Website, null=True,
related_name='abuse_reports')
message = models.TextField()
read = models.BooleanField(default=False)
class Meta:
db_table = 'abuse_reports'
@property
def object(self):
return self.addon or self.user or self.website
def send(self):
obj = self.object
if self.reporter:
user_name = '%s (%s)' % (self.reporter.name, self.reporter.email)
else:
user_name = 'An anonymous coward'
if self.addon:
type_ = 'App'
elif self.user:
type_ = 'User'
else:
type_ = 'Website'
subject = u'[%s] Abuse Report for %s' % (type_, obj.name)
msg = u'%s reported abuse for %s (%s%s).\n\n%s' % (
user_name, obj.name, settings.SITE_URL, obj.get_url_path(),
self.message)
send_mail(subject, msg, recipient_list=(settings.ABUSE_EMAIL,))
@classmethod
def recent_high_abuse_reports(cls, threshold, period, addon_id=None):
"""
Returns AbuseReport objects for the given threshold over the given time
period (in days). Filters by addon_id if provided.
E.g. Greater than 5 abuse reports for all webapps in the past 7 days.
"""
abuse_sql = ['''
SELECT `abuse_reports`.*,
COUNT(`abuse_reports`.`addon_id`) AS `num_reports`
FROM `abuse_reports`
INNER JOIN `addons` ON (`abuse_reports`.`addon_id` = `addons`.`id`)
WHERE `abuse_reports`.`created` >= %s ''']
params = [period]
if addon_id:
abuse_sql.append('AND `addons`.`id` = %s ')
params.append(addon_id)
abuse_sql.append('GROUP BY addon_id HAVING num_reports > %s')
params.append(threshold)
return list(cls.objects.raw(''.join(abuse_sql), params))
def send_abuse_report(request, obj, message):
report = AbuseReport(ip_address=request.META.get('REMOTE_ADDR'),
message=message)
if request.user.is_authenticated():
report.reporter = request.user
if isinstance(obj, Webapp):
report.addon = obj
elif isinstance(obj, UserProfile):
report.user = obj
elif isinstance(obj, Website):
report.website = obj
report.save()
report.send()
# Trigger addon high abuse report detection task.
if isinstance(obj, Webapp):
from mkt.webapps.tasks import find_abuse_escalations
find_abuse_escalations.delay(obj.id)
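# Usage sketch (the webapp instance and message are illustrative):
#   send_abuse_report(request, webapp, u'This listing looks like spam.')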
|
eviljeff/zamboni
|
mkt/abuse/models.py
|
Python
|
bsd-3-clause
| 3,467 | 0 |
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/linters/css_linter.py."""
from __future__ import annotations
import os
import subprocess
from core.tests import test_utils
from scripts import scripts_test_utils
from . import css_linter
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
CONFIG_PATH = os.path.join(
PARENT_DIR, 'oppia', 'core', 'templates', 'css', '.stylelintrc')
LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
VALID_CSS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.css')
INVALID_CSS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'invalid.css')
class ThirdPartyCSSLintChecksManagerTests(test_utils.LinterTestBase):
"""Tests for ThirdPartyCSSLintChecksManager class."""
def test_all_filepaths_with_success(self):
filepaths = [VALID_CSS_FILEPATH, INVALID_CSS_FILEPATH]
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, filepaths)
returned_filepaths = third_party_linter.all_filepaths
self.assertEqual(returned_filepaths, filepaths)
def test_perform_all_lint_checks_with_invalid_file(self):
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, [INVALID_CSS_FILEPATH])
lint_task_report = third_party_linter.lint_css_files()
self.assert_same_list_elements([
'19:16',
'Unexpected whitespace before \":\" declaration-colon-space-'
'before'], lint_task_report.get_report())
self.assertEqual('Stylelint', lint_task_report.name)
self.assertTrue(lint_task_report.failed)
def test_perform_all_lint_checks_with_invalid_stylelint_path(self):
def mock_join(*unused_args):
return 'node_modules/stylelint/bin/stylelinter.js'
join_swap = self.swap(os.path, 'join', mock_join)
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, [INVALID_CSS_FILEPATH])
with self.print_swap, join_swap, self.assertRaisesRegexp(
Exception,
'ERROR Please run start.sh first to install node-eslint or '
'node-stylelint and its dependencies.'):
third_party_linter.perform_all_lint_checks()
def test_perform_all_lint_checks_with_stderr(self):
def mock_popen(unused_commands, stdout, stderr): # pylint: disable=unused-argument
return scripts_test_utils.PopenStub(stdout=b'True', stderr=b'True')
popen_swap = self.swap_with_checks(subprocess, 'Popen', mock_popen)
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, [VALID_CSS_FILEPATH])
with self.print_swap, popen_swap, self.assertRaisesRegexp(
Exception, 'True'
):
third_party_linter.perform_all_lint_checks()
def test_perform_all_lint_checks_with_no_files(self):
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, [])
lint_task_report = third_party_linter.perform_all_lint_checks()
self.assertEqual(
'There are no HTML or CSS files to lint.',
lint_task_report[0].get_report()[0])
self.assertEqual('CSS lint', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_perform_all_lint_checks_with_valid_file(self):
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, [VALID_CSS_FILEPATH])
lint_task_report = third_party_linter.perform_all_lint_checks()
self.assertTrue(isinstance(lint_task_report, list))
def test_get_linters(self):
custom_linter, third_party_linter = css_linter.get_linters(
CONFIG_PATH, [VALID_CSS_FILEPATH, INVALID_CSS_FILEPATH])
self.assertEqual(custom_linter, None)
self.assertTrue(
isinstance(
third_party_linter, css_linter.ThirdPartyCSSLintChecksManager))
|
kevinlee12/oppia
|
scripts/linters/css_linter_test.py
|
Python
|
apache-2.0
| 4,592 | 0.000653 |
"""Compute the archive start time of a HADS/DCP/COOP network"""
import sys
import datetime
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn, logger
LOG = logger()
THISYEAR = datetime.datetime.now().year
HADSDB = get_dbconn("hads")
MESOSITEDB = get_dbconn("mesosite")
def get_minvalid(sid):
""" "Do sid"""
cursor = HADSDB.cursor()
for yr in range(2002, THISYEAR + 1):
cursor.execute(
f"SELECT min(valid) from raw{yr} WHERE station = %s", (sid,)
)
minv = cursor.fetchone()[0]
if minv is not None:
return minv
def do_network(network):
"""Do network"""
nt = NetworkTable(network)
for sid in nt.sts:
sts = get_minvalid(sid)
if sts is None:
continue
if (
nt.sts[sid]["archive_begin"] is None
or nt.sts[sid]["archive_begin"] != sts
):
osts = nt.sts[sid]["archive_begin"]
fmt = "%Y-%m-%d %H:%M"
LOG.info(
"%s [%s] new sts: %s OLD sts: %s",
sid,
network,
sts.strftime(fmt),
osts.strftime(fmt) if osts is not None else "null",
)
cursor = MESOSITEDB.cursor()
cursor.execute(
"UPDATE stations SET archive_begin = %s WHERE id = %s and "
"network = %s",
(sts, sid, network),
)
cursor.close()
MESOSITEDB.commit()
def main(argv):
"""Go main Go"""
if len(argv) == 1:
# If we run without args, we pick a "random" network!
cursor = MESOSITEDB.cursor()
cursor.execute(
"SELECT id from networks where id ~* 'DCP' or id ~* 'COOP' "
"ORDER by id ASC"
)
networks = []
for row in cursor:
networks.append(row[0])
jday = int(datetime.date.today().strftime("%j"))
network = networks[jday % len(networks)]
LOG.info("auto-picked %s", network)
else:
network = argv[1]
do_network(network)
if __name__ == "__main__":
main(sys.argv)
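# Invocation sketch (the network name is an example):
#   python compute_hads_sts.py IA_DCP
# With no argument, a network is picked automatically based on the day of year.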
|
akrherz/iem
|
scripts/dbutil/compute_hads_sts.py
|
Python
|
mit
| 2,162 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-11-08 12:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cashier', '0014_fix_transaction'),
]
operations = [
migrations.AlterField(
model_name='transaction',
name='typeOfTransaction',
field=models.CharField(choices=[('debt', 'Gæld'), ('expense', 'Udlæg')], default='debt', max_length=7, verbose_name='Type'),
),
]
|
Rotendahl/DormitoryLife
|
cashier/migrations/0015_auto_20171108_1351.py
|
Python
|
gpl-3.0
| 552 | 0.001818 |
from __future__ import absolute_import, unicode_literals
from mopidy import listener
class CoreListener(listener.Listener):
"""
Marker interface for recipients of events sent by the core actor.
Any Pykka actor that mixes in this class will receive calls to the methods
defined here when the corresponding events happen in the core actor. This
interface is used both for looking up what actors to notify of the events,
and for providing default implementations for those listeners that are not
interested in all events.
"""
@staticmethod
def send(event, **kwargs):
"""Helper to allow calling of core listener events"""
listener.send(CoreListener, event, **kwargs)
def on_event(self, event, **kwargs):
"""
Called on all events.
*MAY* be implemented by actor. By default, this method forwards the
event to the specific event methods.
:param event: the event name
:type event: string
:param kwargs: any other arguments to the specific event handlers
"""
# Just delegate to parent, entry mostly for docs.
super(CoreListener, self).on_event(event, **kwargs)
def track_playback_paused(self, tl_track, time_position):
"""
Called whenever track playback is paused.
*MAY* be implemented by actor.
:param tl_track: the track that was playing when playback paused
:type tl_track: :class:`mopidy.models.TlTrack`
:param time_position: the time position in milliseconds
:type time_position: int
"""
pass
def track_playback_resumed(self, tl_track, time_position):
"""
Called whenever track playback is resumed.
*MAY* be implemented by actor.
:param tl_track: the track that was playing when playback resumed
:type tl_track: :class:`mopidy.models.TlTrack`
:param time_position: the time position in milliseconds
:type time_position: int
"""
pass
def track_playback_started(self, tl_track):
"""
Called whenever a new track starts playing.
*MAY* be implemented by actor.
:param tl_track: the track that just started playing
:type tl_track: :class:`mopidy.models.TlTrack`
"""
pass
def track_playback_ended(self, tl_track, time_position):
"""
Called whenever playback of a track ends.
*MAY* be implemented by actor.
:param tl_track: the track that was played before playback stopped
:type tl_track: :class:`mopidy.models.TlTrack`
:param time_position: the time position in milliseconds
:type time_position: int
"""
pass
def playback_state_changed(self, old_state, new_state):
"""
Called whenever playback state is changed.
*MAY* be implemented by actor.
:param old_state: the state before the change
:type old_state: string from :class:`mopidy.core.PlaybackState` field
:param new_state: the state after the change
:type new_state: string from :class:`mopidy.core.PlaybackState` field
"""
pass
def tracklist_changed(self):
"""
Called whenever the tracklist is changed.
*MAY* be implemented by actor.
"""
pass
def playlists_loaded(self):
"""
Called when playlists are loaded or refreshed.
*MAY* be implemented by actor.
"""
pass
def playlist_changed(self, playlist):
"""
Called whenever a playlist is changed.
*MAY* be implemented by actor.
:param playlist: the changed playlist
:type playlist: :class:`mopidy.models.Playlist`
"""
pass
def playlist_deleted(self, uri):
"""
Called whenever a playlist is deleted.
*MAY* be implemented by actor.
:param uri: the URI of the deleted playlist
:type uri: string
"""
pass
def options_changed(self):
"""
Called whenever an option is changed.
*MAY* be implemented by actor.
"""
pass
def volume_changed(self, volume):
"""
Called whenever the volume is changed.
*MAY* be implemented by actor.
:param volume: the new volume in the range [0..100]
:type volume: int
"""
pass
def mute_changed(self, mute):
"""
Called whenever the mute state is changed.
*MAY* be implemented by actor.
:param mute: the new mute state
:type mute: boolean
"""
pass
def seeked(self, time_position):
"""
        Called whenever the time position changes by an unexpected amount,
        e.g. when seeking to a new time position.
*MAY* be implemented by actor.
:param time_position: the position that was seeked to in milliseconds
:type time_position: int
"""
pass
def stream_title_changed(self, title):
"""
Called whenever the currently playing stream title changes.
*MAY* be implemented by actor.
:param title: the new stream title
:type title: string
"""
pass
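# Consumer sketch (a minimal example, assuming a Pykka actor mixes in the
# CoreListener class above as described in its docstring; the printed field is
# illustrative):
#   import pykka
#   class ConsoleFrontend(pykka.ThreadingActor, CoreListener):
#       def track_playback_started(self, tl_track):
#           print('Now playing: %s' % tl_track.track.name)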
|
mokieyue/mopidy
|
mopidy/core/listener.py
|
Python
|
apache-2.0
| 5,283 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('askapp', '0012_auto_20170203_1436'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='the_answer',
new_name='is_answer',
),
migrations.AddField(
model_name='post',
name='accepted',
field=models.DateTimeField(null=True),
),
]
|
BanzaiTokyo/akihabara-tokyo
|
askapp/migrations/0013_auto_20170206_0748.py
|
Python
|
apache-2.0
| 542 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Proxy AMI-related calls from the cloud controller, to the running
objectstore daemon.
"""
import json
import random
import urllib
from nova import vendor
import boto
import boto.s3
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
def modify(context, image_id, operation):
conn(context).make_request(
method='POST',
bucket='_images',
query_args=qs({'image_id': image_id, 'operation': operation}))
return True
def register(context, image_location):
""" rpc call to register a new image based from a manifest """
image_id = utils.generate_uid('ami')
conn(context).make_request(
method='PUT',
bucket='_images',
query_args=qs({'image_location': image_location,
'image_id': image_id}))
return image_id
def list(context, filter_list=None):
""" return a list of all images that a user can see
optionally filtered by a list of image_id """
# FIXME: send along the list of only_images to check for
response = conn(context).make_request(
method='GET',
bucket='_images')
result = json.loads(response.read())
    if filter_list is not None:
return [i for i in result if i['imageId'] in filter_list]
return result
def deregister(context, image_id):
""" unregister an image """
conn(context).make_request(
method='DELETE',
bucket='_images',
query_args=qs({'image_id': image_id}))
def conn(context):
    return boto.s3.connection.S3Connection(
aws_access_key_id='%s:%s' % (context.user.access, context.project.name),
aws_secret_access_key=context.user.secret,
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
port=FLAGS.s3_port,
host=FLAGS.s3_host)
def qs(params):
pairs = []
for key in params.keys():
pairs.append(key + '=' + urllib.quote(params[key]))
return '&'.join(pairs)
|
joshuamckenty/yolo-octo-wookie
|
nova/endpoint/images.py
|
Python
|
apache-2.0
| 2,782 | 0.002516 |
from sqlalchemy.schema import (
Table,
Column,
MetaData,
ForeignKey)
from sqlalchemy.types import (
Text,
JSON,
DateTime,
Integer,
String)
from collections import defaultdict
from uuid import uuid4
import datetime
class SchemaStore:
def __init__(self):
self.metadata = defaultdict(MetaData)
self.tables = defaultdict(list)
def _import_schema(self, schema_name):
def fkey(target):
return ForeignKey(schema_name + '.' + target)
def make_uuid():
return str(uuid4())
metadata = self.metadata[schema_name]
tables = self.tables[schema_name]
tables.append(Table(
"ealgis_metadata", metadata,
Column('id', Integer, primary_key=True),
Column('name', String(256), nullable=False),
Column('family', String(256), nullable=True),
Column('uuid', String(36), nullable=False, default=make_uuid),
Column('description', Text(), nullable=False),
Column('date_created', DateTime(timezone=True), default=datetime.datetime.utcnow, nullable=False),
Column('date_published', DateTime(timezone=True), nullable=False),
schema=schema_name))
tables.append(Table(
"dependencies", metadata,
Column('id', Integer, primary_key=True),
Column('name', String(256), nullable=False),
Column('uuid', String(36), nullable=False),
schema=schema_name))
tables.append(Table(
"table_info", metadata,
Column('id', Integer, primary_key=True),
Column('name', String(256)),
Column('metadata_json', JSON()),
schema=schema_name))
tables.append(Table(
"column_info", metadata,
Column('id', Integer, primary_key=True),
Column('table_info_id', Integer, fkey('table_info.id'), nullable=False),
Column('name', String(256)),
Column('schema_name', String(256)),
Column('metadata_json', JSON()),
schema=schema_name))
tables.append(Table(
"geometry_source", metadata,
Column('id', Integer, primary_key=True),
Column('table_info_id', Integer, fkey('table_info.id'), nullable=False),
Column('gid_column', String(256)),
Column('geometry_type', String(256)),
schema=schema_name))
tables.append(Table(
"geometry_source_projection", metadata,
Column('id', Integer, primary_key=True),
            Column('geometry_source_id', Integer, fkey('geometry_source.id'), nullable=False),
Column('geometry_column', String(256)),
Column('srid', Integer),
schema=schema_name))
tables.append(Table(
"geometry_linkage", metadata,
Column('id', Integer, primary_key=True),
# in the source schema: may not be the same schema as this Table instance
Column('geometry_source_schema_name', String, nullable=False),
Column('geometry_source_id', Integer, nullable=False),
# these must be in this schema
Column('attr_table_id', Integer, fkey('table_info.id'), nullable=False),
Column('attr_column', String(256)),
schema=schema_name))
tables.append(Table(
"mailbox", metadata,
Column('id', Integer, primary_key=True),
Column('from', String(256)),
Column('to', String(256)),
Column('message', JSON()),
schema=schema_name))
def load_schema(self, schema_name):
if schema_name not in self.metadata:
self._import_schema(schema_name)
return self.metadata[schema_name], self.tables[schema_name]
store = SchemaStore()
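# Usage sketch (the schema name is an example):
#   metadata, tables = store.load_schema('aus_census_2011')
# A second call for the same schema name returns the cached MetaData and tables.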
|
grahame/ealgis
|
django/ealgis/dataschema/schema_v1.py
|
Python
|
gpl-3.0
| 3,861 | 0.001554 |
"""
* The SDK website: https://slack.dev/python-slack-sdk/
* PyPI package: https://pypi.org/project/slack-sdk/
Here is the list of key modules in this SDK:
#### Web API Client
* Web API client: `slack_sdk.web.client`
* asyncio-based Web API client: `slack_sdk.web.async_client`
#### Webhook / response_url Client
* Webhook client: `slack_sdk.webhook.client`
* asyncio-based Webhook client: `slack_sdk.webhook.async_client`
#### Socket Mode Client
* The built-in Socket Mode client: `slack_sdk.socket_mode.builtin.client`
* [aiohttp](https://pypi.org/project/aiohttp/) based client: `slack_sdk.socket_mode.aiohttp`
* [websocket_client](https://pypi.org/project/websocket-client/) based client: `slack_sdk.socket_mode.websocket_client`
* [websockets](https://pypi.org/project/websockets/) based client: `slack_sdk.socket_mode.websockets`
#### OAuth
* `slack_sdk.oauth.installation_store.installation_store`
* `slack_sdk.oauth.state_store`
#### Audit Logs API Client
* `slack_sdk.audit_logs.v1.client`
* `slack_sdk.audit_logs.v1.async_client`
#### SCIM API Client
* `slack_sdk.scim.v1.client`
* `slack_sdk.scim.v1.async_client`
"""
import logging
from logging import NullHandler
# from .rtm import RTMClient # noqa
from .web import WebClient # noqa
from .webhook import WebhookClient # noqa
# Set default logging handler to avoid "No handler found" warnings.
logging.getLogger(__name__).addHandler(NullHandler())
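# Minimal Web API usage sketch (the token and channel values are placeholders):
#   from slack_sdk import WebClient
#   client = WebClient(token="xoxb-...")
#   client.chat_postMessage(channel="#random", text="Hello from slack_sdk")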
|
slackapi/python-slackclient
|
slack_sdk/__init__.py
|
Python
|
mit
| 1,429 | 0.002099 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-05 09:13
from __future__ import unicode_literals
import uuid
from django.core.exceptions import FieldDoesNotExist
from django.db import migrations
def set_uuid_field(apps, schema_editor):
"""
Set a random uuid value to all existing rows in all models containing an 'uuid' attribute in database.
"""
base = apps.get_app_config('base')
for model_class in base.get_models():
ids = model_class.objects.values_list('id', flat=True)
if ids:
for pk in ids:
try:
model_class.objects.filter(pk=pk).update(uuid=uuid.uuid4())
except FieldDoesNotExist:
break
class Migration(migrations.Migration):
dependencies = [
('base', '0062_add_uuid_field'),
]
operations = [
migrations.RunPython(set_uuid_field, elidable=True, reverse_code=migrations.RunPython.noop),
]
|
uclouvain/osis
|
base/migrations/0063_populate_uuid_values.py
|
Python
|
agpl-3.0
| 969 | 0.002064 |
# -*- coding: utf-8 -*-
import os, sys, time, multiprocessing, re
from .processes import ForkedProcess
from .remoteproxy import ClosedError
from ..python2_3 import basestring, xrange
class CanceledError(Exception):
"""Raised when the progress dialog is canceled during a processing operation."""
pass
class Parallelize(object):
"""
Class for ultra-simple inline parallelization on multi-core CPUs
Example::
## Here is the serial (single-process) task:
tasks = [1, 2, 4, 8]
results = []
for task in tasks:
result = processTask(task)
results.append(result)
print(results)
## Here is the parallelized version:
tasks = [1, 2, 4, 8]
results = []
with Parallelize(tasks, workers=4, results=results) as tasker:
for task in tasker:
result = processTask(task)
tasker.results.append(result)
print(results)
The only major caveat is that *result* in the example above must be picklable,
since it is automatically sent via pipe back to the parent process.
"""
def __init__(self, tasks=None, workers=None, block=True, progressDialog=None, randomReseed=True, **kwds):
"""
=============== ===================================================================
**Arguments:**
tasks list of objects to be processed (Parallelize will determine how to
distribute the tasks). If unspecified, then each worker will receive
a single task with a unique id number.
workers number of worker processes or None to use number of CPUs in the
system
progressDialog optional dict of arguments for ProgressDialog
to update while tasks are processed
randomReseed If True, each forked process will reseed its random number generator
to ensure independent results. Works with the built-in random
and numpy.random.
kwds objects to be shared by proxy with child processes (they will
appear as attributes of the tasker)
=============== ===================================================================
"""
## Generate progress dialog.
## Note that we want to avoid letting forked child processes play with progress dialogs..
self.showProgress = False
if progressDialog is not None:
self.showProgress = True
if isinstance(progressDialog, basestring):
progressDialog = {'labelText': progressDialog}
from ..widgets.ProgressDialog import ProgressDialog
self.progressDlg = ProgressDialog(**progressDialog)
if workers is None:
workers = self.suggestedWorkerCount()
if not hasattr(os, 'fork'):
workers = 1
self.workers = workers
if tasks is None:
tasks = range(workers)
self.tasks = list(tasks)
self.reseed = randomReseed
self.kwds = kwds.copy()
self.kwds['_taskStarted'] = self._taskStarted
def __enter__(self):
self.proc = None
if self.workers == 1:
return self.runSerial()
else:
return self.runParallel()
def __exit__(self, *exc_info):
if self.proc is not None: ## worker
exceptOccurred = exc_info[0] is not None ## hit an exception during processing.
try:
if exceptOccurred:
sys.excepthook(*exc_info)
finally:
#print os.getpid(), 'exit'
os._exit(1 if exceptOccurred else 0)
else: ## parent
if self.showProgress:
try:
self.progressDlg.__exit__(None, None, None)
except Exception:
pass
def runSerial(self):
if self.showProgress:
self.progressDlg.__enter__()
self.progressDlg.setMaximum(len(self.tasks))
self.progress = {os.getpid(): []}
return Tasker(self, None, self.tasks, self.kwds)
def runParallel(self):
self.childs = []
## break up tasks into one set per worker
workers = self.workers
chunks = [[] for i in xrange(workers)]
i = 0
for i in range(len(self.tasks)):
chunks[i%workers].append(self.tasks[i])
## fork and assign tasks to each worker
for i in range(workers):
proc = ForkedProcess(target=None, preProxy=self.kwds, randomReseed=self.reseed)
if not proc.isParent:
self.proc = proc
return Tasker(self, proc, chunks[i], proc.forkedProxies)
else:
self.childs.append(proc)
## Keep track of the progress of each worker independently.
self.progress = dict([(ch.childPid, []) for ch in self.childs])
## for each child process, self.progress[pid] is a list
## of task indexes. The last index is the task currently being
## processed; all others are finished.
try:
if self.showProgress:
self.progressDlg.__enter__()
self.progressDlg.setMaximum(len(self.tasks))
## process events from workers until all have exited.
activeChilds = self.childs[:]
self.exitCodes = []
pollInterval = 0.01
while len(activeChilds) > 0:
waitingChildren = 0
rem = []
for ch in activeChilds:
try:
n = ch.processRequests()
if n > 0:
waitingChildren += 1
except ClosedError:
#print ch.childPid, 'process finished'
rem.append(ch)
if self.showProgress:
self.progressDlg += 1
#print "remove:", [ch.childPid for ch in rem]
for ch in rem:
activeChilds.remove(ch)
while True:
try:
pid, exitcode = os.waitpid(ch.childPid, 0)
self.exitCodes.append(exitcode)
break
except OSError as ex:
if ex.errno == 4: ## If we get this error, just try again
continue
#print "Ignored system call interruption"
else:
raise
#print [ch.childPid for ch in activeChilds]
if self.showProgress and self.progressDlg.wasCanceled():
for ch in activeChilds:
ch.kill()
raise CanceledError()
## adjust polling interval--prefer to get exactly 1 event per poll cycle.
if waitingChildren > 1:
pollInterval *= 0.7
elif waitingChildren == 0:
pollInterval /= 0.7
pollInterval = max(min(pollInterval, 0.5), 0.0005) ## but keep it within reasonable limits
time.sleep(pollInterval)
finally:
if self.showProgress:
self.progressDlg.__exit__(None, None, None)
for ch in self.childs:
ch.join()
if len(self.exitCodes) < len(self.childs):
raise Exception("Parallelizer started %d processes but only received exit codes from %d." % (len(self.childs), len(self.exitCodes)))
for code in self.exitCodes:
if code != 0:
raise Exception("Error occurred in parallel-executed subprocess (console output may have more information).")
return [] ## no tasks for parent process.
@staticmethod
def suggestedWorkerCount():
if 'linux' in sys.platform:
## I think we can do a little better here..
## cpu_count does not consider that there is little extra benefit to using hyperthreaded cores.
try:
cores = {}
pid = None
with open('/proc/cpuinfo') as fd:
for line in fd:
m = re.match(r'physical id\s+:\s+(\d+)', line)
if m is not None:
pid = m.groups()[0]
m = re.match(r'cpu cores\s+:\s+(\d+)', line)
if m is not None:
cores[pid] = int(m.groups()[0])
return sum(cores.values())
except:
return multiprocessing.cpu_count()
else:
return multiprocessing.cpu_count()
def _taskStarted(self, pid, i, **kwds):
## called remotely by tasker to indicate it has started working on task i
#print pid, 'reported starting task', i
if self.showProgress:
if len(self.progress[pid]) > 0:
self.progressDlg += 1
if pid == os.getpid(): ## single-worker process
if self.progressDlg.wasCanceled():
raise CanceledError()
self.progress[pid].append(i)
class Tasker(object):
def __init__(self, parallelizer, process, tasks, kwds):
self.proc = process
self.par = parallelizer
self.tasks = tasks
for k, v in kwds.items():
setattr(self, k, v)
def __iter__(self):
## we could fix this up such that tasks are retrieved from the parent process one at a time..
for i, task in enumerate(self.tasks):
self.index = i
#print os.getpid(), 'starting task', i
self._taskStarted(os.getpid(), i, _callSync='off')
yield task
if self.proc is not None:
#print os.getpid(), 'no more tasks'
self.proc.close()
def process(self):
"""
Process requests from parent.
Usually it is not necessary to call this unless you would like to
receive messages (such as exit requests) during an iteration.
"""
if self.proc is not None:
self.proc.processRequests()
def numWorkers(self):
"""
Return the number of parallel workers
"""
return self.par.workers
#class Parallelizer:
#"""
#Use::
#p = Parallelizer()
#with p(4) as i:
#p.finish(do_work(i))
#print p.results()
#"""
#def __init__(self):
#pass
#def __call__(self, n):
#self.replies = []
#self.conn = None ## indicates this is the parent process
#return Session(self, n)
#def finish(self, data):
#if self.conn is None:
#self.replies.append((self.i, data))
#else:
##print "send", self.i, data
#self.conn.send((self.i, data))
#os._exit(0)
#def result(self):
#print self.replies
#class Session:
#def __init__(self, par, n):
#self.par = par
#self.n = n
#def __enter__(self):
#self.childs = []
#for i in range(1, self.n):
#c1, c2 = multiprocessing.Pipe()
#pid = os.fork()
#if pid == 0: ## child
#self.par.i = i
#self.par.conn = c2
#self.childs = None
#c1.close()
#return i
#else:
#self.childs.append(c1)
#c2.close()
#self.par.i = 0
#return 0
#def __exit__(self, *exc_info):
#if exc_info[0] is not None:
#sys.excepthook(*exc_info)
#if self.childs is not None:
#self.par.replies.extend([conn.recv() for conn in self.childs])
#else:
#self.par.finish(None)
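# --- Added illustrative sketch (not part of the original module) ---
# A minimal sketch of the shared-kwds feature described in Parallelize.__init__
# above: extra keyword objects (here `results`) become attributes of the tasker
# and are proxied back to the parent process. The task values, worker count and
# progress label are arbitrary assumptions; per __init__, a string progressDialog
# argument is converted into ProgressDialog(labelText=...).
def _example_square_tasks():  # hypothetical helper, for illustration only
    tasks = [1, 2, 4, 8]
    results = []
    with Parallelize(tasks, workers=2, results=results,
                     progressDialog="Squaring..") as tasker:
        for task in tasker:
            tasker.results.append((task, task ** 2))
    return results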
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/pyqtgraph/multiprocess/parallelizer.py
|
Python
|
gpl-3.0
| 12,494 | 0.014647 |
# coding: utf-8
# pylint: disable=missing-docstring, invalid-name
import flask
import auth
import config
import model
from main import app
yahoo_config = dict(
access_token_url='https://api.login.yahoo.com/oauth/v2/get_token',
authorize_url='https://api.login.yahoo.com/oauth/v2/request_auth',
base_url='https://query.yahooapis.com/',
consumer_key=config.CONFIG_DB.auth_yahoo_id,
consumer_secret=config.CONFIG_DB.auth_yahoo_secret,
request_token_url='https://api.login.yahoo.com/oauth/v2/get_request_token',
)
yahoo = auth.create_oauth_app(yahoo_config, 'yahoo')
@app.route('/_s/callback/yahoo/oauth-authorized/')
def yahoo_authorized():
response = yahoo.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(flask.url_for('index'))
flask.session['oauth_token'] = (
response['oauth_token'],
response['oauth_token_secret'],
)
fields = 'guid, emails, familyName, givenName, nickname'
me = yahoo.get(
'/v1/yql',
data={
'format': 'json',
'q': 'select %s from social.profile where guid = me;' % fields,
'realm': 'yahooapis.com',
},
)
user_db = retrieve_user_from_yahoo(me.data['query']['results']['profile'])
return auth.signin_via_social(user_db)
@yahoo.tokengetter
def get_yahoo_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/yahoo/')
def signin_yahoo():
return auth.signin_oauth(yahoo)
def retrieve_user_from_yahoo(response):
auth_id = 'yahoo_%s' % response['guid']
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
names = [response.get('givenName', ''), response.get('familyName', '')]
emails = response.get('emails', {})
if not isinstance(emails, list):
emails = [emails]
emails = [e for e in emails if 'handle' in e]
emails.sort(key=lambda e: e.get('primary', False))
email = emails[0]['handle'] if emails else ''
return auth.create_or_get_user_db(
auth_id=auth_id,
name=' '.join(names).strip() or response['nickname'],
username=response['nickname'],
email=email,
verified=True,
)
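# --- Added illustrative note (not part of the original module) ---
# Assumed shape of the profile dict consumed by retrieve_user_from_yahoo,
# inferred only from the keys accessed above; all values are made up:
# {
#     'guid': 'ABCDEF',
#     'givenName': 'Ada', 'familyName': 'Lovelace', 'nickname': 'ada',
#     'emails': [{'handle': 'ada@example.com', 'primary': True}],
# }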
|
sidharta/hansel-app
|
main/auth/yahoo.py
|
Python
|
mit
| 2,275 | 0 |
import os
import time
import pandas as pd
import pytest
import requests
from botocore.session import Session
from great_expectations.datasource.batch_kwargs_generator import (
S3SubdirReaderBatchKwargsGenerator,
)
port = 5555
url_host = os.getenv("GE_TEST_LOCALHOST_URL", "127.0.0.1")
endpoint_uri = f"http://{url_host}:%s/" % port
os.environ["AWS_ACCESS_KEY_ID"] = "dummy_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "dummy_secret"
@pytest.fixture(scope="module")
def s3_base():
# writable local S3 system
import shlex
import subprocess
proc = subprocess.Popen(shlex.split("moto_server s3 -p %s" % port))
timeout = 5
while timeout > 0:
try:
r = requests.get(endpoint_uri)
if r.ok:
break
except:
pass
timeout -= 0.1
time.sleep(0.1)
yield
proc.terminate()
proc.wait()
@pytest.fixture(scope="module")
def mock_s3_bucket(s3_base):
bucket = "test_bucket"
session = Session()
client = session.create_client("s3", endpoint_url=endpoint_uri)
client.create_bucket(Bucket=bucket, ACL="public-read")
df = pd.DataFrame({"c1": [1, 2, 3], "c2": ["a", "b", "c"]})
keys = [
"data/for/you.csv",
"data/for/me.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=df.to_csv(index=None).encode("utf-8"), Key=key
)
yield bucket
@pytest.fixture
def s3_subdir_generator(mock_s3_bucket, basic_sparkdf_datasource):
# We configure a generator that will fetch from (mocked) my_bucket
# and will use glob patterns to match returned assets into batches of the same asset
try:
generator = S3SubdirReaderBatchKwargsGenerator(
"my_generator",
datasource=basic_sparkdf_datasource,
boto3_options={"endpoint_url": endpoint_uri},
base_directory="test_bucket/data/for",
reader_options={"sep": ","},
)
yield generator
except ImportError as e:
pytest.skip(str(e))
@pytest.fixture
def s3_subdir_generator_with_partition(mock_s3_bucket, basic_sparkdf_datasource):
# We configure a generator that will fetch from (mocked) my_bucket
# and will use glob patterns to match returned assets into batches of the same asset
try:
generator = S3SubdirReaderBatchKwargsGenerator(
"my_generator",
datasource=basic_sparkdf_datasource,
boto3_options={"endpoint_url": endpoint_uri},
base_directory="test_bucket/data/",
reader_options={"sep": ","},
)
yield generator
except ImportError as e:
pytest.skip(str(e))
def test_s3_subdir_generator_basic_operation(s3_subdir_generator):
# S3 Generator sees *only* configured assets
assets = s3_subdir_generator.get_available_data_asset_names()
print(assets)
assert set(assets["names"]) == {
("you", "file"),
("me", "file"),
}
def test_s3_subdir_generator_reader_options_configuration(s3_subdir_generator):
batch_kwargs_list = [
kwargs
for kwargs in s3_subdir_generator.get_iterator(data_asset_name="you", limit=10)
]
print(batch_kwargs_list)
assert batch_kwargs_list[0]["reader_options"] == {"sep": ","}
def test_s3_subdir_generator_build_batch_kwargs_no_partition_id(s3_subdir_generator):
batch_kwargs = s3_subdir_generator.build_batch_kwargs("you")
assert batch_kwargs["s3"] in [
"s3a://test_bucket/data/for/you.csv",
]
def test_s3_subdir_generator_build_batch_kwargs_partition_id(
s3_subdir_generator_with_partition, basic_sparkdf_datasource
):
batch_kwargs = s3_subdir_generator_with_partition.build_batch_kwargs("for", "you")
assert batch_kwargs["s3"] == "s3a://test_bucket/data/for/you.csv"
|
great-expectations/great_expectations
|
tests/datasource/batch_kwarg_generator/test_s3_subdir_reader_generator.py
|
Python
|
apache-2.0
| 3,834 | 0.001826 |
import os
import logging
import re
from typing import Final
logger: Final = logging.getLogger("Npo.Utils")
pattern: Final = re.compile('[a-z0-9]{2,}', re.IGNORECASE)
def looks_like_form(form: str):
"""
Checks if the given string looks like a form. E.g. it represents json, xml, a file, or 'stdin'.
    Otherwise it can e.g. be interpreted as the text to search for.
"""
if form.startswith("{") or form.startswith("<"):
        logger.debug("Detected a string that looks like either json or xml")
return True
if os.path.isfile(form):
logger.debug("Detected existing file %s" % form)
return True
if form.endswith(".json") or form.endswith(".xml"):
logger.warning("Form %s looks like a file name, but it is not a file." % form)
return True
if form == "-":
logger.debug("Detected explicit stdin")
return True
if not pattern.match(form):
logger.warning("Form does not look like a credible text search. It doesn't look like a file either though")
return False
return False
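# --- Added illustrative sketch (not part of the original module) ---
# Example calls following the heuristics implemented above; "missing.xml" is an
# assumed non-existent file name used only for illustration.
def _example_looks_like_form():  # hypothetical helper, for illustration only
    assert looks_like_form('{"title": "test"}') is True   # looks like json
    assert looks_like_form("-") is True                    # explicit stdin
    assert looks_like_form("missing.xml") is True          # file-like name (warns)
    assert looks_like_form("some search text") is False    # plain search text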
|
npo-poms/pyapi
|
npoapi/utils.py
|
Python
|
gpl-3.0
| 1,076 | 0.003717 |
# -*- coding: utf-8 -*-
import pairing_heap as pheap
from copy import deepcopy,copy
import threading
import Queue
import requests
from requests.auth import HTTPDigestAuth
import json
import sys
import communication
import config
import time
import L_sprit
# Declare global variables
LIMIT_SELECTION = 0
SELECTON_RATE = 0
EXCHANGE_RATE = 0
MODE_CHANGE_THRESHOLD = 0.50
ALL_COST = 0
columns = 0
rows = 0
mode_flag = "N"
fwd_ahead = []
back_ahead = []
thresh = MODE_CHANGE_THRESHOLD
class Node :
def __init__ (self, board, selection,exchange,distance):
self.board = board
self.selection = selection
self.exchange = exchange
self.mydistance = distance
    def get_next_nodes(self): # return the nodes adjacent to the given node
nodes_dic = {}
board = self.board
        for i in range(len(board)): # add nodes whose selected cell was changed to the queue.
for j in range(len(board[0])):
x,y = (i,j)
                # exchange with the cell to the right
nodes_dic[((i,j),"R")] = Node(exchange(board,(x, y), (x + 1, y)) , (x + 1, y),(x,y),0)
                # exchange with the cell to the left
if x == 0:
                    # there is no move to the left
nodes_dic[((i,j),"L")] = Node(None, (x - 1, y), (x,y),0)
else:
                    # reuse R from the selection one cell to the left
#nodes_dic[((i,j),"L")] = Node(exchange(board,(x, y), (x - 1, y)) , (x - 1, y))
nodes_dic[((i,j),"L")] = Node(nodes_dic[((i - 1, j), "R")].board, (x - 1, y), (x, y),0)
                # exchange with the cell above
if y == 0:
                    # there is no move upward
nodes_dic[((i,j),"U")] = Node(None, (x, y - 1), (x,y), 0)
else:
                    # reuse D from the selection one cell above
#nodes_dic[((i,j),"U")] = Node(exchange(board,(x, y), (x, y - 1)) , (x, y - 1))
nodes_dic[((i,j),"U")] = Node(nodes_dic[((i, j - 1), "D")].board, (x, y - 1), (x,y), 0)
                # exchange with the cell below
nodes_dic[((i,j),"D")] = Node(exchange(board,(x, y), (x, y + 1)) , (x, y + 1),(x,y),0)
return nodes_dic
def make_problem(w, h):
arr = []
for i in range(w):
column = []
for j in range(h):
column.append((i, j))
arr.append(column)
return arr
def transpose(arr2d): # return the transposed 2D array
result = []
for i in range(len(arr2d[0])):
arr = []
for j in range(len(arr2d)):
arr.append(arr2d[j][i])
result.append(arr)
return result
def operations_to_list(operations): # convert the operations pair structure back into a plain list
pair = operations
lst = []
while pair != ():
lst.append(pair[0])
pair = pair[1]
return lst
def exchange (then_board, start, destination): # return a board where start and destination of then_board have been exchanged
    # deep copy only the changed columns
x, y = start
new_x, new_y = destination
if not(0 <= new_x < len(then_board) and 0 <= new_y < len(then_board[0])):
return None
startImg = then_board[x][y]
destImg = then_board[new_x][new_y]
return [
then_board[x] if x != start[0] and x != destination[0]
else [destImg if (x, y) == start
else (startImg if (x, y) == destination else then_board[x][y])
for y in range(len(then_board[0]))]
for x in range(len(then_board))]
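    # NOTE (added comment): the statements below are unreachable - they are the
    # older copy-based implementation left in place after the list-comprehension
    # return above was introduced.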
board = copy(then_board)
board[x] = deepcopy(then_board[x])
if x != new_x:
board[new_x] = deepcopy(then_board[new_x])
destination_element = board[new_x][new_y]
board[new_x][new_y] = board[x][y]
board[x][y] = destination_element
return board
def create_distance_table(goal): # return a lookup table used for distance calculation
table = []
for i in range(len(goal)):
col = []
for j in range(len(goal[0])):
col.append(None)
table.append(col)
for i in range(len(goal)):
for j in range(len(goal[0])):
(goal_x, goal_y) = goal[i][j]
table[goal_x][goal_y] = (i, j)
return table
def distance_to_goal(table, board): # return the estimated distance from this board to the goal node. Arguments: (distance lookup table, board)
ans = 0
for i in range(len(board)):
for j in range(len(board[0])):
(board_x, board_y) = board[i][j]
a = table[board_x][board_y]
b = (i, j)
x = abs(a[0] - b[0])
y = abs(a[1] - b[1])
ans += x + y
return ans * EXCHANGE_RATE
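# --- Added illustrative note (not part of the original solver) ---
# Worked example of the heuristic above: on a solved board every piece already
# sits at its goal cell, so the summed Manhattan distance is 0; if two
# horizontally adjacent pieces are swapped, each one is 1 cell from its goal,
# giving a value of 2 * EXCHANGE_RATE.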
def point_md(point,board, table):
table_x, table_y = board[point[0]][point[1]]
a = table[table_x][table_y]
x = abs(a[0] - point[0])
y = abs(a[1] - point[1])
ans = x + y
return ans
def fast_distance_to_goal(looking_node,node, table):
parent_distance = looking_node.mydistance
parent_board = looking_node.board
selection = node.selection
exchange = node.exchange
child_board = node.board
exchange_distance = point_md(selection,parent_board, table) - point_md(exchange ,child_board, table)
selection_distance = point_md(exchange ,parent_board, table) - point_md(selection,child_board, table)
child_distance = parent_distance - (exchange_distance + selection_distance)
node.mydistance = child_distance
return child_distance * EXCHANGE_RATE
def tuplenode (node) : # return the node converted into tuple form
return (tuple([tuple(a) for a in node.board]) , node.selection)
def caliculate_cost (operations): # return the cost of the current operations
pair = operations
cost = 0
lst = []
while pair != ():
if pair[0][0] == "S":
cost += SELECTON_RATE
else:
cost += EXCHANGE_RATE
pair = pair[1]
return cost
def count_missmatch_image(board1, board2): # return the number of mismatched images between board1 and board2
counts = 0
for i in range(len(board1)):
for j in range(len(board1[0])):
try:
if board1[i][j] != board2[i][j]:
counts += 1
except:
print "----"
print board1
print board2
sys.exit()
return counts
def count_selection(operations): # count the selections
count = 0
for op in operations:
if op[0] == "S":
count += 1
return count
def encode_answer_format(operations_list,L_answer_text):
selectcount = 1
changecount = 0
ans = ""
word = ""
for i in range(len(operations_list)):
if((operations_list[i] == "L")or(operations_list[i] == "R")or(operations_list[i] == "U")or(operations_list[i] == "D")):
word += operations_list[i]
changecount +=1
else:
ans = "\r\n" + word[::-1] + ans
ans = "\r\n" + str(changecount) +ans
ans = "\r\n" + operations_list[i][1:] + ans
word = ""
changecount = 0
selectcount += 1
ans = str(selectcount) + "\r\n" +L_answer_text+ ans
return ans
# apply the moves in order, starting from the head of the list
def move_position(move_list, pos):
pos = list(pos)
for move in move_list:
if move == "L":
pos[0] -= 1
elif move == "R":
pos[0] += 1
elif move == "U":
pos[1] -= 1
elif move == "D":
pos[1] += 1
return tuple(pos)
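# --- Added illustrative note (not part of the original solver) ---
# Example of move_position: applying ["R", "D"] to (0, 0) moves the selected
# cell right and then down, returning (1, 1); applying the inverse ["U", "L"]
# to (1, 1) returns (0, 0).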
def reverse_operations(operations):
reverse_table = {
"L": "R",
"R": "L",
"U": "D",
"D": "U"
}
result = []
moves = []
for op in operations:
if op[0] == "S":
pos = (int(op[1], 16), int(op[2], 16))
rev_moves = [reverse_table[a] for a in moves]
new_pos = move_position(reversed(moves), pos)
new_op = "S%X%X" % new_pos
result.append(new_op)
result += rev_moves
moves = []
else:
moves.append(op)
rev_moves = [reverse_table[a] for a in moves]
result += rev_moves
return result
def astar_step(queue, checked_nodes, table, min_distance, tag, fwd_ahead, back_ahead):
    dummy, looking_node, operations, selection_count = queue.pop() # pop the head of the queue
g_star = caliculate_cost(operations)
    checked_nodes[(tuplenode(looking_node),tag)] = operations # add to the checked_nodes set as already checked
    next_nodes = looking_node.get_next_nodes() # put the nodes adjacent to looking_node (up/down/left/right) into next_nodes as a dict
    for key, node in next_nodes.items() : # iterate over every entry
cost = 0
select = False
if key[0] != looking_node.selection :
select = True
cost += SELECTON_RATE
added_operation = (key[1],("S%X%X"%key[0],operations))
else:
added_operation = (key[1],operations)
        if node.board != None and not((tuplenode(node),tag) in checked_nodes): # add each adjacent node to the queue if it is not in checked_nodes.
h_star = fast_distance_to_goal(looking_node,node, table)
f_star = g_star + h_star
if select:
new_selection_count = selection_count + 1
else:
new_selection_count = selection_count
if new_selection_count <= LIMIT_SELECTION:
queue.push((f_star + cost + EXCHANGE_RATE, node, added_operation, new_selection_count))
if h_star <= min_distance:
min_distance = h_star
print "%s distance=%d tag=%s" % (operations_to_list(added_operation), h_star, tag)
#if int(h_star) == 0:
#cost = -1000000000
#print "stop!"
return min_distance
def forward(problem, answer, checked_nodes,L_answer_text, result_queue):
global mode_flag ,fwd_ahead, back_ahead, thresh
ans_status = 0
distance_table = create_distance_table(answer)
static_h_star = distance_to_goal(distance_table,problem)/EXCHANGE_RATE
print static_h_star
    queue = pheap.Empty(key=lambda a: a[0]) # create an empty queue
forward_min = 999999999999
my_tag = "f"
back_tag = "b"
true_ans = answer
    next_nodes = Node(problem,(0,0),(0,0),static_h_star).get_next_nodes() # put the nodes adjacent to problem (up/down/left/right) into next_nodes as a dict
    for key, node in next_nodes.items(): # iterate over every entry
added_operation = (key[1],("S%X%X"%key[0],()))
if node.board != None :
h_star = distance_to_goal(distance_table,node.board)
h_star = fast_distance_to_goal(Node(problem,(0,0),(0,0),static_h_star),node, distance_table)
queue.push((h_star+SELECTON_RATE+EXCHANGE_RATE, node, added_operation, 1))
while not queue.is_empty:
operations = queue.element[2]
        if queue.element[1].board == true_ans: # finish if the popped board matches the answer
print "forward goal"
print operations_to_list(operations)
print "cost=%d" % caliculate_cost(operations)
ALL_COST = caliculate_cost(operations)
result_queue.put(encode_answer_format(operations_to_list(operations)))
return
if (tuplenode(queue.element[1]),back_tag) in checked_nodes:
            print "met in the middle (forward)"
fwd_op = list(reversed(operations_to_list(operations)))
fwd_cost = caliculate_cost(operations)
back_op = checked_nodes[(tuplenode(queue.element[1]),back_tag)]
back_cost = caliculate_cost(back_op) - SELECTON_RATE
back_op = reverse_operations(operations_to_list(back_op))[1:]
full_op = fwd_op + back_op
full_cost = fwd_cost + back_cost
ALL_COST = full_cost
result_queue.put(encode_answer_format(list(reversed(full_op)), L_answer_text))
return
fwd_ahead = queue.element[1].board
if count_missmatch_image(fwd_ahead, back_ahead) <= int(rows * columns * thresh):# and mode_flag == "N":
print "mode change!"
mode_flag = "A"
thresh *= MODE_CHANGE_THRESHOLD
ans_status = 0
if mode_flag == "A" and ans_status == 0:
print "change answer!"
answer = back_ahead
distance_table = create_distance_table(answer)
print distance_table
ans_status = 1
forward_min = min(forward_min, astar_step(queue, checked_nodes, distance_table, forward_min, my_tag, fwd_ahead, back_ahead))
def back(problem, answer, checked_nodes, L_answer_text, result_queue):
global mode_flag, fwd_ahead, back_ahead, thresh
ans_status = 0
distance_table = create_distance_table(problem)
static_h_star = distance_to_goal(distance_table,answer)/EXCHANGE_RATE
print static_h_star
    queue = pheap.Empty(key=lambda a: a[0]) # create an empty queue
back_min = 999999999999
my_tag = "b"
fwd_tag = "f"
true_prob = problem
    next_nodes = Node(answer,(0,0), (0,0),static_h_star).get_next_nodes() # put the nodes adjacent to answer (up/down/left/right) into next_nodes as a dict
    for key, node in next_nodes.items() : # iterate over every entry
added_operation = (key[1],("S%X%X"%key[0],()))
if node.board != None :
h_star = fast_distance_to_goal(Node(answer,(0,0),(0,0),static_h_star),node, distance_table)
queue.push((h_star+SELECTON_RATE+EXCHANGE_RATE, node, added_operation, 1))
while not queue.is_empty:
operations = queue.element[2]
        if queue.element[1].board == true_prob: # finish if the popped board matches the problem board
print "back goal"
print operations_to_list(operations)
print "cost=%d" % caliculate_cost(operations)
ALL_COST = caliculate_cost(operations)
result_queue.put(encode_answer_format(list(reversed(reverse_operations(operations_to_list(operations))))))
return
if (tuplenode(queue.element[1]),fwd_tag) in checked_nodes:
            print "met in the middle (back)"
fwd_op = checked_nodes[(tuplenode(queue.element[1]),fwd_tag)]
fwd_op = list(reversed(operations_to_list(fwd_op)))
fwd_cost = caliculate_cost(operations)
back_op = operations
back_cost = caliculate_cost(back_op) - SELECTON_RATE
back_op = reverse_operations(operations_to_list(back_op))[1:]
full_op = fwd_op + back_op
full_cost = fwd_cost + back_cost
ALL_COST = full_cost
result_queue.put(encode_answer_format(list(reversed(full_op)), L_answer_text))
return
back_ahead = queue.element[1].board
if count_missmatch_image(fwd_ahead, back_ahead) <= int(rows * columns * thresh):# and mode_flag == "N":
print "mode change!"
mode_flag = "A"
thresh *= MODE_CHANGE_THRESHOLD
ans_status = 0
if mode_flag == "A" and ans_status == 0:
print "change answer!"
problem = fwd_ahead
distance_table = create_distance_table(problem)
print distance_table
ans_status = 1
back_min = min(back_min, astar_step(queue, checked_nodes, distance_table, back_min, my_tag, fwd_ahead, back_ahead))
def solve(sortedImages, splitColumns, splitRows, limit, sel_rate, exc_rate, target_columns, target_rows):
global LIMIT_SELECTION, SELECTON_RATE, EXCHANGE_RATE, rows, columns, fwd_ahead, back_ahead
LIMIT_SELECTION = limit
SELECTON_RATE = sel_rate
EXCHANGE_RATE = exc_rate
problem = make_problem(splitColumns, splitRows)
answer = sortedImages
columns = splitColumns
rows = splitRows
checked_nodes = {} #set() #チェック済みのノード集合
problem,L_answer_text = L_sprit.corner_L_sprit(target_columns, target_rows, problem,answer)
LIMIT_SELECTION -= 1
fwd_ahead = problem
back_ahead = answer
result_queue = Queue.Queue()
fwd_thr = threading.Thread(target=forward, name="fwd", args=(problem, answer, checked_nodes, L_answer_text, result_queue))
back_thr = threading.Thread(target=back, name="back", args=(problem, answer, checked_nodes, L_answer_text, result_queue))
fwd_thr.daemon = True
back_thr.daemon = True
fwd_thr.start()
back_thr.start()
while True:
try:
            # time out every second;
            # if the queue has no content at timeout, an Empty exception is raised
return result_queue.get(True, 1)
except Queue.Empty:
            # do nothing if the timeout exception occurs
pass
except KeyboardInterrupt:
print "aborting"
            # set the kill flag so the threads terminate
kill_flag = True
sys.exit(0)
#main
master = ""
target_columns = 4
target_rows = 4
if len(sys.argv) == 3:
master = sys.argv[1]
target_columns,target_rows = sys.argv[2].split("-")
elif len(sys.argv) == 2:
if '.' in sys.argv[1]:
master = sys.argv[1]
elif '-' in sys.argv[1]:
target_columns,target_rows = sys.argv[1].split("-")
master = config.master
else:
master = config.master
para = communication.get_problem(master)
ans_str = solve(para['answer'], para['columns'], para['rows'], para['lim_select'], para['selection_rate'], para['exchange_rate'],int(target_columns),int(target_rows))
communication.post(master, ans_str)
|
SP2LC/procon25-main
|
A-star/L-dynamic.py
|
Python
|
apache-2.0
| 17,085 | 0.024341 |
#!/usr/bin/env python
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from cStringIO import StringIO
from collections import defaultdict
import sys
import yaml
raw_classifications = open('classifications.yaml').read()
doctypes = yaml.load(raw_classifications)
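# --- Added illustrative note (not part of the original script) ---
# Assumed structure of classifications.yaml, inferred from how `doctypes` is
# iterated in make_classification below (document type -> list of marker
# strings); the type names and strings here are made-up examples:
#
#   invoice:
#     - "Invoice number"
#     - "Amount due"
#   bank_statement:
#     - "Opening balance"
#     - "Closing balance"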
def movefile(path, destination):
print "Moving file %s to %s" % (path, destination)
def convert_pdf_to_txt(path):
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = file(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos=set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):
interpreter.process_page(page)
fp.close()
device.close()
str = retstr.getvalue()
retstr.close()
return str
def make_classification(text):
maybe_docs = defaultdict(int)
for doctypes_name, docstrings in doctypes.items():
for string in docstrings:
if string in text:
maybe_docs[doctypes_name] += text.count(string) * 10
continue
if string.lower() in text.lower():
maybe_docs[doctypes_name] += text.count(string) * 5
continue
if not maybe_docs:
classification = 'unknown'
classification_score = -99
return classification, classification_score
classification, classification_score = sorted(maybe_docs.iteritems())[0]
if classification_score < 50:
classification = 'unsure'
classification_score = -1
return classification, classification_score
def findbarcode(pdf):
import os
os.popen("rm /tmp/x*.png").read()
os.popen("convert -density 300 %s /tmp/x.png" % pdf).read()
barcode = os.popen("zbarimg -q /tmp/x*.png").read().strip()
if barcode:
print "%s has a barcode of %s" % (pdf, barcode)
def main():
import os
pdffiles = []
if len(sys.argv) == 1:
for root, dirnames, filenames in os.walk("/home/dannyla"):
for filename in filenames:
if filename.lower().endswith('pdf'):
pdffiles.append(os.path.join(root, filename))
else:
pdffiles = sys.argv[1:]
for pdf in pdffiles:
pdf_strings = convert_pdf_to_txt(pdf)
classification, classification_score = make_classification(pdf_strings)
print "%s is a %s document (score:%d)" % (pdf, classification, classification_score)
findbarcode(pdf)
movefile(pdf, classification)
if __name__ == '__main__':
main()
|
daniellawrence/pdfclassification
|
main.py
|
Python
|
mit
| 2,887 | 0.002425 |
"""
Tests for courseware middleware
"""
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.http import Http404
from mock import patch
import courseware.courses as courses
from courseware.middleware import RedirectUnenrolledMiddleware
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class CoursewareMiddlewareTestCase(ModuleStoreTestCase):
"""Tests that courseware middleware is correctly redirected"""
def setUp(self):
super(CoursewareMiddlewareTestCase, self).setUp()
self.course = CourseFactory.create()
def check_user_not_enrolled_redirect(self):
"""A UserNotEnrolled exception should trigger a redirect"""
request = RequestFactory().get("dummy_url")
response = RedirectUnenrolledMiddleware().process_exception(
request, courses.UserNotEnrolled(self.course.id)
)
self.assertEqual(response.status_code, 302)
# make sure we redirect to the course about page
expected_url = reverse(
"about_course", args=[self.course.id.to_deprecated_string()]
)
target_url = response._headers['location'][1]
self.assertTrue(target_url.endswith(expected_url))
def test_user_not_enrolled_redirect(self):
self.check_user_not_enrolled_redirect()
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_MKTG_SITE": True})
def test_user_not_enrolled_redirect_mktg(self):
self.check_user_not_enrolled_redirect()
def test_process_404(self):
"""A 404 should not trigger anything"""
request = RequestFactory().get("dummy_url")
response = RedirectUnenrolledMiddleware().process_exception(
request, Http404()
)
self.assertIsNone(response)
|
eestay/edx-platform
|
lms/djangoapps/courseware/tests/test_middleware.py
|
Python
|
agpl-3.0
| 1,873 | 0 |
"""
Cryo-EM density map I/O
@warning: dragons ahead, this module is experimental
"""
import numpy
import struct
class DensityMapFormatError(ValueError):
pass
class ByteOrder(object):
NATIVE = '='
LITTLE = '<'
BIG = '>'
class DensityInfo(object):
def __init__(self, data, spacing, origin, shape=None, header=None, axes=None):
self.data = data
self.spacing = spacing
self.origin = origin
self.header = header
self.shape = shape
self.axes = axes
if shape is None and data is not None:
self.shape = self.data.shape
class HeaderInfo(object):
def __init__(self, fields):
fields = tuple(fields)
if not len(fields) == 25:
raise ValueError(fields)
self._fields = fields
def __getitem__(self, i):
return self._fields[i]
def __iter__(self):
return iter(self._fields)
@property
def nc(self):
return self._fields[0]
@property
def nr(self):
return self._fields[1]
@property
def ns(self):
return self._fields[2]
@property
def mode(self):
return self._fields[3]
@property
def ncstart(self):
return self._fields[4]
@property
def nrstart(self):
return self._fields[5]
@property
def nsstart(self):
return self._fields[6]
@property
def nx(self):
return self._fields[7]
@property
def ny(self):
return self._fields[8]
@property
def nz(self):
return self._fields[9]
@property
def x(self):
return self._fields[10]
@property
def y(self):
return self._fields[11]
@property
def z(self):
return self._fields[12]
@property
def alpha(self):
return self._fields[13]
@property
def beta(self):
return self._fields[14]
@property
def gamma(self):
return self._fields[15]
@property
def mapc(self):
return self._fields[16]
@property
def mapr(self):
return self._fields[17]
@property
def maps(self):
return self._fields[18]
@property
def amin(self):
return self._fields[19]
@property
def amax(self):
return self._fields[20]
@property
def amean(self):
return self._fields[21]
@property
def ispg(self):
return self._fields[22]
@property
def nsymbt(self):
return self._fields[23]
@property
def lskflg(self):
return self._fields[24]
class DensityMapReader(object):
"""
Binary MRC density map reader.
@param filename: input MRC file name
@type filename: str
"""
HEADER_SIZE = 1024
def __init__(self, filename):
self._filename = filename
@property
def filename(self):
"""
Input MRC file name
@rtype: str
"""
return self._filename
def _rawheader(self, stream):
"""
Read and return the raw binary header.
"""
raw = stream.read(DensityMapReader.HEADER_SIZE)
return bytes(raw)
def _inspect(self, rawheader, order):
"""
Parse a raw binary header.
"""
format = '{0}10l6f3l3f3l'.format(order)
fields = struct.unpack(format, rawheader[:4 * 25])
return HeaderInfo(fields)
def _spacing(self, header):
if header.nx != 0 and header.ny != 0 and header.nz != 0:
return (header.x / header.nx, header.y / header.ny, header.z / header.nz)
else:
return (0, 0, 0)
def _origin(self, header, spacing=None):
if spacing is None:
spacing = self._spacing(header)
origin = header.ncstart, header.nrstart, header.nsstart
return [origin[i] * spacing[i] for i in range(3)]
def _shape(self, header):
return (header.ns, header.nr, header.nc)
def inspect_header(self, order=ByteOrder.NATIVE):
"""
Parse the raw binary header of the density map.
@param order: byte order (defaults to L{ByteOrder.NATIVE})
@type order: str
@return: header information
@rtype: L{HeaderInfo}
"""
with open(self.filename, 'rb') as stream:
raw = self._rawheader(stream)
return self._inspect(raw, order)
def read_header(self):
"""
Read the header of the density map only.
@return: density info without any actual data (density.data is None)
@rtype: L{DensityInfo}
"""
with open(self.filename, 'rb') as stream:
raw = self._rawheader(stream)
header = self._inspect(raw, ByteOrder.NATIVE)
spacing = self._spacing(header)
origin = self._origin(header, spacing)
shape = self._shape(header)
return DensityInfo(None, spacing, origin, shape=shape, header=raw)
def read(self):
"""
Read the entire density map.
@return: complete density info
@rtype: L{DensityInfo}
"""
with open(self.filename, 'rb') as stream:
raw = self._rawheader(stream)
header = self._inspect(raw, ByteOrder.NATIVE)
if header.mode == 2 or header.mode == 1:
byte_order = ByteOrder.NATIVE
elif header.mode == 33554432:
header = self._inspect(raw, ByteOrder.BIG)
byte_order = ByteOrder.BIG
if header.mode == 33554432:
header = self._inspect(raw, ByteOrder.LITTLE)
byte_order = ByteOrder.LITTLE
else:
raise DensityMapFormatError("Not a mode 2 CCP4 map file")
stream.read(header.nsymbt) # symmetry_data
count = header.ns * header.nr * header.nc
map_data = stream.read(4 * count)
if byte_order == ByteOrder.NATIVE:
array = numpy.fromstring(map_data, numpy.float32, count)
else:
array = numpy.zeros((count,), numpy.float32)
index = 0
while len(map_data) >= 4 * 10000:
values = struct.unpack(byte_order + '10000f', map_data[:4 * 10000])
array[index:index + 10000] = numpy.array(values, numpy.float32)
index += 10000
map_data = map_data[4 * 10000:]
values = struct.unpack(byte_order + '%df' % (len(map_data) / 4), map_data)
array[index:] = numpy.array(values, numpy.float32)
del map_data
array.shape = self._shape(header)
data = array.T
spacing = self._spacing(header)
origin = self._origin(header, spacing)
return DensityInfo(data, spacing, origin, header=raw)
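# --- Added illustrative sketch (not part of the original module) ---
# Minimal read/write round trip using the classes in this module; the file
# names are placeholder assumptions. DensityMapWriter is defined just below.
def _example_roundtrip():  # hypothetical helper, for illustration only
    reader = DensityMapReader("input.mrc")
    density = reader.read()          # DensityInfo with data, spacing and origin
    print(density.shape)
    DensityMapWriter().write_file("copy.mrc", density)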
class DensityMapWriter(object):
"""
Binary MRC density map writer.
"""
def reconstruct_header(self, density):
"""
Attempt to reconstruct the header, given L{DensityInfo}'s
data shape, spacing and origin.
@param density: density info
@type density: L{DensityInfo}
@return: reconstructed binary header
@rtype: bytes
"""
N = list(density.data.shape)
MODE = 2
if isinstance(density.spacing, float):
spacing = 3 * [density.spacing]
else:
spacing = density.spacing
if density.origin is None:
origin = 3 * [0.]
else:
origin = density.origin
if density.axes is None:
MAP = list(range(1, 4))
else:
MAP = list(density.axes)
start = [int(round(origin[i] / spacing[i], 0)) for i in range(3)]
M = [density.data.shape[i] for i in range(3)]
cella = [density.data.shape[i] * spacing[i] for i in range(3)]
cellb = 3 * [90.]
stats = [density.data.min(), density.data.max(), density.data.mean()]
ISPG = 0
NSYMBT = 0
LSKFLG = 0
JUNK = [0] * 25
ORIGIN = [0., 0., 0.]
MACHST = 0
args = N + [MODE] + start + M + cella + cellb + \
MAP + stats + [ISPG, NSYMBT, LSKFLG] + JUNK + \
ORIGIN + [0, MACHST, 0., 0] + [b' ' * 796]
return struct.pack('=10l6f3l3f3l25l3f2l1f1l796s', *args)
def write(self, stream, density):
"""
Write C{density} to a binary C{stream}.
@param stream: destination binary stream
@type stream: stream
@param density: input density info
@type density: L{DensityInfo}
"""
if density.header is not None:
stream.write(density.header)
else:
stream.write(self.reconstruct_header(density))
data = density.data.T.flatten()
x = struct.pack('=%df' % len(data), *data.tolist())
stream.write(x)
def write_file(self, filename, density):
"""
Write C{density} to a binary file.
@param filename: destination file name
@type filename: str
@param density: input density info
@type density: L{DensityInfo}
"""
with open(filename, 'wb') as stream:
self.write(stream, density)
|
csb-toolbox/CSB
|
csb/bio/io/mrc.py
|
Python
|
mit
| 9,732 | 0.009453 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
PACKAGEDIR = os.path.abspath(os.path.dirname(__file__))
import matplotlib
matplotlib.use('TkAgg')
from .version import __version__
from .keparray import *
from .kepbls import *
from .kepclean import *
from .kepclip import *
from .kepconvert import *
from .kepcotrend import *
from .kepdetrend import *
from .kepdiffim import *
from .kepdraw import *
from .kepdynamic import *
from .kepextract import *
from .kepfilter import *
from .kepfit import *
from .kepflatten import *
from .kepfold import *
from .kepfourier import *
from .kepperiodogram import *
from .kepfunc import *
from .kephead import *
from .kepimages import *
from .kepio import *
from .kepkey import *
from .kepmask import *
from .kepmsg import *
from .kepoutlier import *
from .keppca import *
from .keppixseries import *
from .kepplot import *
from .kepprf import *
from .kepprfphot import *
from .keprange import *
from .kepsff import *
from .kepsmooth import *
from .kepstat import *
from .kepstddev import *
from .kepstitch import *
from .keptimefix import *
from .keptrial import *
from .keptrim import *
from .kepwindow import *
from .prf import *
from .lightcurve import *
from .targetpixelfile import *
from .utils import *
|
gully/PyKE
|
pyke/__init__.py
|
Python
|
mit
| 1,296 | 0.036265 |
from .base import *
from .local import *
FB_APP_ID = "557603244304943"
|
amol-mandhane/konnactivity
|
settings/__init__.py
|
Python
|
mit
| 71 | 0.014085 |
from django import forms
from django.conf import settings
from guardian.compat import url, patterns
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext, ugettext_lazy as _
from guardian.forms import UserObjectPermissionsForm
from guardian.forms import GroupObjectPermissionsForm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_perms_for_model
from guardian.models import User, Group
class AdminUserObjectPermissionsForm(UserObjectPermissionsForm):
"""
    Extends :form:`UserObjectPermissionsForm`. It only overrides the
    ``get_obj_perms_field_widget`` method so that it returns the
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
"""
def get_obj_perms_field_widget(self):
return FilteredSelectMultiple(_("Permissions"), False)
class AdminGroupObjectPermissionsForm(GroupObjectPermissionsForm):
"""
    Extends :form:`GroupObjectPermissionsForm`. It only overrides the
    ``get_obj_perms_field_widget`` method so that it returns the
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
"""
def get_obj_perms_field_widget(self):
return FilteredSelectMultiple(_("Permissions"), False)
class GuardedModelAdmin(admin.ModelAdmin):
"""
    Extends the ``django.contrib.admin.ModelAdmin`` class. Provides extra
    views for object permissions management in the admin panel. It also changes
    the default ``change_form_template`` option to
    ``'admin/guardian/model/change_form.html'``, which is required for the
    object-permissions related url to be shown on the model pages.
**Extra options**
``GuardedModelAdmin.obj_perms_manage_template``
*Default*: ``admin/guardian/model/obj_perms_manage.html``
``GuardedModelAdmin.obj_perms_manage_user_template``
*Default*: ``admin/guardian/model/obj_perms_manage_user.html``
``GuardedModelAdmin.obj_perms_manage_group_template``
*Default*: ``admin/guardian/model/obj_perms_manage_group.html``
``GuardedModelAdmin.user_can_access_owned_objects_only``
*Default*: ``False``
If this would be set to ``True``, ``request.user`` would be used to
filter out objects he or she doesn't own (checking ``user`` field
of used model - field name may be overridden by
``user_owned_objects_field`` option.
.. note::
Please remember that this will **NOT** affect superusers!
Admins would still see all items.
``GuardedModelAdmin.user_owned_objects_field``
*Default*: ``user``
**Usage example**
Just use :admin:`GuardedModelAdmin` instead of
``django.contrib.admin.ModelAdmin``.
.. code-block:: python
from django.contrib import admin
from guardian.admin import GuardedModelAdmin
from myapp.models import Author
class AuthorAdmin(GuardedModelAdmin):
pass
admin.site.register(Author, AuthorAdmin)
"""
change_form_template = \
'admin/guardian/model/change_form.html'
obj_perms_manage_template = \
'admin/guardian/model/obj_perms_manage.html'
obj_perms_manage_user_template = \
'admin/guardian/model/obj_perms_manage_user.html'
obj_perms_manage_group_template = \
'admin/guardian/model/obj_perms_manage_group.html'
user_can_access_owned_objects_only = False
user_owned_objects_field = 'user'
def queryset(self, request):
qs = super(GuardedModelAdmin, self).queryset(request)
if self.user_can_access_owned_objects_only and \
not request.user.is_superuser:
filters = {self.user_owned_objects_field: request.user}
qs = qs.filter(**filters)
return qs
def get_urls(self):
"""
Extends standard admin model urls with the following:
- ``.../permissions/``
- ``.../permissions/user-manage/<user_id>/``
- ``.../permissions/group-manage/<group_id>/``
.. note::
``...`` above are standard, instance detail url (i.e.
``/admin/flatpages/1/``)
"""
urls = super(GuardedModelAdmin, self).get_urls()
info = self.model._meta.app_label, self.model._meta.module_name
myurls = patterns('',
url(r'^(?P<object_pk>.+)/permissions/$',
view=self.admin_site.admin_view(self.obj_perms_manage_view),
name='%s_%s_permissions' % info),
url(r'^(?P<object_pk>.+)/permissions/user-manage/(?P<user_id>\-?\d+)/$',
view=self.admin_site.admin_view(
self.obj_perms_manage_user_view),
name='%s_%s_permissions_manage_user' % info),
url(r'^(?P<object_pk>.+)/permissions/group-manage/(?P<group_id>\-?\d+)/$',
view=self.admin_site.admin_view(
self.obj_perms_manage_group_view),
name='%s_%s_permissions_manage_group' % info),
)
return myurls + urls
def get_obj_perms_base_context(self, request, obj):
"""
Returns context dictionary with common admin and object permissions
related content.
"""
context = {
'adminform': {'model_admin': self},
'object': obj,
'app_label': self.model._meta.app_label,
'opts': self.model._meta,
'original': hasattr(obj, '__unicode__') and obj.__unicode__() or\
str(obj),
'has_change_permission': self.has_change_permission(request, obj),
'model_perms': get_perms_for_model(obj),
'title': _("Object permissions"),
}
return context
def obj_perms_manage_view(self, request, object_pk):
"""
Main object permissions view. Presents all users and groups with any
object permissions for the current model *instance*. Users or groups
        without object permissions for the related *instance* are **not**
        shown. To add or manage a user or group, use the links or forms
        presented within the page.
"""
obj = get_object_or_404(self.queryset(request), pk=object_pk)
users_perms = SortedDict(
get_users_with_perms(obj, attach_perms=True,
with_group_users=False))
users_perms.keyOrder.sort(key=lambda user: user.username)
groups_perms = SortedDict(
get_groups_with_perms(obj, attach_perms=True))
groups_perms.keyOrder.sort(key=lambda group: group.name)
if request.method == 'POST' and 'submit_manage_user' in request.POST:
user_form = UserManage(request.POST)
group_form = GroupManage()
info = (
self.admin_site.name,
self.model._meta.app_label,
self.model._meta.module_name
)
if user_form.is_valid():
user_id = user_form.cleaned_data['user'].id
url = reverse(
'%s:%s_%s_permissions_manage_user' % info,
args=[obj.pk, user_id]
)
return redirect(url)
elif request.method == 'POST' and 'submit_manage_group' in request.POST:
user_form = UserManage()
group_form = GroupManage(request.POST)
info = (
self.admin_site.name,
self.model._meta.app_label,
self.model._meta.module_name
)
if group_form.is_valid():
group_id = group_form.cleaned_data['group'].id
url = reverse(
'%s:%s_%s_permissions_manage_group' % info,
args=[obj.pk, group_id]
)
return redirect(url)
else:
user_form = UserManage()
group_form = GroupManage()
context = self.get_obj_perms_base_context(request, obj)
context['users_perms'] = users_perms
context['groups_perms'] = groups_perms
context['user_form'] = user_form
context['group_form'] = group_form
return render_to_response(self.get_obj_perms_manage_template(),
context, RequestContext(request, current_app=self.admin_site.name))
def get_obj_perms_manage_template(self):
"""
Returns main object permissions admin template. May be overridden if
need to change it dynamically.
.. note::
If ``INSTALLED_APPS`` contains ``grappelli`` this function would
return ``"admin/guardian/grappelli/obj_perms_manage.html"``.
"""
if 'grappelli' in settings.INSTALLED_APPS:
return 'admin/guardian/contrib/grappelli/obj_perms_manage.html'
return self.obj_perms_manage_template
def obj_perms_manage_user_view(self, request, object_pk, user_id):
"""
Manages selected users' permissions for current object.
"""
user = get_object_or_404(User, id=user_id)
obj = get_object_or_404(self.queryset(request), pk=object_pk)
form_class = self.get_obj_perms_manage_user_form()
form = form_class(user, obj, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save_obj_perms()
msg = ugettext("Permissions saved.")
messages.success(request, msg)
info = (
self.admin_site.name,
self.model._meta.app_label,
self.model._meta.module_name
)
url = reverse(
'%s:%s_%s_permissions_manage_user' % info,
args=[obj.pk, user.id]
)
return redirect(url)
context = self.get_obj_perms_base_context(request, obj)
context['user_obj'] = user
context['user_perms'] = get_perms(user, obj)
context['form'] = form
return render_to_response(self.get_obj_perms_manage_user_template(),
context, RequestContext(request, current_app=self.admin_site.name))
def get_obj_perms_manage_user_template(self):
"""
Returns object permissions for user admin template. May be overridden
if need to change it dynamically.
.. note::
If ``INSTALLED_APPS`` contains ``grappelli`` this function would
return ``"admin/guardian/grappelli/obj_perms_manage_user.html"``.
"""
if 'grappelli' in settings.INSTALLED_APPS:
return 'admin/guardian/contrib/grappelli/obj_perms_manage_user.html'
return self.obj_perms_manage_user_template
def get_obj_perms_manage_user_form(self):
"""
Returns form class for user object permissions management. By default
:form:`AdminUserObjectPermissionsForm` is returned.
"""
return AdminUserObjectPermissionsForm
def obj_perms_manage_group_view(self, request, object_pk, group_id):
"""
Manages selected groups' permissions for current object.
"""
group = get_object_or_404(Group, id=group_id)
obj = get_object_or_404(self.queryset(request), pk=object_pk)
form_class = self.get_obj_perms_manage_group_form()
form = form_class(group, obj, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save_obj_perms()
msg = ugettext("Permissions saved.")
messages.success(request, msg)
info = (
self.admin_site.name,
self.model._meta.app_label,
self.model._meta.module_name
)
url = reverse(
'%s:%s_%s_permissions_manage_group' % info,
args=[obj.pk, group.id]
)
return redirect(url)
context = self.get_obj_perms_base_context(request, obj)
context['group_obj'] = group
context['group_perms'] = get_perms(group, obj)
context['form'] = form
return render_to_response(self.get_obj_perms_manage_group_template(),
context, RequestContext(request, current_app=self.admin_site.name))
def get_obj_perms_manage_group_template(self):
"""
Returns object permissions for group admin template. May be overridden
if need to change it dynamically.
.. note::
If ``INSTALLED_APPS`` contains ``grappelli`` this function would
return ``"admin/guardian/grappelli/obj_perms_manage_group.html"``.
"""
if 'grappelli' in settings.INSTALLED_APPS:
return 'admin/guardian/contrib/grappelli/obj_perms_manage_group.html'
return self.obj_perms_manage_group_template
def get_obj_perms_manage_group_form(self):
"""
Returns form class for group object permissions management. By default
:form:`AdminGroupObjectPermissionsForm` is returned.
"""
return AdminGroupObjectPermissionsForm
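# --- Added illustrative sketch (not part of the original module) ---
# Example of plugging a custom object permissions form into GuardedModelAdmin,
# as described in the get_obj_perms_manage_user_form docstring above. The class
# names below are made up for illustration only.
class ExampleUserObjectPermissionsForm(AdminUserObjectPermissionsForm):
    def get_obj_perms_field_widget(self):
        # use the stacked variant of the admin widget
        return FilteredSelectMultiple(_("Permissions"), True)
class ExampleGuardedModelAdmin(GuardedModelAdmin):
    user_can_access_owned_objects_only = True
    def get_obj_perms_manage_user_form(self):
        return ExampleUserObjectPermissionsForm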
class UserManage(forms.Form):
user = forms.RegexField(label=_("Username"), max_length=30,
regex=r'^[\w.@+-]+$',
error_messages = {
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters."),
'does_not_exist': _("This user does not exist")})
def clean_user(self):
"""
Returns ``User`` instance based on the given username.
"""
username = self.cleaned_data['user']
try:
user = User.objects.get(username=username)
return user
except User.DoesNotExist:
raise forms.ValidationError(
self.fields['user'].error_messages['does_not_exist'])
class GroupManage(forms.Form):
group = forms.CharField(max_length=80, error_messages={'does_not_exist':
_("This group does not exist")})
def clean_group(self):
"""
Returns ``Group`` instance based on the given group name.
"""
name = self.cleaned_data['group']
try:
group = Group.objects.get(name=name)
return group
except Group.DoesNotExist:
raise forms.ValidationError(
self.fields['group'].error_messages['does_not_exist'])
|
restless/django-guardian
|
guardian/admin.py
|
Python
|
bsd-2-clause
| 14,636 | 0.001366 |
"""
Configuration class.
Copyright 2010 Kelsey Hightower
Kelsey Hightower <kelsey.hightower@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
module for configuring repos, packages and files
"""
import filecmp
import shutil
from . import utils
import tempfile
import stat
import os.path
import sys
import time
import pwd
import grp
import json
try:
import yum
sys.path.append('/usr/share/yum-cli')
import cli
yum_available = True
except:
yum_available = False
class KoanConfigure:
"""
    Provides all configuration methods used by koan
    to configure repos, files and packages.
"""
def __init__(self, config):
"""Constructor. Requires json config object."""
self.config = json.JSONDecoder().decode(config)
self.stats = {}
(self.dist, _) = utils.os_release()
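    # --- Added illustrative note (not part of the original class) ---
    # Assumed shape of the JSON config consumed by the configure_* methods
    # below, inferred from the keys they access; all values are made-up examples:
    # {
    #   "repos_enabled": true,
    #   "repo_data": "[config]\nname=config\nbaseurl=http://example/repo\n",
    #   "packages": {"tree": {"action": "create", "install_name": "tree"}},
    #   "files": {"motd": {"is_dir": false, "action": "create", "path": "/etc/motd",
    #             "mode": "0644", "owner": "root", "group": "root", "content": "hi"}}
    # }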
def configure_repos(self):
# Enables the possibility to use different types of repos
if yum_available and self.dist == "redhat":
self.configure_yum_repos()
def configure_yum_repos(self):
"""Configure YUM repositories."""
print("- Configuring Repos")
old_repo = '/etc/yum.repos.d/config.repo'
# Stage a tempfile to hold new file contents
_tempfile = tempfile.NamedTemporaryFile()
_tempfile.write(self.config['repo_data'])
_tempfile.flush()
new_repo = _tempfile.name
# Check if repo resource exist, create if missing
if os.path.isfile(old_repo):
if not filecmp.cmp(old_repo, new_repo):
utils.sync_file(old_repo, new_repo, 0, 0, 644)
self.stats['repos_status'] = "Success: Repos in sync"
else:
self.stats['repos_status'] = "Success: Repos in sync"
else:
print(" %s not found, creating..." % old_repo)
open(old_repo, 'w').close()
utils.sync_file(old_repo, new_repo, 0, 0, 644)
self.stats['repos_status'] = "Success: Repos in sync"
_tempfile.close()
def configure_packages(self):
# Enables the possibility to use different types of package
# configurators
if yum_available and self.dist == "redhat":
self.configure_yum_packages()
def configure_yum_packages(self):
"""Configure package resources."""
print("- Configuring Packages")
runtime_start = time.time()
nsync = 0
osync = 0
fail = 0
packages = self.config['packages']
yb = yum.YumBase()
yb.preconf.debuglevel = 0
yb.preconf.errorlevel = 0
yb.doTsSetup()
yb.doRpmDBSetup()
ybc = cli.YumBaseCli()
ybc.preconf.debuglevel = 0
ybc.preconf.errorlevel = 0
ybc.conf.assumeyes = True
ybc.doTsSetup()
ybc.doRpmDBSetup()
create_pkg_list = []
remove_pkg_list = []
for package in packages:
action = packages[package]['action']
# In the near future, will use install_name vs package
# as it includes a more specific package name: "package-version"
# install_name = packages[package]['install_name']
if yb.isPackageInstalled(package):
if action == 'create':
nsync += 1
if action == 'remove':
remove_pkg_list.append(package)
if not yb.isPackageInstalled(package):
if action == 'create':
create_pkg_list.append(package)
if action == 'remove':
nsync += 1
# Don't waste time with YUM if there is nothing to do.
doTransaction = False
if create_pkg_list:
print(" Packages out of sync: %s" % create_pkg_list)
ybc.installPkgs(create_pkg_list)
osync += len(create_pkg_list)
doTransaction = True
if remove_pkg_list:
print(" Packages out of sync: %s" % remove_pkg_list)
ybc.erasePkgs(remove_pkg_list)
osync += len(remove_pkg_list)
doTransaction = True
if doTransaction:
ybc.buildTransaction()
ybc.doTransaction()
runtime_end = time.time()
runtime = (runtime_end - runtime_start)
self.stats['pkg'] = {
'runtime': runtime,
'nsync': nsync,
'osync': osync,
'fail': fail}
def configure_directories(self):
""" Configure directory resources."""
print("- Configuring Directories")
runtime_start = time.time()
nsync = 0
osync = 0
fail = 0
files = self.config['files']
# Split out directories
_dirs = [d for d in files if files[d]['is_dir']]
# Configure directories first
for dir in _dirs:
action = files[dir]['action']
odir = files[dir]['path']
protected_dirs = [
'/',
'/bin',
'/boot',
'/dev',
'/etc',
'/lib',
'/lib64',
'/proc',
'/sbin',
'/sys',
'/usr',
'/var']
if os.path.isdir(odir):
if os.path.realpath(odir) in protected_dirs:
print(" %s is a protected directory, skipping..."
% os.path.realpath(odir))
fail += 1
continue
if action == 'create':
nmode = int(files[dir]['mode'], 8)
nuid = pwd.getpwnam(files[dir]['owner'])[2]
ngid = grp.getgrnam(files[dir]['group'])[2]
# Compare old and new directories, sync if permissions mismatch
if os.path.isdir(odir):
dstat = os.stat(odir)
omode = stat.S_IMODE(dstat.st_mode)
ouid = pwd.getpwuid(dstat.st_uid)[2]
ogid = grp.getgrgid(dstat.st_gid)[2]
if omode != nmode or ouid != nuid or ogid != ngid:
os.chmod(odir, nmode)
os.chown(odir, nuid, ngid)
osync += 1
else:
nsync += 1
else:
print(" Directory out of sync, creating %s" % odir)
os.makedirs(odir, nmode)
os.chown(odir, nuid, ngid)
osync += 1
elif action == 'remove':
if os.path.isdir(odir):
print(" Directory out of sync, removing %s" % odir)
shutil.rmtree(odir)
osync += 1
else:
nsync += 1
else:
pass
runtime_end = time.time()
runtime = (runtime_end - runtime_start)
self.stats['dir'] = {
'runtime': runtime,
'nsync': nsync,
'osync': osync,
'fail': fail}
def configure_files(self):
""" Configure file resources."""
print("- Configuring Files")
runtime_start = time.time()
nsync = 0
osync = 0
fail = 0
files = self.config['files']
# Split out files
_files = [f for f in files if files[f]['is_dir'] is False]
for file in _files:
action = files[file]['action']
ofile = files[file]['path']
if action == 'create':
nmode = int(files[file]['mode'], 8)
nuid = pwd.getpwnam(files[file]['owner'])[2]
ngid = grp.getgrnam(files[file]['group'])[2]
# Stage a tempfile to hold new file contents
_tempfile = tempfile.NamedTemporaryFile()
_tempfile.write(files[file]['content'])
_tempfile.flush()
nfile = _tempfile.name
# Compare new and old files, sync if permissions or contents
# mismatch
if os.path.isfile(ofile):
fstat = os.stat(ofile)
omode = stat.S_IMODE(fstat.st_mode)
ouid = pwd.getpwuid(fstat.st_uid)[2]
ogid = grp.getgrgid(fstat.st_gid)[2]
if not filecmp.cmp(ofile, nfile) or omode != nmode or ogid != ngid or ouid != nuid:
utils.sync_file(ofile, nfile, nuid, ngid, nmode)
osync += 1
else:
nsync += 1
            elif os.path.isdir(os.path.dirname(ofile)):
                # Create the file only if the base directory exists
open(ofile, 'w').close()
utils.sync_file(ofile, nfile, nuid, ngid, nmode)
osync += 1
else:
print(" Base directory not found, %s required."
% (os.path.dirname(ofile)))
fail += 1
_tempfile.close()
elif action == 'remove':
                if os.path.isfile(ofile):
os.remove(ofile)
osync += 1
else:
nsync += 1
else:
pass
runtime_end = time.time()
runtime = (runtime_end - runtime_start)
self.stats['files'] = {
'runtime': runtime,
'nsync': nsync,
'osync': osync,
'fail': fail}
def run(self):
# Configure resources in a specific order: repos, packages,
# directories, files
if self.config['repos_enabled']:
self.configure_repos()
self.configure_packages()
self.configure_directories()
self.configure_files()
return self.stats
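# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how this configurator is driven, assuming a JSON
# config document with the keys read above ("repos_enabled", "repo_data",
# "packages", "files"). The class name below is a hypothetical stand-in,
# since the class statement sits above this excerpt.
#
#   import json
#   config = json.dumps({
#       "repos_enabled": False,
#       "packages": {},
#       "files": {},
#   })
#   stats = Configurator(config).run()  # hypothetical class name
#   print(stats)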
|
cobbler/koan
|
koan/configurator.py
|
Python
|
gpl-2.0
| 10,535 | 0.00019 |
import pytz
from collections import defaultdict
import logging
from datetime import datetime
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import connection
from django.http import HttpResponse
from django.utils import simplejson
from django_comment_common.models import Role, FORUM_ROLE_STUDENT
from django_comment_client.permissions import check_permissions_by_view, cached_has_permission
from edxmako import lookup_template
import pystache_custom as pystache
from course_groups.cohorts import get_cohort_by_id, get_cohort_id, is_commentable_cohorted
from course_groups.models import CourseUserGroup
from xmodule.modulestore.django import modulestore
from django.utils.timezone import UTC
from opaque_keys.edx.locations import i4xEncoder
from opaque_keys.edx.keys import CourseKey
import json
log = logging.getLogger(__name__)
def extract(dic, keys):
return {k: dic.get(k) for k in keys}
def strip_none(dic):
return dict([(k, v) for k, v in dic.iteritems() if v is not None])
def strip_blank(dic):
def _is_blank(v):
return isinstance(v, str) and len(v.strip()) == 0
return dict([(k, v) for k, v in dic.iteritems() if not _is_blank(v)])
# TODO should we be checking if dic1 and dic2 have the same keys with different values?
def merge_dict(dic1, dic2):
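    # Note: values from dic2 silently overwrite those from dic1 whenever the
    # two dicts share a key, which is what the TODO above is asking about.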
return dict(dic1.items() + dic2.items())
def get_role_ids(course_id):
roles = Role.objects.filter(course_id=course_id).exclude(name=FORUM_ROLE_STUDENT)
return dict([(role.name, list(role.users.values_list('id', flat=True))) for role in roles])
def has_forum_access(uname, course_id, rolename):
try:
role = Role.objects.get(name=rolename, course_id=course_id)
except Role.DoesNotExist:
return False
return role.users.filter(username=uname).exists()
def _get_discussion_modules(course):
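    """Return the course's discussion modules that carry all required discussion metadata fields."""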
all_modules = modulestore().get_items(course.id, qualifiers={'category': 'discussion'})
def has_required_keys(module):
for key in ('discussion_id', 'discussion_category', 'discussion_target'):
if getattr(module, key) is None:
log.warning("Required key '%s' not in discussion %s, leaving out of category map" % (key, module.location))
return False
return True
return filter(has_required_keys, all_modules)
def _get_discussion_id_map(course):
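    """Return a dict mapping discussion_id -> {"location", "title"} for every discussion module in the course."""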
def get_entry(module):
discussion_id = module.discussion_id
title = module.discussion_target
last_category = module.discussion_category.split("/")[-1].strip()
return (discussion_id, {"location": module.location, "title": last_category + " / " + title})
return dict(map(get_entry, _get_discussion_modules(course)))
def _filter_unstarted_categories(category_map):
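    """Return a copy of the category map with entries and subcategories whose start_date is still in the future removed."""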
now = datetime.now(UTC())
result_map = {}
unfiltered_queue = [category_map]
filtered_queue = [result_map]
while len(unfiltered_queue) > 0:
unfiltered_map = unfiltered_queue.pop()
filtered_map = filtered_queue.pop()
filtered_map["children"] = []
filtered_map["entries"] = {}
filtered_map["subcategories"] = {}
for child in unfiltered_map["children"]:
if child in unfiltered_map["entries"]:
if unfiltered_map["entries"][child]["start_date"] <= now:
filtered_map["children"].append(child)
filtered_map["entries"][child] = {}
for key in unfiltered_map["entries"][child]:
if key != "start_date":
filtered_map["entries"][child][key] = unfiltered_map["entries"][child][key]
else:
log.debug(u"Filtering out:%s with start_date: %s", child, unfiltered_map["entries"][child]["start_date"])
else:
if unfiltered_map["subcategories"][child]["start_date"] < now:
filtered_map["children"].append(child)
filtered_map["subcategories"][child] = {}
unfiltered_queue.append(unfiltered_map["subcategories"][child])
filtered_queue.append(filtered_map["subcategories"][child])
return result_map
def _sort_map_entries(category_map, sort_alpha):
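    """Recursively fill in each node's "children" list, ordered by sort_key (falling back to the entry title when sort_alpha is set)."""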
things = []
for title, entry in category_map["entries"].items():
        if entry["sort_key"] is None and sort_alpha:
entry["sort_key"] = title
things.append((title, entry))
for title, category in category_map["subcategories"].items():
things.append((title, category))
_sort_map_entries(category_map["subcategories"][title], sort_alpha)
category_map["children"] = [x[0] for x in sorted(things, key=lambda x: x[1]["sort_key"])]
def get_discussion_category_map(course):
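    """Build the nested category/topic map for the course's discussion tab, combining inline discussion modules with course-wide discussion_topics, sorted and filtered by start date."""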
course_id = course.id
unexpanded_category_map = defaultdict(list)
modules = _get_discussion_modules(course)
is_course_cohorted = course.is_cohorted
cohorted_discussion_ids = course.cohorted_discussions
for module in modules:
id = module.discussion_id
title = module.discussion_target
sort_key = module.sort_key
category = " / ".join([x.strip() for x in module.discussion_category.split("/")])
#Handle case where module.start is None
entry_start_date = module.start if module.start else datetime.max.replace(tzinfo=pytz.UTC)
unexpanded_category_map[category].append({"title": title, "id": id, "sort_key": sort_key, "start_date": entry_start_date})
category_map = {"entries": defaultdict(dict), "subcategories": defaultdict(dict)}
for category_path, entries in unexpanded_category_map.items():
node = category_map["subcategories"]
path = [x.strip() for x in category_path.split("/")]
# Find the earliest start date for the entries in this category
category_start_date = None
for entry in entries:
if category_start_date is None or entry["start_date"] < category_start_date:
category_start_date = entry["start_date"]
for level in path[:-1]:
if level not in node:
node[level] = {"subcategories": defaultdict(dict),
"entries": defaultdict(dict),
"sort_key": level,
"start_date": category_start_date}
else:
if node[level]["start_date"] > category_start_date:
node[level]["start_date"] = category_start_date
node = node[level]["subcategories"]
level = path[-1]
if level not in node:
node[level] = {"subcategories": defaultdict(dict),
"entries": defaultdict(dict),
"sort_key": level,
"start_date": category_start_date}
else:
if node[level]["start_date"] > category_start_date:
node[level]["start_date"] = category_start_date
for entry in entries:
node[level]["entries"][entry["title"]] = {"id": entry["id"],
"sort_key": entry["sort_key"],
"start_date": entry["start_date"],
"is_cohorted": is_course_cohorted}
# TODO. BUG! : course location is not unique across multiple course runs!
# (I think Kevin already noticed this) Need to send course_id with requests, store it
# in the backend.
for topic, entry in course.discussion_topics.items():
category_map['entries'][topic] = {"id": entry["id"],
"sort_key": entry.get("sort_key", topic),
"start_date": datetime.now(UTC()),
"is_cohorted": is_course_cohorted and entry["id"] in cohorted_discussion_ids}
_sort_map_entries(category_map, course.discussion_sort_alpha)
return _filter_unstarted_categories(category_map)
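# Illustrative shape of the map returned above, inferred from the code paths
# in this module rather than from upstream documentation (start_date values
# are stripped from entries by _filter_unstarted_categories):
#
#   {
#       "children": [...entry titles and subcategory names, sorted...],
#       "entries": {title: {"id", "sort_key", "is_cohorted"}},
#       "subcategories": {name: {...same nested shape...}},
#   }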
class JsonResponse(HttpResponse):
def __init__(self, data=None):
content = json.dumps(data, cls=i4xEncoder)
super(JsonResponse, self).__init__(content,
mimetype='application/json; charset=utf-8')
class JsonError(HttpResponse):
def __init__(self, error_messages=[], status=400):
if isinstance(error_messages, basestring):
error_messages = [error_messages]
content = simplejson.dumps({'errors': error_messages},
indent=2,
ensure_ascii=False)
super(JsonError, self).__init__(content,
mimetype='application/json; charset=utf-8', status=status)
class HtmlResponse(HttpResponse):
def __init__(self, html=''):
super(HtmlResponse, self).__init__(html, content_type='text/plain')
class ViewNameMiddleware(object):
def process_view(self, request, view_func, view_args, view_kwargs):
request.view_name = view_func.__name__
class QueryCountDebugMiddleware(object):
"""
This middleware will log the number of queries run
and the total time taken for each request (with a
status code of 200). It does not currently support
multi-db setups.
"""
def process_response(self, request, response):
if response.status_code == 200:
total_time = 0
for query in connection.queries:
query_time = query.get('time')
if query_time is None:
# django-debug-toolbar monkeypatches the connection
# cursor wrapper and adds extra information in each
# item in connection.queries. The query time is stored
# under the key "duration" rather than "time" and is
# in milliseconds, not seconds.
query_time = query.get('duration', 0) / 1000
total_time += float(query_time)
log.info('%s queries run, total %s seconds' % (len(connection.queries), total_time))
return response
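# Usage note (an assumption about deployment, not taken from this file): this
# is old-style Django middleware, so it would typically be enabled by adding
# its dotted path to MIDDLEWARE_CLASSES in a development settings module.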
def get_ability(course_id, content, user):
return {
'editable': check_permissions_by_view(user, course_id, content, "update_thread" if content['type'] == 'thread' else "update_comment"),
'can_reply': check_permissions_by_view(user, course_id, content, "create_comment" if content['type'] == 'thread' else "create_sub_comment"),
'can_delete': check_permissions_by_view(user, course_id, content, "delete_thread" if content['type'] == 'thread' else "delete_comment"),
'can_openclose': check_permissions_by_view(user, course_id, content, "openclose_thread") if content['type'] == 'thread' else False,
'can_vote': check_permissions_by_view(user, course_id, content, "vote_for_thread" if content['type'] == 'thread' else "vote_for_comment"),
}
# TODO: RENAME
def get_annotated_content_info(course_id, content, user, user_info):
"""
Get metadata for an individual content (thread or comment)
"""
voted = ''
if content['id'] in user_info['upvoted_ids']:
voted = 'up'
elif content['id'] in user_info['downvoted_ids']:
voted = 'down'
return {
'voted': voted,
'subscribed': content['id'] in user_info['subscribed_thread_ids'],
'ability': get_ability(course_id, content, user),
}
# TODO: RENAME
def get_annotated_content_infos(course_id, thread, user, user_info):
"""
Get metadata for a thread and its children
"""
infos = {}
def annotate(content):
infos[str(content['id'])] = get_annotated_content_info(course_id, content, user, user_info)
for child in (
content.get('children', []) +
content.get('endorsed_responses', []) +
content.get('non_endorsed_responses', [])
):
annotate(child)
annotate(thread)
return infos
def get_metadata_for_threads(course_id, threads, user, user_info):
def infogetter(thread):
return get_annotated_content_infos(course_id, thread, user, user_info)
metadata = reduce(merge_dict, map(infogetter, threads), {})
return metadata
# put this method in utils.py to avoid circular import dependency between helpers and mustache_helpers
def render_mustache(template_name, dictionary, *args, **kwargs):
template = lookup_template('main', template_name).source
return pystache.render(template, dictionary)
def permalink(content):
if isinstance(content['course_id'], CourseKey):
course_id = content['course_id'].to_deprecated_string()
else:
course_id = content['course_id']
if content['type'] == 'thread':
return reverse('django_comment_client.forum.views.single_thread',
args=[course_id, content['commentable_id'], content['id']])
else:
return reverse('django_comment_client.forum.views.single_thread',
args=[course_id, content['commentable_id'], content['thread_id']]) + '#' + content['id']
def extend_content(content):
roles = {}
if content.get('user_id'):
try:
user = User.objects.get(pk=content['user_id'])
roles = dict(('name', role.name.lower()) for role in user.roles.filter(course_id=content['course_id']))
except User.DoesNotExist:
log.error('User ID {0} in comment content {1} but not in our DB.'.format(content.get('user_id'), content.get('id')))
content_info = {
'displayed_title': content.get('highlighted_title') or content.get('title', ''),
'displayed_body': content.get('highlighted_body') or content.get('body', ''),
'permalink': permalink(content),
'roles': roles,
'updated': content['created_at'] != content['updated_at'],
}
return merge_dict(content, content_info)
def add_courseware_context(content_list, course):
id_map = _get_discussion_id_map(course)
for content in content_list:
commentable_id = content['commentable_id']
if commentable_id in id_map:
location = id_map[commentable_id]["location"].to_deprecated_string()
title = id_map[commentable_id]["title"]
url = reverse('jump_to', kwargs={"course_id": course.id.to_deprecated_string(),
"location": location})
content.update({"courseware_url": url, "courseware_title": title})
def prepare_content(content, course_key, is_staff=False):
"""
This function is used to pre-process thread and comment models in various
ways before adding them to the HTTP response. This includes fixing empty
attribute fields, enforcing author anonymity, and enriching metadata around
group ownership and response endorsement.
@TODO: not all response pre-processing steps are currently integrated into
this function.
"""
fields = [
'id', 'title', 'body', 'course_id', 'anonymous', 'anonymous_to_peers',
'endorsed', 'parent_id', 'thread_id', 'votes', 'closed', 'created_at',
'updated_at', 'depth', 'type', 'commentable_id', 'comments_count',
'at_position_list', 'children', 'highlighted_title', 'highlighted_body',
'courseware_title', 'courseware_url', 'unread_comments_count',
'read', 'group_id', 'group_name', 'pinned', 'abuse_flaggers',
'stats', 'resp_skip', 'resp_limit', 'resp_total', 'thread_type',
'endorsed_responses', 'non_endorsed_responses', 'non_endorsed_resp_total',
'endorsement',
]
if (content.get('anonymous') is False) and ((content.get('anonymous_to_peers') is False) or is_staff):
fields += ['username', 'user_id']
content = strip_none(extract(content, fields))
if content.get("endorsement"):
endorsement = content["endorsement"]
endorser = None
if endorsement["user_id"]:
try:
endorser = User.objects.get(pk=endorsement["user_id"])
except User.DoesNotExist:
log.error("User ID {0} in endorsement for comment {1} but not in our DB.".format(
content.get('user_id'),
content.get('id'))
)
# Only reveal endorser if requester can see author or if endorser is staff
if (
endorser and
            ("username" in fields or cached_has_permission(endorser, "endorse_comment", course_key))
):
endorsement["username"] = endorser.username
else:
del endorsement["user_id"]
for child_content_key in ["children", "endorsed_responses", "non_endorsed_responses"]:
if child_content_key in content:
children = [
prepare_content(child, course_key, is_staff) for child in content[child_content_key]
]
content[child_content_key] = children
# Augment the specified thread info to include the group name if a group id is present.
if content.get('group_id') is not None:
content['group_name'] = get_cohort_by_id(course_key, content.get('group_id')).name
return content
def get_group_id_for_comments_service(request, course_key, commentable_id=None):
"""
Given a user requesting content within a `commentable_id`, determine the
group_id which should be passed to the comments service.
Returns:
int: the group_id to pass to the comments service or None if nothing
should be passed
Raises:
ValueError if the requested group_id is invalid
"""
if commentable_id is None or is_commentable_cohorted(course_key, commentable_id):
if request.method == "GET":
requested_group_id = request.GET.get('group_id')
elif request.method == "POST":
requested_group_id = request.POST.get('group_id')
if cached_has_permission(request.user, "see_all_cohorts", course_key):
if not requested_group_id:
return None
try:
group_id = int(requested_group_id)
get_cohort_by_id(course_key, group_id)
except CourseUserGroup.DoesNotExist:
raise ValueError
else:
# regular users always query with their own id.
group_id = get_cohort_id(request.user, course_key)
return group_id
else:
# Never pass a group_id to the comments service for a non-cohorted
# commentable
return None
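# Illustrative call site (assumed; the real view wiring lives outside this
# file). The helper returns a group_id to forward to the comments service,
# returns None when no group should be passed, or raises ValueError for an
# invalid requested group:
#
#   try:
#       group_id = get_group_id_for_comments_service(request, course_key, commentable_id)
#   except ValueError:
#       return HttpResponseBadRequest("Invalid group_id")
#   if group_id is not None:
#       query_params["group_id"] = group_id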
|
LearnEra/LearnEraPlaftform
|
lms/djangoapps/django_comment_client/utils.py
|
Python
|
agpl-3.0
| 18,621 | 0.003007 |