text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from ...core.tags.registry import register
from .forms import LikeForm
@register.inclusion_tag('spirit/comment/like/_form.html')
def render_like_form(comment, like, next=None):
form = LikeForm()
return {'form': form, 'comment_id': comment.pk, 'like': like, 'next': next}
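# Hedged usage sketch (not part of this file): the tag-library name below is
# assumed from Spirit's tag registry and may differ in the actual templates.
# {% load spirit_tags %}
# {% render_like_form comment like %}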
| nitely/Spirit | spirit/comment/like/tags.py | Python | mit | 306 | 0 |
#
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import json
import libvirt
import netaddr
import os
import os.path
import subprocess
from lxml import etree
from ..driverbase import VirtDeployDriverBase
from ..errors import InstanceNotFound
from ..errors import VirtDeployException
from ..utils import execute
from ..utils import random_password
DEFAULT_NET = 'default'
DEFAULT_POOL = 'default'
BASE_FORMAT = 'qcow2'
BASE_SIZE = '20G'
INSTANCE_DEFAULTS = {
'cpus': 2,
'memory': 1024,
'arch': 'x86_64',
'network': DEFAULT_NET,
'pool': DEFAULT_POOL,
'password': None,
}
_NET_ADD_LAST = libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST
_NET_MODIFY = libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY
_NET_DELETE = libvirt.VIR_NETWORK_UPDATE_COMMAND_DELETE
_NET_DNS_HOST = libvirt.VIR_NETWORK_SECTION_DNS_HOST
_NET_DHCP_HOST = libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST
_NET_UPDATE_FLAGS = (
libvirt.VIR_NETWORK_UPDATE_AFFECT_CONFIG |
libvirt.VIR_NETWORK_UPDATE_AFFECT_LIVE
)
_IMAGE_OS_TABLE = {
'centos-6': 'centos6.5', # TODO: fix versions
'centos-7.1': 'centos7.0', # TODO: fix versions
'centos-7.2': 'centos7.0', # TODO: fix versions
'rhel-6.7': 'rhel6', # TODO: fix versions
'rhel-7.2': 'rhel7', # TODO: fix versions
}
class VirtDeployLibvirtDriver(VirtDeployDriverBase):
def __init__(self, uri='qemu:///system'):
self._uri = uri
def _libvirt_open(self):
def libvirt_callback(ctx, err):
pass # add logging only when required
libvirt.registerErrorHandler(libvirt_callback, ctx=None)
return libvirt.open(self._uri)
def template_list(self):
templates = _get_virt_templates()
if templates['version'] != 1:
raise VirtDeployException('Unsupported template list version')
return [{'id': x['os-version'], 'name': x['full-name']}
for x in templates['templates']]
def instance_create(self, vmid, template, **kwargs):
kwargs = dict(INSTANCE_DEFAULTS.items() + kwargs.items())
name = '{0}-{1}-{2}'.format(vmid, template, kwargs['arch'])
image = '{0}.qcow2'.format(name)
conn = self._libvirt_open()
pool = conn.storagePoolLookupByName(kwargs['pool'])
net = conn.networkLookupByName(kwargs['network'])
repository = _get_pool_path(pool)
path = os.path.join(repository, image)
if os.path.exists(path):
raise OSError(os.errno.EEXIST, "Image already exists")
base = _create_base(template, kwargs['arch'], repository)
execute(('qemu-img', 'create', '-f', 'qcow2', '-b', base, image),
cwd=repository)
hostname = 'vm-{0}'.format(vmid)
domainname = _get_network_domainname(net)
if domainname is None:
fqdn = hostname
else:
fqdn = '{0}.{1}'.format(hostname, domainname)
if kwargs['password'] is None:
kwargs['password'] = random_password()
password_string = 'password:{0}'.format(kwargs['password'])
execute(('virt-customize',
'-a', path,
'--hostname', fqdn,
'--root-password', password_string))
network = 'network={0}'.format(kwargs['network'])
try:
conn.nwfilterLookupByName('clean-traffic')
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_NO_NWFILTER:
raise
else:
network += ',filterref=clean-traffic'
disk = 'path={0},format=qcow2,bus=scsi,discard=unmap'.format(path)
channel = 'unix,name=org.qemu.guest_agent.0'
execute(('virt-install',
'--quiet',
'--connect={0}'.format(self._uri),
'--name', name,
'--cpu', 'host-model-only,+vmx',
'--vcpus', str(kwargs['cpus']),
'--memory', str(kwargs['memory']),
'--controller', 'scsi,model=virtio-scsi',
'--disk', disk,
'--network', network,
'--graphics', 'spice',
'--channel', channel,
'--os-variant', _get_image_os(template),
'--import',
'--noautoconsole',
'--noreboot'))
netmac = _get_domain_mac_addresses(_get_domain(conn, name)).next()
ipaddress = _new_network_ipaddress(net)
# TODO: fix race between _new_network_ipaddress and ip reservation
_add_network_host(net, hostname, ipaddress)
_add_network_dhcp_host(net, hostname, netmac['mac'], ipaddress)
return {
'name': name,
'password': kwargs['password'],
'mac': netmac['mac'],
'hostname': fqdn,
'ipaddress': ipaddress,
}
def instance_address(self, vmid, network=None):
conn = self._libvirt_open()
dom = _get_domain(conn, vmid)
netmacs = _get_domain_macs_by_network(dom)
if network:
netmacs = {k: v for k, v in netmacs.iteritems() if k == network}
addresses = set()
for name, macs in netmacs.iteritems():
net = conn.networkLookupByName(name)
for lease in _get_network_dhcp_leases(net):
if lease['mac'] in macs:
addresses.add(lease['ip'])
return list(addresses)
def instance_start(self, vmid):
dom = _get_domain(self._libvirt_open(), vmid)
try:
dom.create()
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID:
raise
def instance_stop(self, vmid):
dom = _get_domain(self._libvirt_open(), vmid)
try:
dom.shutdownFlags(
libvirt.VIR_DOMAIN_SHUTDOWN_GUEST_AGENT |
libvirt.VIR_DOMAIN_SHUTDOWN_ACPI_POWER_BTN
)
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID:
raise
def instance_delete(self, vmid):
conn = self._libvirt_open()
dom = _get_domain(conn, vmid)
try:
dom.destroy()
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID:
raise
xmldesc = etree.fromstring(dom.XMLDesc())
for disk in xmldesc.iterfind('./devices/disk/source'):
try:
os.remove(disk.get('file'))
except OSError as e:
if e.errno != os.errno.ENOENT:
raise
netmacs = _get_domain_macs_by_network(dom)
for network, macs in netmacs.iteritems():
net = conn.networkLookupByName(network)
for x in _get_network_dhcp_hosts(net):
if x['mac'] in macs:
_del_network_host(net, x['name'])
_del_network_dhcp_host(net, x['name'])
dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
def _get_image_os(image):
try:
return _IMAGE_OS_TABLE[image]
except KeyError:
return image.replace('-', '')
def _create_base(template, arch, repository):
name = '_{0}-{1}.{2}'.format(template, arch, BASE_FORMAT)
path = os.path.join(repository, name)
if not os.path.exists(path):
execute(('virt-builder', template,
'-o', path,
'--size', BASE_SIZE,
'--format', BASE_FORMAT,
'--arch', arch,
'--root-password', 'locked:disabled'))
# As mentioned in the virt-builder man page, section "CLONES", the
# resulting image should be cleaned before being used as a template.
# TODO: handle half-baked templates
execute(('virt-sysprep', '-a', path))
return name
def _get_virt_templates():
stdout, _ = execute(('virt-builder', '-l', '--list-format', 'json'),
stdout=subprocess.PIPE)
return json.loads(stdout)
def _get_domain(conn, name):
try:
return conn.lookupByName(name)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
raise InstanceNotFound(name)
raise
def _get_domain_mac_addresses(dom):
xmldesc = etree.fromstring(dom.XMLDesc())
netxpath = './devices/interface[@type="network"]'
for iface in xmldesc.iterfind(netxpath):
network = iface.find('./source').get('network')
mac = iface.find('./mac').get('address')
yield {'mac': mac, 'network': network}
def _get_domain_macs_by_network(dom):
netmacs = {}
for x in _get_domain_mac_addresses(dom):
netmacs.setdefault(x['network'], []).append(x['mac'])
return netmacs
def _get_pool_path(pool):
xmldesc = etree.fromstring(pool.XMLDesc())
for x in xmldesc.iterfind('.[@type="dir"]/target/path'):
return x.text
raise OSError(os.errno.ENOENT, 'Path not found for pool')
def _get_network_domainname(net):
xmldesc = etree.fromstring(net.XMLDesc())
for domain in xmldesc.iterfind('./domain'):
return domain.get('name')
def _add_network_host(net, hostname, ipaddress):
xmlhost = etree.Element('host')
xmlhost.set('ip', ipaddress)
etree.SubElement(xmlhost, 'hostname').text = hostname
# Attempt to delete if present
_del_network_host(net, hostname)
net.update(_NET_ADD_LAST, _NET_DNS_HOST, 0, etree.tostring(xmlhost),
_NET_UPDATE_FLAGS)
def _del_network_host(net, hostname):
xmlhost = etree.Element('host')
etree.SubElement(xmlhost, 'hostname').text = hostname
try:
net.update(_NET_DELETE, _NET_DNS_HOST, 0, etree.tostring(xmlhost),
_NET_UPDATE_FLAGS)
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID:
raise
def _add_network_dhcp_host(net, hostname, mac, ipaddress):
xmlhost = etree.Element('host')
xmlhost.set('mac', mac)
xmlhost.set('name', hostname)
xmlhost.set('ip', ipaddress)
# Attempt to delete if present
_del_network_dhcp_host(net, hostname)
net.update(_NET_ADD_LAST, _NET_DHCP_HOST, 0, etree.tostring(xmlhost),
_NET_UPDATE_FLAGS)
def _del_network_dhcp_host(net, hostname):
xmlhost = etree.Element('host')
xmlhost.set('name', hostname)
try:
net.update(_NET_DELETE, _NET_DHCP_HOST, 0, etree.tostring(xmlhost),
_NET_UPDATE_FLAGS)
except libvirt.libvirtError as e:
if e.get_error_code() != libvirt.VIR_ERR_OPERATION_INVALID:
raise
def _get_network_dhcp_hosts(net):
xmldesc = etree.fromstring(net.XMLDesc())
for x in xmldesc.iterfind('./ip/dhcp/host'):
yield {'name': x.get('name'), 'mac': x.get('mac'),
'ip': x.get('ip')}
def _get_network_dhcp_leases(net):
for x in _get_network_dhcp_hosts(net):
yield x
for x in net.DHCPLeases():
yield {'name': x['hostname'], 'mac': x['mac'],
'ip': x['ipaddr']}
def _new_network_ipaddress(net):
xmldesc = etree.fromstring(net.XMLDesc())
hosts = _get_network_dhcp_leases(net)
addresses = set(netaddr.IPAddress(x['ip']) for x in hosts)
localip = xmldesc.find('./ip').get('address')
netmask = xmldesc.find('./ip').get('netmask')
addresses.add(netaddr.IPAddress(localip))
for ip in netaddr.IPNetwork(localip, netmask)[1:-1]:
if ip not in addresses:
return str(ip)
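# Hedged usage sketch (not part of this module; the template name and sizes
# below are assumed example values):
# driver = VirtDeployLibvirtDriver(uri='qemu:///system')
# info = driver.instance_create('test01', 'fedora-21', memory=2048)
# print(info['hostname'], info['ipaddress'], info['password'])
# driver.instance_start(info['name'])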
| lyarwood/virt-deploy | virtdeploy/drivers/libvirt.py | Python | gpl-2.0 | 12,400 | 0 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlDevelGlobaldestruction(PerlPackage):
"""Makes Perl's global destruction less tricky to deal with"""
homepage = "http://search.cpan.org/~haarg/Devel-GlobalDestruction-0.14/lib/Devel/GlobalDestruction.pm"
url = "http://search.cpan.org/CPAN/authors/id/H/HA/HAARG/Devel-GlobalDestruction-0.14.tar.gz"
version('0.14', sha256='34b8a5f29991311468fe6913cadaba75fd5d2b0b3ee3bb41fe5b53efab9154ab')
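# Typical Spack CLI usage for this package (sketch, not part of the package file):
#   spack install perl-devel-globaldestruction
#   spack info perl-devel-globaldestruction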
| iulian787/spack | var/spack/repos/builtin/packages/perl-devel-globaldestruction/package.py | Python | lgpl-2.1 | 641 | 0.00624 |
from lib.loghelper import Logger
import numpy as np
def visitCoverMetrics(visitMetrics, visitobj):
visit = visitobj['visit']
riparianStructures = visitobj['riparianStructures']
percentBigTreeCover(visitMetrics, riparianStructures)
percentCanopyNoCover(visitMetrics, riparianStructures)
percentGroundCover(visitMetrics, riparianStructures)
percentGroundCoverNoCover(visitMetrics, riparianStructures)
percentUnderstoryCover(visitMetrics, riparianStructures)
percentWoodyCover(visitMetrics, riparianStructures)
percentNonWoodyGroundCover(visitMetrics, visit, riparianStructures)
percentConiferousCover(visitMetrics, visit, riparianStructures)
def percentConiferousCover(visitMetrics, visit, riparianStructures):
if visit["iterationID"] == 1:
visitMetrics["PercentConiferousCover"] = getConiferousScore2011(riparianStructures)
else:
visitMetrics["PercentConiferousCover"] = getConiferousScore2012(riparianStructures)
def getConiferousScore2012(riparianStructures):
if riparianStructures is None:
return None
inScope = []
inScope.extend([s["value"]["LBCanopyWoodyConiferous"] + s["value"]["LBUnderstoryWoodyConiferous"] for s in riparianStructures["values"] if s["value"]["LBCanopyWoodyConiferous"] is not None and s["value"]["LBUnderstoryWoodyConiferous"] is not None])
inScope.extend([s["value"]["RBCanopyWoodyConiferous"] + s["value"]["RBUnderstoryWoodyConiferous"] for s in riparianStructures["values"] if s["value"]["RBCanopyWoodyConiferous"] is not None and s["value"]["RBUnderstoryWoodyConiferous"] is not None])
if inScope.__len__() > 0:
return np.mean(inScope)
else:
return None
def getConiferousScore2011(riparianStructures):
if riparianStructures is None:
return None
count = 0
result = 0
multiplicationFactors = {"Coniferous": 1, "Mixed": 0.5 }
for rec in [r for r in riparianStructures["values"]]:
if rec["value"]["LBCanopyBigTrees"] is not None and rec["value"]["LBCanopySmallTrees"] is not None and rec["value"]["LBCanopyVegetationType"] is not None:
lbfactor = 0
if rec["value"]["LBCanopyVegetationType"] in multiplicationFactors:
lbfactor = multiplicationFactors[rec["value"]["LBCanopyVegetationType"]]
lbunderstoryfactor = 0
if rec["value"]["LBUnderstoryVegetationType"] is not None and rec["value"]["LBUnderstoryVegetationType"] in multiplicationFactors:
lbunderstoryfactor = multiplicationFactors[rec["value"]["LBUnderstoryVegetationType"]]
result = result + (rec["value"]["LBCanopyBigTrees"] + rec["value"]["LBCanopySmallTrees"]) * lbfactor
lbunderstoryshrubs = 0
if rec["value"]["LBUnderstoryWoodyShrubs"] is not None:
lbunderstoryshrubs = rec["value"]["LBUnderstoryWoodyShrubs"]
result = result + (lbunderstoryshrubs * lbunderstoryfactor)
count = count + 1
if rec["value"]["RBCanopyBigTrees"] is not None and rec["value"]["RBCanopySmallTrees"] is not None and rec["value"]["RBCanopyVegetationType"] is not None:
rbfactor = 0
if rec["value"]["RBCanopyVegetationType"] in multiplicationFactors:
rbfactor = multiplicationFactors[rec["value"]["RBCanopyVegetationType"]]
rbunderstoryfactor = 0
if rec["value"]["RBUnderstoryVegetationType"] is not None and rec["value"]["RBUnderstoryVegetationType"] in multiplicationFactors:
rbunderstoryfactor = multiplicationFactors[rec["value"]["RBUnderstoryVegetationType"]]
result = result + (rec["value"]["RBCanopyBigTrees"] + rec["value"]["RBCanopySmallTrees"]) * rbfactor
rbunderstoryshrubs = 0
if rec["value"]["RBUnderstoryWoodyShrubs"] is not None:
rbunderstoryshrubs = rec["value"]["RBUnderstoryWoodyShrubs"]
result = result + (rbunderstoryshrubs * rbunderstoryfactor)
count = count + 1
if count == 0:
return None
return result / count
def percentBigTreeCover(visitMetrics, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentBigTreeCover"] = None
return
inScope = []
inScope.extend([s["value"]["LBCanopyBigTrees"] for s in riparianStructures["values"] if s["value"]["LBCanopyBigTrees"] is not None])
inScope.extend([s["value"]["RBCanopyBigTrees"] for s in riparianStructures["values"] if s["value"]["RBCanopyBigTrees"] is not None])
if inScope.__len__() > 0:
visitMetrics["PercentBigTreeCover"] = np.mean(inScope)
else:
visitMetrics["PercentBigTreeCover"] = None
def percentUnderstoryCover(visitMetrics, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentUnderstoryNoCover"] = None
visitMetrics["PercentUnderstoryCover"] = None
return
inScope = []
inScope.extend([s["value"]["LBUnderstoryCover"] for s in riparianStructures["values"] if s["value"]["LBUnderstoryCover"] is not None])
inScope.extend([s["value"]["RBUnderstoryCover"] for s in riparianStructures["values"] if s["value"]["RBUnderstoryCover"] is not None])
if inScope.__len__() > 0:
understoryCover = np.mean(inScope)
visitMetrics["PercentUnderstoryCover"] = understoryCover
visitMetrics["PercentUnderstoryNoCover"] = 100 - understoryCover
else:
visitMetrics["PercentUnderstoryCover"] = None
visitMetrics["PercentUnderstoryNoCover"] = None
def percentNonWoodyGroundCover(visitMetrics, visit, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentNonWoodyGroundCover"] = None
return
inScope = []
if visit["iterationID"] == 1:
inScope.extend([s["value"]["LBGroundcoverNonWoodyShrubs"] + s["value"]["LBUnderstoryNonWoodyShrubs"] for s in riparianStructures["values"] if s["value"]["LBGroundcoverNonWoodyShrubs"] is not None and s["value"]["LBUnderstoryNonWoodyShrubs"] is not None])
inScope.extend([s["value"]["RBGroundcoverNonWoodyShurbs"] + s["value"]["RBUnderstoryNonWoodyShrubs"] for s in riparianStructures["values"] if s["value"]["RBGroundcoverNonWoodyShurbs"] is not None and s["value"]["RBUnderstoryNonWoodyShrubs"] is not None])
else:
inScope.extend([s["value"]["LBUnderstoryNonWoodyForbesGrasses"] + s["value"]["LBGroundcoverNonWoodyForbesGrasses"] for s in riparianStructures["values"] if s["value"]["LBUnderstoryNonWoodyForbesGrasses"] is not None and s["value"]["LBGroundcoverNonWoodyForbesGrasses"] is not None])
inScope.extend([s["value"]["RBUnderstoryNonWoodyForbesGrasses"] + s["value"]["RBGroundcoverNonWoodyForbesGrasses"] for s in riparianStructures["values"] if s["value"]["RBUnderstoryNonWoodyForbesGrasses"] is not None and s["value"]["RBGroundcoverNonWoodyForbesGrasses"] is not None])
if inScope.__len__() > 0:
visitMetrics["PercentNonWoodyGroundCover"] = np.mean(inScope)
else:
visitMetrics["PercentNonWoodyGroundCover"] = None
def percentWoodyCover(visitMetrics, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentWoodyCover"] = None
return
inScope = []
inScope.extend([s["value"]["LBWoodyCover"] for s in riparianStructures["values"] if s["value"]["LBWoodyCover"] is not None])
inScope.extend([s["value"]["RBWoodyCover"] for s in riparianStructures["values"] if s["value"]["RBWoodyCover"] is not None])
if inScope.__len__() > 0:
visitMetrics["PercentWoodyCover"] = np.mean(inScope)
else:
visitMetrics["PercentWoodyCover"] = None
def percentGroundCover(visitMetrics, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentGroundCover"] = None
return
inScope = []
inScope.extend([s["value"]["LBGroundCover"] for s in riparianStructures["values"] if s["value"]["LBGroundCover"] is not None])
inScope.extend([s["value"]["RBGroundCover"] for s in riparianStructures["values"] if s["value"]["RBGroundCover"] is not None])
if inScope.__len__() > 0:
visitMetrics["PercentGroundCover"] = np.mean(inScope)
else:
visitMetrics["PercentGroundCover"] = None
def percentGroundCoverNoCover(visitMetrics, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentGroundCoverNoCover"] = None
return
inScope = []
inScope.extend([s["value"]["LBGroundCoverNoCover"] for s in riparianStructures["values"] if s["value"]["LBGroundCoverNoCover"] is not None])
inScope.extend([s["value"]["RBGroundCoverNoCover"] for s in riparianStructures["values"] if s["value"]["RBGroundCoverNoCover"] is not None])
if inScope.__len__() > 0:
visitMetrics["PercentGroundCoverNoCover"] = np.mean(inScope)
else:
visitMetrics["PercentGroundCoverNoCover"] = None
def percentCanopyNoCover(visitMetrics, riparianStructures):
if riparianStructures is None:
visitMetrics["PercentCanopyNoCover"] = None
return
inScope = []
inScope.extend([s["value"]["LBCanopyBigTrees"] + s["value"]["LBCanopySmallTrees"] for s in riparianStructures["values"] if s["value"]["LBCanopyBigTrees"] is not None and s["value"]["LBCanopySmallTrees"] is not None])
inScope.extend([s["value"]["RBCanopyBigTrees"] + s["value"]["RBCanopySmallTrees"] for s in riparianStructures["values"] if s["value"]["RBCanopyBigTrees"] is not None and s["value"]["RBCanopySmallTrees"] is not None])
if inScope.__len__() > 0:
visitMetrics["PercentCanopyNoCover"] = 100 - np.mean(inScope)
else:
visitMetrics["PercentCanopyNoCover"] = None
| SouthForkResearch/CHaMP_Metrics | tools/auxmetrics/metriclib/coverMetrics.py | Python | gpl-3.0 | 9,707 | 0.003709 |
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API requests to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
If the configured region is 'auto', the AWS_REGION or AWS_DEFAULT_REGION
environment variable must also be set.
This script also assumes that there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using Eucalyptus, you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
You can filter for specific EC2 instances by creating an environment variable
named EC2_INSTANCE_FILTERS, which has the same format as the instance_filters
entry documented in ec2.ini. For example, to find all hosts whose name begins
with 'webserver', one might use:
export EC2_INSTANCE_FILTERS='tag:Name=webserver*'
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_block_devices
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
When destination_format and destination_format_tags are specified
the destination_format can be built from the instance tags and attributes.
The behavior will first check the user defined tags, then proceed to
check instance attributes, and finally if neither are found 'nil' will
be used instead.
'my_instance': {
'region': 'us-east-1', # attribute
'availability_zone': 'us-east-1a', # attribute
'private_dns_name': '172.31.0.1', # attribute
'ec2_tag_deployment': 'blue', # tag
'ec2_tag_clusterid': 'ansible', # tag
'ec2_tag_Name': 'webserver', # tag
...
}
Inside of the ec2.ini file the following settings are specified:
...
destination_format: {0}-{1}-{2}-{3}
destination_format_tags: Name,clusterid,deployment,private_dns_name
...
These settings would produce a destination_format as the following:
'webserver-ansible-blue-172.31.0.1'
'''
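# Example invocations (sketch; the flags are those defined in parse_cli_args below):
#   ./ec2.py --list
#   ./ec2.py --refresh-cache
#   ./ec2.py --host <hostname-or-address-from-the-inventory>
#   ./ec2.py --boto-profile prod --list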
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
from boto import sts
import six
from ansible.module_utils import ec2 as ec2_utils
HAS_BOTO3 = False
try:
import boto3 # noqa
HAS_BOTO3 = True
except ImportError:
pass
from six.moves import configparser
from collections import defaultdict
import json
DEFAULTS = {
'all_elasticache_clusters': 'False',
'all_elasticache_nodes': 'False',
'all_elasticache_replication_groups': 'False',
'all_instances': 'False',
'all_rds_instances': 'False',
'aws_access_key_id': None,
'aws_secret_access_key': None,
'aws_security_token': None,
'boto_profile': None,
'cache_max_age': '300',
'cache_path': '~/.ansible/tmp',
'destination_variable': 'public_dns_name',
'elasticache': 'True',
'eucalyptus': 'False',
'eucalyptus_host': None,
'expand_csv_tags': 'False',
'group_by_ami_id': 'True',
'group_by_availability_zone': 'True',
'group_by_aws_account': 'False',
'group_by_elasticache_cluster': 'True',
'group_by_elasticache_engine': 'True',
'group_by_elasticache_parameter_group': 'True',
'group_by_elasticache_replication_group': 'True',
'group_by_instance_id': 'True',
'group_by_instance_state': 'False',
'group_by_instance_type': 'True',
'group_by_key_pair': 'True',
'group_by_platform': 'True',
'group_by_rds_engine': 'True',
'group_by_rds_parameter_group': 'True',
'group_by_region': 'True',
'group_by_route53_names': 'True',
'group_by_security_group': 'True',
'group_by_tag_keys': 'True',
'group_by_tag_none': 'True',
'group_by_vpc_id': 'True',
'hostname_variable': None,
'iam_role': None,
'include_rds_clusters': 'False',
'nested_groups': 'False',
'pattern_exclude': None,
'pattern_include': None,
'rds': 'False',
'regions': 'all',
'regions_exclude': 'us-gov-west-1, cn-north-1',
'replace_dash_in_groups': 'True',
'route53': 'False',
'route53_excluded_zones': '',
'route53_hostnames': None,
'stack_filters': 'False',
'vpc_destination_variable': 'ip_address'
}
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
self.aws_account_id = None
# Index of hostname (address) to instance ID
self.index = {}
# Boto profile to use (if any)
self.boto_profile = None
# AWS credentials.
self.credentials = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Make sure that profile_name is not passed at all if not set
# as pre 2.24 boto will fall over otherwise
if self.boto_profile:
if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
scriptbasename = __file__
scriptbasename = os.path.basename(scriptbasename)
scriptbasename = scriptbasename.replace('.py', '')
defaults = {
'ec2': {
'ini_fallback': os.path.join(os.path.dirname(__file__), 'ec2.ini'),
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
}
}
if six.PY3:
config = configparser.ConfigParser(DEFAULTS)
else:
config = configparser.SafeConfigParser(DEFAULTS)
ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path'])
ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path))
if not os.path.isfile(ec2_ini_path):
ec2_ini_path = os.path.expanduser(defaults['ec2']['ini_fallback'])
if os.path.isfile(ec2_ini_path):
config.read(ec2_ini_path)
# Add empty sections if they don't exist
try:
config.add_section('ec2')
except configparser.DuplicateSectionError:
pass
try:
config.add_section('credentials')
except configparser.DuplicateSectionError:
pass
# is eucalyptus?
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
config_regions = config.get('ec2', 'regions')
if (config_regions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials)
else:
config_regions_exclude = config.get('ec2', 'regions_exclude')
for region_info in ec2.regions():
if region_info.name not in config_regions_exclude:
self.regions.append(region_info.name)
else:
self.regions = config_regions.split(",")
if 'auto' in self.regions:
env_region = os.environ.get('AWS_REGION')
if env_region is None:
env_region = os.environ.get('AWS_DEFAULT_REGION')
self.regions = [env_region]
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
self.hostname_variable = config.get('ec2', 'hostname_variable')
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
else:
self.destination_format = None
self.destination_format_tags = None
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_hostnames = config.get('ec2', 'route53_hostnames')
self.route53_excluded_zones = []
self.route53_excluded_zones = [a for a in config.get('ec2', 'route53_excluded_zones').split(',') if a]
# Include RDS instances?
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include RDS cluster instances?
self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
# Include ElastiCache instances?
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
self.all_instances = config.getboolean('ec2', 'all_instances')
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
# Return all ElastiCache clusters? (if ElastiCache is enabled)
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
# Return all ElastiCache nodes? (if ElastiCache is enabled)
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
# boto configuration profile (prefer CLI argument then environment variables then config file)
self.boto_profile = self.args.boto_profile or \
os.environ.get('AWS_PROFILE') or \
config.get('ec2', 'boto_profile')
# AWS credentials (prefer environment variables)
if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
os.environ.get('AWS_PROFILE')):
aws_access_key_id = config.get('credentials', 'aws_access_key_id')
aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
aws_security_token = config.get('credentials', 'aws_security_token')
if aws_access_key_id:
self.credentials = {
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key
}
if aws_security_token:
self.credentials['security_token'] = aws_security_token
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_name = 'ansible-ec2'
cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id'))
if cache_id:
cache_name = '%s-%s' % (cache_name, cache_id)
cache_name += '-' + str(abs(hash(__file__)))[1:7]
self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name)
self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name)
self.cache_max_age = config.getint('ec2', 'cache_max_age')
self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
# Configure nested groups instead of flat namespace.
self.nested_groups = config.getboolean('ec2', 'nested_groups')
# Replace dash or not in group names
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
# IAM role to assume for connection
self.iam_role = config.get('ec2', 'iam_role')
# Configure which groups should be created.
group_by_options = [a for a in DEFAULTS if a.startswith('group_by')]
for option in group_by_options:
setattr(self, option, config.getboolean('ec2', option))
# Do we need to just include hosts that match a pattern?
self.pattern_include = config.get('ec2', 'pattern_include')
if self.pattern_include:
self.pattern_include = re.compile(self.pattern_include)
# Do we need to exclude hosts that match a pattern?
self.pattern_exclude = config.get('ec2', 'pattern_exclude')
if self.pattern_exclude:
self.pattern_exclude = re.compile(self.pattern_exclude)
# Do we want to stack multiple filters?
self.stack_filters = config.getboolean('ec2', 'stack_filters')
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = []
if config.has_option('ec2', 'instance_filters') or 'EC2_INSTANCE_FILTERS' in os.environ:
filters = os.getenv('EC2_INSTANCE_FILTERS', config.get('ec2', 'instance_filters') if config.has_option('ec2', 'instance_filters') else '')
if self.stack_filters and '&' in filters:
self.fail_with_error("AND filters along with stack_filter enabled is not supported.\n")
filter_sets = [f for f in filters.split(',') if f]
for filter_set in filter_sets:
filters = {}
filter_set = filter_set.strip()
for instance_filter in filter_set.split("&"):
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
filters[filter_key] = filter_value
self.ec2_instance_filters.append(filters.copy())
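# Illustrative trace (assumed input, not taken from ec2.ini):
#   EC2_INSTANCE_FILTERS='tag:env=prod&instance-state-name=running,tag:env=dev'
# is parsed into:
#   self.ec2_instance_filters == [{'tag:env': 'prod', 'instance-state-name': 'running'},
#                                 {'tag:env': 'dev'}]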
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
for region in self.regions:
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
if self.include_rds_clusters:
self.include_rds_clusters_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
return conn
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args
def connect_to_aws(self, module, region):
connect_args = self.credentials
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args)
if self.iam_role:
sts_conn = sts.connect_to_region(region, **connect_args)
role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory')
connect_args['aws_access_key_id'] = role.credentials.access_key
connect_args['aws_secret_access_key'] = role.credentials.secret_key
connect_args['security_token'] = role.credentials.session_token
conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
return conn
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to get the list of instances in a particular
region '''
try:
conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
if self.stack_filters:
filters_dict = {}
for filters in self.ec2_instance_filters:
filters_dict.update(filters)
reservations.extend(conn.get_all_instances(filters=filters_dict))
else:
for filters in self.ec2_instance_filters:
reservations.extend(conn.get_all_instances(filters=filters))
else:
reservations = conn.get_all_instances()
# Pull the tags back in a second step
# AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
# reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
instance_ids = []
for reservation in reservations:
instance_ids.extend([instance.id for instance in reservation.instances])
max_filter_value = 199
tags = []
for i in range(0, len(instance_ids), max_filter_value):
tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]}))
tags_by_instance_id = defaultdict(dict)
for tag in tags:
tags_by_instance_id[tag.res_id][tag.name] = tag.value
if (not self.aws_account_id) and reservations:
self.aws_account_id = reservations[0].owner_id
for reservation in reservations:
for instance in reservation.instances:
instance.tags = tags_by_instance_id[instance.id]
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
else:
backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances')
def tags_match_filters(self, tags):
''' return True if given tags match configured filters '''
if not self.ec2_instance_filters:
return True
for filters in self.ec2_instance_filters:
for filter_name, filter_value in filters.items():
if filter_name[:4] != 'tag:':
continue
filter_name = filter_name[4:]
if filter_name not in tags:
if self.stack_filters:
return False
continue
if isinstance(filter_value, list):
if self.stack_filters and tags[filter_name] not in filter_value:
return False
if not self.stack_filters and tags[filter_name] in filter_value:
return True
if isinstance(filter_value, six.string_types):
if self.stack_filters and tags[filter_name] != filter_value:
return False
if not self.stack_filters and tags[filter_name] == filter_value:
return True
return self.stack_filters
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to get the list of RDS instances in a particular
region '''
if not HAS_BOTO3:
self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again",
"getting RDS instances")
client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
db_instances = client.describe_db_instances()
try:
conn = self.connect_to_aws(rds, region)
if conn:
marker = None
while True:
instances = conn.get_all_dbinstances(marker=marker)
marker = instances.marker
for index, instance in enumerate(instances):
# Add tags to instances.
instance.arn = db_instances['DBInstances'][index]['DBInstanceArn']
tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList']
instance.tags = {}
for tag in tags:
instance.tags[tag['Key']] = tag['Value']
if self.tags_match_filters(instance.tags):
self.add_rds_instance(instance, region)
if not marker:
break
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
elif e.error_code == "OptInRequired":
error = "RDS hasn't been enabled for this account yet. " \
"You must either log in to the RDS service through the AWS console to enable it, " \
"or set 'rds = False' in ec2.ini"
elif not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
def include_rds_clusters_by_region(self, region):
if not HAS_BOTO3:
self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
"getting RDS clusters")
client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
marker, clusters = '', []
while marker is not None:
resp = client.describe_db_clusters(Marker=marker)
clusters.extend(resp["DBClusters"])
marker = resp.get('Marker', None)
account_id = boto.connect_iam().get_user().arn.split(':')[4]
c_dict = {}
for c in clusters:
# remove these datetime objects as there is no serialisation to json
# currently in place and we don't need the data yet
if 'EarliestRestorableTime' in c:
del c['EarliestRestorableTime']
if 'LatestRestorableTime' in c:
del c['LatestRestorableTime']
if not self.ec2_instance_filters:
matches_filter = True
else:
matches_filter = False
try:
# arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
tags = client.list_tags_for_resource(
ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
c['Tags'] = tags['TagList']
if self.ec2_instance_filters:
for filters in self.ec2_instance_filters:
for filter_key, filter_values in filters.items():
# get AWS tag key e.g. tag:env will be 'env'
tag_name = filter_key.split(":", 1)[1]
# filter_values is a list (if you put multiple values for the same tag name)
matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
if matches_filter:
# it matches a filter, so stop looking for further matches
break
if matches_filter:
break
except Exception as e:
if e.message.find('DBInstanceNotFound') >= 0:
# AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster.
# Ignore errors when trying to find tags for these
pass
# ignore empty clusters caused by AWS bug
if len(c['DBClusterMembers']) == 0:
continue
elif matches_filter:
c_dict[c['DBClusterIdentifier']] = c
self.inventory['db_clusters'] = c_dict
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to get the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
clusters = []
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
_marker = 1
while _marker:
if _marker == 1:
_marker = None
response = conn.describe_cache_clusters(None, None, _marker, True)
_marker = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['Marker']
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
# CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = clusters + response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
except KeyError as e:
error = "ElastiCache query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
elif e.error_code == "OptInRequired":
error = "ElastiCache hasn't been enabled for this account yet. " \
"You must either log in to the ElastiCache service through the AWS console to enable it, " \
"or set 'elasticache = False' in ec2.ini"
elif not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
for cluster in clusters:
self.add_elasticache_cluster(cluster, region)
def get_elasticache_replication_groups_by_region(self, region):
''' Makes an AWS API call to get the list of ElastiCache replication groups
in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
response = conn.describe_replication_groups()
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
# Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
except KeyError as e:
error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for replication_group in replication_groups:
self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = [p for p in boto_paths if os.path.isfile(os.path.expanduser(p))]
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only return instances with desired instance states
if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
# When destination_format and destination_format_tags are specified
# the following code will attempt to find the instance tags first,
# then the instance attributes next, and finally if neither are found
# assign nil for the desired destination format attribute.
if self.destination_format and self.destination_format_tags:
dest_vars = []
inst_tags = getattr(instance, 'tags')
for tag in self.destination_format_tags:
if tag in inst_tags:
dest_vars.append(inst_tags[tag])
elif hasattr(instance, tag):
dest_vars.append(getattr(instance, tag))
else:
dest_vars.append('nil')
dest = self.destination_format.format(*dest_vars)
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
else:
dest = getattr(instance, self.destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.destination_variable, None)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Set the inventory name
hostname = None
if self.hostname_variable:
if self.hostname_variable.startswith('tag_'):
hostname = instance.tags.get(self.hostname_variable[4:], None)
else:
hostname = getattr(instance, self.hostname_variable)
# set the hostname from route53
if self.route53_enabled and self.route53_hostnames:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
if name.endswith(self.route53_hostnames):
hostname = name
# If we can't get a nice hostname, use the destination address
if not hostname:
hostname = dest
# to_safe strips hostname characters like dots, so don't strip route53 hostnames
elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames):
hostname = hostname.lower()
else:
hostname = self.to_safe(hostname).lower()
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(hostname):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(hostname):
return
# Add to index
self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.placement, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
self.push_group(self.inventory, 'zones', instance.placement)
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
self.push(self.inventory, ami_id, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by instance state
if self.group_by_instance_state:
state_name = self.to_safe('instance_state_' + instance.state)
self.push(self.inventory, state_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'instance_states', state_name)
# Inventory: Group by platform
if self.group_by_platform:
if instance.platform:
platform = self.to_safe('platform_' + instance.platform)
else:
platform = self.to_safe('platform_undefined')
self.push(self.inventory, platform, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'platforms', platform)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
self.push(self.inventory, key_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by AWS account ID
if self.group_by_aws_account:
self.push(self.inventory, self.aws_account_id, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'accounts', self.aws_account_id)
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if self.expand_csv_tags and v and ',' in v:
values = map(lambda x: x.strip(), v.split(','))
else:
values = [v]
for v in values:
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
self.push(self.inventory, 'ec2', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances unless all_rds_instances is True
if not self.all_rds_instances and instance.status != 'available':
return
# Select the best destination address
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Set the inventory name
hostname = None
if self.hostname_variable:
if self.hostname_variable.startswith('tag_'):
hostname = instance.tags.get(self.hostname_variable[4:], None)
else:
hostname = getattr(instance, self.hostname_variable)
# If we can't get a nice hostname, use the destination address
if not hostname:
hostname = dest
hostname = self.to_safe(hostname).lower()
# Add to index
self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.availability_zone, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
self.push_group(self.inventory, 'zones', instance.availability_zone)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if self.expand_csv_tags and v and ',' in v:
values = map(lambda x: x.strip(), v.split(','))
else:
values = [v]
for v in values:
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by engine
if self.group_by_rds_engine:
self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
        its nodes are addressable '''
# Only want available clusters unless all_elasticache_clusters is True
if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
return
# Select the best destination address
if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
# Memcached cluster
dest = cluster['ConfigurationEndpoint']['Address']
is_redis = False
else:
            # Redis single node cluster
# Because all Redis clusters are single nodes, we'll merge the
# info from the cluster with info about the node
dest = cluster['CacheNodes'][0]['Endpoint']['Address']
is_redis = True
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, cluster['CacheClusterId']]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[cluster['CacheClusterId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
# Inventory: Group by region
if self.group_by_region and not is_redis:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone and not is_redis:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type and not is_redis:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group and not is_redis:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine and not is_redis:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
# Inventory: Group by parameter group
if self.group_by_elasticache_parameter_group:
self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
# Inventory: Group by replication group
if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
host_info = self.get_host_info_dict_from_describe_dict(cluster)
self.inventory["_meta"]["hostvars"][dest] = host_info
# Add the nodes
for node in cluster['CacheNodes']:
self.add_elasticache_node(node, cluster, region)
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
''' Adds an ElastiCache replication group to the inventory and index '''
# Only want available clusters unless all_elasticache_replication_groups is True
if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
return
# Skip clusters we cannot address (e.g. private VPC subnet or clustered redis)
if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \
replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None:
return
# Select the best destination address (PrimaryEndpoint)
dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
# Add to index
self.index[dest] = [region, replication_group['ReplicationGroupId']]
# Inventory: Group by ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[replication_group['ReplicationGroupId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone (doesn't apply to replication groups)
# Inventory: Group by node type (doesn't apply to replication groups)
# Inventory: Group by VPC (information not available in the current
        # AWS API version for replication groups)
# Inventory: Group by security group (doesn't apply to replication groups)
# Check this value in cluster level
# Inventory: Group by engine (replication groups are always Redis)
if self.group_by_elasticache_engine:
self.push(self.inventory, 'elasticache_redis', dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', 'redis')
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
if self.boto_profile:
r53_conn = route53.Route53Connection(profile_name=self.boto_profile)
else:
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
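    # Illustrative shape of the mapping built above (comment added for clarity;
    # the example values are made up): each resource record value maps to every
    # Route53 record name that points at it, e.g.
    #   self.route53_records['203.0.113.10'] -> {'www.example.com', 'api.example.com'}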
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = ['public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address']
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
# state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
if key == 'ec2__state':
instance_vars['ec2_state'] = instance.state or ''
instance_vars['ec2_state_code'] = instance.state_code
elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif isinstance(value, (int, bool)):
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif value is None:
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
if self.expand_csv_tags and ',' in v:
v = list(map(lambda x: x.strip(), v.split(',')))
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
elif key == 'ec2_block_device_mapping':
instance_vars["ec2_block_devices"] = {}
for k, v in value.items():
instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id
else:
pass
# TODO Product codes if someone finds them useful
# print key
# print type(value)
# print value
instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id
return instance_vars
def get_host_info_dict_from_describe_dict(self, describe_dict):
''' Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. '''
# I really don't agree with prefixing everything with 'ec2'
# because EC2, RDS and ElastiCache are different services.
# I'm just following the pattern used until now to not break any
# compatibility.
host_info = {}
for key in describe_dict:
value = describe_dict[key]
key = self.to_safe('ec2_' + self.uncammelize(key))
# Handle complex types
# Target: Memcached Cache Clusters
if key == 'ec2_configuration_endpoint' and value:
host_info['ec2_configuration_endpoint_address'] = value['Address']
host_info['ec2_configuration_endpoint_port'] = value['Port']
# Target: Cache Nodes and Redis Cache Clusters (single node)
if key == 'ec2_endpoint' and value:
host_info['ec2_endpoint_address'] = value['Address']
host_info['ec2_endpoint_port'] = value['Port']
# Target: Redis Replication Groups
if key == 'ec2_node_groups' and value:
host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
replica_count = 0
for node in value[0]['NodeGroupMembers']:
if node['CurrentRole'] == 'primary':
host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
if key == 'ec2_member_clusters' and value:
host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
# Target: All Cache Clusters
elif key == 'ec2_cache_parameter_group':
host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
# Target: Almost everything
elif key == 'ec2_security_groups':
# Skip if SecurityGroups is None
# (it is possible to have the key defined but no value in it).
if value is not None:
sg_ids = []
for sg in value:
sg_ids.append(sg['SecurityGroupId'])
host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
# Target: Everything
# Preserve booleans and integers
elif isinstance(value, (int, bool)):
host_info[key] = value
# Target: Everything
# Sanitize string values
elif isinstance(value, six.string_types):
host_info[key] = value.strip()
# Target: Everything
# Replace None by an empty string
elif value is None:
host_info[key] = ''
else:
# Remove non-processed complex types
pass
return host_info
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
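    # Illustrative result of push()/push_group() with nested_groups enabled
    # (comment added for clarity; the names are made up): a plain group is a
    # list of hosts, while a group that is also a parent becomes a dict, e.g.
    #   inventory['regions']    -> {'children': ['us-east-1']}
    #   inventory['us-east-1']  -> {'hosts': ['host1'], 'children': ['us-east-1a']}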
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
with open(self.cache_path_cache, 'r') as f:
json_inventory = f.read()
return json_inventory
def load_index_from_cache(self):
        ''' Reads the index from the cache file and sets self.index '''
with open(self.cache_path_index, 'rb') as f:
self.index = json.load(f)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
with open(filename, 'w') as f:
f.write(json_data)
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = r"[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += r"\-"
return re.sub(regex + "]", "_", word)
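    # Illustrative examples for the two helpers above (comment added for
    # clarity): uncammelize('CacheClusterId') returns 'cache_cluster_id', and
    # to_safe('type_t2.micro') returns 'type_t2_micro' (the dot is outside the
    # allowed character set, so it is replaced by an underscore).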
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
if __name__ == '__main__':
# Run the script
Ec2Inventory()
| jimi-c/ansible | contrib/inventory/ec2.py | Python | gpl-3.0 | 72,916 | 0.002373 |
#!/usr/bin/env python
'''
Module to perform various time operations.
Documentation convention from https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
07.07.2016
Loris Foresti
'''
from __future__ import division
from __future__ import print_function
import datetime
import numpy as np
import time
import sys
import math
fmt1 = "%.1f"
fmt2 = "%.2f"
def timestring2datetime(timestring):
'''
Function to convert a time stamp string YYYYmmDDHHMMSS to a datetime object.
Parameters
----------
timestring : str
Time string YYYYmmDDHHMMSS
Returns
-------
timeDate: datetime
Datetime object
'''
#timeDate = datetime.datetime.strptime(timestring,'%Y%m%d%H%M%S')
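    # Note: only year..minute are parsed below; trailing seconds in the string are ignored.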
timeDate = datetime.datetime(int(timestring[0:4]), int(timestring[4:6]), int(timestring[6:8]), int(timestring[8:10]),int(timestring[10:12]))
return(timeDate)
def datetime2timestring(timeDate):
'''
Function to convert datetime object to a time stamp string YYYYmmDDHHMMSS.
Parameters
----------
timeDate : datetime
Datetime object
Returns
-------
timeString: str
Time string YYYYmmDDHHMMSS
'''
timeString = timeDate.strftime("%Y%m%d%H%M%S")
return(timeString)
def datetime2juliantimestring(timeDate, format='YYJJJHHMM'):
'''
Function to convert datetime object to a Julian time stamp string YYYYJJJHHMM.
Parameters
----------
timeDate : datetime
Datetime object
Returns
-------
timeString: str
        Time string YYYYJJJHHMM or YYJJJHHMM (depending on format)
'''
year, yearStr, julianDay, julianDayStr = parse_datetime(timeDate)
hour = timeDate.hour
minute = timeDate.minute
hourminStr = ('%02i' % hour) + ('%02i' % minute)
if format == 'YYYYJJJHHMM':
timeString = str(year) + julianDayStr + hourminStr
if format == 'YYJJJHHMM':
timeString = yearStr + julianDayStr + hourminStr
return(timeString)
def juliantimestring2datetime(timeString, format='YYJJJHHMM'):
'''
Function to convert Julian time stamp string to a datetime object.
Parameters
----------
timeString: str
        Time string YYYYJJJHHMMSS or YYJJJHHMM (depending on format)
Returns
-------
timeDate : datetime
Datetime object
Note: julian day starts at 001 (i.e. January 1st)
'''
if format=='YYYYJJJHHMMSS':
if not len(timeString) == 13:
print("Not the right string length.")
sys.exit(1)
year = int(timeString[0:4])
day = int(timeString[4:7]) - 1
hour = int(timeString[7:9])
min = int(timeString[9:11])
sec = int(timeString[11:13])
totaldeltaDays = day + hour/24 + min/60/24 + sec/60/60/24
timeDate = datetime.datetime(year, 1, 1) + datetime.timedelta(days=totaldeltaDays)
elif format=='YYJJJHHMM':
if not len(timeString) == 9:
print("Not the right string length.")
sys.exit(1)
year = int(timeString[0:2])
if year > 80:
year = 1900 + year
else:
year = 2000 + year
day = int(timeString[2:5]) - 1
hour = int(timeString[5:7])
min = int(timeString[7:9])
totaldeltaDays = day + hour/24 + min/60/24
timeDate = datetime.datetime(year, 1, 1) + datetime.timedelta(days=totaldeltaDays)
else:
print("Julian time stamp string format not supported.")
sys.exit(1)
return(timeDate)
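# Illustrative example (added comment, not part of the original module):
#   juliantimestring2datetime('150101200')   # YYJJJHHMM -> year 2015, Julian day 010, 12:00
#   returns datetime.datetime(2015, 1, 10, 12, 0)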
def juliantimestring2datetime_array(timeStampJulianArray, format='YYJJJHHMM', timeString=True):
'''
Same as above but for a list or array of time stamps.
'''
nrSamples = len(timeStampJulianArray)
# If not many samples...
if nrSamples < 1000000:
        timeStampJulianArrayStr = np.array(list(map(lambda n: "%0.9i"%n, timeStampJulianArray)))
        timeStampJulianArrayDt = list(map(juliantimestring2datetime, timeStampJulianArrayStr))
        if timeString == True:
            timeStampArrayStr = list(map(datetime2timestring, timeStampJulianArrayDt))
else:
timeStampArrayStr = []
return(timeStampJulianArrayDt, timeStampArrayStr)
else:
# If a lot of samples
timeStampJulianSet = np.unique(timeStampJulianArray)
nrUniqueSamples = len(timeStampJulianSet)
print(nrSamples, nrUniqueSamples)
timeStampDt = np.empty((nrSamples,), dtype='datetime64[m]')
timeStampStr = np.empty((nrSamples,), dtype='S12')
# Do the operations over the unique time stamps
for i in range(0,nrUniqueSamples):
timeStampJulianStr = "%0.9i"% timeStampJulianSet[i]
dt = juliantimestring2datetime(timeStampJulianStr, format=format)
bool = (timeStampJulianArray == timeStampJulianSet[i])
# Set values in array
timeStampDt[bool] = dt
if timeString == True:
dtStr = datetime2timestring(dt)
timeStampStr[bool] = dtStr
# Print out advancement (for large arrays)
if ((i % 100) == 0):
print(fmt1 % (i/nrUniqueSamples*100),"%")
return(timeStampDt, timeStampStr)
def get_julianday(timeDate):
'''
Get Julian day from datetime object.
Parameters
----------
timeDate : datetime
Datetime object
Returns
-------
julianDay: int
Julian day
'''
julianDay = timeDate.timetuple().tm_yday
return(julianDay)
def parse_datetime(timeDate):
'''
Function to parse a datetime object and return the year and Julian day in integer and string formats.
Parameters
----------
timeDate : datetime
Datetime object
Returns
-------
year: int
Year
yearStr: str
Year string in YY
julianDay: int
Julian day
julianDayStr: str
Julian day string JJJ
'''
year = timeDate.year
yearStr = str(year)[2:4]
julianDay = get_julianday(timeDate)
julianDayStr = '%03i' % julianDay
yearJulianStr = yearStr + julianDayStr
return(year, yearStr, julianDay, julianDayStr)
def timestring_array2datetime_array(arrayTimeStampsStr):
'''
Function to convert a list array of time strings YYYYmmDDHHMMSS
into a list of datetime objects
Parameters
----------
arrayTimeStampsStr : list(str)
List of time strings YYYYmmDDHHMMSS
Returns
-------
arrayTimeStampsDt: list(datetime)
List of datetime objects
'''
timeStamps = np.array(arrayTimeStampsStr, dtype=int)
timeStampsStr = np.array(list(map(str,timeStamps)))
arrayTimeStampsDt = []
for t in range(0,len(arrayTimeStampsStr)):
timeDate = timestring2datetime(str(timeStampsStr[t]))
arrayTimeStampsDt.append(timeDate)
return(arrayTimeStampsDt)
def juliantimeInt2juliantimeStr(juliantimeInt):
'''
50010000 -> '050010000'
'''
    timeStampJulianStr = list(map(lambda x: '%09i' % x, juliantimeInt))
return(timeStampJulianStr)
def year2digit_to_year4digit(arrayYear2digit):
'''
Function to convert an array of year strings YY into an array of year strings YYYY
'''
arrayYear2digit = np.array(arrayYear2digit, dtype=int)
arrayYear4digit = np.array(arrayYear2digit)
bool80 = (arrayYear2digit > 80)
arrayYear4digit[bool80] = arrayYear2digit[bool80] + 1900
arrayYear4digit[~bool80] = arrayYear2digit[~bool80] + 2000
return(arrayYear4digit)
def get_HHmm_str(hour, minute):
'''
Function to concatenate hours and minutes into a 4-digit string.
Parameters
----------
hour : int
minute: int
Returns
-------
hourminStr: str
4-digit hour and minute string (HHMM)
'''
hourminStr = ('%02i' % hour) + ('%02i' % minute)
return(hourminStr)
def get_subdir(year, julianDay):
'''
Function to create the subdirectory string from the year and Julian day.
Parameters
----------
year : int
julianDay: int
Returns
-------
subDir: str
Sub-directory string YYYY/YYJJJ/
'''
yearStr = str(year)[2:4]
julianDayStr = '%03i' % julianDay
subDir = str(year) + '/' + yearStr + julianDayStr + '/'
return(subDir)
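# Illustrative example (added comment, not part of the original module):
#   get_subdir(2015, 7) returns '2015/15007/'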
def datetime2absolutetime(timeDate):
'''
Function to convert a datetime object into an epoch (absolute time in seconds since 01/01/1970).
Parameters
----------
timeDate : datetime
Returns
-------
absTime: int
Number of seconds since 01/01/1970
'''
# Convert list or numpy array of values
if type(timeDate) == list or type(timeDate) == np.ndarray:
absTime = []
for t in range(0,len(timeDate)):
absTime.append(datetime2absolutetime(timeDate[t]))
else:
# Convert single value
absTime = int((timeDate-datetime.datetime(1970,1,1)).total_seconds())
# Convert list to numpy array if necessary
if type(timeDate) == np.ndarray:
absTime = np.array(absTime)
return(absTime)
def absolutetime2datetime(absTime):
timeDate = datetime.datetime(1970,1,1) + datetime.timedelta(seconds = absTime)
return(timeDate)
def tic():
global _start_time
_start_time = time.time()
def toc(appendText):
t_sec = round(time.time() - _start_time)
(t_min, t_sec) = divmod(t_sec,60)
(t_hour,t_min) = divmod(t_min,60)
print('Time passed: {}h:{}m:{}s'.format(t_hour,t_min,t_sec), appendText)
def sample_independent_times(timeStampsDt, indepTimeHours=6, method='start'):
'''
This function is not optimal as it selects the first time stamp respecting the condition (usually at the beginning of the event)
'''
if len(timeStampsDt) <= 1:
return(timeStampsDt,[0])
sortedIdx = np.argsort(timeStampsDt)
timeStampsDt = np.sort(timeStampsDt)
timeDiffs = np.diff(datetime2absolutetime(timeStampsDt))
timeDiffs = np.hstack((timeDiffs[0], timeDiffs))
indepTimeSecs = indepTimeHours*60*60
#print(timeStampsDt.shape, timeDiffs)
if method == 'start':
timeDiffsAccum = 0
indepIndices = []
indepTimeStampsDt = []
for i in range(0,len(timeStampsDt)):
if (i == 0) | (timeDiffs[i] >= indepTimeSecs) | (timeDiffsAccum >= indepTimeSecs):
indepIndices.append(sortedIdx[i])
                indepTimeStampsDt.append(timeStampsDt[i])  # keep the time stamp itself, not its sort index
timeDiffsAccum = 0
else:
# Increment the accumulated time difference to avoid excluding the next sample
# if closer than X hours from the previous (if not included),
# but further than X hours than the one before the previous
timeDiffsAccum = timeDiffsAccum + timeDiffs[i]
indepIndices = np.array(indepIndices)
indepTimeStampsDt = np.array(indepTimeStampsDt)
return(indepTimeStampsDt, indepIndices)
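# Illustrative behaviour (added comment, not part of the original module): with
# indepTimeHours=6 and time stamps at 00:00, 01:00 and 07:00 of the same day,
# the function keeps 00:00 and 07:00 and drops 01:00 (it falls within 6 hours
# of the previously kept sample).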
def generate_datetime_list(startDt, endDt, stepMin=5):
'''
Generate a list of datetimes from start to end (included).
'''
localTime = startDt
listDt = []
while localTime <= endDt:
listDt.append(localTime)
localTime = localTime + datetime.timedelta(minutes=stepMin)
return(listDt)
def daytime2circular(dayHour):
dayHour = np.array(dayHour, dtype=float)
daytimeSin = np.sin(2.0*math.pi*dayHour/24.0)
daytimeCos = np.cos(2.0*math.pi*dayHour/24.0)
return(daytimeSin,daytimeCos)
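# ---------------------------------------------------------------------------
# Minimal self-test sketch (added for illustration; not part of the original
# module). Running the file directly exercises a few of the conversions above.
if __name__ == "__main__":
    dt = timestring2datetime('20160707143000')
    print(datetime2timestring(dt))        # -> 20160707143000
    print(datetime2juliantimestring(dt))  # -> 161891430 (YYJJJHHMM)
    print(get_julianday(dt))              # -> 189
    print(datetime2absolutetime(datetime.datetime(1970, 1, 2)))  # -> 86400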
| meteoswiss-mdr/precipattractor | pymodules/time_tools_attractor.py | Python | gpl-3.0 | 12,112 | 0.011311 |
# -*- coding: utf-8 -*-
from pandas.compat import range
import pandas.util.testing as tm
from pandas import read_csv
import os
import nose
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
import pandas.tools.rplot as rplot
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def between(a, b, x):
"""Check if x is in the somewhere between a and b.
Parameters:
-----------
a: float, interval start
b: float, interval end
x: float, value to test for
Returns:
--------
True if x is between a and b, False otherwise
"""
if a < b:
return x >= a and x <= b
else:
return x <= a and x >= b
@tm.mplskip
class TestUtilityFunctions(tm.TestCase):
"""
Tests for RPlot utility functions.
"""
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
def test_make_aes1(self):
aes = rplot.make_aes()
self.assertTrue(aes['x'] is None)
self.assertTrue(aes['y'] is None)
self.assertTrue(aes['size'] is None)
self.assertTrue(aes['colour'] is None)
self.assertTrue(aes['shape'] is None)
self.assertTrue(aes['alpha'] is None)
self.assertTrue(isinstance(aes, dict))
def test_make_aes2(self):
self.assertRaises(ValueError, rplot.make_aes,
size=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
colour=rplot.ScaleShape('test'))
self.assertRaises(ValueError, rplot.make_aes,
shape=rplot.ScaleSize('test'))
self.assertRaises(ValueError, rplot.make_aes,
alpha=rplot.ScaleShape('test'))
def test_dictionary_union(self):
dict1 = {1 : 1, 2 : 2, 3 : 3}
dict2 = {1 : 1, 2 : 2, 4 : 4}
union = rplot.dictionary_union(dict1, dict2)
self.assertEqual(len(union), 4)
keys = list(union.keys())
self.assertTrue(1 in keys)
self.assertTrue(2 in keys)
self.assertTrue(3 in keys)
self.assertTrue(4 in keys)
self.assertEqual(rplot.dictionary_union(dict1, {}), dict1)
self.assertEqual(rplot.dictionary_union({}, dict1), dict1)
self.assertEqual(rplot.dictionary_union({}, {}), {})
def test_merge_aes(self):
layer1 = rplot.Layer(size=rplot.ScaleSize('test'))
layer2 = rplot.Layer(shape=rplot.ScaleShape('test'))
rplot.merge_aes(layer1, layer2)
self.assertTrue(isinstance(layer2.aes['size'], rplot.ScaleSize))
self.assertTrue(isinstance(layer2.aes['shape'], rplot.ScaleShape))
self.assertEqual(layer2.aes['size'], layer1.aes['size'])
for key in layer2.aes.keys():
if key != 'size' and key != 'shape':
self.assertTrue(layer2.aes[key] is None)
def test_sequence_layers(self):
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='SepalLength', y='SepalWidth',
size=rplot.ScaleSize('PetalLength'))
layer3 = rplot.GeomPolyFit(2)
result = rplot.sequence_layers([layer1, layer2, layer3])
self.assertEqual(len(result), 3)
last = result[-1]
self.assertEqual(last.aes['x'], 'SepalLength')
self.assertEqual(last.aes['y'], 'SepalWidth')
self.assertTrue(isinstance(last.aes['size'], rplot.ScaleSize))
self.assertTrue(self.data is last.data)
self.assertTrue(rplot.sequence_layers([layer1])[0] is layer1)
@tm.mplskip
class TestTrellis(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/tips.csv')
self.data = read_csv(path, sep=',')
layer1 = rplot.Layer(self.data)
layer2 = rplot.GeomPoint(x='total_bill', y='tip')
layer3 = rplot.GeomPolyFit(2)
self.layers = rplot.sequence_layers([layer1, layer2, layer3])
self.trellis1 = rplot.TrellisGrid(['sex', 'smoker'])
self.trellis2 = rplot.TrellisGrid(['sex', '.'])
self.trellis3 = rplot.TrellisGrid(['.', 'smoker'])
self.trellised1 = self.trellis1.trellis(self.layers)
self.trellised2 = self.trellis2.trellis(self.layers)
self.trellised3 = self.trellis3.trellis(self.layers)
def test_grid_sizes(self):
self.assertEqual(len(self.trellised1), 3)
self.assertEqual(len(self.trellised2), 3)
self.assertEqual(len(self.trellised3), 3)
self.assertEqual(len(self.trellised1[0]), 2)
self.assertEqual(len(self.trellised1[0][0]), 2)
self.assertEqual(len(self.trellised2[0]), 2)
self.assertEqual(len(self.trellised2[0][0]), 1)
self.assertEqual(len(self.trellised3[0]), 1)
self.assertEqual(len(self.trellised3[0][0]), 2)
self.assertEqual(len(self.trellised1[1]), 2)
self.assertEqual(len(self.trellised1[1][0]), 2)
self.assertEqual(len(self.trellised2[1]), 2)
self.assertEqual(len(self.trellised2[1][0]), 1)
self.assertEqual(len(self.trellised3[1]), 1)
self.assertEqual(len(self.trellised3[1][0]), 2)
self.assertEqual(len(self.trellised1[2]), 2)
self.assertEqual(len(self.trellised1[2][0]), 2)
self.assertEqual(len(self.trellised2[2]), 2)
self.assertEqual(len(self.trellised2[2][0]), 1)
self.assertEqual(len(self.trellised3[2]), 1)
self.assertEqual(len(self.trellised3[2][0]), 2)
def test_trellis_cols_rows(self):
self.assertEqual(self.trellis1.cols, 2)
self.assertEqual(self.trellis1.rows, 2)
self.assertEqual(self.trellis2.cols, 1)
self.assertEqual(self.trellis2.rows, 2)
self.assertEqual(self.trellis3.cols, 2)
self.assertEqual(self.trellis3.rows, 1)
@tm.mplskip
class TestScaleGradient(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.gradient = rplot.ScaleGradient("SepalLength", colour1=(0.2, 0.3,
0.4),
colour2=(0.8, 0.7, 0.6))
def test_gradient(self):
for index in range(len(self.data)):
row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
self.assertTrue(between(r1, r2, r))
self.assertTrue(between(g1, g2, g))
self.assertTrue(between(b1, b2, b))
@tm.mplskip
class TestScaleGradient2(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.gradient = rplot.ScaleGradient2("SepalLength", colour1=(0.2, 0.3, 0.4), colour2=(0.8, 0.7, 0.6), colour3=(0.5, 0.5, 0.5))
def test_gradient2(self):
for index in range(len(self.data)):
row = self.data.iloc[index]
r, g, b = self.gradient(self.data, index)
r1, g1, b1 = self.gradient.colour1
r2, g2, b2 = self.gradient.colour2
r3, g3, b3 = self.gradient.colour3
value = row[self.gradient.column]
a_ = min(self.data[self.gradient.column])
b_ = max(self.data[self.gradient.column])
scaled = (value - a_) / (b_ - a_)
if scaled < 0.5:
self.assertTrue(between(r1, r2, r))
self.assertTrue(between(g1, g2, g))
self.assertTrue(between(b1, b2, b))
else:
self.assertTrue(between(r2, r3, r))
self.assertTrue(between(g2, g3, g))
self.assertTrue(between(b2, b3, b))
@tm.mplskip
class TestScaleRandomColour(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.colour = rplot.ScaleRandomColour('SepalLength')
def test_random_colour(self):
for index in range(len(self.data)):
colour = self.colour(self.data, index)
self.assertEqual(len(colour), 3)
r, g, b = colour
self.assertTrue(r >= 0.0)
self.assertTrue(g >= 0.0)
self.assertTrue(b >= 0.0)
self.assertTrue(r <= 1.0)
self.assertTrue(g <= 1.0)
self.assertTrue(b <= 1.0)
@tm.mplskip
class TestScaleConstant(tm.TestCase):
def test_scale_constant(self):
scale = rplot.ScaleConstant(1.0)
self.assertEqual(scale(None, None), 1.0)
scale = rplot.ScaleConstant("test")
self.assertEqual(scale(None, None), "test")
class TestScaleSize(tm.TestCase):
def setUp(self):
path = os.path.join(curpath(), 'data/iris.csv')
self.data = read_csv(path, sep=',')
self.scale1 = rplot.ScaleShape('Name')
self.scale2 = rplot.ScaleShape('PetalLength')
def test_scale_size(self):
for index in range(len(self.data)):
marker = self.scale1(self.data, index)
self.assertTrue(marker in ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x'])
def test_scale_overflow(self):
def f():
for index in range(len(self.data)):
self.scale2(self.data, index)
self.assertRaises(ValueError, f)
@tm.mplskip
class TestRPlot(tm.TestCase):
def test_rplot1(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
self.plot.add(rplot.TrellisGrid(['sex', 'smoker']))
self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
self.fig = plt.gcf()
self.plot.render(self.fig)
def test_rplot2(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
self.plot.add(rplot.TrellisGrid(['.', 'smoker']))
self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
self.fig = plt.gcf()
self.plot.render(self.fig)
def test_rplot3(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/tips.csv')
plt.figure()
self.data = read_csv(path, sep=',')
self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
self.plot.add(rplot.TrellisGrid(['sex', '.']))
self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
self.fig = plt.gcf()
self.plot.render(self.fig)
def test_rplot_iris(self):
import matplotlib.pyplot as plt
path = os.path.join(curpath(), 'data/iris.csv')
plt.figure()
self.data = read_csv(path, sep=',')
plot = rplot.RPlot(self.data, x='SepalLength', y='SepalWidth')
plot.add(rplot.GeomPoint(colour=rplot.ScaleGradient('PetalLength', colour1=(0.0, 1.0, 0.5), colour2=(1.0, 0.0, 0.5)),
size=rplot.ScaleSize('PetalWidth', min_size=10.0, max_size=200.0),
shape=rplot.ScaleShape('Name')))
self.fig = plt.gcf()
plot.render(self.fig)
if __name__ == '__main__':
import unittest
unittest.main()
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tests/test_rplot.py | Python | artistic-2.0 | 11,560 | 0.001298 |
from utils import common, database
from TestCase.MVSTestCase import *
class TestAccount(MVSTestCaseBase):
roles = ()
need_mine = False
def test_0_new_account(self):
'''create new account * 5000'''
account_table_file = '/home/%s/.metaverse/mainnet/account_table' % common.get_username()
origin_payload_size = database.get_payload_size(account_table_file)
batch_amount = 5000
lastwords = []
for i in range(batch_amount):
ec, message = mvs_rpc.new_account("Account_%s" % i, "123456")
self.assertEqual(ec, 0, message)
lastwords.append( message[-1] )
try:
current_payload_size = database.get_payload_size(account_table_file)
            # each simple account record is smaller than 300 bytes, but when getting a new address the account record is created twice, so 600 bytes per account is a reasonable bound.
self.assertGreater(600 * batch_amount, current_payload_size - origin_payload_size, "each account record size shall be less than 600.")
finally:
for i in range(batch_amount):
ec, message = mvs_rpc.delete_account("Account_%s" % i, "123456", lastwords[i])
self.assertEqual(ec, 0, message)
def test_1_new_address(self):
'''new address for Zac'''
max_duration = 0.01
avg_duration = 0.002
round = 5000
Zac.create()
account_table_file = '/home/%s/.metaverse/mainnet/account_table' % common.get_username()
try:
origin_payload_size = database.get_payload_size(account_table_file)
durations = []
for i in range(round):
duration, ret = common.duration_call(mvs_rpc.new_address, Zac.name, Zac.password)
self.assertEqual(ret[0], 0, "mvs_rpc.new_address failed!")
self.assertLess(duration, max_duration)
durations.append(duration)
self.assertLess(sum(durations), avg_duration*round)
current_payload_size = database.get_payload_size(account_table_file)
# each simple account record size < 300
self.assertGreater(300 * round, current_payload_size - origin_payload_size,
"each account record size shall be less than 300.")
finally:
Zac.delete() | mvs-live/metaverse | test/test-rpc-v3/TestCase/Account/batch_account.py | Python | agpl-3.0 | 2,352 | 0.006378 |
from django.core.exceptions import PermissionDenied
from core.models import Author, Editor
def copy_author_to_submission(user, book):
author = Author(
first_name=user.first_name,
middle_name=user.profile.middle_name,
last_name=user.last_name,
salutation=user.profile.salutation,
institution=user.profile.institution,
department=user.profile.department,
country=user.profile.country,
author_email=user.email,
biography=user.profile.biography,
orcid=user.profile.orcid,
twitter=user.profile.twitter,
linkedin=user.profile.linkedin,
facebook=user.profile.facebook,
)
author.save()
book.author.add(author)
return author
def copy_editor_to_submission(user, book):
editor = Editor(
first_name=user.first_name,
middle_name=user.profile.middle_name,
last_name=user.last_name,
salutation=user.profile.salutation,
institution=user.profile.institution,
department=user.profile.department,
country=user.profile.country,
author_email=user.email,
biography=user.profile.biography,
orcid=user.profile.orcid,
twitter=user.profile.twitter,
linkedin=user.profile.linkedin,
facebook=user.profile.facebook,
)
editor.save()
book.editor.add(editor)
return editor
def check_stage(book, check):
if book.submission_stage >= check:
pass
elif book.submission_date:
raise PermissionDenied()
else:
raise PermissionDenied()
def handle_book_labels(post, book, kind):
for _file in book.files.all():
if _file.kind == kind and post.get("%s" % _file.id, None):
_file.label = post.get("%s" % _file.id)
_file.save()
def handle_copyedit_author_labels(post, copyedit, kind):
for _file in copyedit.author_files.all():
if _file.kind == kind and post.get("%s" % _file.id, None):
_file.label = post.get("%s" % _file.id)
_file.save()
| ubiquitypress/rua | src/submission/logic.py | Python | gpl-2.0 | 2,061 | 0 |
#!/usr/bin/python
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
try:
from include import *
except:
pass
from experiment import GenericExperiment
if __name__ == "__main__":
experiment = GenericExperiment()
experiment.run() | hubert667/AIR | build/scripts-2.7/learning-experiment.py | Python | gpl-3.0 | 860 | 0.002326 |
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import os
import tempfile
import pywps.configuration as config
from pywps import Process, WPSRequest
from pywps.response.execute import ExecuteResponse
import json
import logging
LOGGER = logging.getLogger("PYWPS")
class Job(object):
"""
:class:`Job` represents a processing job.
"""
def __init__(self, process, wps_request, wps_response):
self.process = process
self.method = '_run_process'
self.wps_request = wps_request
self.wps_response = wps_response
@property
def name(self):
return self.process.identifier
@property
def workdir(self):
return self.process.workdir
@property
def uuid(self):
return self.process.uuid
@property
def json(self):
"""Return JSON encoded representation of the request
"""
obj = {
'process': self.process.json,
'wps_request': self.wps_request.json,
}
return json.dumps(obj, allow_nan=False)
@classmethod
def from_json(cls, value):
"""init this request from json back again
:param value: the json (not string) representation
"""
process = Process.from_json(value['process'])
wps_request = WPSRequest()
wps_request.json = json.loads(value['wps_request'])
wps_response = ExecuteResponse(
wps_request=wps_request,
uuid=process.uuid,
process=process)
wps_response.store_status_file = True
new_job = Job(
process=Process.from_json(value['process']),
wps_request=wps_request,
wps_response=wps_response)
return new_job
def dump(self):
LOGGER.debug('dump job ...')
filename = tempfile.mkstemp(prefix='job_', suffix='.dump', dir=self.workdir)[1]
with open(filename, 'w') as fp:
fp.write(self.json)
LOGGER.debug("dumped job status to {}".format(filename))
return filename
return None
@classmethod
def load(cls, filename):
LOGGER.debug('load job ...')
with open(filename, 'r') as fp:
job = Job.from_json(json.load(fp))
return job
return None
def run(self):
getattr(self.process, self.method)(self.wps_request, self.wps_response)
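# Illustrative round trip between Job and JobLauncher (comment added for
# clarity; not part of the original source):
#   filename = job.dump()            # serialize process + request to a JSON dump file
#   restored = Job.load(filename)    # rebuild the Job from that file
#   restored.run()                   # invoke the stored process method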
class JobLauncher(object):
"""
:class:`JobLauncher` is a command line tool to launch a job from a file
with a dumped job state.
Example call: ``joblauncher -c /etc/pywps.cfg job-1001.dump``
"""
def create_parser(self):
import argparse
parser = argparse.ArgumentParser(prog="joblauncher")
parser.add_argument("-c", "--config", help="Path to pywps configuration.")
parser.add_argument("filename", help="File with dumped pywps job object.")
return parser
def run(self, args):
if args.config:
LOGGER.debug("using pywps_cfg={}".format(args.config))
os.environ['PYWPS_CFG'] = args.config
self._run_job(args.filename)
def _run_job(self, filename):
job = Job.load(filename)
# init config
if 'PYWPS_CFG' in os.environ:
config.load_configuration(os.environ['PYWPS_CFG'])
# update PATH
os.environ['PATH'] = "{0}:{1}".format(
config.get_config_value('processing', 'path'),
os.environ.get('PATH'))
# cd into workdir
os.chdir(job.workdir)
# init logger ... code copied from app.Service
if config.get_config_value('logging', 'file') and config.get_config_value('logging', 'level'):
LOGGER.setLevel(getattr(logging, config.get_config_value('logging', 'level')))
if not LOGGER.handlers: # hasHandlers in Python 3.x
fh = logging.FileHandler(config.get_config_value('logging', 'file'))
fh.setFormatter(logging.Formatter(config.get_config_value('logging', 'format')))
LOGGER.addHandler(fh)
else: # NullHandler
if not LOGGER.handlers:
LOGGER.addHandler(logging.NullHandler())
job.run()
def launcher():
"""
Run job launcher command line.
"""
job_launcher = JobLauncher()
parser = job_launcher.create_parser()
args = parser.parse_args()
job_launcher.run(args)
| geopython/pywps | pywps/processing/job.py | Python | mit | 4,609 | 0.001519 |
'''Galoshes
'''
from distutils.core import setup
from setuptools import find_packages
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Natural Language :: English',
]
with open('README.md') as fp:
LONG_DESCRIPTION = ''.join(fp.readlines())
setup(
name = 'galoshes',
version = '0.2.3',
packages = find_packages(),
install_requires = ['numpy>=1.7',
'future',
],
author = 'Brendan Smithyman',
author_email = 'brendan@bitsmithy.net',
description = 'galoshes',
long_description = LONG_DESCRIPTION,
license = 'MIT',
keywords = 'dictionary class attribute',
url = 'https://github.com/bsmithyman/galoshes',
download_url = 'https://github.com/bsmithyman/galoshes',
classifiers = CLASSIFIERS,
platforms = ['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],
use_2to3 = False,
)
| bsmithyman/galoshes | setup.py | Python | mit | 1,497 | 0.032732 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from nose_parameterized import parameterized, param
from dateparser.languages import default_language_loader, Language
from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages
from tests import BaseTestCase
class TestBundledLanguages(BaseTestCase):
def setUp(self):
super(TestBundledLanguages, self).setUp()
self.language = NotImplemented
self.datetime_string = NotImplemented
self.translation = NotImplemented
self.tokens = NotImplemented
self.result = NotImplemented
@parameterized.expand([
param('en', "Sep 03 2014", "september 03 2014"),
param('en', "friday, 03 september 2014", "friday 03 september 2014"),
# Chinese
param('cn', "1年11个月", "1 year 11 month"),
# French
param('fr', "20 Février 2012", "20 february 2012"),
param('fr', "Mercredi 19 Novembre 2013", "wednesday 19 november 2013"),
param('fr', "18 octobre 2012 à 19 h 21 min", "18 october 2012 19:21"),
# German
param('de', "29. Juni 2007", "29. june 2007"),
param('de', "Montag 5 Januar, 2015", "monday 5 january 2015"),
# Spanish
param('es', "Miércoles 31 Diciembre 2014", "wednesday 31 december 2014"),
# Italian
param('it', "Giovedi Maggio 29 2013", "thursday may 29 2013"),
param('it', "19 Luglio 2013", "19 july 2013"),
# Portuguese
param('pt', "22 de dezembro de 2014 às 02:38", "22 december 2014 02:38"),
# Russian
param('ru', "5 августа 2014 г. в 12:00", "5 august 2014 year 12:00"),
# Turkish
param('tr', "2 Ocak 2015 Cuma, 16:49", "2 january 2015 friday 16:49"),
# Czech
param('cz', "22. prosinec 2014 v 2:38", "22. december 2014 2:38"),
# Dutch
param('nl', "maandag 22 december 2014 om 2:38", "monday 22 december 2014 2:38"),
# Romanian
param('ro', "22 Decembrie 2014 la 02:38", "22 december 2014 02:38"),
# Polish
param('pl', "4 stycznia o 13:50", "4 january 13:50"),
param('pl', "29 listopada 2014 o 08:40", "29 november 2014 08:40"),
# Ukrainian
param('uk', "30 листопада 2013 о 04:27", "30 november 2013 04:27"),
# Belarusian
param('by', "5 снежня 2015 г. у 12:00", "5 december 2015 year 12:00"),
param('by', "11 верасня 2015 г. у 12:11", "11 september 2015 year 12:11"),
param('by', "3 стд 2015 г. у 10:33", "3 january 2015 year 10:33"),
# Arabic
param('ar', "6 يناير، 2015، الساعة 05:16 مساءً", "6 january 2015 05:16 pm"),
param('ar', "7 يناير، 2015، الساعة 11:00 صباحاً", "7 january 2015 11:00 am"),
# Vietnamese
param('vi', "Thứ Năm, ngày 8 tháng 1 năm 2015", "thursday 8 january 2015"),
param('vi', "Thứ Tư, 07/01/2015 | 22:34", "wednesday 07/01/2015 22:34"),
param('vi', "9 Tháng 1 2015 lúc 15:08", "9 january 2015 15:08"),
# Thai
param('th', "เมื่อ กุมภาพันธ์ 09, 2015, 09:27:57 AM", "february 09 2015 09:27:57 am"),
param('th', "เมื่อ กรกฎาคม 05, 2012, 01:18:06 AM", "july 05 2012 01:18:06 am"),
# Filipino
param('ph', "Biyernes Hulyo 3, 2015", "friday july 3 2015"),
param('ph', "Pebrero 5, 2015 7:00 pm", "february 5 2015 7:00 pm"),
# Miscellaneous
param('en', "2014-12-12T12:33:39-08:00", "2014-12-12 12:33:39-08:00"),
param('en', "2014-10-15T16:12:20+00:00", "2014-10-15 16:12:20+00:00"),
param('en', "28 Oct 2014 16:39:01 +0000", "28 october 2014 16:39:01 +0000"),
param('es', "13 Febrero 2015 a las 23:00", "13 february 2015 23:00")
])
def test_translation(self, shortname, datetime_string, expected_translation):
self.given_bundled_language(shortname)
self.given_string(datetime_string)
self.when_datetime_string_translated()
self.then_string_translated_to(expected_translation)
@parameterized.expand([
# English
param('en', "yesterday", "1 day"),
param('en', "today", "0 day"),
param('en', "day before yesterday", "2 day"),
param('en', "last month", "1 month"),
param('en', "less than a minute ago", "45 second"),
# German
param('de', "vorgestern", "2 day"),
param('de', "heute", "0 day"),
param('de', "vor 3 Stunden", "ago 3 hour"),
# French
param('fr', "avant-hier", "2 day"),
param('fr', "hier", "1 day"),
param('fr', "aujourd'hui", "0 day"),
# Spanish
param('es', "anteayer", "2 day"),
param('es', "ayer", "1 day"),
param('es', "ayer a las", "1 day "),
param('es', "hoy", "0 day"),
param('es', "hace un horas", "ago 1 hour"),
param('es', "2 semanas", "2 week"),
param('es', "2 año", "2 year"),
# Italian
param('it', "altro ieri", "2 day"),
param('it', "ieri", "1 day"),
param('it', "oggi", "0 day"),
param('it', "2 settimana fa", "2 week ago"),
param('it', "2 anno fa", "2 year ago"),
# Portuguese
param('pt', "anteontem", "2 day"),
param('pt', "ontem", "1 day"),
param('pt', "hoje", "0 day"),
param('pt', "56 minutos", "56 minute"),
param('pt', "12 dias", "12 day"),
param('pt', "há 14 min.", "ago 14 minute."),
# Russian
param('ru', "9 месяцев", "9 month"),
param('ru', "8 недели", "8 week"),
param('ru', "7 года", "7 year"),
param('ru', "вчера", "1 day"),
param('ru', "сегодня", "0 day"),
param('ru', "несколько секунд", "44 second"),
# Turkish
param('tr', "dün", "1 day"),
param('tr', "22 dakika", "22 minute"),
param('tr', "12 hafta", "12 week"),
param('tr', "13 yıl", "13 year"),
# Czech
param('cz', "40 sekunda", "40 second"),
param('cz', "4 týden", "4 week"),
param('cz', "14 roků", "14 year"),
# Chinese
param('cn', "昨天", "1 day"),
param('cn', "前天", "2 day"),
param('cn', "50 秒", "50 second"),
param('cn', "7 周", "7 week"),
param('cn', "12 年", "12 year"),
# Dutch
param('nl', "17 uur geleden", "17 hour ago"),
param('nl', "27 jaar geleden", "27 year ago"),
param('nl', "45 minuten", "45 minute"),
# Romanian
param('ro', "23 săptămâni în urmă", "23 week ago"),
param('ro', "23 săptămâni", "23 week"),
param('ro', "13 oră", "13 hour"),
# Arabic
param('ar', "يومين", "2 day"),
param('ar', "أمس", "1 day"),
param('ar', "4 عام", "4 year"),
param('ar', "منذ 2 ساعات", "ago 2 hour"),
param('ar', "منذ ساعتين", "ago 2 hour"),
# Polish
param('pl', "2 godz.", "2 hour"),
param('pl', "Wczoraj o 07:40", "1 day 07:40"),
param('pl', "Poniedziałek 8:10 pm", "monday 8:10 pm"),
# Vietnamese
param('vi', "2 tuần 3 ngày", "2 week 3 day"),
param('vi', "21 giờ trước", "21 hour ago"),
param('vi', "Hôm qua 08:16", "1 day 08:16"),
param('vi', "Hôm nay 15:39", "0 day 15:39"),
#French
param('fr', u"Il y a moins d'une minute", "ago 1 minute"),
param('fr', u"Il y a moins de 30s", "ago 30 s"),
#Filipino
param('ph', "kahapon", "1 day"),
param('ph', "ngayon", "0 second"),
# Belarusian
param('by', "9 месяцаў", "9 month"),
param('by', "8 тыдняў", "8 week"),
param('by', "1 тыдзень", "1 week"),
param('by', "2 года", "2 year"),
param('by', "3 гады", "3 year"),
param('by', "11 секунд", "11 second"),
param('by', "учора", "1 day"),
param('by', "пазаўчора", "2 day"),
param('by', "сёння", "0 day"),
param('by', "некалькі хвілін", "2 minute"),
])
def test_freshness_translation(self, shortname, datetime_string, expected_translation):
self.given_bundled_language(shortname)
self.given_string(datetime_string)
self.when_datetime_string_translated()
self.then_string_translated_to(expected_translation)
@parameterized.expand([
param('pt', "sexta-feira, 10 de junho de 2014 14:52",
["sexta-feira", " ", "10", " ", "de", " ", "junho", " ", "de", " ", "2014", " ", "14", ":", "52"]),
param('it', "14_luglio_15", ["14", "luglio", "15"]),
param('cn', "1年11个月", ["1", "年", "11", "个月"]),
param('tr', "2 saat önce", ["2", " ", "saat", " ", "önce"]),
param('fr', "il ya environ 23 heures'", ["il ya", " ", "environ", " ", "23", " ", "heures"]),
param('de', "Gestern um 04:41", ['Gestern ', 'um', ' ', '04', ':', '41']),
param('de', "Donnerstag, 8. Januar 2015 um 07:17", ['Donnerstag', ' ', '8', '.', ' ', 'Januar', ' ', '2015', ' ', 'um', ' ', '07', ':', '17']),
param('ru', "8 января 2015 г. в 9:10", ['8', ' ', 'января', ' ', '2015', ' ', 'г.', ' ', 'в', ' ', '9', ':', '10']),
param('cz', "6. leden 2015 v 22:29", ['6', '.', ' ', 'leden', ' ', '2015', ' ', 'v', ' ', '22', ':', '29']),
param('nl', "woensdag 7 januari 2015 om 21:32", ['woensdag', ' ', '7', ' ', 'januari', ' ', '2015', ' ', 'om', ' ', '21', ':', '32']),
param('ro', "8 Ianuarie 2015 la 13:33", ['8', ' ', 'Ianuarie', ' ', '2015', ' ', 'la', ' ', '13', ':', '33']),
param('ar', "8 يناير، 2015، الساعة 10:01 صباحاً", ['8', ' ', 'يناير', ' ', '2015', 'الساعة', ' ', '10', ':', '01', ' صباحاً']),
param('th', "8 มกราคม 2015 เวลา 12:22 น.", ['8', ' ', 'มกราคม', ' ', '2015', ' ', 'เวลา', ' ', '12', ':', '22', ' ', 'น.']),
param('pl', "8 stycznia 2015 o 10:19", ['8', ' ', 'stycznia', ' ', '2015', ' ', 'o', ' ', '10', ':', '19']),
param('vi', "Thứ Năm, ngày 8 tháng 1 năm 2015", ["Thứ Năm", " ", "ngày", " ", "8", " tháng ", "1", " ", "năm", " ", "2015"]),
param('ph', "Biyernes Hulyo 3 2015", ["Biyernes", " ", "Hulyo", " ", "3", " ", "2015"]),
param('by', "3 верасня 2015 г. у 11:10", ['3', ' ', 'верасня', ' ', '2015', ' ', 'г.', ' ', 'у', ' ', '11', ':', '10']),
])
def test_split(self, shortname, datetime_string, expected_tokens):
self.given_bundled_language(shortname)
self.given_string(datetime_string)
self.when_datetime_string_splitted()
self.then_tokens_are(expected_tokens)
@parameterized.expand([
param('en', "17th October, 2034 @ 01:08 am PDT", strip_timezone=True),
param('en', "#@Sept#04#2014", strip_timezone=False),
param('en', "2014-12-13T00:11:00Z", strip_timezone=False),
param('de', "Donnerstag, 8. Januar 2015 um 07:17", strip_timezone=False),
param('ru', "8 января 2015 г. в 9:10", strip_timezone=False),
param('cz', "Pondělí v 22:29", strip_timezone=False),
param('nl', "woensdag 7 januari om 21:32", strip_timezone=False),
param('ro', "8 Ianuarie 2015 la 13:33", strip_timezone=False),
param('ar', "ساعتين", strip_timezone=False),
param('tr', "3 hafta", strip_timezone=False),
param('th', "17 เดือนมิถุนายน", strip_timezone=False),
param('pl', "przedwczoraj", strip_timezone=False),
param('fa', "ژانویه 8, 2015، ساعت 15:46", strip_timezone=False),
param('vi', "2 tuần 3 ngày", strip_timezone=False),
param('ph', "Hulyo 3, 2015 7:00 pm", strip_timezone=False),
param('by', "3 верасня 2015 г. у 11:10", strip_timezone=False),
])
def test_applicable_languages(self, shortname, datetime_string, strip_timezone):
self.given_bundled_language(shortname)
self.given_string(datetime_string)
self.when_datetime_string_checked_if_applicable(strip_timezone)
self.then_language_is_applicable()
@parameterized.expand([
param('ru', "08.haziran.2014, 11:07", strip_timezone=False),
param('ar', "6 دقیقه", strip_timezone=False),
param('fa', "ساعتين", strip_timezone=False),
param('cz', "3 hafta", strip_timezone=False),
])
def test_not_applicable_languages(self, shortname, datetime_string, strip_timezone):
self.given_bundled_language(shortname)
self.given_string(datetime_string)
self.when_datetime_string_checked_if_applicable(strip_timezone)
self.then_language_is_not_applicable()
def given_string(self, datetime_string):
self.datetime_string = datetime_string
    def given_bundled_language(self, shortname):
        self.language = default_language_loader.get_language(shortname)
def when_datetime_string_translated(self):
self.translation = self.language.translate(self.datetime_string)
def when_datetime_string_splitted(self, keep_formatting=False):
self.tokens = self.language._split(self.datetime_string, keep_formatting)
def when_datetime_string_checked_if_applicable(self, strip_timezone):
self.result = self.language.is_applicable(self.datetime_string, strip_timezone)
def then_string_translated_to(self, expected_string):
self.assertEqual(expected_string, self.translation)
def then_tokens_are(self, expected_tokens):
self.assertEqual(expected_tokens, self.tokens)
def then_language_is_applicable(self):
self.assertTrue(self.result)
def then_language_is_not_applicable(self):
self.assertFalse(self.result)
class BaseLanguageDetectorTestCase(BaseTestCase):
__test__ = False
NOT_DETECTED = object()
def setUp(self):
super(BaseLanguageDetectorTestCase, self).setUp()
self.datetime_string = NotImplemented
self.detector = NotImplemented
self.detected_language = NotImplemented
self.known_languages = None
@parameterized.expand([
param("1 january 2015", 'en'),
])
def test_valid_dates_detected(self, datetime_string, expected_language):
self.given_languages(expected_language)
self.given_detector()
self.given_string(datetime_string)
self.when_searching_for_first_applicable_language()
self.then_language_was_detected(expected_language)
@parameterized.expand([
param("foo"),
])
def test_invalid_dates_not_detected(self, datetime_string):
self.given_languages('en')
self.given_detector()
self.given_string(datetime_string)
self.when_searching_for_first_applicable_language()
self.then_no_language_was_detected()
def test_invalid_date_after_valid_date_not_detected(self):
self.given_languages('en')
self.given_detector()
        self.given_previously_detected_string("1 january 2015")
self.given_string("foo")
self.when_searching_for_first_applicable_language()
self.then_no_language_was_detected()
def test_valid_date_after_invalid_date_detected(self):
self.given_languages('en')
self.given_detector()
        self.given_previously_detected_string("foo")
self.given_string("1 january 2015")
self.when_searching_for_first_applicable_language()
self.then_language_was_detected('en')
def given_languages(self, *shortnames):
self.known_languages = [default_language_loader.get_language(shortname)
for shortname in shortnames]
    def given_previously_detected_string(self, datetime_string):
for _ in self.detector.iterate_applicable_languages(datetime_string, modify=True):
break
def given_string(self, datetime_string):
self.datetime_string = datetime_string
def given_detector(self):
raise NotImplementedError
def when_searching_for_first_applicable_language(self):
for language in self.detector.iterate_applicable_languages(self.datetime_string, modify=True):
self.detected_language = language
break
else:
self.detected_language = self.NOT_DETECTED
def then_language_was_detected(self, shortname):
self.assertIsInstance(self.detected_language, Language, "Language was not properly detected")
self.assertEqual(shortname, self.detected_language.shortname)
def then_no_language_was_detected(self):
self.assertIs(self.detected_language, self.NOT_DETECTED)
class TestExactLanguages(BaseLanguageDetectorTestCase):
__test__ = True
@parameterized.expand([
param("01-01-12", ['en', 'fr']),
param("01-01-12", ['tr', 'ar']),
param("01-01-12", ['ru', 'fr', 'en', 'pl']),
param("01-01-12", ['en']),
])
def test_exact_languages(self, datetime_string, shortnames):
self.given_string(datetime_string)
self.given_known_languages(shortnames)
self.given_detector()
self.when_using_exact_languages()
self.then_exact_languages_were_filtered(shortnames)
def given_known_languages(self, shortnames):
self.known_languages = [default_language_loader.get_language(shortname)
for shortname in shortnames]
def given_detector(self):
self.assertIsInstance(self.known_languages, list, "Require a list of languages to initialize")
self.assertGreaterEqual(len(self.known_languages), 1, "Could only be initialized with one or more languages")
self.detector = ExactLanguages(languages=self.known_languages)
def when_using_exact_languages(self):
self.exact_languages = self.detector.iterate_applicable_languages(self.datetime_string, modify=True)
def then_exact_languages_were_filtered(self, shortnames):
self.assertEqual(set(shortnames), set([lang.shortname for lang in self.exact_languages]))
class BaseAutoDetectLanguageDetectorTestCase(BaseLanguageDetectorTestCase):
allow_redetection = NotImplemented
def given_detector(self):
self.detector = AutoDetectLanguage(languages=self.known_languages, allow_redetection=self.allow_redetection)
class TestAutoDetectLanguageDetectorWithoutRedetection(BaseAutoDetectLanguageDetectorTestCase):
__test__ = True
allow_redetection = False
class TestAutoDetectLanguageDetectorWithRedetection(BaseAutoDetectLanguageDetectorTestCase):
__test__ = True
allow_redetection = True
| seagatesoft/dateparser | tests/test_languages.py | Python | bsd-3-clause | 18,881 | 0.0024 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# Copyright/License Notice (BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2010-2012, Daniel Knaggs - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
from danlog import DanLog
from ddp import *
import os
import pickle
import sys
import xmlrpclib
from xml.dom import minidom
###########
# Globals #
###########
client_callsign = ""
log = DanLog("XRProxyServer")
#############
# Constants #
#############
ALLOW_UNSIGNED_PACKETS = False
BACKEND_DATAMODE = "PSK500R"
BACKEND_HOSTNAME = "localhost"
BACKEND_PORT = 7362
DEBUG_MODE = False
DISABLE_CRYPTO = False
SPECIFICATION = 0
USE_TCP = 0
XMLRPC_SERVER = "http://127.0.0.1:7397/xmlrpc/"
XML_SETTINGS_FILE = "xrproxyserver-settings.xml"
###############
# Subroutines #
###############
def cBool(value):
if str(value).lower() == "false" or str(value) == "0":
return False
elif str(value).lower() == "true" or str(value) == "1":
return True
else:
return False
def exitProgram():
sys.exit(0)
def main():
global client_callsign
log.info("""
#########################################################################
# Copyright/License Notice (BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2010-2012, Daniel Knaggs - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
""")
log.info("")
log.info("XMLRPC Proxy - Server")
log.info("=====================")
log.info("Checking settings...")
if os.path.exists(XML_SETTINGS_FILE) == False:
log.warn("The XML settings file doesn't exist, create one...")
xmlXRPSettingsWrite()
log.info("The XML settings file has been created using the default settings. Please edit it and restart the XMLRPC proxy server once you're happy with the settings.")
exitProgram()
else:
log.info("Reading XML settings...")
xmlXRPSettingsRead()
		# Rewrite the settings file so that any newly added settings are included in it
if os.path.exists(XML_SETTINGS_FILE + ".bak"):
os.unlink(XML_SETTINGS_FILE + ".bak")
os.rename(XML_SETTINGS_FILE, XML_SETTINGS_FILE + ".bak")
xmlXRPSettingsWrite()
log.info("Setting up DDP...")
ddp = DDP(hostname = BACKEND_HOSTNAME, port = BACKEND_PORT, data_mode = BACKEND_DATAMODE, timeout = 60., ack_timeout = 30., tx_hangtime = 1.25, data_length = 1024, specification = SPECIFICATION, disable_ec = False, disable_crypto = DISABLE_CRYPTO, allow_unsigned_packets = ALLOW_UNSIGNED_PACKETS, application = "DDP Example: XMLRPC Proxy", ignore_broadcast_packets = True, debug_mode = DEBUG_MODE)
log.info("")
while client_callsign == "":
print "Please enter your callsign: ",
client_callsign = readInput().strip().upper()
log.info("")
ddp.setCallsign(client_callsign)
log.info("Waiting for a packet...")
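	# Each DDP payload received below is expected to be a pickled (method_name, args)
	# tuple; it is replayed against the local XMLRPC server and the pickled result is
	# transmitted back to the originating callsign.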
while True:
try:
data = ddp.receiveDataFromAny("XMLRPC")
if data is not None:
# Check the flags
d = data[0]
packet = data[1]
# Send the query off to the XMLRPC server
log.info("A XMLRPC packet has arrived, forwarding it on...")
call = pickle.loads(d)
s = xmlrpclib.ServerProxy(XMLRPC_SERVER)
t = getattr(s, call[0])
args = call[1]
tosend = pickle.dumps(t(*args), protocol = 2)
s = None
# Send the results back to the client
log.info("Transmitting the results back to the client...")
ddp.transmitData("XMLRPC", "", packet[ddp.SECTION_SOURCE], tosend, USE_TCP, 1)
except KeyboardInterrupt:
break
except Exception, ex:
log.fatal(ex)
log.info("Cleaning up...")
ddp.dispose()
ddp = None
log.info("Exiting...")
exitProgram()
def readInput():
return sys.stdin.readline().replace("\r", "").replace("\n", "")
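# The settings file read/written below is a flat list of <Setting .../> elements under
# an <XRPServer> root, each element carrying a single attribute, for example:
#   <XRPServer>
#       <Setting BackendDataMode="PSK500R"/>
#       <Setting BackendHostname="localhost"/>
#       ...
#   </XRPServer>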
def xmlXRPSettingsRead():
global ALLOW_UNSIGNED_PACKETS, BACKEND_DATAMODE, BACKEND_HOSTNAME, BACKEND_PORT, DEBUG_MODE, DISABLE_CRYPTO, SPECIFICATION, USE_TCP, XMLRPC_SERVER
if os.path.exists(XML_SETTINGS_FILE):
xmldoc = minidom.parse(XML_SETTINGS_FILE)
myvars = xmldoc.getElementsByTagName("Setting")
for var in myvars:
for key in var.attributes.keys():
val = str(var.attributes[key].value)
# Now put the correct values to correct key
if key == "BackendDataMode":
BACKEND_DATAMODE = val.upper()
elif key == "BackendHostname":
BACKEND_HOSTNAME = val
elif key == "BackendPort":
					BACKEND_PORT = int(val)
elif key == "Specification":
SPECIFICATION = int(val)
elif key == "UseTCP":
USE_TCP = int(val)
elif key == "AllowUnsignedPackets":
ALLOW_UNSIGNED_PACKETS = cBool(val)
elif key == "DisableCrypto":
DISABLE_CRYPTO = cBool(val)
elif key == "DebugMode":
DEBUG_MODE = cBool(val)
elif key == "XMLRPCServer":
XMLRPC_SERVER = val
else:
log.warn("XML setting attribute \"%s\" isn't known. Ignoring..." % key)
def xmlXRPSettingsWrite():
if os.path.exists(XML_SETTINGS_FILE) == False:
xmloutput = file(XML_SETTINGS_FILE, "w")
xmldoc = minidom.Document()
# Create header
settings = xmldoc.createElement("XRPServer")
xmldoc.appendChild(settings)
# Write each of the details one at a time, makes it easier for someone to alter the file using a text editor
var = xmldoc.createElement("Setting")
var.setAttribute("BackendDataMode", str(BACKEND_DATAMODE))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("BackendHostname", str(BACKEND_HOSTNAME))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("BackendPort", str(BACKEND_PORT))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("Specification", str(SPECIFICATION))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("UseTCP", str(USE_TCP))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("AllowUnsignedPackets", str(ALLOW_UNSIGNED_PACKETS))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("DisableCrypto", str(DISABLE_CRYPTO))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("DebugMode", str(DEBUG_MODE))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("XMLRPCServer", str(XMLRPC_SERVER))
settings.appendChild(var)
# Finally, save to the file
xmloutput.write(xmldoc.toprettyxml())
xmloutput.close()
##########################
# Main
##########################
if __name__ == "__main__":
main()
| haxwithaxe/ddp | examples/xrproxy_server.py | Python | bsd-3-clause | 11,364 | 0.027455 |
"""Agent foundation for conversation integration."""
from abc import ABC, abstractmethod
from typing import Optional
from homeassistant.helpers import intent
class AbstractConversationAgent(ABC):
"""Abstract conversation agent."""
@property
def attribution(self):
"""Return the attribution."""
return None
async def async_get_onboarding(self):
"""Get onboard data."""
return None
async def async_set_onboarding(self, shown):
"""Set onboard data."""
return True
@abstractmethod
async def async_process(
self, text: str, conversation_id: Optional[str] = None
) -> intent.IntentResponse:
"""Process a sentence."""
| qedi-r/home-assistant | homeassistant/components/conversation/agent.py | Python | apache-2.0 | 714 | 0 |
def main():
with open('file.txt'):
print(42) | smmribeiro/intellij-community | python/testData/quickFixes/PyRemoveUnusedLocalQuickFixTest/withOneTarget_after.py | Python | apache-2.0 | 56 | 0.017857 |
my_name = 'Zed A. Shaw'
my_age = 35 # not a lie
my_height = 74 # Inches
my_weight = 180 # lbs
my_eyes = 'Blue'
my_teeth = 'White'
my_hair = 'Brown'
print "Let's talk about %s." % my_name
print "He's %d inches tall." % my_height
print "He's %d pounds heavy." % my_weight
print "Actually that's not too heavy"
print "He's got %s eyes and %s hair." % (my_eyes, my_hair)
print "His teeth are usually %s depending on the coffee." % my_teeth
# this line is tricky, try to get it exactly right
print "If I add %d, %d and %d I get %d." % (my_age, my_height, my_weight, my_age + my_height + my_weight)
| rdthomson/set09103 | src/LPHW/ex5.py | Python | gpl-3.0 | 596 | 0.008389 |
# -*- coding: utf-8 -*-
# Copyright 2015-2017 Quartile Limited
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, _
class StockMove(models.Model):
_inherit = "stock.move"
pick_partner_id = fields.Many2one(
related='picking_id.partner_id',
store=True,
readonly=True,
string='Pick Partner'
)
picking_type_code = fields.Selection(
related='picking_type_id.code',
store=True,
readonly=True,
string='Picking Type Code'
)
quant_lot_id = fields.Many2one(
'stock.production.lot',
compute='_get_quant_info',
store=True,
readonly=True,
string='Case No.'
)
quant_owner_id = fields.Many2one(
'res.partner',
compute='_get_quant_info',
store=True,
readonly=True,
string='Owner'
)
so_id = fields.Many2one(
'sale.order',
compute='_get_vals',
store=True,
readonly=True,
string='SO'
)
po_id = fields.Many2one(
'purchase.order',
compute='_get_vals',
store=True,
readonly=True,
string='PO'
)
is_mto = fields.Boolean('Make to Order',
compute='_compute_mto',
store=True,
)
@api.multi
def name_get(self):
res = []
for line in self:
name = line.location_id.name + ' > ' + line.location_dest_id.name
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
pick_rec = self.env['stock.picking'].search(
[('name','=',line.picking_id.origin)])
if pick_rec.picking_type_id.code == 'incoming':
name = line.picking_id.name + '/ ' + name
else:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
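    # The case number and owner shown on the move are taken from its quants once they
    # exist, falling back to the reserved quants and finally to the move's own lot.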
@api.multi
@api.depends('quant_ids', 'reserved_quant_ids', 'lot_id')
def _get_quant_info(self):
for m in self:
if m.quant_ids:
m.quant_lot_id = m.quant_ids[0].lot_id and \
m.quant_ids[0].lot_id.id
m.quant_owner_id = m.quant_ids[0].owner_id and \
m.quant_ids[0].owner_id.id
elif m.reserved_quant_ids:
m.quant_lot_id = m.reserved_quant_ids[0].lot_id and \
m.reserved_quant_ids[0].lot_id.id
m.quant_owner_id = m.reserved_quant_ids[0].owner_id and \
m.reserved_quant_ids[0].owner_id.id
else:
m.quant_lot_id = m.lot_id.id
# below part does not work since quant is generated after
# this step
# if m.lot_id.quant_ids:
# m.quant_owner_id = m.lot_id.quant_ids[-1].owner_id and \
# m.lot_id.quant_ids[-1].owner_id.owner_id.id
def _get_quant_info_init(self, cr, uid):
# update quant info when installing/upgrading
cr.execute("""
update stock_move m1
set quant_lot_id = lot, quant_owner_id = owner
from (select q.lot_id as lot, q.owner_id as owner, m2.id as id
from stock_quant q
join stock_move m2 on q.reservation_id = m2.id) as subq
where m1.id = subq.id
and quant_lot_id is null
""")
@api.multi
@api.depends('origin')
def _get_vals(self):
SO = self.env['sale.order']
PO = self.env['purchase.order']
for m in self:
m.so_id, m.po_id = 0, 0
if m.purchase_line_id:
m.po_id = m.purchase_line_id.order_id.id
elif m.procurement_id and m.procurement_id.sale_line_id:
m.so_id = m.procurement_id.sale_line_id.order_id.id
@api.one
@api.depends('procurement_id', 'purchase_line_id')
def _compute_mto(self):
if self.code == 'outgoing' and self.procurement_id and \
self.procurement_id.sale_line_id:
self.is_mto = self.procurement_id.sale_line_id.mto
elif self.code == 'incoming' and self.purchase_line_id:
self.is_mto = self.purchase_line_id.mto
# def init(self, cr):
# move_ids = self.search(cr, SUPERUSER_ID, [])
# for m in self.browse(cr, SUPERUSER_ID, move_ids):
# m.pick_partner_id = m.picking_id.partner_id and m.picking_id.partner_id.id
# if m.quant_ids:
# m.quant_lot_id = m.quant_ids[0].lot_id and m.quant_ids[0].lot_id.id
# m.quant_owner_id = m.quant_ids[0].owner_id and m.quant_ids[0].owner_id.id
@api.model
def _prepare_picking_assign(self, move):
res = super(StockMove, self)._prepare_picking_assign(move)
res['is_mto'] = move.is_mto
return res
def action_assign(self, cr, uid, ids, context=None):
# NEED TO OVERRIDE COMPLETE METHOD SINCE LOGIC WAS IN BETWEEN THE
# LINES. SEE #oscg TAG FOR CHANGES DONE ON THIS METHOD.
""" Checks the product type and accordingly writes the state.
"""
context = context or {}
quant_obj = self.pool.get("stock.quant")
to_assign_moves = []
main_domain = {}
todo_moves = []
operations = set()
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('confirmed', 'waiting', 'assigned'):
continue
if move.location_id.usage in ('supplier', 'inventory', 'production'):
to_assign_moves.append(move.id)
#in case the move is returned, we want to try to find quants before forcing the assignment
if not move.origin_returned_move_id:
continue
if move.product_id.type == 'consu':
to_assign_moves.append(move.id)
continue
else:
todo_moves.append(move)
#we always keep the quants already assigned and try to find the remaining quantity on quants not assigned only
main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)]
# oscg add
# this is to prevent reserving quants that are taken by
# quotations for supplier return outgoing move
if move.location_dest_id.usage == 'supplier':
main_domain[move.id] += [('sale_id', '=', False)]
#if the move is preceeded, restrict the choice of quants in the ones moved previously in original move
ancestors = self.find_move_ancestors(cr, uid, move, context=context)
if move.state == 'waiting' and not ancestors:
#if the waiting move hasn't yet any ancestor (PO/MO not confirmed yet), don't find any quant available in stock
main_domain[move.id] += [('id', '=', False)]
elif ancestors:
main_domain[move.id] += [('history_ids', 'in', ancestors)]
#if the move is returned from another, restrict the choice of quants to the ones that follow the returned move
if move.origin_returned_move_id:
main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)]
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
# Check all ops and sort them: we want to process first the packages, then operations with lot then the rest
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
for ops in operations:
#first try to find quants based on specific domains given by linked operations
for record in ops.linked_move_operation_ids:
move = record.move_id
if move.id in main_domain:
domain = main_domain[move.id] + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context)
qty = record.qty
if qty:
# add a serial number field in SO line, which should be passed to delivery order
# to reserve a quant of the selected serial number
if record.move_id.quant_id: #oscg
quants = [(record.move_id.quant_id, record.move_id.quant_id.qty)] #oscg
else: #oscg
quants = quant_obj.quants_get_prefered_domain(cr,
uid, ops.location_id, move.product_id, qty,
domain=domain, prefered_domain_list=[],
restrict_lot_id=move.restrict_lot_id.id,
restrict_partner_id=move.restrict_partner_id.\
id, context=context) #oscg
quant_obj.quants_reserve(cr, uid, quants, move, record, context=context)
for move in todo_moves:
if move.linked_move_operation_ids:
continue
# then if the move isn't totally assigned, try to find quants without any specific domain
if move.state != 'assigned':
qty_already_assigned = move.reserved_availability
qty = move.product_qty - qty_already_assigned
# add a serial number field in SO line, which should be passed to delivery order
# to reserve a quant of the selected serial number
if move.quant_id: #oscg
quants = [(move.quant_id, qty)] #oscg
else: #oscg
quants = quant_obj.quants_get_prefered_domain(cr, uid,
move.location_id, move.product_id, qty,
domain=main_domain[move.id], prefered_domain_list=[],
restrict_lot_id=move.restrict_lot_id.id,
restrict_partner_id=move.restrict_partner_id.id,
context=context) #oscg
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
#force assignation of consumable products and incoming from supplier/inventory/production
if to_assign_moves:
self.force_assign(cr, uid, to_assign_moves, context=context)
| rfhk/awo-custom | sale_line_quant_extended/models/stock_move.py | Python | lgpl-3.0 | 10,584 | 0.005574 |
from rpython.flowspace.model import Variable
from rpython.rtyper.lltypesystem import lltype
from rpython.translator.simplify import get_graph
from rpython.tool.uid import uid
class CreationPoint(object):
def __init__(self, creation_method, TYPE, op=None):
self.escapes = False
self.returns = False
self.creation_method = creation_method
if creation_method == "constant":
self.escapes = True
self.TYPE = TYPE
self.op = op
def __repr__(self):
return ("CreationPoint(<0x%x>, %r, %s, esc=%s)" %
(uid(self), self.TYPE, self.creation_method, self.escapes))
class VarState(object):
def __init__(self, *creps):
self.creation_points = set()
for crep in creps:
self.creation_points.add(crep)
def contains(self, other):
return other.creation_points.issubset(self.creation_points)
def merge(self, other):
creation_points = self.creation_points.union(other.creation_points)
return VarState(*creation_points)
def setescapes(self):
changed = []
for crep in self.creation_points:
if not crep.escapes:
changed.append(crep)
crep.escapes = True
return changed
def setreturns(self):
changed = []
for crep in self.creation_points:
if not crep.returns:
changed.append(crep)
crep.returns = True
return changed
def does_escape(self):
for crep in self.creation_points:
if crep.escapes:
return True
return False
def does_return(self):
for crep in self.creation_points:
if crep.returns:
return True
return False
def __repr__(self):
return "<VarState %s>" % (self.creation_points, )
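# A simple escape analysis over RPython flow graphs: every heap allocation gets a
# CreationPoint, a VarState tracks the set of creation points a variable may refer
# to, and anything stored into a structure, passed to an unknown function or
# reaching the return/except blocks is flagged as returning/escaping.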
class AbstractDataFlowInterpreter(object):
def __init__(self, translation_context):
self.translation_context = translation_context
self.scheduled = {} # block: graph containing it
self.varstates = {} # var-or-const: state
self.creationpoints = {} # var: creationpoint
self.constant_cps = {} # const: creationpoint
self.dependencies = {} # creationpoint: {block: graph containing it}
self.functionargs = {} # graph: list of state of args
self.flown_blocks = {} # block: True
def seen_graphs(self):
return self.functionargs.keys()
def getstate(self, var_or_const):
if not isonheap(var_or_const):
return None
if var_or_const in self.varstates:
return self.varstates[var_or_const]
if isinstance(var_or_const, Variable):
varstate = VarState()
else:
if var_or_const not in self.constant_cps:
crep = CreationPoint("constant", var_or_const.concretetype)
self.constant_cps[var_or_const] = crep
else:
crep = self.constant_cps[var_or_const]
varstate = VarState(crep)
self.varstates[var_or_const] = varstate
return varstate
def getstates(self, varorconstlist):
return [self.getstate(var) for var in varorconstlist]
def setstate(self, var, state):
self.varstates[var] = state
def get_creationpoint(self, var, method="?", op=None):
if var in self.creationpoints:
return self.creationpoints[var]
crep = CreationPoint(method, var.concretetype, op)
self.creationpoints[var] = crep
return crep
def schedule_function(self, graph):
startblock = graph.startblock
if graph in self.functionargs:
args = self.functionargs[graph]
else:
args = []
for var in startblock.inputargs:
if not isonheap(var):
varstate = None
else:
crep = self.get_creationpoint(var, "arg")
varstate = VarState(crep)
self.setstate(var, varstate)
args.append(varstate)
self.scheduled[startblock] = graph
self.functionargs[graph] = args
resultstate = self.getstate(graph.returnblock.inputargs[0])
return resultstate, args
def flow_block(self, block, graph):
self.flown_blocks[block] = True
if block is graph.returnblock:
if isonheap(block.inputargs[0]):
self.returns(self.getstate(block.inputargs[0]))
return
if block is graph.exceptblock:
if isonheap(block.inputargs[0]):
self.escapes(self.getstate(block.inputargs[0]))
if isonheap(block.inputargs[1]):
self.escapes(self.getstate(block.inputargs[1]))
return
self.curr_block = block
self.curr_graph = graph
for op in block.operations:
self.flow_operation(op)
for exit in block.exits:
args = self.getstates(exit.args)
targetargs = self.getstates(exit.target.inputargs)
# flow every block at least once
if (multicontains(targetargs, args) and
exit.target in self.flown_blocks):
continue
for prevstate, origstate, var in zip(args, targetargs,
exit.target.inputargs):
if not isonheap(var):
continue
newstate = prevstate.merge(origstate)
self.setstate(var, newstate)
self.scheduled[exit.target] = graph
def flow_operation(self, op):
args = self.getstates(op.args)
opimpl = getattr(self, 'op_' + op.opname, None)
if opimpl is not None:
res = opimpl(op, *args)
if res is not NotImplemented:
self.setstate(op.result, res)
return
if isonheap(op.result) or filter(None, args):
for arg in args:
if arg is not None:
self.escapes(arg)
def complete(self):
while self.scheduled:
block, graph = self.scheduled.popitem()
self.flow_block(block, graph)
def escapes(self, arg):
changed = arg.setescapes()
self.handle_changed(changed)
def returns(self, arg):
changed = arg.setreturns()
self.handle_changed(changed)
def handle_changed(self, changed):
for crep in changed:
if crep not in self.dependencies:
continue
self.scheduled.update(self.dependencies[crep])
def register_block_dependency(self, state, block=None, graph=None):
if block is None:
block = self.curr_block
graph = self.curr_graph
for crep in state.creation_points:
self.dependencies.setdefault(crep, {})[block] = graph
def register_state_dependency(self, state1, state2):
"state1 depends on state2: if state2 does escape/change, so does state1"
# change state1 according to how state2 is now
if state2.does_escape():
self.escapes(state1)
if state2.does_return():
self.returns(state1)
# register a dependency of the current block on state2:
# that means that if state2 changes the current block will be reflown
# triggering this function again and thus updating state1
self.register_block_dependency(state2)
# _____________________________________________________________________
# operation implementations
def op_malloc(self, op, typestate, flagsstate):
assert flagsstate is None
flags = op.args[1].value
if flags != {'flavor': 'gc'}:
return NotImplemented
return VarState(self.get_creationpoint(op.result, "malloc", op))
def op_malloc_varsize(self, op, typestate, flagsstate, lengthstate):
assert flagsstate is None
flags = op.args[1].value
if flags != {'flavor': 'gc'}:
return NotImplemented
return VarState(self.get_creationpoint(op.result, "malloc_varsize", op))
def op_cast_pointer(self, op, state):
return state
def op_setfield(self, op, objstate, fieldname, valuestate):
if valuestate is not None:
# be pessimistic for now:
# everything that gets stored into a structure escapes
self.escapes(valuestate)
return None
def op_setarrayitem(self, op, objstate, indexstate, valuestate):
if valuestate is not None:
# everything that gets stored into a structure escapes
self.escapes(valuestate)
return None
def op_getarrayitem(self, op, objstate, indexstate):
if isonheap(op.result):
return VarState(self.get_creationpoint(op.result, "getarrayitem", op))
def op_getfield(self, op, objstate, fieldname):
if isonheap(op.result):
# assume that getfield creates a new value
return VarState(self.get_creationpoint(op.result, "getfield", op))
def op_getarraysize(self, op, arraystate):
pass
def op_direct_call(self, op, function, *args):
graph = get_graph(op.args[0], self.translation_context)
if graph is None:
for arg in args:
if arg is None:
continue
# an external function can escape every parameter:
self.escapes(arg)
funcargs = [None] * len(args)
else:
result, funcargs = self.schedule_function(graph)
assert len(args) == len(funcargs)
for localarg, funcarg in zip(args, funcargs):
if localarg is None:
assert funcarg is None
continue
if funcarg is not None:
self.register_state_dependency(localarg, funcarg)
if isonheap(op.result):
# assume that a call creates a new value
return VarState(self.get_creationpoint(op.result, "direct_call", op))
def op_indirect_call(self, op, function, *args):
graphs = op.args[-1].value
args = args[:-1]
if graphs is None:
for localarg in args:
if localarg is None:
continue
self.escapes(localarg)
else:
for graph in graphs:
result, funcargs = self.schedule_function(graph)
assert len(args) == len(funcargs)
for localarg, funcarg in zip(args, funcargs):
if localarg is None:
assert funcarg is None
continue
self.register_state_dependency(localarg, funcarg)
if isonheap(op.result):
# assume that a call creates a new value
return VarState(self.get_creationpoint(op.result, "indirect_call", op))
def op_ptr_iszero(self, op, ptrstate):
return None
op_cast_ptr_to_int = op_keepalive = op_ptr_nonzero = op_ptr_iszero
def op_ptr_eq(self, op, ptr1state, ptr2state):
return None
op_ptr_ne = op_ptr_eq
def op_same_as(self, op, objstate):
return objstate
def isonheap(var_or_const):
return isinstance(var_or_const.concretetype, lltype.Ptr)
def multicontains(l1, l2):
assert len(l1) == len(l2)
for a, b in zip(l1, l2):
if a is None:
assert b is None
elif not a.contains(b):
return False
return True
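# A graph is considered "malloc like" when the only creation point it returns is a
# non-escaping malloc/malloc_varsize, possibly reached through a chain of direct
# calls to other malloc-like graphs.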
def is_malloc_like(adi, graph, seen):
if graph in seen:
return seen[graph]
return_state = adi.getstate(graph.getreturnvar())
if return_state is None or len(return_state.creation_points) != 1:
seen[graph] = False
return False
crep, = return_state.creation_points
if crep.escapes:
seen[graph] = False
return False
if crep.creation_method in ["malloc", "malloc_varsize"]:
assert crep.returns
seen[graph] = True
return True
if crep.creation_method == "direct_call":
subgraph = get_graph(crep.op.args[0], adi.translation_context)
if subgraph is None:
seen[graph] = False
return False
res = is_malloc_like(adi, subgraph, seen)
seen[graph] = res
return res
seen[graph] = False
return False
def malloc_like_graphs(adi):
seen = {}
return [graph for graph in adi.seen_graphs()
if is_malloc_like(adi, graph, seen)]
| oblique-labs/pyVM | rpython/translator/backendopt/escape.py | Python | mit | 12,552 | 0.001514 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
from platform import python_version
from setuptools import setup, find_packages
major, minor, micro = python_version().split('.')
if major != '2' or minor not in ['4', '5', '6', '7']:
raise Exception('unsupported version of python')
requires = [
]
setup(
name='katello-agent',
version='0.1',
description='Katello Agent',
author='Jeff Ortel',
author_email='jortel@redhat.com',
url='',
license='GPLv2+',
packages=find_packages(),
scripts = [
],
include_package_data=False,
data_files=[],
classifiers=[
        'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python',
'Operating System :: POSIX',
'Topic :: Content Management and Delivery',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
],
install_requires=requires,
)
| stbenjam/katello-agent | src/setup.py | Python | gpl-2.0 | 1,603 | 0.001871 |
from django.conf.urls import patterns, include, url
from testapp.api import PersonResource
from django.contrib import admin
admin.autodiscover()
person_resource = PersonResource()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'testapp.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
(r'^api/', include(person_resource.urls))
)
| satish-suradkar/pyresttest | pyresttest/testapp/testapp/urls.py | Python | apache-2.0 | 535 | 0 |
# vim:set tabstop=3 shiftwidth=3 expandtab:
# vim:set autoindent smarttab nowrap:
from django.conf.urls.defaults import *
import settings
urlpatterns = patterns('',
(r'^$', 'webreview.views.index'),
(r'^skip/(?P<skip>.*)$', 'webreview.views.changes'),
(r'^diff/(?P<change_id>.*)/html$', 'webreview.views.diffhtml'),
(r'^addmodule$', 'webreview.views.addmodule'),
(r'^login$', 'webreview.views.login'),
(r'^changes/all/skip/(?P<skip>\d+)$', 'webreview.views.index'),
(r'^changes/all$', 'webreview.views.index'),
(r'^changes/(?P<filter>.*)/(?P<filter_id>\d+)/skip/(?P<skip>\d*)$', 'webreview.views.changes'),
(r'^changes/(?P<filter>.*)/(?P<filter_id>\d+)$', 'webreview.views.changes'),
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
| oseemann/cvsreview | app/urls.py | Python | gpl-3.0 | 1,023 | 0.006843 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
RAlgorithm.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os, numbers
from PyQt4.QtGui import QIcon
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.ProcessingLog import ProcessingLog
from processing.gui.Help2Html import Help2Html
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterTable import ParameterTable
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterMultipleInput import ParameterMultipleInput
from processing.parameters.ParameterString import ParameterString
from processing.parameters.ParameterNumber import ParameterNumber
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.parameters.ParameterSelection import ParameterSelection
from processing.parameters.ParameterTableField import ParameterTableField
from processing.parameters.ParameterExtent import ParameterExtent
from processing.parameters.ParameterFile import ParameterFile
from processing.outputs.OutputTable import OutputTable
from processing.outputs.OutputVector import OutputVector
from processing.outputs.OutputRaster import OutputRaster
from processing.outputs.OutputHTML import OutputHTML
from processing.outputs.OutputFile import OutputFile
from processing.outputs.OutputString import OutputString
from processing.outputs.OutputNumber import OutputNumber
from processing.tools.system import isWindows
from processing.script.WrongScriptException import WrongScriptException
from processing.r.RUtils import RUtils
class RAlgorithm(GeoAlgorithm):
R_CONSOLE_OUTPUT = 'R_CONSOLE_OUTPUT'
RPLOTS = 'RPLOTS'
KNITR_REPORT = 'KNITR_REPORT'
def getCopy(self):
newone = RAlgorithm(self.descriptionFile)
newone.provider = self.provider
return newone
def __init__(self, descriptionFile, script=None):
GeoAlgorithm.__init__(self)
self.script = script
self.descriptionFile = descriptionFile
if script is not None:
self.defineCharacteristicsFromScript()
if descriptionFile is not None:
self.defineCharacteristicsFromFile()
def getIcon(self):
return QIcon(os.path.dirname(__file__) + '/../images/r.png')
def defineCharacteristicsFromScript(self):
lines = self.script.split('\n')
self.name = '[Unnamed algorithm]'
self.group = 'User R scripts'
self.parseDescription(iter(lines))
def defineCharacteristicsFromFile(self):
filename = os.path.basename(self.descriptionFile)
self.name = filename[:filename.rfind('.')].replace('_', ' ')
self.group = 'User R scripts'
with open(self.descriptionFile, 'r') as f:
lines = [line.strip() for line in f]
self.parseDescription(iter(lines))
def parseDescription(self, lines):
self.outputNumberName = ''
self.outputNumberBool = False
self.script = ''
self.commands = []
self.report = False
self.showPlots = False
self.showConsoleOutput = False
self.useRasterPackage = True
self.passFileNames = False
self.useShapefilesPackage = False
self.verboseCommands = []
ender = 0
line = lines.next().strip('\n').strip('\r')
while ender < 10:
if line.startswith('##'):
try:
self.processParameterLine(line)
except Exception:
raise WrongScriptException('Could not load R script:'
+ self.descriptionFile + '.\n Problem with line "'
+ line + '"')
elif line.startswith('>'):
self.commands.append(line[1:])
self.verboseCommands.append(line[1:])
if not self.showConsoleOutput:
self.addOutput(OutputHTML(RAlgorithm.R_CONSOLE_OUTPUT,
'R Console Output'))
self.showConsoleOutput = True
else:
if line == '':
ender += 1
else:
ender = 0
self.commands.append(line)
self.script += line + '\n'
try:
line = lines.next().strip('\n').strip('\r')
except:
break
def getVerboseCommands(self):
return self.verboseCommands
def createDescriptiveName(self, s):
return s.replace('_', ' ')
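    # Header lines of the form "##<name>=<declaration>" declare the script's inputs and
    # outputs (e.g. "##Layer=raster" or "##Result=output vector"), while bare flags such
    # as "##showplots" or "##dontuserasterpackage" only toggle script behaviour.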
def processParameterLine(self, line):
param = None
out = None
line = line.replace('#', '')
if line.lower().strip().startswith('report'):
self.report = True
self.addOutput(OutputHTML(RAlgorithm.KNITR_REPORT, 'HTML Report'))
return
if line.lower().strip().startswith('showplots'):
self.showPlots = True
self.addOutput(OutputHTML(RAlgorithm.RPLOTS, 'R Plots'))
return
if line.lower().strip().startswith('dontuserasterpackage'):
self.useRasterPackage = False
return
if line.lower().strip().startswith('passfilenames'):
self.passFileNames = True
return
if line.lower().strip().startswith('shapefilespackage'):
self.useShapefilesPackage = True
return
tokens = line.split('=')
desc = self.createDescriptiveName(tokens[0])
if tokens[1].lower().strip() == 'group':
self.group = tokens[0]
return
if tokens[1].lower().strip().startswith('raster'):
param = ParameterRaster(tokens[0], desc, False)
elif tokens[1].lower().strip() == 'vector':
param = ParameterVector(tokens[0], desc,
[ParameterVector.VECTOR_TYPE_ANY])
elif tokens[1].lower().strip() == 'table':
param = ParameterTable(tokens[0], desc, False)
elif tokens[1].lower().strip().startswith('multiple raster'):
param = ParameterMultipleInput(tokens[0], desc,
ParameterMultipleInput.TYPE_RASTER)
param.optional = False
elif tokens[1].lower().strip() == 'multiple vector':
param = ParameterMultipleInput(tokens[0], desc,
ParameterMultipleInput.TYPE_VECTOR_ANY)
param.optional = False
elif tokens[1].lower().strip().startswith('selection'):
options = tokens[1].strip()[len('selection'):].split(';')
param = ParameterSelection(tokens[0], desc, options)
elif tokens[1].lower().strip().startswith('boolean'):
default = tokens[1].strip()[len('boolean') + 1:]
param = ParameterBoolean(tokens[0], desc, default)
elif tokens[1].lower().strip().startswith('number'):
try:
default = float(tokens[1].strip()[len('number') + 1:])
param = ParameterNumber(tokens[0], desc, default=default)
except:
raise WrongScriptException('Could not load R script:'
+ self.descriptionFile + '.\n Problem with line "'
+ line + '"')
elif tokens[1].lower().strip().startswith('field'):
field = tokens[1].strip()[len('field') + 1:]
found = False
for p in self.parameters:
if p.name == field:
found = True
break
if found:
param = ParameterTableField(tokens[0], desc, field)
elif tokens[1].lower().strip() == 'extent':
param = ParameterExtent(tokens[0], desc)
elif tokens[1].lower().strip() == 'file':
param = ParameterFile(tokens[0], desc, False)
elif tokens[1].lower().strip() == 'folder':
param = ParameterFile(tokens[0], desc, True)
elif tokens[1].lower().strip().startswith('string'):
default = tokens[1].strip()[len('string') + 1:]
param = ParameterString(tokens[0], desc, default)
elif tokens[1].lower().strip().startswith('output raster'):
out = OutputRaster()
elif tokens[1].lower().strip().startswith('output vector'):
out = OutputVector()
elif tokens[1].lower().strip().startswith('output table'):
out = OutputTable()
elif tokens[1].lower().strip().startswith('output file'):
out = OutputFile()
elif tokens[1].lower().strip().startswith('output string'):
out = OutputString()
elif tokens[1].lower().strip().startswith('output number'):
out = OutputNumber(tokens[0], desc)
self.outputNumberBool = True
self.outputNumberName = tokens[0]
if param is not None:
self.addParameter(param)
elif out is not None:
out.name = tokens[0]
out.description = tokens[0]
self.addOutput(out)
else:
raise WrongScriptException('Could not load R script:'
+ self.descriptionFile
+ '.\n Problem with line "' + line + '"'
)
def processAlgorithm(self, progress):
if isWindows():
path = RUtils.RFolder()
if path == '':
raise GeoAlgorithmExecutionException(
'R folder is not configured.\nPlease configure it \
before running R scripts.')
loglines = []
loglines.append('R execution commands')
loglines += self.getFullSetOfRCommands()
current = 0
total = 50.00 / len(loglines)
for line in loglines:
progress.setCommand(line)
current += 1
progress.setPercentage(int(current * total))
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
RUtils.executeRAlgorithm(self, progress)
if self.report:
knitrname = self.getOutputValue(RAlgorithm.KNITR_REPORT)
htmlreportname = open(self.htmlreport + '/reporthtml.html','r')
file = open(knitrname, 'w')
file.write(htmlreportname.read())
file.close()
htmlreportname.close()
if self.showPlots:
htmlfilename = self.getOutputValue(RAlgorithm.RPLOTS)
f = open(htmlfilename, 'w')
f.write('<html><img src="' + self.plotsFilename + '"/></html>')
f.close()
if self.showConsoleOutput:
htmlfilename = self.getOutputValue(RAlgorithm.R_CONSOLE_OUTPUT)
f = open(htmlfilename, 'w')
f.write(RUtils.getConsoleOutput())
f.close()
if self.outputNumberBool:
consoleOutput = RUtils.consoleResults
#self.setOutputValue(self.outputNumberName, consoleOutput)
for line in consoleOutput:
line = line.strip().strip(' ')
if line.startswith('<p>'):
token = line.split(' ')
token = token[1].split('<')
self.setOutputValue(self.outputNumberName, token[0])
def getFullSetOfRCommands(self):
commands = []
commands += self.getImportCommands()
commands += self.getRCommands()
commands += self.getExportCommands()
return commands
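    # Builds the R statements that persist each declared output, e.g. a raster output
    # becomes something like 'writeRaster(result,"/tmp/out.tif", overwrite=TRUE)' and a
    # vector output becomes a writeOGR()/write.shapefile() call, depending on the flags.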
def getExportCommands(self):
commands = []
for out in self.outputs:
if isinstance(out, OutputRaster):
value = out.value
value = value.replace('\\', '/')
if self.useRasterPackage or self.passFileNames:
commands.append('writeRaster(' + out.name + ',"' + value
+ '", overwrite=TRUE)')
else:
if not value.endswith('tif'):
value = value + '.tif'
commands.append('writeGDAL(' + out.name + ',"' + value
+ '")')
if isinstance(out, OutputVector):
value = out.value
if not value.endswith('shp'):
value = value + '.shp'
value = value.replace('\\', '/')
filename = os.path.basename(value)
filename = filename[:-4]
value2 = value[:-4]
if self.useShapefilesPackage:
commands.append('write.shapefile(' + out.name + ',"' + value2 + '",arcgis=TRUE)')
#commands.append('x <- readOGR("' + value + '", layer="' + filename + '")') #read and write
#commands.append('writeOGR(x,"' + value + '","'
                    #    + filename + '", driver="ESRI Shapefile")') # --> would have to write to a new file, cannot overwrite the existing one
else:
commands.append('writeOGR(' + out.name + ',"' + value + '","'
+ filename + '", driver="ESRI Shapefile")')
if isinstance(out, OutputTable):
value = out.value
if not value.endswith('csv'):
value = value + '.csv'
value = value.replace('\\', '/') # /tmp/0123456789/file
commands.append('write.csv(' + out.name +',file="' + value + '")')
# if isinstance(out, OutputNumber):
        #     value = self.getOutputFromName(out.name) # how do we get the value back from the R environment?
# self.setOutputValue(out.name, value)
if self.showPlots:
commands.append('dev.off()')
return commands
def getImportCommands(self):
commands = []
# Just use main mirror
commands.append('options("repos"="http://cran.at.r-project.org/")')
# Try to install packages if needed
if isWindows():
commands.append('.libPaths(\"' + str(RUtils.RLibs()).replace('\\','/') + '\")')
packages = RUtils.getRequiredPackages(self.script)
packages.extend(['rgdal', 'raster'])
for p in packages:
commands.append('tryCatch(find.package("' + p
+ '"), error=function(e) install.packages("' + p
+ '", dependencies=TRUE))')
commands.append('library("raster")')
commands.append('library("rgdal")')
for param in self.parameters:
if isinstance(param, ParameterFile):
if param.isFolder:
value = param.value
self.htmlreport = value.replace('\\','/')
if isinstance(param, ParameterRaster):
value = param.value
value = value.replace('\\', '/')
if self.passFileNames:
commands.append(param.name + ' = "' + value + '"')
elif self.useRasterPackage:
commands.append(param.name + ' = ' + 'brick("' + value
+ '")')
else:
commands.append(param.name + ' = ' + 'readGDAL("' + value
+ '")')
if isinstance(param, ParameterVector):
value = param.getSafeExportedLayer()
value = value.replace('\\', '/')
filename = os.path.basename(value)
filename = filename[:-4]
folder = os.path.dirname(value)
if self.passFileNames:
commands.append(param.name + ' = "' + value + '"')
else:
commands.append(param.name + ' = readOGR("' + folder
+ '",layer="' + filename + '")')
if isinstance(param, ParameterTable):
value = param.value
if not value.lower().endswith('csv'):
raise GeoAlgorithmExecutionException(
'Unsupported input file format.\n' + value)
if self.passFileNames:
commands.append(param.name + ' = "' + value + '"')
else:
commands.append(param.name + ' <- read.csv("' + value
+ '", head=TRUE, sep=",")')
elif isinstance(param, (ParameterTableField, ParameterString,
ParameterFile)):
commands.append(param.name + '="' + param.value + '"')
elif isinstance(param, (ParameterNumber, ParameterSelection)):
commands.append(param.name + '=' + str(param.value))
elif isinstance(param, ParameterBoolean):
if param.value:
commands.append(param.name + '=TRUE')
else:
commands.append(param.name + '=FALSE')
elif isinstance(param, ParameterMultipleInput):
iLayer = 0
if param.datatype == ParameterMultipleInput.TYPE_RASTER:
layers = param.value.split(';')
for layer in layers:
layer = layer.replace('\\', '/')
if self.passFileNames:
commands.append('tempvar' + str(iLayer) + ' <- "'
+ layer + '"')
elif self.useRasterPackage:
commands.append('tempvar' + str(iLayer) + ' <- '
+ 'brick("' + layer + '")')
else:
commands.append('tempvar' + str(iLayer) + ' <- '
+ 'readGDAL("' + layer + '")')
iLayer += 1
else:
exported = param.getSafeExportedLayers()
layers = exported.split(';')
for layer in layers:
if not layer.lower().endswith('shp') \
and not self.passFileNames:
raise GeoAlgorithmExecutionException(
'Unsupported input file format.\n' + layer)
layer = layer.replace('\\', '/')
filename = os.path.basename(layer)
filename = filename[:-4]
if self.passFileNames:
commands.append('tempvar' + str(iLayer) + ' <- "'
+ layer + '"')
else:
commands.append('tempvar' + str(iLayer) + ' <- '
+ 'readOGR("' + layer + '",layer="'
+ filename + '")')
iLayer += 1
s = ''
s += param.name
s += ' = c('
iLayer = 0
for layer in layers:
if iLayer != 0:
s += ','
s += 'tempvar' + str(iLayer)
iLayer += 1
s += ')\n'
commands.append(s)
if self.showPlots:
htmlfilename = self.getOutputValue(RAlgorithm.RPLOTS)
self.plotsFilename = htmlfilename + '.png'
self.plotsFilename = self.plotsFilename.replace('\\', '/')
commands.append('png("' + self.plotsFilename + '")')
return commands
def getRCommands(self):
return self.commands
def helpFile(self):
helpfile = unicode(self.descriptionFile) + '.help'
if os.path.exists(helpfile):
h2h = Help2Html()
return True, h2h.getHtmlFile(self, helpfile)
else:
return False, None
def checkBeforeOpeningParametersDialog(self):
msg = RUtils.checkRIsInstalled()
if msg is not None:
            html = '<p>This algorithm requires R to be run. Unfortunately, \
it seems that R is not installed in your system, or it \
is not correctly configured to be used from QGIS</p>'
html += '<p><a href= "http://docs.qgis.org/2.0/html/en/docs/user_manual/processing/3rdParty.html">Click here</a> to know more about how to install and configure R to be used with QGIS</p>'
return html
def getPostProcessingErrorMessage(self, wrongLayers):
html = GeoAlgorithm.getPostProcessingErrorMessage(self, wrongLayers)
msg = RUtils.checkRIsInstalled(True)
html += '<p>This algorithm requires R to be run. A test to check if \
R is correctly installed and configured in your system has \
                been performed, with the following result:</p><ul><li><i>'
if msg is None:
html += 'R seems to be correctly installed and \
configured</i></li></ul>'
html += '<p>The script you have executed needs the following \
packages:</p><ul>'
packages = RUtils.getRequiredPackages(self.script)
for p in packages:
html += '<li>' + p + '</li>'
html += '</ul><p>Make sure they are installed in your R \
environment before trying to execute this script.</p>'
else:
html += msg + '</i></li></ul>'
html += '<p><a href= "http://docs.qgis.org/2.0/html/en/docs/user_manual/processing/3rdParty.html">Click here</a> to know more about how to install and configure R to be used with QGIS</p>'
return html
| adwiputra/LUMENS-repo | processing/r/RAlgorithm.py | Python | gpl-2.0 | 22,835 | 0.001927 |
class Solution(object):
def validWordSquare(self, words):
"""
:type words: List[str]
:rtype: bool
"""
if words is None or len(words) == 0:
return True
ls = len(words)
for i in range(ls):
for j in range(1, len(words[i])):
if j >= ls:
return False
if i >= len(words[j]):
return False
if words[i][j] != words[j][i]:
return False
return True
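    # Example (illustrative): ["abcd", "bnrt", "crmy", "dtye"] is a valid word
    # square because the k-th row reads the same as the k-th column.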
# def validWordSquare(self, words):
# # https://discuss.leetcode.com/topic/63423/1-liner-python/2
# # The map(None, ...) transposes the "matrix", filling missing spots with None
# return map(None, *words) == map(None, *map(None, *words))
| qiyuangong/leetcode | python/422_Valid_Word_Square.py | Python | mit | 805 | 0.001242 |
import logging
from datetime import timedelta
from core import Feed
import pandas as pd
from core.observables import Ip, Observable
from core.errors import ObservableValidationError
class ThreatFox(Feed):
default_values = {
"frequency": timedelta(hours=1),
"name": "ThreatFox",
"source": "https://threatfox.abuse.ch/export/json/recent/",
"description": "Feed ThreatFox by Abuse.ch",
}
def update(self):
for index, line in self.update_json():
self.analyze(line)
def update_json(self):
r = self._make_request(sort=False)
if r:
res = r.json()
values = [r[0] for r in res.values()]
df = pd.DataFrame(values)
df["first_seen_utc"] = pd.to_datetime(df["first_seen_utc"])
df["last_seen_utc"] = pd.to_datetime(df["last_seen_utc"])
if self.last_run:
df = df[df["first_seen_utc"] > self.last_run]
df.fillna("-", inplace=True)
return df.iterrows()
def analyze(self, item):
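        # Map one ThreatFox JSON record onto a Yeti observable, attaching the
        # record's metadata as context and its labels as tags.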
first_seen = item["first_seen_utc"]
ioc_value = item["ioc_value"]
ioc_type = item["ioc_type"]
threat_type = item["threat_type"]
malware_alias = item["malware_alias"]
malware_printable = item["malware_printable"]
last_seen_utc = item["last_seen_utc"]
confidence_level = item["confidence_level"]
reference = item["reference"]
reporter = item["reporter"]
tags = []
context = {"source": self.name}
context["first_seen"] = first_seen
if reference:
context["reference"] = reference
else:
context["reference"] = "Unknown"
if reporter:
context["reporter"] = reporter
else:
context["reporter"] = "Unknown"
if threat_type:
context["threat_type"] = threat_type
if item["tags"]:
tags.extend(item["tags"].split(","))
if malware_printable:
tags.append(malware_printable)
if malware_alias:
context["malware_alias"] = malware_alias
if last_seen_utc:
context["last_seen_utc"] = last_seen_utc
if confidence_level:
context["confidence_level"] = confidence_level
value = None
obs = None
try:
if "ip" in ioc_type:
value, port = ioc_value.split(":")
context["port"] = port
obs = Ip.get_or_create(value=value)
else:
obs = Observable.add_text(ioc_value)
except ObservableValidationError as e:
logging.error(e)
return
if obs:
obs.add_context(context)
obs.add_source(self.name)
if tags:
obs.tag(tags)
if malware_printable:
                obs.tag([malware_printable])
| yeti-platform/yeti | plugins/feeds/public/threatfox.py | Python | apache-2.0 | 2,925 | 0 |
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import os, sys, time
import numpy as np
from pdb import set_trace
import montage_wrapper as montage
import shutil
import gal_data
import config
import glob
from scipy.ndimage.interpolation import zoom
#_TOP_DIR = '/data/tycho/0/leroy.42/allsky/'
#_INDEX_DIR = os.path.join(_TOP_DIR, 'code/')
_HOME_DIR = '/n/home00/lewis.1590/research/galbase_allsky/'
_DATA_DIR = '/n/home00/lewis.1590/research/galbase/gal_data/'
#_MOSAIC_DIR = os.path.join(_HOME_DIR, 'cutouts')
_GAL_DIR = os.path.join(_HOME_DIR, 'ngc2976')
_INPUT_DIR = os.path.join(_GAL_DIR, 'input')
_MOSAIC_DIR = os.path.join(_GAL_DIR, 'mosaics')
def get_args():
import argparse
parser = argparse.ArgumentParser(description='Create cutouts of a given size around each galaxy center.')
parser.add_argument('--size', default=30.,help='cutout size in arcminutes')
parser.add_argument('--cutout', action='store_true')
parser.add_argument('--copy', action='store_true')
parser.add_argument('--convolve', action='store_true')
parser.add_argument('--align', action='store_true')
return parser.parse_args()
def create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale):
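    # Build a minimal gnomonic (TAN) WCS header for a square cutout of
    # pix_len x pix_len pixels centered on (ra_ctr, dec_ctr).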
hdr = pyfits.Header()
hdr['NAXIS'] = 2
hdr['NAXIS1'] = pix_len
hdr['NAXIS2'] = pix_len
hdr['CTYPE1'] = 'RA---TAN'
hdr['CRVAL1'] = float(ra_ctr)
hdr['CRPIX1'] = (pix_len / 2.) * 1.
hdr['CDELT1'] = -1.0 * pix_scale
hdr['CTYPE2'] = 'DEC--TAN'
hdr['CRVAL2'] = float(dec_ctr)
hdr['CRPIX2'] = (pix_len / 2.) * 1.
hdr['CDELT2'] = pix_scale
hdr['EQUINOX'] = 2000
return hdr
def make_axes(hdr, quiet=False, novec=False, vonly=False, simple=False):
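    # Build 2-D RA and Dec images (one coordinate per pixel) from the WCS in a
    # FITS header; with vonly=True only the velocity axis of a cube is returned.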
# PULL THE IMAGE/CUBE SIZES FROM THE HEADER
naxis = hdr['NAXIS']
naxis1 = hdr['NAXIS1']
naxis2 = hdr['NAXIS2']
if naxis > 2:
naxis3 = hdr['NAXIS3']
## EXTRACT FITS ASTROMETRY STRUCTURE
ww = pywcs.WCS(hdr)
#IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)
if naxis > 3:
#GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER
cd = ww.wcs.cd
crpix = ww.wcs.crpix
        cdelt = ww.wcs.cdelt
crval = ww.wcs.crval
if naxis > 2:
# MAKE THE VELOCITY AXIS (WILL BE M/S)
v = np.arange(naxis3) * 1.0
vdif = v - (hdr['CRPIX3']-1)
vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])
# CUT OUT HERE IF WE ONLY WANT VELOCITY INFO
if vonly:
return vaxis
#IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:
if simple:
print('Using simple aproach to make axes.')
print('BE SURE THIS IS WHAT YOU WANT! It probably is not.')
raxis = np.arange(naxis1) * 1.0
rdif = raxis - (hdr['CRPIX1'] - 1)
raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])
daxis = np.arange(naxis2) * 1.0
        ddif = daxis - (hdr['CRPIX2'] - 1)
        daxis = (ddif * hdr['CDELT2'] + hdr['CRVAL2'])
        rimg = np.outer(raxis, np.ones(naxis2))   # raxis replicated along rows
        dimg = np.outer(np.ones(naxis1), daxis)   # daxis replicated along columns
return rimg, dimg
# OBNOXIOUS SFL/GLS THING
glspos = ww.wcs.ctype[0].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[0]
newtype = 'SFL'
ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[0] = ctstr
print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])
glspos = ww.wcs.ctype[1].find('GLS')
if glspos != -1:
ctstr = ww.wcs.ctype[1]
newtype = 'SFL'
ctstr.replace('GLS', 'SFL')
ww.wcs.ctype[1] = ctstr
print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])
# CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE
if novec:
rimg = np.zeros((naxis1, naxis2))
dimg = np.zeros((naxis1, naxis2))
for i in range(naxis1):
            j = np.arange(naxis2) * 1.0
            pixcrd = np.array(zip([float(i)] * naxis2, j), np.float_)
            sky = ww.all_pix2world(pixcrd, 1)
            rimg[i, :] = sky[:, 0]
            dimg[i, :] = sky[:, 1]
else:
ximg = np.arange(naxis1) * 1.0
yimg = np.arange(naxis1) * 1.0
X, Y = np.meshgrid(ximg, yimg, indexing='xy')
ss = X.shape
xx, yy = X.flatten(), Y.flatten()
pixcrd = np.array(zip(xx, yy), np.float_)
img_new = ww.all_pix2world(pixcrd, 0)
rimg_new, dimg_new = img_new[:,0], img_new[:,1]
rimg = rimg_new.reshape(ss)
dimg = dimg_new.reshape(ss)
# GET AXES FROM THE IMAGES. USE THE CENTRAL COLUMN AND CENTRAL ROW
raxis = np.squeeze(rimg[:, naxis2/2])
daxis = np.squeeze(dimg[naxis1/2, :])
return rimg, dimg
def convert_files(gal_dir, im_dir, wt_dir, band, fuv_toab, nuv_toab, pix_as):
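    # Convert GALEX -int images from counts/s to MJy/sr (NUV zero point; the FUV
    # conversion is commented out) and copy the matching -rrhr weight maps into a
    # 'converted' subdirectory.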
converted_dir = os.path.join(gal_dir, 'converted')
os.makedirs(converted_dir)
intfiles = sorted(glob.glob(os.path.join(im_dir, '*-int.fits')))
wtfiles = sorted(glob.glob(os.path.join(wt_dir, '*-rrhr.fits')))
int_outfiles = [os.path.join(converted_dir, os.path.basename(f).replace('.fits', '_mjysr.fits')) for f in intfiles]
wt_outfiles = [os.path.join(converted_dir, os.path.basename(f)) for f in wtfiles]
for i in range(len(intfiles)):
if os.path.exists(wtfiles[i]):
im, hdr = pyfits.getdata(intfiles[i], header=True)
wt, whdr = pyfits.getdata(wtfiles[i], header=True)
#wt = wtpersr(wt, pix_as)
#if band.lower() == 'fuv':
# im = counts2jy_galex(im, fuv_toab, pix_as)
if band.lower() == 'nuv':
im = counts2jy_galex(im, nuv_toab, pix_as)
if not os.path.exists(int_outfiles[i]):
# im -= np.mean(im)
pyfits.writeto(int_outfiles[i], im, hdr)
if not os.path.exists(wt_outfiles[i]):
pyfits.writeto(wt_outfiles[i], wt, whdr)
else:
continue
return converted_dir, converted_dir
def counts2jy_galex(counts, cal, pix_as):
# first convert to abmag
abmag = -2.5 * np.log10(counts) + cal
# then convert to Jy
f_nu = 10**(abmag/-2.5) * 3631.
# then to MJy
f_nu *= 1e-6
# then to MJy/sr
val = f_nu / (np.radians(pix_as/3600))**2
return val
#val = flux / MJYSR2JYARCSEC / pixel_area / 1e-23 / C * FUV_LAMBDA**2
def write_headerfile(header_file, header):
f = open(header_file, 'w')
for iii in range(len(header)):
outline = str(header[iii:iii+1]).strip().rstrip('END').strip()+'\n'
f.write(outline)
f.close()
def mask_images(cnt_dir, exp_dir, flag_dir, gal_dir):
masked_dir = os.path.join(gal_dir, 'masked')
os.makedirs(masked_dir)
cnt_masked_dir = os.path.join(masked_dir, 'cnt')
    exp_masked_dir = os.path.join(masked_dir, 'rrhr')
    os.makedirs(cnt_masked_dir)
    os.makedirs(exp_masked_dir)
    cnt_suff, exp_suff, flag_suff = '*-cnt.fits', '*-rrhr.fits', '*-flag.fits'
    cnt_images = sorted(glob.glob(os.path.join(cnt_dir, cnt_suff)))
    rrhr_images = sorted(glob.glob(os.path.join(exp_dir, exp_suff)))
    flag_images = sorted(glob.glob(os.path.join(flag_dir, flag_suff)))
    for i in range(len(cnt_images)):
        image_infile = cnt_images[i]
        time_infile = rrhr_images[i]
        flag_infile = flag_images[i]
        image_outfile = os.path.join(cnt_masked_dir, os.path.basename(image_infile))
        time_outfile = os.path.join(exp_masked_dir, os.path.basename(time_infile))
        mask_galex(image_infile, time_infile, flag_infile, out_intfile=image_outfile, out_wtfile=time_outfile)
    return cnt_masked_dir, exp_masked_dir
def mask_galex(cntfile, timefile, flagfile, outfile=None, chip_rad = 1400, chip_x0=1920, chip_y0=1920, out_intfile=None, out_wtfile=None):
if out_intfile is None:
        out_intfile = cntfile.replace('.fits', '_masked.fits')
if out_wtfile is None:
        out_wtfile = timefile.replace('.fits', '_masked.fits')
if not os.path.exists(out_intfile):
cnt, hdr = pyfits.getdata(cntfile, header=True)
exp, whdr = pyfits.getdata(timefile, header=True)
flag, fhdr = pyfits.getdata(flagfile, header=True)
factor = float(len(cnt)) / len(flag)
upflag = zoom(flag, factor, order=0)
        x = np.arange(cnt.shape[1]).reshape(1, -1) + 1
        y = np.arange(cnt.shape[0]).reshape(-1, 1) + 1
        r = np.sqrt((x - chip_x0)**2 + (y - chip_y0)**2)
        i = (r > chip_rad)
        j = (upflag == 2) | (upflag == 4) | (upflag == 6)
        k = (exp == -1.1e30)
        cnt = np.where(i | k, 0, cnt) #0
        exp = np.where(i | j | k, 1e-20, exp) #1e-20
pyfits.writeto(out_intfile, cnt, hdr)
pyfits.writeto(out_wtfile, exp, whdr)
def reproject_images(template_header, input_dir, reprojected_dir, imtype, whole=False, exact=True, img_list=None):
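    # Reproject all images of the given type onto the target header with Montage
    # (mImgtbl + mProjExec), writing the results into a per-type subdirectory.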
reproj_imtype_dir = os.path.join(reprojected_dir, imtype)
os.makedirs(reproj_imtype_dir)
input_table = os.path.join(input_dir, imtype + '_input.tbl')
montage.mImgtbl(input_dir, input_table, corners=True, img_list=img_list)
# Create reprojection directory, reproject, and get image metadata
stats_table = os.path.join(reproj_imtype_dir, imtype+'_mProjExec_stats.log')
montage.mProjExec(input_table, template_header, reproj_imtype_dir, stats_table, raw_dir=input_dir, whole=whole, exact=exact)
reprojected_table = os.path.join(reproj_imtype_dir, imtype + '_reprojected.tbl')
montage.mImgtbl(reproj_imtype_dir, reprojected_table, corners=True)
return reproj_imtype_dir
def weight_images(im_dir, wt_dir, weight_dir):
im_suff, wt_suff = '*_mjysr.fits', '*-rrhr.fits'
imfiles = sorted(glob.glob(os.path.join(im_dir, im_suff)))
wtfiles = sorted(glob.glob(os.path.join(wt_dir, wt_suff)))
im_weight_dir = os.path.join(weight_dir, 'int')
wt_weight_dir = os.path.join(weight_dir, 'rrhr')
[os.makedirs(out_dir) for out_dir in [im_weight_dir, wt_weight_dir]]
for i in range(len(imfiles)):
imfile = imfiles[i]
wtfile = wtfiles[i]
im, hdr = pyfits.getdata(imfile, header=True)
rrhr, rrhrhdr = pyfits.getdata(wtfile, header=True)
# noise = 1. / np.sqrt(rrhr)
# weight = 1 / noise**2
wt = rrhr
newim = im * wt
#nf = imfiles[i].split('/')[-1].replace('.fits', '_weighted.fits')
#newfile = os.path.join(weighted_dir, nf)
newfile = os.path.join(im_weight_dir, os.path.basename(imfile))
pyfits.writeto(newfile, newim, hdr)
old_area_file = imfile.replace('.fits', '_area.fits')
if os.path.exists(old_area_file):
new_area_file = newfile.replace('.fits', '_area.fits')
shutil.copy(old_area_file, new_area_file)
#nf = wtfiles[i].split('/')[-1].replace('.fits', '_weights.fits')
#weightfile = os.path.join(weights_dir, nf)
weightfile = os.path.join(wt_weight_dir, os.path.basename(wtfile))
pyfits.writeto(weightfile, wt, rrhrhdr)
old_area_file = wtfile.replace('.fits', '_area.fits')
if os.path.exists(old_area_file):
new_area_file = weightfile.replace('.fits', '_area.fits')
shutil.copy(old_area_file, new_area_file)
return im_weight_dir, wt_weight_dir
def create_table(in_dir, dir_type=None):
if dir_type is None:
reprojected_table = os.path.join(in_dir, 'reprojected.tbl')
else:
reprojected_table = os.path.join(in_dir, dir_type + '_reprojected.tbl')
montage.mImgtbl(in_dir, reprojected_table, corners=True)
return reprojected_table
def coadd(template_header, output_dir, input_dir, output=None, add_type=None):
img_dir = input_dir
# output is either 'weights' or 'int'
if output is None:
reprojected_table = os.path.join(img_dir, 'reprojected.tbl')
out_image = os.path.join(output_dir, 'mosaic.fits')
else:
reprojected_table = os.path.join(img_dir, output + '_reprojected.tbl')
out_image = os.path.join(output_dir, output + '_mosaic.fits')
montage.mAdd(reprojected_table, template_header, out_image, img_dir=img_dir, exact=True, type=add_type)
def finish_weight(output_dir):
image_file = os.path.join(output_dir, 'int_mosaic.fits')
wt_file = os.path.join(output_dir, 'weights_mosaic.fits')
count_file = os.path.join(output_dir, 'count_mosaic.fits')
im, hdr = pyfits.getdata(image_file, header=True)
wt = pyfits.getdata(wt_file)
ct = pyfits.getdata(count_file)
newim = im / wt
newfile = os.path.join(output_dir, 'image_mosaic.fits')
pyfits.writeto(newfile, newim, hdr)
return newfile
def divide_files(output_dir):
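    # Divide the coadded counts mosaic by the coadded exposure-time mosaic to
    # produce an intensity (counts per second) image.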
image_file = os.path.join(output_dir, 'cnt_mosaic.fits')
time_file = os.path.join(output_dir, 'exp_mosaic.fits')
#count_file = os.path.join(output_dir, 'numbers_mosaic.fits')
cnt, hdr = pyfits.getdata(image_file, header=True)
    exp = pyfits.getdata(time_file)
#ct = pyfits.getdata(count_file)
newim = cnt / exp
newfile = os.path.join(output_dir, 'image_mosaic.fits')
pyfits.writeto(newfile, newim, hdr)
return newfile
def remove_background(final_dir, imfile, bgfile):
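    # Estimate a constant background level from user-supplied sky boxes (bgfile)
    # and subtract it from the mosaic before writing the final image.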
data, hdr = pyfits.getdata(imfile, header=True)
box_inds = read_bg_regfile(bgfile)
allvals = []
sample_means = []
for box in box_inds:
rectangle = zip(box[0::2], box[1::2])
sample = get_bg_sample(data, hdr, rectangle)
for s in sample:
allvals.append(s)
sample_mean = np.nanmean(sample)
sample_means.append(sample_mean)
this_mean = np.around(np.nanmean(sample_means), 8)
final_data = data - this_mean
hdr['BG'] = this_mean
hdr['comment'] = 'Background has been subtracted.'
outfile = os.path.join(final_dir, 'final_mosaic.fits')
pyfits.writeto(outfile, final_data, hdr)
def galex(band='fuv', ra_ctr=None, dec_ctr=None, size_deg=None, index=None, name=None):
gal_dir = _GAL_DIR
galaxy_mosaic_file = os.path.join(_MOSAIC_DIR, '_'.join([name, band]).upper() + '.FITS')
start_time = time.time()
# CALIBRATION FROM COUNTS TO ABMAG
fuv_toab = 18.82
nuv_toab = 20.08
# PIXEL SCALE IN ARCSECONDS
pix_as = 1.5 # galex pixel scale -- from galex docs
# MAKE A HEADER
pix_scale = 1.5 / 3600. # 1.5 arbitrary: how should I set it?
pix_len = size_deg / pix_scale
target_hdr = create_hdr(ra_ctr, dec_ctr, pix_len, pix_scale)
# SET UP THE OUTPUT
ri_targ, di_targ = make_axes(target_hdr)
sz_out = ri_targ.shape
outim = ri_targ * np.nan
prihdu = pyfits.PrimaryHDU(data=outim, header=target_hdr)
target_hdr = prihdu.header
# GATHER THE INPUT FILES
#im_dir, wt_dir, nfiles = get_input(index, ind, data_dir, gal_dir)
cnt_dir = _INPUT_DIR
exp_dir = _INPUT_DIR
flag_dir = _INPUT_DIR
# CONVERT INT FILES TO MJY/SR AND WRITE NEW FILES INTO TEMP DIR
#im_dir, wt_dir = convert_files(gal_dir, im_dir, wt_dir, band, fuv_toab, nuv_toab, pix_as)
# APPEND UNIT INFORMATION TO THE NEW HEADER AND WRITE OUT HEADER FILE
#target_hdr['BUNIT'] = 'MJY/SR'
hdr_file = os.path.join(gal_dir, name + '_template.hdr')
write_headerfile(hdr_file, target_hdr)
# MASK IMAGES
cnt_dir, exp_dir = mask_images(cnt_dir, exp_dir, flag_dir, gal_dir)
# REPROJECT IMAGES
reprojected_dir = os.path.join(gal_dir, 'reprojected')
os.makedirs(reprojected_dir)
    cnt_dir = reproject_images(hdr_file, cnt_dir, reprojected_dir, 'cnt')
    exp_dir = reproject_images(hdr_file, exp_dir, reprojected_dir, 'exp')
# WEIGHT IMAGES
#weight_dir = os.path.join(gal_dir, 'weight')
#os.makedirs(weight_dir)
#im_dir, wt_dir = weight_images(im_dir, wt_dir, weight_dir)
# CREATE THE METADATA TABLES NEEDED FOR COADDITION
weight_table = create_table(exp_dir, dir_type='exp')
weighted_table = create_table(cnt_dir, dir_type='cnt')
count_table = create_table(cnt_dir, dir_type='numbers')
# COADD THE REPROJECTED, WEIGHTED IMAGES AND THE WEIGHT IMAGES
final_dir = os.path.join(gal_dir, 'mosaic')
os.makedirs(final_dir)
coadd(hdr_file, final_dir, exp_dir, output='exp')
coadd(hdr_file, final_dir, cnt_dir, output='cnt')
coadd(hdr_file, final_dir, cnt_dir, output='numbers',add_type='count')
# DIVIDE OUT THE WEIGHTS
#imagefile = finish_weight(final_dir)
final_imfile = divide_files(final_dir)
# SUBTRACT OUT THE BACKGROUND
#remove_background(final_dir, imagefile, bg_reg_file)
# COPY MOSAIC FILES TO CUTOUTS DIRECTORY
    mos_files = ['image_mosaic.fits', 'exp_mosaic.fits', 'numbers_mosaic.fits']
suffixes = ['.FITS', '_weight.FITS', '_count.FITS']
for f, s in zip(mos_files, suffixes):
shutil.copy(os.path.join(final_dir, f),
os.path.join(gal_dir, '_'.join([name, band]).upper() + s))
# REMOVE GALAXY DIRECTORY AND EXTRA FILES
#shutil.rmtree(gal_dir, ignore_errors=True)
    fdirs = [final_dir, reprojected_dir, os.path.join(gal_dir, 'converted'), os.path.join(gal_dir, 'masked')]
for fdir in fdirs:
shutil.rmtree(fdir, ignore_errors=True)
# NOTE TIME TO FINISH
stop_time = time.time()
total_time = (stop_time - start_time) / 60.
print total_time
return
def main(**kwargs):
if kwargs['cutout']:
gals = gal_data.gal_data(tag='SINGS', data_dir=_DATA_DIR)
n_gals = len(gals)
size_deg = kwargs['size'] * 60. / 3600.
for i in range(n_gals):
this_gal = np.rec.fromarrays(gals[i], names=list(config.COLUMNS))
galname = str(this_gal.name).replace(' ', '').upper()
if galname == 'NGC2976':
set_trace()
galex(band='fuv', ra_ctr=this_gal.ra_deg, dec_ctr=this_gal.dec_deg, size_deg=size_deg, name=galname)
#galex(band='nuv', ra_ctr=this_gal.ra_deg, dec_ctr=this_gal.dec_deg, size_deg=size_deg, name=galname)
if kwargs['copy']:
pass
if kwargs['convolve']:
pass
if kwargs['align']:
pass
if __name__ == '__main__':
args = get_args()
main(**vars(args))
| arlewis/arl_galbase | single_cutout_test_newmethod.py | Python | mit | 18,179 | 0.004951 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 OpenERP S.A (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.Charset import Charset
from email.Header import Header
from email.Utils import formatdate, make_msgid, COMMASPACE
from email import Encoders
import logging
import re
import smtplib
import threading
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools import html2text
import openerp.tools as tools
# ustr was originally from tools.misc.
# it is moved to loglevels until we refactor tools.
from openerp.loglevels import ustr
_logger = logging.getLogger(__name__)
class MailDeliveryException(osv.except_osv):
"""Specific exception subclass for mail delivery errors"""
def __init__(self, name, value):
super(MailDeliveryException, self).__init__(name, value)
class WriteToLogger(object):
"""debugging helper: behave as a fd and pipe to logger at the given level"""
def __init__(self, logger, level=logging.DEBUG):
self.logger = logger
self.level = level
def write(self, s):
self.logger.log(self.level, s)
def try_coerce_ascii(string_utf8):
"""Attempts to decode the given utf8-encoded string
    as ASCII, then returns
the confirmed 7-bit ASCII string.
If the process fails (because the string
contains non-ASCII characters) returns ``None``.
"""
try:
string_utf8.decode('ascii')
except UnicodeDecodeError:
return
return string_utf8
def encode_header(header_text):
"""Returns an appropriate representation of the given header value,
suitable for direct assignment as a header value in an
email.message.Message. RFC2822 assumes that headers contain
only 7-bit characters, so we ensure it is the case, using
RFC2047 encoding when needed.
:param header_text: unicode or utf-8 encoded string with header value
:rtype: string | email.header.Header
:return: if ``header_text`` represents a plain ASCII string,
return the same 7-bit string, otherwise returns an email.header.Header
that will perform the appropriate RFC2047 encoding of
non-ASCII values.
"""
if not header_text: return ""
# convert anything to utf-8, suitable for testing ASCIIness, as 7-bit chars are
# encoded as ASCII in utf-8
header_text_utf8 = tools.ustr(header_text).encode('utf-8')
header_text_ascii = try_coerce_ascii(header_text_utf8)
# if this header contains non-ASCII characters,
# we'll need to wrap it up in a message.header.Header
# that will take care of RFC2047-encoding it as
# 7-bit string.
return header_text_ascii if header_text_ascii\
else Header(header_text_utf8, 'utf-8')
def encode_header_param(param_text):
"""Returns an appropriate RFC2047 encoded representation of the given
    header parameter value, suitable for direct assignment as the
param value (e.g. via Message.set_param() or Message.add_header())
RFC2822 assumes that headers contain only 7-bit characters,
so we ensure it is the case, using RFC2047 encoding when needed.
:param param_text: unicode or utf-8 encoded string with header value
:rtype: string
:return: if ``param_text`` represents a plain ASCII string,
return the same 7-bit string, otherwise returns an
ASCII string containing the RFC2047 encoded text.
"""
# For details see the encode_header() method that uses the same logic
if not param_text: return ""
param_text_utf8 = tools.ustr(param_text).encode('utf-8')
param_text_ascii = try_coerce_ascii(param_text_utf8)
return param_text_ascii if param_text_ascii\
else Charset('utf8').header_encode(param_text_utf8)
name_with_email_pattern = re.compile(r'("[^<@>]+")\s*<([^ ,<@]+@[^> ,]+)>')
address_pattern = re.compile(r'([^ ,<@]+@[^> ,]+)')
def extract_rfc2822_addresses(text):
"""Returns a list of valid RFC2822 addresses
    that can be found in ``text``, ignoring
malformed ones and non-ASCII ones.
"""
if not text: return []
candidates = address_pattern.findall(tools.ustr(text).encode('utf-8'))
return filter(try_coerce_ascii, candidates)
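# Illustrative example: extract_rfc2822_addresses('"John Doe" <john@example.com>')
# returns ['john@example.com'].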
def encode_rfc2822_address_header(header_text):
"""If ``header_text`` contains non-ASCII characters,
attempts to locate patterns of the form
``"Name" <address@domain>`` and replace the
``"Name"`` portion by the RFC2047-encoded
version, preserving the address part untouched.
"""
header_text_utf8 = tools.ustr(header_text).encode('utf-8')
header_text_ascii = try_coerce_ascii(header_text_utf8)
if header_text_ascii:
return header_text_ascii
# non-ASCII characters are present, attempt to
# replace all "Name" patterns with the RFC2047-
# encoded version
def replace(match_obj):
name, email = match_obj.group(1), match_obj.group(2)
name_encoded = str(Header(name, 'utf-8'))
return "%s <%s>" % (name_encoded, email)
header_text_utf8 = name_with_email_pattern.sub(replace,
header_text_utf8)
# try again after encoding
header_text_ascii = try_coerce_ascii(header_text_utf8)
if header_text_ascii:
return header_text_ascii
# fallback to extracting pure addresses only, which could
# still cause a failure downstream if the actual addresses
# contain non-ASCII characters
return COMMASPACE.join(extract_rfc2822_addresses(header_text_utf8))
class ir_mail_server(osv.osv):
"""Represents an SMTP server, able to send outgoing emails, with SSL and TLS capabilities."""
_name = "ir.mail_server"
_columns = {
'name': fields.char('Description', size=64, required=True, select=True),
'smtp_host': fields.char('SMTP Server', size=128, required=True, help="Hostname or IP of SMTP server"),
'smtp_port': fields.integer('SMTP Port', size=5, required=True, help="SMTP Port. Usually 465 for SSL, and 25 or 587 for other cases."),
'smtp_user': fields.char('Username', size=64, help="Optional username for SMTP authentication"),
'smtp_pass': fields.char('Password', size=64, help="Optional password for SMTP authentication"),
'smtp_encryption': fields.selection([('none','None'),
('starttls','TLS (STARTTLS)'),
('ssl','SSL/TLS')],
string='Connection Security', required=True,
help="Choose the connection encryption scheme:\n"
"- None: SMTP sessions are done in cleartext.\n"
"- TLS (STARTTLS): TLS encryption is requested at start of SMTP session (Recommended)\n"
"- SSL/TLS: SMTP sessions are encrypted with SSL/TLS through a dedicated port (default: 465)"),
'smtp_debug': fields.boolean('Debugging', help="If enabled, the full output of SMTP sessions will "
"be written to the server log at DEBUG level"
"(this is very verbose and may include confidential info!)"),
'sequence': fields.integer('Priority', help="When no specific mail server is requested for a mail, the highest priority one "
"is used. Default priority is 10 (smaller number = higher priority)"),
'active': fields.boolean('Active')
}
_defaults = {
'smtp_port': 25,
'active': True,
'sequence': 10,
'smtp_encryption': 'none',
}
def __init__(self, *args, **kwargs):
# Make sure we pipe the smtplib outputs to our own DEBUG logger
if not isinstance(smtplib.stderr, WriteToLogger):
logpiper = WriteToLogger(_logger)
smtplib.stderr = logpiper
smtplib.stdout = logpiper
super(ir_mail_server, self).__init__(*args,**kwargs)
def name_get(self, cr, uid, ids, context=None):
return [(a["id"], "(%s)" % (a['name'])) for a in self.read(cr, uid, ids, ['name'], context=context)]
def test_smtp_connection(self, cr, uid, ids, context=None):
for smtp_server in self.browse(cr, uid, ids, context=context):
smtp = False
try:
smtp = self.connect(smtp_server.smtp_host, smtp_server.smtp_port, user=smtp_server.smtp_user,
password=smtp_server.smtp_pass, encryption=smtp_server.smtp_encryption,
smtp_debug=smtp_server.smtp_debug)
except Exception, e:
raise osv.except_osv(_("Connection test failed!"), _("Here is what we got instead:\n %s") % tools.ustr(e))
finally:
try:
if smtp: smtp.quit()
except Exception:
# ignored, just a consequence of the previous exception
pass
raise osv.except_osv(_("Connection test succeeded!"), _("Everything seems properly set up!"))
def connect(self, host, port, user=None, password=None, encryption=False, smtp_debug=False):
"""Returns a new SMTP connection to the give SMTP server, authenticated
with ``user`` and ``password`` if provided, and encrypted as requested
by the ``encryption`` parameter.
:param host: host or IP of SMTP server to connect to
:param int port: SMTP port to connect to
:param user: optional username to authenticate with
:param password: optional password to authenticate with
:param string encryption: optional, ``'ssl'`` | ``'starttls'``
:param bool smtp_debug: toggle debugging of SMTP sessions (all i/o
will be output in logs)
"""
if encryption == 'ssl':
if not 'SMTP_SSL' in smtplib.__all__:
raise osv.except_osv(
_("SMTP-over-SSL mode unavailable"),
_("Your OpenERP Server does not support SMTP-over-SSL. You could use STARTTLS instead."
"If SSL is needed, an upgrade to Python 2.6 on the server-side should do the trick."))
connection = smtplib.SMTP_SSL(host, port)
else:
connection = smtplib.SMTP(host, port)
connection.set_debuglevel(smtp_debug)
if encryption == 'starttls':
# starttls() will perform ehlo() if needed first
# and will discard the previous list of services
# after successfully performing STARTTLS command,
# (as per RFC 3207) so for example any AUTH
# capability that appears only on encrypted channels
# will be correctly detected for next step
connection.starttls()
if user:
# Attempt authentication - will raise if AUTH service not supported
# The user/password must be converted to bytestrings in order to be usable for
# certain hashing schemes, like HMAC.
# See also bug #597143 and python issue #5285
user = tools.ustr(user).encode('utf-8')
password = tools.ustr(password).encode('utf-8')
connection.login(user, password)
return connection
def build_email(self, email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
attachments=None, message_id=None, references=None, object_id=False, subtype='plain', headers=None,
body_alternative=None, subtype_alternative='plain'):
"""Constructs an RFC2822 email.message.Message object based on the keyword arguments passed, and returns it.
:param string email_from: sender email address
:param list email_to: list of recipient addresses (to be joined with commas)
:param string subject: email subject (no pre-encoding/quoting necessary)
:param string body: email body, of the type ``subtype`` (by default, plaintext).
If html subtype is used, the message will be automatically converted
to plaintext and wrapped in multipart/alternative, unless an explicit
``body_alternative`` version is passed.
:param string body_alternative: optional alternative body, of the type specified in ``subtype_alternative``
:param string reply_to: optional value of Reply-To header
:param string object_id: optional tracking identifier, to be included in the message-id for
recognizing replies. Suggested format for object-id is "res_id-model",
e.g. "12345-crm.lead".
:param string subtype: optional mime subtype for the text body (usually 'plain' or 'html'),
must match the format of the ``body`` parameter. Default is 'plain',
making the content part of the mail "text/plain".
:param string subtype_alternative: optional mime subtype of ``body_alternative`` (usually 'plain'
or 'html'). Default is 'plain'.
:param list attachments: list of (filename, filecontents) pairs, where filecontents is a string
containing the bytes of the attachment
:param list email_cc: optional list of string values for CC header (to be joined with commas)
:param list email_bcc: optional list of string values for BCC header (to be joined with commas)
:param dict headers: optional map of headers to set on the outgoing mail (may override the
other headers, including Subject, Reply-To, Message-Id, etc.)
:rtype: email.message.Message (usually MIMEMultipart)
:return: the new RFC2822 email message
"""
email_from = email_from or tools.config.get('email_from')
assert email_from, "You must either provide a sender address explicitly or configure "\
"a global sender address in the server configuration or with the "\
"--email-from startup parameter."
# Note: we must force all strings to to 8-bit utf-8 when crafting message,
# or use encode_header() for headers, which does it automatically.
headers = headers or {} # need valid dict later
if not email_cc: email_cc = []
if not email_bcc: email_bcc = []
if not body: body = u''
email_body_utf8 = ustr(body).encode('utf-8')
email_text_part = MIMEText(email_body_utf8, _subtype=subtype, _charset='utf-8')
msg = MIMEMultipart()
if not message_id:
if object_id:
message_id = tools.generate_tracking_message_id(object_id)
else:
message_id = make_msgid()
msg['Message-Id'] = encode_header(message_id)
if references:
msg['references'] = encode_header(references)
msg['Subject'] = encode_header(subject)
msg['From'] = encode_rfc2822_address_header(email_from)
del msg['Reply-To']
if reply_to:
msg['Reply-To'] = encode_rfc2822_address_header(reply_to)
else:
msg['Reply-To'] = msg['From']
msg['To'] = encode_rfc2822_address_header(COMMASPACE.join(email_to))
if email_cc:
msg['Cc'] = encode_rfc2822_address_header(COMMASPACE.join(email_cc))
if email_bcc:
msg['Bcc'] = encode_rfc2822_address_header(COMMASPACE.join(email_bcc))
msg['Date'] = formatdate()
# Custom headers may override normal headers or provide additional ones
for key, value in headers.iteritems():
msg[ustr(key).encode('utf-8')] = encode_header(value)
if subtype == 'html' and not body_alternative and html2text:
# Always provide alternative text body ourselves if possible.
text_utf8 = tools.html2text(email_body_utf8.decode('utf-8')).encode('utf-8')
alternative_part = MIMEMultipart(_subtype="alternative")
alternative_part.attach(MIMEText(text_utf8, _charset='utf-8', _subtype='plain'))
alternative_part.attach(email_text_part)
msg.attach(alternative_part)
elif body_alternative:
# Include both alternatives, as specified, within a multipart/alternative part
alternative_part = MIMEMultipart(_subtype="alternative")
body_alternative_utf8 = ustr(body_alternative).encode('utf-8')
alternative_body_part = MIMEText(body_alternative_utf8, _subtype=subtype_alternative, _charset='utf-8')
alternative_part.attach(alternative_body_part)
alternative_part.attach(email_text_part)
msg.attach(alternative_part)
else:
msg.attach(email_text_part)
if attachments:
for (fname, fcontent) in attachments:
filename_rfc2047 = encode_header_param(fname)
part = MIMEBase('application', "octet-stream")
# The default RFC2231 encoding of Message.add_header() works in Thunderbird but not GMail
# so we fix it by using RFC2047 encoding for the filename instead.
part.set_param('name', filename_rfc2047)
part.add_header('Content-Disposition', 'attachment', filename=filename_rfc2047)
part.set_payload(fcontent)
Encoders.encode_base64(part)
msg.attach(part)
return msg
def send_email(self, cr, uid, message, mail_server_id=None, smtp_server=None, smtp_port=None,
smtp_user=None, smtp_password=None, smtp_encryption=None, smtp_debug=False,
context=None):
"""Sends an email directly (no queuing).
No retries are done, the caller should handle MailDeliveryException in order to ensure that
the mail is never lost.
If the mail_server_id is provided, sends using this mail server, ignoring other smtp_* arguments.
If mail_server_id is None and smtp_server is None, use the default mail server (highest priority).
If mail_server_id is None and smtp_server is not None, use the provided smtp_* arguments.
If both mail_server_id and smtp_server are None, look for an 'smtp_server' value in server config,
and fails if not found.
:param message: the email.message.Message to send. The envelope sender will be extracted from the
``Return-Path`` or ``From`` headers. The envelope recipients will be
extracted from the combined list of ``To``, ``CC`` and ``BCC`` headers.
:param mail_server_id: optional id of ir.mail_server to use for sending. overrides other smtp_* arguments.
:param smtp_server: optional hostname of SMTP server to use
:param smtp_encryption: optional TLS mode, one of 'none', 'starttls' or 'ssl' (see ir.mail_server fields for explanation)
:param smtp_port: optional SMTP port, if mail_server_id is not passed
:param smtp_user: optional SMTP user, if mail_server_id is not passed
:param smtp_password: optional SMTP password to use, if mail_server_id is not passed
:param smtp_debug: optional SMTP debug flag, if mail_server_id is not passed
:return: the Message-ID of the message that was just sent, if successfully sent, otherwise raises
MailDeliveryException and logs root cause.
"""
smtp_from = message['Return-Path'] or message['From']
assert smtp_from, "The Return-Path or From header is required for any outbound email"
# The email's "Envelope From" (Return-Path), and all recipient addresses must only contain ASCII characters.
from_rfc2822 = extract_rfc2822_addresses(smtp_from)
assert len(from_rfc2822) == 1, "Malformed 'Return-Path' or 'From' address - it may only contain plain ASCII characters"
smtp_from = from_rfc2822[0]
email_to = message['To']
email_cc = message['Cc']
email_bcc = message['Bcc']
smtp_to_list = filter(None, tools.flatten(map(extract_rfc2822_addresses,[email_to, email_cc, email_bcc])))
assert smtp_to_list, "At least one valid recipient address should be specified for outgoing emails (To/Cc/Bcc)"
# Do not actually send emails in testing mode!
if getattr(threading.currentThread(), 'testing', False):
_logger.log(logging.TEST, "skip sending email in test mode")
return message['Message-Id']
# Get SMTP Server Details from Mail Server
mail_server = None
if mail_server_id:
mail_server = self.browse(cr, uid, mail_server_id)
elif not smtp_server:
mail_server_ids = self.search(cr, uid, [], order='sequence', limit=1)
if mail_server_ids:
mail_server = self.browse(cr, uid, mail_server_ids[0])
if mail_server:
smtp_server = mail_server.smtp_host
smtp_user = mail_server.smtp_user
smtp_password = mail_server.smtp_pass
smtp_port = mail_server.smtp_port
smtp_encryption = mail_server.smtp_encryption
smtp_debug = smtp_debug or mail_server.smtp_debug
else:
# we were passed an explicit smtp_server or nothing at all
smtp_server = smtp_server or tools.config.get('smtp_server')
smtp_port = tools.config.get('smtp_port', 25) if smtp_port is None else smtp_port
smtp_user = smtp_user or tools.config.get('smtp_user')
smtp_password = smtp_password or tools.config.get('smtp_password')
if smtp_encryption is None and tools.config.get('smtp_ssl'):
smtp_encryption = 'starttls' # STARTTLS is the new meaning of the smtp_ssl flag as of v7.0
if not smtp_server:
raise osv.except_osv(
_("Missing SMTP Server"),
_("Please define at least one SMTP server, or provide the SMTP parameters explicitly."))
try:
message_id = message['Message-Id']
# Add email in Maildir if smtp_server contains maildir.
if smtp_server.startswith('maildir:/'):
from mailbox import Maildir
maildir_path = smtp_server[8:]
mdir = Maildir(maildir_path, factory=None, create = True)
mdir.add(message.as_string(True))
return message_id
try:
smtp = self.connect(smtp_server, smtp_port, smtp_user, smtp_password, smtp_encryption or False, smtp_debug)
try:
smtp.sendmail(smtp_from, smtp_to_list, message.as_string())
except Exception, e:
                #johnw 02/18/2014 enhance the mail sending logic, to handle the login and from address not matching issue
                #if the From address and the login user differ, try to send again using smtp_user
if smtp_from != smtp_user:
smtp.sendmail(smtp_user, smtp_to_list, message.as_string())
else:
raise e
finally:
try:
# Close Connection of SMTP Server
smtp.quit()
except Exception:
# ignored, just a consequence of the previous exception
pass
except Exception, e:
msg = _("Mail delivery failed via SMTP server '%s'.\n%s: %s") % (tools.ustr(smtp_server),
e.__class__.__name__,
tools.ustr(e))
_logger.exception(msg)
raise MailDeliveryException(_("Mail delivery failed"), msg)
return message_id
def on_change_encryption(self, cr, uid, ids, smtp_encryption):
if smtp_encryption == 'ssl':
result = {'value': {'smtp_port': 465}}
if not 'SMTP_SSL' in smtplib.__all__:
result['warning'] = {'title': _('Warning'),
'message': _('Your server does not seem to support SSL, you may want to try STARTTLS instead')}
else:
result = {'value': {'smtp_port': 25}}
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| john-wang-metro/metro-openerp | bug_fix/ir_mail_server.py | Python | agpl-3.0 | 25,985 | 0.005118 |
"""
Test that no StopIteration is raised inside a generator
"""
# pylint: disable=missing-docstring,invalid-name,import-error, try-except-raise, wrong-import-position,not-callable,raise-missing-from
import asyncio
class RebornStopIteration(StopIteration):
"""
A class inheriting from StopIteration exception
"""
# This one is ok
def gen_ok():
yield 1
yield 2
yield 3
# pylint should warn about this one
# because of a direct raising of StopIteration inside generator
def gen_stopiter():
yield 1
yield 2
yield 3
raise StopIteration # [stop-iteration-return]
# pylint should warn about this one
# because of a direct raising of an exception inheriting from StopIteration inside generator
def gen_stopiterchild():
yield 1
yield 2
yield 3
raise RebornStopIteration # [stop-iteration-return]
# pylint should warn here
# because of the possibility that next raises a StopIteration exception
def gen_next_raises_stopiter():
g = gen_ok()
while True:
yield next(g) # [stop-iteration-return]
# This one is the same as gen_next_raises_stopiter
# but is ok because the next function is inside
# a try/except block handling StopIteration
def gen_next_inside_try_except():
g = gen_ok()
while True:
try:
yield next(g)
except StopIteration:
return
# This one is the same as gen_next_inside_try_except
# but is not ok because the next function is inside
# a try/except block that doesn't handle StopIteration
def gen_next_inside_wrong_try_except():
g = gen_ok()
while True:
try:
yield next(g) # [stop-iteration-return]
except ValueError:
return
# This one is the same as gen_next_inside_try_except
# but is not ok because the next function is inside
# a try/except block that handles StopIteration but reraises it
def gen_next_inside_wrong_try_except2():
g = gen_ok()
while True:
try:
yield next(g)
except StopIteration:
raise StopIteration # [stop-iteration-return]
# Those two last are ok
def gen_in_for():
for el in gen_ok():
yield el
def gen_yield_from():
yield from gen_ok()
def gen_dont_crash_on_no_exception():
g = gen_ok()
while True:
try:
yield next(g) # [stop-iteration-return]
except ValueError:
raise
def gen_dont_crash_on_uninferable():
# https://github.com/PyCQA/pylint/issues/1779
yield from iter()
raise asyncio.TimeoutError()
# https://github.com/PyCQA/pylint/issues/1830
def gen_next_with_sentinel():
yield next([], 42) # No bad return
from itertools import count
# https://github.com/PyCQA/pylint/issues/2158
def generator_using_next():
counter = count()
number = next(counter)
yield number * 2
# pylint: disable=no-self-use,too-few-public-methods
class SomeClassWithNext:
def next(self):
return iter([1, 2, 3])
def some_gen(self):
for value in self.next():
yield value
SomeClassWithNext().some_gen()
def something_invalid():
raise Exception('cannot iterate this')
def invalid_object_passed_to_next():
yield next(something_invalid()) # [stop-iteration-return]
| ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/stop_iteration_inside_generator.py | Python | mit | 3,242 | 0.005552 |
#!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Simple SOAP Server implementation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.02c"
from simplexml import SimpleXMLElement, TYPE_MAP, DateTime, Date, Decimal
DEBUG = False
class SoapDispatcher(object):
"Simple Dispatcher for SOAP Server"
def __init__(self, name, documentation='', action='', location='',
namespace=None, prefix=False,
soap_uri="http://schemas.xmlsoap.org/soap/envelope/",
soap_ns='soap',
**kwargs):
self.methods = {}
self.name = name
self.documentation = documentation
self.action = action # base SoapAction
self.location = location
self.namespace = namespace # targetNamespace
self.prefix = prefix
self.soap_ns = soap_ns
self.soap_uri = soap_uri
def register_function(self, name, fn, returns=None, args=None, doc=None):
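        "Register a method with its return types, argument types and documentation"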
self.methods[name] = fn, returns, args, doc or getattr(fn,"__doc__","")
def dispatch(self, xml, action=None):
"Receive and proccess SOAP call"
# default values:
prefix = self.prefix
ret = fault = None
soap_ns, soap_uri = self.soap_ns, self.soap_uri
soap_fault_code = 'VersionMismatch'
try:
request = SimpleXMLElement(xml, namespace=self.namespace)
# detect soap prefix and uri (xmlns attributes of Envelope)
for k, v in request[:]:
if v in ("http://schemas.xmlsoap.org/soap/envelope/",
"http://www.w3.org/2003/05/soap-env",):
soap_ns = request.attributes()[k].localName
soap_uri = request.attributes()[k].value
soap_fault_code = 'Client'
# parse request message and get local method
method = request('Body', ns=soap_uri).children()(0)
if action:
# method name = action
name = action[len(self.action)+1:-1]
prefix = self.prefix
if not action or not name:
# method name = input message name
name = method.get_local_name()
prefix = method.get_prefix()
if DEBUG: print "dispatch method", name
function, returns_types, args_types, doc = self.methods[name]
# de-serialize parameters (if type definitions given)
if args_types:
args = method.children().unmarshall(args_types)
elif args_types is None:
args = {'request':method} # send raw request
else:
args = {} # no parameters
soap_fault_code = 'Server'
# execute function
ret = function(**args)
if DEBUG: print ret
except Exception, e:
import sys
etype, evalue, etb = sys.exc_info()
if DEBUG:
import traceback
detail = ''.join(traceback.format_exception(etype, evalue, etb))
detail += '\n\nXML REQUEST\n\n' + xml
else:
detail = None
fault = {'faultcode': "%s.%s" % (soap_fault_code, etype.__name__),
'faultstring': unicode(evalue),
'detail': detail}
# build response message
if not prefix:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"/>"""
else:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"
xmlns:%(prefix)s="%(namespace)s"/>"""
xml = xml % {'namespace': self.namespace, 'prefix': prefix,
'soap_ns': soap_ns, 'soap_uri': soap_uri}
response = SimpleXMLElement(xml, namespace=self.namespace,
prefix=prefix)
response['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
response['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema"
body = response.add_child("%s:Body" % soap_ns, ns=False)
if fault:
# generate a Soap Fault (with the python exception)
body.marshall("%s:Fault" % soap_ns, fault, ns=False)
else:
# return normal value
res = body.add_child("%sResponse" % name, ns=prefix)
if not prefix:
res['xmlns'] = self.namespace # add target namespace
# serialize returned values (response) if type definition available
if returns_types:
if not isinstance(ret, dict):
res.marshall(returns_types.keys()[0], ret, )
else:
for k,v in ret.items():
res.marshall(k, v)
elif returns_types is None:
# merge xmlelement returned
res.import_node(ret)
return response.as_xml()
# Introspection functions:
def list_methods(self):
"Return a list of aregistered operations"
return [(method, doc) for method, (function, returns, args, doc) in self.methods.items()]
def help(self, method=None):
"Generate sample request and response messages"
(function, returns, args, doc) = self.methods[method]
xml = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body><%(method)s xmlns="%(namespace)s"/></soap:Body>
</soap:Envelope>""" % {'method':method, 'namespace':self.namespace}
request = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if args:
items = args.items()
elif args is None:
items = [('value', None)]
else:
items = []
for k,v in items:
request(method).marshall(k, v, add_comments=True, ns=False)
xml = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body><%(method)sResponse xmlns="%(namespace)s"/></soap:Body>
</soap:Envelope>""" % {'method':method, 'namespace':self.namespace}
response = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if returns:
items = returns.items()
elif args is None:
items = [('value', None)]
else:
items = []
for k,v in items:
response('%sResponse'%method).marshall(k, v, add_comments=True, ns=False)
return request.as_xml(pretty=True), response.as_xml(pretty=True), doc
def wsdl(self):
"Generate Web Service Description v1.1"
xml = """<?xml version="1.0"?>
<wsdl:definitions name="%(name)s"
targetNamespace="%(namespace)s"
xmlns:tns="%(namespace)s"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<wsdl:documentation xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/">%(documentation)s</wsdl:documentation>
<wsdl:types>
<xsd:schema targetNamespace="%(namespace)s"
elementFormDefault="qualified"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</xsd:schema>
</wsdl:types>
</wsdl:definitions>
""" % {'namespace': self.namespace, 'name': self.name, 'documentation': self.documentation}
wsdl = SimpleXMLElement(xml)
for method, (function, returns, args, doc) in self.methods.items():
# create elements:
def parse_element(name, values, array=False, complex=False):
if not complex:
element = wsdl('wsdl:types')('xsd:schema').add_child('xsd:element')
complex = element.add_child("xsd:complexType")
else:
complex = wsdl('wsdl:types')('xsd:schema').add_child('xsd:complexType')
element = complex
element['name'] = name
if values:
items = values
elif values is None:
items = [('value', None)]
else:
items = []
if not array and items:
all = complex.add_child("xsd:all")
elif items:
all = complex.add_child("xsd:sequence")
for k,v in items:
e = all.add_child("xsd:element")
e['name'] = k
if array:
e[:]={'minOccurs': "0", 'maxOccurs': "unbounded"}
if v in TYPE_MAP.keys():
t='xsd:%s' % TYPE_MAP[v]
elif v is None:
t='xsd:anyType'
elif isinstance(v, list):
n="ArrayOf%s%s" % (name, k)
l = []
for d in v:
l.extend(d.items())
parse_element(n, l, array=True, complex=True)
t = "tns:%s" % n
elif isinstance(v, dict):
n="%s%s" % (name, k)
parse_element(n, v.items(), complex=True)
t = "tns:%s" % n
e.add_attribute('type', t)
parse_element("%s" % method, args and args.items())
parse_element("%sResponse" % method, returns and returns.items())
# create messages:
for m,e in ('Input',''), ('Output','Response'):
message = wsdl.add_child('wsdl:message')
message['name'] = "%s%s" % (method, m)
part = message.add_child("wsdl:part")
part[:] = {'name': 'parameters',
'element': 'tns:%s%s' % (method,e)}
# create ports
portType = wsdl.add_child('wsdl:portType')
portType['name'] = "%sPortType" % self.name
for method, (function, returns, args, doc) in self.methods.items():
op = portType.add_child('wsdl:operation')
op['name'] = method
if doc:
op.add_child("wsdl:documentation", doc)
input = op.add_child("wsdl:input")
input['message'] = "tns:%sInput" % method
output = op.add_child("wsdl:output")
output['message'] = "tns:%sOutput" % method
# create bindings
binding = wsdl.add_child('wsdl:binding')
binding['name'] = "%sBinding" % self.name
binding['type'] = "tns:%sPortType" % self.name
soapbinding = binding.add_child('soap:binding')
soapbinding['style'] = "document"
soapbinding['transport'] = "http://schemas.xmlsoap.org/soap/http"
for method in self.methods.keys():
op = binding.add_child('wsdl:operation')
op['name'] = method
soapop = op.add_child('soap:operation')
soapop['soapAction'] = self.action + method
soapop['style'] = 'document'
input = op.add_child("wsdl:input")
##input.add_attribute('name', "%sInput" % method)
soapbody = input.add_child("soap:body")
soapbody["use"] = "literal"
output = op.add_child("wsdl:output")
##output.add_attribute('name', "%sOutput" % method)
soapbody = output.add_child("soap:body")
soapbody["use"] = "literal"
service = wsdl.add_child('wsdl:service')
service["name"] = "%sService" % self.name
service.add_child('wsdl:documentation', text=self.documentation)
port=service.add_child('wsdl:port')
port["name"] = "%s" % self.name
port["binding"] = "tns:%sBinding" % self.name
soapaddress = port.add_child('soap:address')
soapaddress["location"] = self.location
return wsdl.as_xml(pretty=True)
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class SOAPHandler(BaseHTTPRequestHandler):
def do_GET(self):
"User viewable help information and wsdl"
args = self.path[1:].split("?")
print "serving", args
if self.path != "/" and args[0] not in self.server.dispatcher.methods.keys():
self.send_error(404, "Method not found: %s" % args[0])
else:
if self.path == "/":
# return wsdl if no method supplied
response = self.server.dispatcher.wsdl()
else:
# return supplied method help (?request or ?response messages)
req, res, doc = self.server.dispatcher.help(args[0])
if len(args)==1 or args[1]=="request":
response = req
else:
response = res
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.end_headers()
self.wfile.write(response)
def do_POST(self):
"SOAP POST gateway"
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.end_headers()
request = self.rfile.read(int(self.headers.getheader('content-length')))
response = self.server.dispatcher.dispatch(request)
self.wfile.write(response)
if __name__=="__main__":
import sys
dispatcher = SoapDispatcher(
name = "PySimpleSoapSample",
location = "http://localhost:8008/",
action = 'http://localhost:8008/', # SOAPAction
namespace = "http://example.com/pysimplesoapsamle/", prefix="ns0",
documentation = 'Example soap service using PySimpleSoap',
trace = True,
ns = True)
def adder(p,c, dt=None):
"Add several values"
print c[0]['d'],c[1]['d'],
import datetime
dt = dt + datetime.timedelta(365)
return {'ab': p['a']+p['b'], 'dd': c[0]['d']+c[1]['d'], 'dt': dt}
def dummy(in0):
"Just return input"
return in0
def echo(request):
"Copy request->response (generic, any type)"
return request.value
dispatcher.register_function('Adder', adder,
returns={'AddResult': {'ab': int, 'dd': str } },
args={'p': {'a': int,'b': int}, 'dt': Date, 'c': [{'d': Decimal}]})
dispatcher.register_function('Dummy', dummy,
returns={'out0': str},
args={'in0': str})
dispatcher.register_function('Echo', echo)
if '--local' in sys.argv:
wsdl=dispatcher.wsdl()
print wsdl
testfile = open("C:/test.wsdl","w")
try:
testfile.write(wsdl)
finally:
testfile.close()
# dummy local test (clasic soap dialect)
xml = """<?xml version="1.0" encoding="UTF-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<Adder xmlns="http://example.com/sample.wsdl">
<p><a>1</a><b>2</b></p><c><d>5000000.1</d><d>.2</d></c><dt>20100724</dt>
</Adder>
</soap:Body>
</soap:Envelope>"""
print dispatcher.dispatch(xml)
# dummy local test (modern soap dialect, SoapUI)
xml = """
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:pys="http://example.com/pysimplesoapsamle/">
<soapenv:Header/>
<soapenv:Body>
<pys:Adder>
<pys:p><pys:a>9</pys:a><pys:b>3</pys:b></pys:p>
<pys:dt>19690720<!--1969-07-20T21:28:00--></pys:dt>
<pys:c><pys:d>10.001</pys:d><pys:d>5.02</pys:d></pys:c>
</pys:Adder>
</soapenv:Body>
</soapenv:Envelope>
"""
print dispatcher.dispatch(xml)
# echo local test (generic soap service)
xml = """<?xml version="1.0" encoding="UTF-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Body>
<Echo xmlns="http://example.com/sample.wsdl">
<value xsi:type="xsd:string">Hello world</value>
</Echo>
</soap:Body>
</soap:Envelope>"""
print dispatcher.dispatch(xml)
for method, doc in dispatcher.list_methods():
request, response, doc = dispatcher.help(method)
##print request
##print response
if '--serve' in sys.argv:
print "Starting server..."
httpd = HTTPServer(("", 8008), SOAPHandler)
httpd.dispatcher = dispatcher
httpd.serve_forever()
if '--consume' in sys.argv:
from client import SoapClient
client = SoapClient(
location = "http://localhost:8008/",
action = 'http://localhost:8008/', # SOAPAction
namespace = "http://example.com/sample.wsdl",
soap_ns='soap',
trace = True,
ns = False)
response = client.Adder(p={'a':1,'b':2},dt='20100724',c=[{'d':'1.20'},{'d':'2.01'}])
result = response.AddResult
print int(result.ab)
print str(result.dd)
| SEA000/uw-empathica | empathica/gluon/contrib/pysimplesoap/server.py | Python | mit | 17,610 | 0.005849 |
from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
analog_devices = SchLib(tool=SKIDL).add_parts(*[
        Part(name='AD623AN',dest=TEMPLATE,tool=SKIDL,keywords='ad623 instrumentation amplifier dip-8',description='Single Supply, Rail to Rail, Instrumentation Amplifier, RoHS, DIP-8',ref_prefix='U',num_units=1,fplist=['DIP-8*'],do_erc=True,aliases=['AD623BN', 'AD623ANZ', 'AD623BNZ'],pins=[
Pin(num='1',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='-',do_erc=True),
Pin(num='3',name='+',do_erc=True),
Pin(num='4',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='Ref',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='Vs+',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='Rg',func=Pin.PASSIVE,do_erc=True)]),
        Part(name='AD623AR',dest=TEMPLATE,tool=SKIDL,keywords='ad623 instrumentation amplifier soic-8',description='Single Supply, Rail to Rail, Instrumentation Amplifier, RoHS, SOIC-8',ref_prefix='U',num_units=1,fplist=['SOIC-8*'],do_erc=True,aliases=['AD623ARZ', 'AD623BR', 'AD623BRZ'],pins=[
Pin(num='1',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='-',do_erc=True),
Pin(num='3',name='+',do_erc=True),
Pin(num='4',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='Ref',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='Vs+',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='Rg',func=Pin.PASSIVE,do_erc=True)]),
        Part(name='AD623ARM',dest=TEMPLATE,tool=SKIDL,keywords='ad623 instrumentation amplifier msop-8',description='Single Supply, Rail to Rail, Instrumentation Amplifier, RoHS, MSOP-8',ref_prefix='U',num_units=1,fplist=['MSOP-8*'],do_erc=True,aliases=['AD623ARMZ'],pins=[
Pin(num='1',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='2',name='-',do_erc=True),
Pin(num='3',name='+',do_erc=True),
Pin(num='4',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='Ref',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='Vs+',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='Rg',func=Pin.PASSIVE,do_erc=True)]),
        Part(name='AD8422ARMZ',dest=TEMPLATE,tool=SKIDL,keywords='ad8422 instrumentation amplifier msop-8',description='Low Power, Rail to Rail, Instrumentation Amplifier, MSOP-8',ref_prefix='U',num_units=1,fplist=['MSOP-8*'],do_erc=True,aliases=['AD8422BRMZ', 'AD8421ARMZ', 'AD8421BRMZ', 'AD8236ARMZ'],pins=[
Pin(num='1',name='-',do_erc=True),
Pin(num='2',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='3',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='+',do_erc=True),
Pin(num='5',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='Ref',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='8',name='Vs+',func=Pin.PWRIN,do_erc=True)]),
        Part(name='AD8422ARZ',dest=TEMPLATE,tool=SKIDL,keywords='ad8429 instrumentation amplifier soic-8',description='Low Noise, Instrumentation Amplifier, SOIC-8',ref_prefix='U',num_units=1,fplist=['SOIC-8*'],do_erc=True,aliases=['AD8422BRZ', 'AD8421ARZ', 'AD8421BRZ', 'AD8429ARZ', 'AD8429BRZ'],pins=[
Pin(num='1',name='-',do_erc=True),
Pin(num='2',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='3',name='Rg',func=Pin.PASSIVE,do_erc=True),
Pin(num='4',name='+',do_erc=True),
Pin(num='5',name='Vs-',func=Pin.PWRIN,do_erc=True),
Pin(num='6',name='Ref',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='~',func=Pin.OUTPUT,do_erc=True),
Pin(num='8',name='Vs+',func=Pin.PWRIN,do_erc=True)]),
Part(name='ADE7758',dest=TEMPLATE,tool=SKIDL,keywords='Energy Metering',description='Poly Phase Multifunction Energy Metering, SO-24',ref_prefix='U',num_units=1,fplist=['SOIC*7.5x15.4mm*Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='APCF',func=Pin.OUTPUT,do_erc=True),
Pin(num='2',name='DGND',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='DVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='AVDD',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='IAP',do_erc=True),
Pin(num='6',name='IAN',do_erc=True),
Pin(num='7',name='IBP',do_erc=True),
Pin(num='8',name='IBN',do_erc=True),
Pin(num='9',name='ICP',do_erc=True),
Pin(num='10',name='ICN',do_erc=True),
Pin(num='20',name='CLKOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='11',name='AGND',func=Pin.PWRIN,do_erc=True),
Pin(num='21',name='~CS',do_erc=True),
Pin(num='12',name='REFIN/OUT',do_erc=True),
Pin(num='22',name='DIN',do_erc=True),
Pin(num='13',name='VN',func=Pin.PWRIN,do_erc=True),
Pin(num='23',name='SCLK',do_erc=True),
Pin(num='14',name='VCP',do_erc=True),
Pin(num='24',name='DOUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='VBP',do_erc=True),
Pin(num='16',name='VAP',do_erc=True),
Pin(num='17',name='VARCF',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='~IRQ',func=Pin.OPENCOLL,do_erc=True),
Pin(num='19',name='CLKIN',do_erc=True)]),
Part(name='ADM2484E',dest=TEMPLATE,tool=SKIDL,keywords='RS485 Transceiver RS422 Transceiver',description='Isolated RS485/RS422 Transceiver, Half-/Full-Duplex, 500kbps,SOIC-16W',ref_prefix='U',num_units=1,fplist=['SOIC*7.5x10.3mm*Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='VDD1',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='RxD',func=Pin.OUTPUT,do_erc=True),
Pin(num='4',name='~RE',do_erc=True),
Pin(num='5',name='DE',do_erc=True),
Pin(num='6',name='TxD',do_erc=True),
Pin(num='7',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='8',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='11',name='Y',func=Pin.OUTPUT,do_erc=True),
Pin(num='12',name='Z',func=Pin.OUTPUT,do_erc=True),
Pin(num='13',name='B',do_erc=True),
Pin(num='14',name='A',do_erc=True),
Pin(num='15',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='16',name='VDD2',func=Pin.PWRIN,do_erc=True)]),
        Part(name='ADM2587E',dest=TEMPLATE,tool=SKIDL,keywords='RS485 Transceiver,RS422 Transceiver',description='Isolated RS485/RS422 Transceiver,Integrated Isolated DC-DC Converter, 500kbps,SO-20',ref_prefix='U',num_units=1,fplist=['SOIC*7.5x12.8mm*Pitch1.27mm*'],do_erc=True,aliases=['ADM2582E'],pins=[
Pin(num='1',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='4',name='RxD',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='~RE',do_erc=True),
Pin(num='6',name='DE',do_erc=True),
Pin(num='7',name='TxD',do_erc=True),
Pin(num='8',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='Visoout',func=Pin.PWROUT,do_erc=True),
Pin(num='13',name='Y',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='15',name='Z',func=Pin.OUTPUT,do_erc=True),
Pin(num='16',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='17',name='B',do_erc=True),
Pin(num='18',name='A',do_erc=True),
Pin(num='19',name='Visoin',func=Pin.PWRIN,do_erc=True)]),
Part(name='ADuM1410',dest=TEMPLATE,tool=SKIDL,keywords='Digital Isolator',description='Quad Channel Digital Isolator,10Mbps,SO-16',ref_prefix='U',num_units=1,fplist=['SOIC*7.5x10.3mm*Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='VDD1',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='VIA',do_erc=True),
Pin(num='4',name='VIB',do_erc=True),
Pin(num='5',name='VIC',do_erc=True),
Pin(num='6',name='VID',do_erc=True),
Pin(num='7',name='DISABLE',do_erc=True),
Pin(num='8',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='CTRL2',do_erc=True),
Pin(num='11',name='VOD',func=Pin.OUTPUT,do_erc=True),
Pin(num='12',name='VOC',func=Pin.OUTPUT,do_erc=True),
Pin(num='13',name='VOB',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='VOA',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='16',name='VDD2',func=Pin.PWRIN,do_erc=True)]),
Part(name='ADuM1411',dest=TEMPLATE,tool=SKIDL,keywords='Digital Isolator',description='Quad Channel Digital Isolator,10Mbps,SO-16',ref_prefix='U',num_units=1,fplist=['SOIC*7.5x10.3mm*Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='VDD1',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='VIA',do_erc=True),
Pin(num='4',name='VIB',do_erc=True),
Pin(num='5',name='VIC',do_erc=True),
Pin(num='6',name='VOD',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='CTRL1',do_erc=True),
Pin(num='8',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='CTRL2',do_erc=True),
Pin(num='11',name='VID',do_erc=True),
Pin(num='12',name='VOC',func=Pin.OUTPUT,do_erc=True),
Pin(num='13',name='VOB',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='VOA',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='16',name='VDD2',func=Pin.PWRIN,do_erc=True)]),
Part(name='ADuM1412',dest=TEMPLATE,tool=SKIDL,keywords='Digital Isolator',description='Quad Channel Digital Isolator,10Mbps,SO-16',ref_prefix='U',num_units=1,fplist=['SOIC*7.5x10.3mm*Pitch1.27mm*'],do_erc=True,pins=[
Pin(num='1',name='VDD1',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='VIA',do_erc=True),
Pin(num='4',name='VIB',do_erc=True),
Pin(num='5',name='VOC',func=Pin.OUTPUT,do_erc=True),
Pin(num='6',name='VOD',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='CTRL1',do_erc=True),
Pin(num='8',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='10',name='CTRL2',do_erc=True),
Pin(num='11',name='VID',do_erc=True),
Pin(num='12',name='VIC',do_erc=True),
Pin(num='13',name='VOB',func=Pin.OUTPUT,do_erc=True),
Pin(num='14',name='VOA',func=Pin.OUTPUT,do_erc=True),
Pin(num='15',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='16',name='VDD2',func=Pin.PWRIN,do_erc=True)]),
Part(name='ADuM6000',dest=TEMPLATE,tool=SKIDL,keywords='Isolated DC-to-DC Converter 5kV',description='Isolated 5 kV DC-to-DC Converter',ref_prefix='U',num_units=1,fplist=['SO-16-W*'],do_erc=True,pins=[
Pin(num='1',name='VDD1',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='4',name='RC_IN',do_erc=True),
Pin(num='5',name='RC_OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='6',name='RC_SEL',do_erc=True),
Pin(num='7',name='VDD1',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='9',name='GND_ISO',func=Pin.PWROUT,do_erc=True),
Pin(num='10',name='V_ISO',func=Pin.PASSIVE,do_erc=True),
Pin(num='11',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='12',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='13',name='V_SEL',do_erc=True),
Pin(num='14',name='NC',func=Pin.NOCONNECT,do_erc=True),
Pin(num='15',name='GND_ISO',func=Pin.PASSIVE,do_erc=True),
Pin(num='16',name='V_ISO',func=Pin.PWROUT,do_erc=True)]),
Part(name='ADuM7643',dest=TEMPLATE,tool=SKIDL,keywords='6Ch Quad Digital Isolator 25Mbps',description='Low Power Six-Channel 3/3 Digital Isolator, 25Mbps 6ns, Fail-Safe High, QSOP20',ref_prefix='U',num_units=1,fplist=['QSOP*'],do_erc=True,pins=[
Pin(num='1',name='VDD1A',func=Pin.PWRIN,do_erc=True),
Pin(num='2',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='3',name='INA',do_erc=True),
Pin(num='4',name='INB',do_erc=True),
Pin(num='5',name='OUTC',func=Pin.OUTPUT,do_erc=True),
Pin(num='6',name='OUTD',func=Pin.OUTPUT,do_erc=True),
Pin(num='7',name='VDD1B',func=Pin.PWRIN,do_erc=True),
Pin(num='8',name='OUTE',func=Pin.OUTPUT,do_erc=True),
Pin(num='9',name='INF',do_erc=True),
Pin(num='10',name='GND1',func=Pin.PWRIN,do_erc=True),
Pin(num='20',name='VDD2A',func=Pin.PWRIN,do_erc=True),
Pin(num='11',name='GND2',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='OUTF',func=Pin.OUTPUT,do_erc=True),
Pin(num='13',name='INE',do_erc=True),
Pin(num='14',name='VDD2B',do_erc=True),
Pin(num='15',name='IND',do_erc=True),
Pin(num='16',name='INC',do_erc=True),
Pin(num='17',name='OUTB',func=Pin.OUTPUT,do_erc=True),
Pin(num='18',name='OUTA',func=Pin.OUTPUT,do_erc=True),
Pin(num='19',name='GND2',func=Pin.PWRIN,do_erc=True)])])
| xesscorp/skidl | skidl/libs/analog_devices_sklib.py | Python | mit | 14,637 | 0.042768 |
"""Support for HomematicIP Cloud climate devices."""
import logging
from typing import Any, Dict, List, Optional, Union
from homematicip.aio.device import AsyncHeatingThermostat, AsyncHeatingThermostatCompact
from homematicip.aio.group import AsyncHeatingGroup
from homematicip.base.enums import AbsenceType
from homematicip.device import Switch
from homematicip.functionalHomes import IndoorClimateHome
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HomematicipGenericEntity
from .hap import HomematicipHAP
HEATING_PROFILES = {"PROFILE_1": 0, "PROFILE_2": 1, "PROFILE_3": 2}
COOLING_PROFILES = {"PROFILE_4": 3, "PROFILE_5": 4, "PROFILE_6": 5}
_LOGGER = logging.getLogger(__name__)
ATTR_PRESET_END_TIME = "preset_end_time"
PERMANENT_END_TIME = "permanent"
HMIP_AUTOMATIC_CM = "AUTOMATIC"
HMIP_MANUAL_CM = "MANUAL"
HMIP_ECO_CM = "ECO"
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP climate from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]
entities = []
for device in hap.home.groups:
if isinstance(device, AsyncHeatingGroup):
entities.append(HomematicipHeatingGroup(hap, device))
if entities:
async_add_entities(entities)
class HomematicipHeatingGroup(HomematicipGenericEntity, ClimateEntity):
"""Representation of the HomematicIP heating group.
Heat mode is supported for all heating devices incl. their defined profiles.
Boost is available for radiator thermostats only.
    Cool mode is only available for floor heating systems, and only if it has been enabled in the HmIP app.
"""
def __init__(self, hap: HomematicipHAP, device: AsyncHeatingGroup) -> None:
"""Initialize heating group."""
device.modelType = "HmIP-Heating-Group"
super().__init__(hap, device)
self._simple_heating = None
if device.actualTemperature is None:
self._simple_heating = self._first_radiator_thermostat
@property
def device_info(self) -> Dict[str, Any]:
"""Return device specific attributes."""
return {
"identifiers": {(HMIPC_DOMAIN, self._device.id)},
"name": self._device.label,
"manufacturer": "eQ-3",
"model": self._device.modelType,
"via_device": (HMIPC_DOMAIN, self._device.homeId),
}
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE
@property
def target_temperature(self) -> float:
"""Return the temperature we try to reach."""
return self._device.setPointTemperature
@property
def current_temperature(self) -> float:
"""Return the current temperature."""
if self._simple_heating:
return self._simple_heating.valveActualTemperature
return self._device.actualTemperature
@property
def current_humidity(self) -> int:
"""Return the current humidity."""
return self._device.humidity
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie."""
if self._disabled_by_cooling_mode and not self._has_switch:
return HVAC_MODE_OFF
if self._device.boostMode:
return HVAC_MODE_HEAT
if self._device.controlMode == HMIP_MANUAL_CM:
return HVAC_MODE_HEAT if self._heat_mode_enabled else HVAC_MODE_COOL
return HVAC_MODE_AUTO
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
if self._disabled_by_cooling_mode and not self._has_switch:
return [HVAC_MODE_OFF]
return (
[HVAC_MODE_AUTO, HVAC_MODE_HEAT]
if self._heat_mode_enabled
else [HVAC_MODE_AUTO, HVAC_MODE_COOL]
)
@property
def hvac_action(self) -> Optional[str]:
"""
Return the current hvac_action.
This is only relevant for radiator thermostats.
"""
if (
self._device.floorHeatingMode == "RADIATOR"
and self._has_radiator_thermostat
and self._heat_mode_enabled
):
return (
CURRENT_HVAC_HEAT if self._device.valvePosition else CURRENT_HVAC_IDLE
)
return None
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode."""
if self._device.boostMode:
return PRESET_BOOST
if self.hvac_mode in (HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF):
return PRESET_NONE
if self._device.controlMode == HMIP_ECO_CM:
if self._indoor_climate.absenceType == AbsenceType.VACATION:
return PRESET_AWAY
if self._indoor_climate.absenceType in [
AbsenceType.PARTY,
AbsenceType.PERIOD,
AbsenceType.PERMANENT,
]:
return PRESET_ECO
return (
self._device.activeProfile.name
if self._device.activeProfile.name in self._device_profile_names
else None
)
@property
def preset_modes(self) -> List[str]:
"""Return a list of available preset modes incl. hmip profiles."""
# Boost is only available if a radiator thermostat is in the room,
# and heat mode is enabled.
profile_names = self._device_profile_names
presets = []
if (
self._heat_mode_enabled and self._has_radiator_thermostat
) or self._has_switch:
if not profile_names:
presets.append(PRESET_NONE)
presets.append(PRESET_BOOST)
presets.extend(profile_names)
return presets
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self._device.minTemperature
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self._device.maxTemperature
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
if self.min_temp <= temperature <= self.max_temp:
await self._device.set_point_temperature(temperature)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode not in self.hvac_modes:
return
if hvac_mode == HVAC_MODE_AUTO:
await self._device.set_control_mode(HMIP_AUTOMATIC_CM)
else:
await self._device.set_control_mode(HMIP_MANUAL_CM)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if preset_mode not in self.preset_modes:
return
if self._device.boostMode and preset_mode != PRESET_BOOST:
await self._device.set_boost(False)
if preset_mode == PRESET_BOOST:
await self._device.set_boost()
if preset_mode in self._device_profile_names:
profile_idx = self._get_profile_idx_by_name(preset_mode)
if self._device.controlMode != HMIP_AUTOMATIC_CM:
await self.async_set_hvac_mode(HVAC_MODE_AUTO)
await self._device.set_active_profile(profile_idx)
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the access point."""
state_attr = super().device_state_attributes
if self._device.controlMode == HMIP_ECO_CM:
if self._indoor_climate.absenceType in [
AbsenceType.PARTY,
AbsenceType.PERIOD,
AbsenceType.VACATION,
]:
state_attr[ATTR_PRESET_END_TIME] = self._indoor_climate.absenceEndTime
elif self._indoor_climate.absenceType == AbsenceType.PERMANENT:
state_attr[ATTR_PRESET_END_TIME] = PERMANENT_END_TIME
return state_attr
@property
def _indoor_climate(self) -> IndoorClimateHome:
"""Return the hmip indoor climate functional home of this group."""
return self._home.get_functionalHome(IndoorClimateHome)
@property
def _device_profiles(self) -> List[str]:
"""Return the relevant profiles."""
return [
profile
for profile in self._device.profiles
if profile.visible
and profile.name != ""
and profile.index in self._relevant_profile_group
]
@property
def _device_profile_names(self) -> List[str]:
"""Return a collection of profile names."""
return [profile.name for profile in self._device_profiles]
def _get_profile_idx_by_name(self, profile_name: str) -> int:
"""Return a profile index by name."""
relevant_index = self._relevant_profile_group
index_name = [
profile.index
for profile in self._device_profiles
if profile.name == profile_name
]
return relevant_index[index_name[0]]
@property
def _heat_mode_enabled(self) -> bool:
"""Return, if heating mode is enabled."""
return not self._device.cooling
@property
def _disabled_by_cooling_mode(self) -> bool:
"""Return, if group is disabled by the cooling mode."""
return self._device.cooling and (
self._device.coolingIgnored or not self._device.coolingAllowed
)
@property
def _relevant_profile_group(self) -> List[str]:
"""Return the relevant profile groups."""
if self._disabled_by_cooling_mode:
return []
return HEATING_PROFILES if self._heat_mode_enabled else COOLING_PROFILES
@property
def _has_switch(self) -> bool:
"""Return, if a switch is in the hmip heating group."""
for device in self._device.devices:
if isinstance(device, Switch):
return True
return False
@property
def _has_radiator_thermostat(self) -> bool:
"""Return, if a radiator thermostat is in the hmip heating group."""
return bool(self._first_radiator_thermostat)
@property
def _first_radiator_thermostat(
self,
) -> Optional[Union[AsyncHeatingThermostat, AsyncHeatingThermostatCompact]]:
"""Return the first radiator thermostat from the hmip heating group."""
for device in self._device.devices:
if isinstance(
device, (AsyncHeatingThermostat, AsyncHeatingThermostatCompact)
):
return device
return None
| tchellomello/home-assistant | homeassistant/components/homematicip_cloud/climate.py | Python | apache-2.0 | 11,473 | 0.000697 |
from django.db.models.sql import compiler
from datetime import datetime
import re
from django.db.models.base import Model
# SQLEvaluator is referenced in SQLUpdateCompiler.as_sql() below; import it so
# expression values do not raise a NameError at runtime (import path valid for
# the Django 1.4-1.7 series this backend targets).
from django.db.models.sql.expressions import SQLEvaluator
REV_ODIR = {
'ASC': 'DESC',
'DESC': 'ASC'
}
SQL_SERVER_8_LIMIT_QUERY = \
"""SELECT *
FROM (
SELECT TOP %(limit)s *
FROM (
%(orig_sql)s
ORDER BY %(ord)s
) AS %(table)s
ORDER BY %(rev_ord)s
) AS %(table)s
ORDER BY %(ord)s"""
SQL_SERVER_8_NO_LIMIT_QUERY = \
"""SELECT *
FROM %(table)s
WHERE %(key)s NOT IN (
%(orig_sql)s
ORDER BY %(ord)s
)"""
# Strategies for handling limit+offset emulation:
USE_ROW_NUMBER = 0 # For SQL Server >= 2005
USE_TOP_HMARK = 1 # For SQL Server 2000 when both limit and offset are provided
USE_TOP_LMARK = 2 # For SQL Server 2000 when offset but no limit is provided
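# Illustrative expansion of the SQL Server 2000 templates above (a sketch of
# the intended mechanics, not output produced by the code in this file).
# Assuming the original query has already been capped with TOP (offset + limit),
# e.g. "SELECT TOP 30 * FROM foo" for OFFSET 20 / LIMIT 10, the
# SQL_SERVER_8_LIMIT_QUERY template keeps the last 10 of those 30 rows by
# reversing the ORDER BY, then restores the original order:
#
#   SELECT *
#   FROM (
#       SELECT TOP 10 *
#       FROM (
#           SELECT TOP 30 * FROM foo
#           ORDER BY id ASC
#       ) AS foo
#       ORDER BY id DESC
#   ) AS foo
#   ORDER BY id ASC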
class SQLCompiler(compiler.SQLCompiler):
def resolve_columns(self, row, fields=()):
index_start = len(self.query.extra_select.keys())
values = [self.query.convert_values(v, None, connection=self.connection) for v in row[:index_start]]
for value, field in map(None, row[index_start:], fields):
values.append(self.query.convert_values(value, field, connection=self.connection))
return tuple(values)
"""
use django as_sql with editing limit
"""
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark != 0)
if not do_offset:
return super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
select_re = re.compile('^SELECT[ ]+(DISTINCT\s)?')
query, params = super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
m = select_re.match(query)
if with_limits and m != None:
num = None
insert = None
if self.query.high_mark is not None:
num = self.query.high_mark - self.query.low_mark
if num <= 0:
return None, None
insert = 'TOP %d' % num
if insert is not None:
if m.groups()[0] != None:
query = select_re.sub('SELECT DISTINCT %s ' % insert, query)
else:
query = select_re.sub('SELECT %s ' % insert, query)
return query, params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
def as_sql_legacy(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
returns_id = bool(self.return_id and
self.connection.features.can_return_id_from_insert)
if returns_id:
result = ['SET NOCOUNT ON']
else:
result = []
result.append('INSERT INTO %s' % qn(opts.db_table))
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
if returns_id:
result.append(';\nSELECT SCOPE_IDENTITY()')
params = self.query.params
sql = ' '.join(result)
meta = self.query.get_meta()
if meta.has_auto_field:
# db_column is None if not explicitly specified by model field
auto_field_column = meta.auto_field.db_column or meta.auto_field.column
if auto_field_column in self.query.columns:
quoted_table = self.connection.ops.quote_name(meta.db_table)
if returns_id:
sql = "SET NOCOUNT ON"
else:
sql = ""
if len(self.query.columns) == 1 and not params:
sql += "INSERT INTO %s DEFAULT VALUES" % quoted_table
else:
sql += "SET IDENTITY_INSERT %s ON;\n%s;\nSET IDENTITY_INSERT %s OFF" % \
(quoted_table, sql, quoted_table)
if returns_id:
sql += '\n;SELECT SCOPE_IDENTITY()'
return sql, params
def as_sql(self):
from django.db.models.fields import DateTimeField
from django.db.models.fields import DateField
"""
using django as_sql()
with exclude Datetime field with None value
which is nullable
"""
# return super(SQLInsertCompiler, self).as_sql()
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
preset_fields = self.query.fields if has_fields else [opts.pk]
fields = []
if len(self.query.objs) == 1:
            # Only filter the field list for single-row inserts; multi-row
            # inserts fall through to the else branch unchanged, so they do
            # not get this None-value handling.
for field in preset_fields:
# if not isinstance(field, (DateField, DateTimeField)):
# fields.append(field)
if field.get_db_prep_save(
getattr(self.query.objs[0], field.attname) if self.query.raw else field.pre_save(self.query.objs[0], True), connection=self.connection) is not None:
fields.append(field)
elif field.blank is not True:
fields.append(field)
else:
fields = preset_fields
result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple([v for val in values for v in val]))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
def as_sql(self):
"""
Copy of django UpdateCommpiler as_sql
need cheack datetime field
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
pass
| VanyaDNDZ/django-sybase-backend | sqlsybase_server/pyodbc/compiler.py | Python | unlicense | 10,019 | 0.003394 |
problem = """
The decimal number, 585 = 1001001001 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
(Please note that the palindromic number, in either base, may not include leading zeros.)
"""
def is_palindromic(s):
return s[:len(s)/2] == s[:(len(s)-1)/2:-1]
def decimal2binary(num):
x = ''
while num > 0:
x = str(num % 2) + x
num /= 2
return x
double_base_palindromes = set()
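# Build candidate base-10 palindromes directly instead of testing every number
# below one million: for each prefix num < 1000, p1 mirrors all but the last
# digit (odd-length palindrome, e.g. 123 -> 12321) and p2 mirrors every digit
# (even-length palindrome, e.g. 123 -> 123321). Together these generate every
# base-10 palindrome below 10**6; each candidate is then checked in base 2.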
for num in range(1000):
p1 = int(str(num) + str(num)[-2::-1])
p2 = int(str(num) + str(num)[::-1])
if is_palindromic(decimal2binary(p1)):
double_base_palindromes.add(p1)
if is_palindromic(decimal2binary(p2)):
double_base_palindromes.add(p2)
print sum(double_base_palindromes)
| lorenyu/project-euler | problem-036.py | Python | mit | 813 | 0.00615 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def get_items(price_list, sales_or_purchase, item=None, item_group=None):
condition = ""
args = {"price_list": price_list}
if sales_or_purchase == "Sales":
condition = "i.is_sales_item='Yes'"
else:
condition = "i.is_purchase_item='Yes'"
if item_group and item_group != "All Item Groups":
condition += " and i.item_group='%s'" % item_group.replace("'", "\'")
if item:
condition += " and CONCAT(i.name, i.item_name) like %(name)s"
args["name"] = "%%%s%%" % item
return frappe.db.sql("""select i.name, i.item_name, i.image,
item_det.price_list_rate, item_det.currency
from `tabItem` i LEFT JOIN
(select item_code, price_list_rate, currency from
`tabItem Price` where price_list=%s) item_det
ON
item_det.item_code=i.name
where
%s""" % ('%(price_list)s', condition), args, as_dict=1)
@frappe.whitelist()
def get_item_code(barcode_serial_no):
input_via = "serial_no"
item_code = frappe.db.sql("""select name, item_code from `tabSerial No` where
name=%s""", (barcode_serial_no), as_dict=1)
if not item_code:
input_via = "barcode"
item_code = frappe.db.sql("""select name from `tabItem` where barcode=%s""",
(barcode_serial_no), as_dict=1)
if item_code:
return item_code, input_via
else:
frappe.throw(frappe._("Invalid Barcode or Serial No"))
@frappe.whitelist()
def get_mode_of_payment():
return frappe.get_list("Mode of Payment")
| suyashphadtare/vestasi-erp-1 | erpnext/erpnext/accounts/doctype/sales_invoice/pos.py | Python | agpl-3.0 | 1,595 | 0.022571 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2019 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Samy Bucher <samy.bucher@outlook.com>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models
from odoo import fields
class Correspondence(models.Model):
_inherit = 'correspondence'
gift_id = fields.Many2one('sponsorship.gift', 'Gift')
| ecino/compassion-switzerland | sponsorship_switzerland/models/correspondence.py | Python | agpl-3.0 | 583 | 0 |
from .evaluate_all import main
if __name__ == "__main__":
main()
| undertherain/vsmlib | vsmlib/benchmarks/__main__.py | Python | apache-2.0 | 70 | 0 |
import json
import os
import os.path as opath
import shutil
import subprocess
from codegen.datatypes import build_datatype_py, write_datatype_py
from codegen.compatibility import (
write_deprecated_datatypes,
write_graph_objs_graph_objs,
DEPRECATED_DATATYPES,
)
from codegen.figure import write_figure_classes
from codegen.utils import (
TraceNode,
PlotlyNode,
LayoutNode,
FrameNode,
write_init_py,
ElementDefaultsNode,
build_from_imports_py,
)
from codegen.validators import (
write_validator_py,
write_data_validator_py,
get_data_validator_instance,
)
# Import notes
# ------------
# Nothing from the plotly/ package should be imported during code
# generation. This introduces a lot of complexity regarding when imports
# happen relative to when various stages of code generation occur. Instead,
# helpers that are only needed during code generation should reside in the
# codegen/ package, and helpers used both during code generation and at
# runtime should reside in the _plotly_utils/ package.
# ----------------------------------------------------------------------------
def preprocess_schema(plotly_schema):
"""
Central location to make changes to schema before it's seen by the
PlotlyNode classes
"""
# Update template
# ---------------
layout = plotly_schema["layout"]["layoutAttributes"]
# Create codegen-friendly template scheme
template = {
"data": {
trace + "s": {"items": {trace: {}}, "role": "object"}
for trace in plotly_schema["traces"]
},
"layout": {},
"description": """\
Default attributes to be applied to the plot.
This should be a dict with format: `{'layout': layoutTemplate, 'data':
{trace_type: [traceTemplate, ...], ...}}` where `layoutTemplate` is a dict
matching the structure of `figure.layout` and `traceTemplate` is a dict
matching the structure of the trace with type `trace_type` (e.g. 'scatter').
Alternatively, this may be specified as an instance of
plotly.graph_objs.layout.Template.
Trace templates are applied cyclically to
traces of each type. Container arrays (eg `annotations`) have special
handling: An object ending in `defaults` (eg `annotationdefaults`) is
applied to each array item. But if an item has a `templateitemname`
key we look in the template array for an item with matching `name` and
apply that instead. If no matching `name` is found we mark the item
invisible. Any named template item not referenced is appended to the
end of the array, so this can be used to add a watermark annotation or a
logo image, for example. To omit one of these items on the plot, make
an item with matching `templateitemname` and `visible: false`.""",
}
layout["template"] = template
# Rename concentrationscales to colorscale to match conventions
items = plotly_schema["traces"]["sankey"]["attributes"]["link"]["colorscales"][
"items"
]
if "concentrationscales" in items:
items["colorscale"] = items.pop("concentrationscales")
def perform_codegen():
# Set root codegen output directory
# ---------------------------------
# (relative to project root)
abs_file_path = opath.realpath(__file__)
packages_py = opath.dirname(opath.dirname(opath.dirname(abs_file_path)))
outdir = opath.join(packages_py, "plotly", "plotly")
# Delete prior codegen output
# ---------------------------
validators_pkgdir = opath.join(outdir, "validators")
if opath.exists(validators_pkgdir):
shutil.rmtree(validators_pkgdir)
graph_objs_pkgdir = opath.join(outdir, "graph_objs")
if opath.exists(graph_objs_pkgdir):
shutil.rmtree(graph_objs_pkgdir)
# plotly/datatypes is not used anymore, but was at one point so we'll
# still delete it if we find it in case a developer is upgrading from an
# older version
datatypes_pkgdir = opath.join(outdir, "datatypes")
if opath.exists(datatypes_pkgdir):
shutil.rmtree(datatypes_pkgdir)
# Load plotly schema
# ------------------
plot_schema_path = opath.join(
packages_py, "plotly", "codegen", "resources", "plot-schema.json"
)
with open(plot_schema_path, "r") as f:
plotly_schema = json.load(f)
# Preprocess Schema
# -----------------
preprocess_schema(plotly_schema)
# Build node lists
# ----------------
# ### TraceNode ###
base_traces_node = TraceNode(plotly_schema)
compound_trace_nodes = PlotlyNode.get_all_compound_datatype_nodes(
plotly_schema, TraceNode
)
all_trace_nodes = PlotlyNode.get_all_datatype_nodes(plotly_schema, TraceNode)
# ### LayoutNode ###
compound_layout_nodes = PlotlyNode.get_all_compound_datatype_nodes(
plotly_schema, LayoutNode
)
layout_node = compound_layout_nodes[0]
all_layout_nodes = PlotlyNode.get_all_datatype_nodes(plotly_schema, LayoutNode)
subplot_nodes = [
node
for node in layout_node.child_compound_datatypes
if node.node_data.get("_isSubplotObj", False)
]
layout_array_nodes = [
node
for node in layout_node.child_compound_datatypes
if node.is_array_element and node.has_child("xref") and node.has_child("yref")
]
# ### FrameNode ###
compound_frame_nodes = PlotlyNode.get_all_compound_datatype_nodes(
plotly_schema, FrameNode
)
frame_node = compound_frame_nodes[0]
all_frame_nodes = PlotlyNode.get_all_datatype_nodes(plotly_schema, FrameNode)
# ### All nodes ###
all_datatype_nodes = all_trace_nodes + all_layout_nodes + all_frame_nodes
all_compound_nodes = [
node
for node in all_datatype_nodes
if node.is_compound and not isinstance(node, ElementDefaultsNode)
]
# Write out validators
# --------------------
    # ### Layout ###
for node in all_layout_nodes:
write_validator_py(outdir, node)
# ### Trace ###
for node in all_trace_nodes:
write_validator_py(outdir, node)
# ### Frames ###
for node in all_frame_nodes:
write_validator_py(outdir, node)
# ### Data (traces) validator ###
write_data_validator_py(outdir, base_traces_node)
# Alls
# ----
alls = {}
# Write out datatypes
# -------------------
for node in all_compound_nodes:
write_datatype_py(outdir, node)
# ### Deprecated ###
# These are deprecated legacy datatypes like graph_objs.Marker
write_deprecated_datatypes(outdir)
# Write figure class to graph_objs
# --------------------------------
data_validator = get_data_validator_instance(base_traces_node)
layout_validator = layout_node.get_validator_instance()
frame_validator = frame_node.get_validator_instance()
write_figure_classes(
outdir,
base_traces_node,
data_validator,
layout_validator,
frame_validator,
subplot_nodes,
layout_array_nodes,
)
# Write validator __init__.py files
# ---------------------------------
# ### Write __init__.py files for each validator package ###
validator_rel_class_imports = {}
for node in all_datatype_nodes:
if node.is_mapped:
continue
key = node.parent_path_parts
validator_rel_class_imports.setdefault(key, []).append(
f"._{node.name_property}.{node.name_validator_class}"
)
# Add Data validator
root_validator_pairs = validator_rel_class_imports[()]
root_validator_pairs.append("._data.DataValidator")
# Output validator __init__.py files
validators_pkg = opath.join(outdir, "validators")
for path_parts, rel_classes in validator_rel_class_imports.items():
write_init_py(validators_pkg, path_parts, [], rel_classes)
# Write datatype __init__.py files
# --------------------------------
datatype_rel_class_imports = {}
datatype_rel_module_imports = {}
for node in all_compound_nodes:
key = node.parent_path_parts
# class import
datatype_rel_class_imports.setdefault(key, []).append(
f"._{node.name_undercase}.{node.name_datatype_class}"
)
# submodule import
if node.child_compound_datatypes:
datatype_rel_module_imports.setdefault(key, []).append(
f".{node.name_undercase}"
)
# ### Write plotly/graph_objs/graph_objs.py ###
    # This is for backward compatibility. It just imports everything from
# graph_objs/__init__.py
write_graph_objs_graph_objs(outdir)
# ### Add Figure and FigureWidget ###
root_datatype_imports = datatype_rel_class_imports[()]
root_datatype_imports.append("._figure.Figure")
# ### Add deprecations ###
for dep_clas in DEPRECATED_DATATYPES:
root_datatype_imports.append(f"._deprecations.{dep_clas}")
optional_figure_widget_import = f"""
if sys.version_info < (3, 7):
try:
import ipywidgets as _ipywidgets
from distutils.version import LooseVersion as _LooseVersion
if _LooseVersion(_ipywidgets.__version__) >= _LooseVersion("7.0.0"):
from ..graph_objs._figurewidget import FigureWidget
else:
raise ImportError()
except Exception:
from ..missing_ipywidgets import FigureWidget
else:
__all__.append("FigureWidget")
orig_getattr = __getattr__
def __getattr__(import_name):
if import_name == "FigureWidget":
try:
import ipywidgets
from distutils.version import LooseVersion
if LooseVersion(ipywidgets.__version__) >= LooseVersion("7.0.0"):
from ..graph_objs._figurewidget import FigureWidget
return FigureWidget
else:
raise ImportError()
except Exception:
from ..missing_ipywidgets import FigureWidget
return FigureWidget
return orig_getattr(import_name)
"""
# ### __all__ ###
for path_parts, class_names in alls.items():
if path_parts and class_names:
filepath = opath.join(outdir, "graph_objs", *path_parts, "__init__.py")
with open(filepath, "at") as f:
f.write(f"\n__all__ = {class_names}")
# ### Output datatype __init__.py files ###
graph_objs_pkg = opath.join(outdir, "graph_objs")
for path_parts in datatype_rel_class_imports:
rel_classes = sorted(datatype_rel_class_imports[path_parts])
rel_modules = sorted(datatype_rel_module_imports.get(path_parts, []))
if path_parts == ():
init_extra = optional_figure_widget_import
else:
init_extra = ""
write_init_py(graph_objs_pkg, path_parts, rel_modules, rel_classes, init_extra)
# ### Output graph_objects.py alias
graph_objects_rel_classes = [
"..graph_objs." + rel_path.split(".")[-1]
for rel_path in datatype_rel_class_imports[()]
]
graph_objects_rel_modules = [
"..graph_objs." + rel_module.split(".")[-1]
for rel_module in datatype_rel_module_imports[()]
]
graph_objects_init_source = build_from_imports_py(
graph_objects_rel_modules,
graph_objects_rel_classes,
init_extra=optional_figure_widget_import,
)
graph_objects_path = opath.join(outdir, "graph_objects", "__init__.py")
os.makedirs(opath.join(outdir, "graph_objects"), exist_ok=True)
with open(graph_objects_path, "wt") as f:
f.write(graph_objects_init_source)
# ### Run black code formatter on output directories ###
subprocess.call(["black", "--target-version=py27", validators_pkgdir])
subprocess.call(["black", "--target-version=py27", graph_objs_pkgdir])
subprocess.call(["black", "--target-version=py27", graph_objects_path])
if __name__ == "__main__":
perform_codegen()
| plotly/plotly.py | packages/python/plotly/codegen/__init__.py | Python | mit | 11,939 | 0.00067 |
"""
Manage grains on the minion
===========================
This state allows for grains to be set.
Grains set or altered with this module are stored in the 'grains'
file on the minions, By default, this file is located at: ``/etc/salt/grains``
.. note::
This does **NOT** override any grains set in the minion config file.
"""
import re
from salt.defaults import DEFAULT_TARGET_DELIM
def exists(name, delimiter=DEFAULT_TARGET_DELIM):
"""
Ensure that a grain is set
name
The grain name
delimiter
A delimiter different from the default can be provided.
Check whether a grain exists. Does not attempt to check or set the value.
"""
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
ret = {"name": name, "changes": {}, "result": True, "comment": "Grain exists"}
_non_existent = object()
existing = __salt__["grains.get"](name, _non_existent)
if existing is _non_existent:
ret["result"] = False
ret["comment"] = "Grain does not exist"
return ret
def make_hashable(list_grain, result=None):
"""
Ensure that a list grain is hashable.
list_grain
The list grain that should be hashable
result
This function is recursive, so it must be possible to use a
sublist as parameter to the function. Should not be used by a caller
outside of the function.
Make it possible to compare two list grains to each other if the list
contains complex objects.
"""
result = result or set()
for sublist in list_grain:
if type(sublist) == list:
make_hashable(sublist, result)
else:
result.add(frozenset(sublist))
return result
def present(name, value, delimiter=DEFAULT_TARGET_DELIM, force=False):
"""
Ensure that a grain is set
.. versionchanged:: 2015.8.2
name
The grain name
value
The value to set on the grain
force
If force is True, the existing grain will be overwritten
regardless of its existing or provided value type. Defaults to False
.. versionadded:: 2015.8.2
delimiter
A delimiter different from the default can be provided.
.. versionadded:: 2015.8.2
It is now capable to set a grain to a complex value (ie. lists and dicts)
and supports nested grains as well.
If the grain does not yet exist, a new grain is set to the given value. For
a nested grain, the necessary keys are created if they don't exist. If
a given key is an existing value, it will be converted, but an existing value
different from the given key will fail the state.
If the grain with the given name exists, its value is updated to the new
value unless its existing or provided value is complex (list or dict). Use
`force: True` to overwrite.
.. code-block:: yaml
cheese:
grains.present:
- value: edam
nested_grain_with_complex_value:
grains.present:
- name: icinga:Apache SSL
- value:
- command: check_https
- params: -H localhost -p 443 -S
with,a,custom,delimiter:
grains.present:
- value: yay
- delimiter: ','
"""
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
_non_existent = object()
existing = __salt__["grains.get"](name, _non_existent)
if existing == value:
ret["comment"] = "Grain is already set"
return ret
if __opts__["test"]:
ret["result"] = None
if existing is _non_existent:
ret["comment"] = "Grain {} is set to be added".format(name)
ret["changes"] = {"new": name}
else:
ret["comment"] = "Grain {} is set to be changed".format(name)
ret["changes"] = {"changed": {name: value}}
return ret
ret = __salt__["grains.set"](name, value, force=force)
if ret["result"] is True and ret["changes"] != {}:
ret["comment"] = "Set grain {} to {}".format(name, value)
ret["name"] = name
return ret
def list_present(name, value, delimiter=DEFAULT_TARGET_DELIM):
"""
.. versionadded:: 2014.1.0
Ensure the value is present in the list-type grain. Note: If the grain that is
provided in ``name`` is not present on the system, this new grain will be created
with the corresponding provided value.
name
The grain name.
value
The value is present in the list type grain.
delimiter
A delimiter different from the default ``:`` can be provided.
.. versionadded:: 2015.8.2
The grain should be `list type <http://docs.python.org/2/tutorial/datastructures.html#data-structures>`_
.. code-block:: yaml
roles:
grains.list_present:
- value: web
For multiple grains, the syntax looks like:
.. code-block:: yaml
roles:
grains.list_present:
- value:
- web
- dev
"""
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
grain = __salt__["grains.get"](name)
if grain:
# check whether grain is a list
if not isinstance(grain, list):
ret["result"] = False
ret["comment"] = "Grain {} is not a valid list".format(name)
return ret
if isinstance(value, list):
if make_hashable(value).issubset(
make_hashable(__salt__["grains.get"](name))
):
ret["comment"] = "Value {1} is already in grain {0}".format(name, value)
return ret
elif name in __context__.get("pending_grains", {}):
# elements common to both
intersection = set(value).intersection(
__context__.get("pending_grains", {})[name]
)
if intersection:
value = list(
set(value).difference(__context__["pending_grains"][name])
)
ret[
"comment"
] = 'Removed value {} from update due to context found in "{}".\n'.format(
value, name
)
if "pending_grains" not in __context__:
__context__["pending_grains"] = {}
if name not in __context__["pending_grains"]:
__context__["pending_grains"][name] = set()
__context__["pending_grains"][name].update(value)
else:
if value in grain:
ret["comment"] = "Value {1} is already in grain {0}".format(name, value)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Value {1} is set to be appended to grain {0}".format(
name, value
)
ret["changes"] = {"new": grain}
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Grain {} is set to be added".format(name)
ret["changes"] = {"new": grain}
return ret
new_grains = __salt__["grains.append"](name, value)
if isinstance(value, list):
if not set(value).issubset(set(__salt__["grains.get"](name))):
ret["result"] = False
ret["comment"] = "Failed append value {1} to grain {0}".format(name, value)
return ret
else:
if value not in __salt__["grains.get"](name, delimiter=DEFAULT_TARGET_DELIM):
ret["result"] = False
ret["comment"] = "Failed append value {1} to grain {0}".format(name, value)
return ret
ret["comment"] = "Append value {1} to grain {0}".format(name, value)
ret["changes"] = {"new": new_grains}
return ret
def list_absent(name, value, delimiter=DEFAULT_TARGET_DELIM):
"""
Delete a value from a grain formed as a list.
.. versionadded:: 2014.1.0
name
The grain name.
value
The value to delete from the grain list.
delimiter
A delimiter different from the default ``:`` can be provided.
.. versionadded:: 2015.8.2
The grain should be `list type <http://docs.python.org/2/tutorial/datastructures.html#data-structures>`_
.. code-block:: yaml
roles:
grains.list_absent:
- value: db
For multiple grains, the syntax looks like:
.. code-block:: yaml
roles:
grains.list_absent:
- value:
- web
- dev
"""
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
comments = []
grain = __salt__["grains.get"](name, None)
if grain:
if isinstance(grain, list):
if not isinstance(value, list):
value = [value]
for val in value:
if val not in grain:
comments.append(
"Value {1} is absent from grain {0}".format(name, val)
)
elif __opts__["test"]:
ret["result"] = None
comments.append(
"Value {1} in grain {0} is set to be deleted".format(name, val)
)
if "deleted" not in ret["changes"].keys():
ret["changes"] = {"deleted": []}
ret["changes"]["deleted"].append(val)
elif val in grain:
__salt__["grains.remove"](name, val)
comments.append(
"Value {1} was deleted from grain {0}".format(name, val)
)
if "deleted" not in ret["changes"].keys():
ret["changes"] = {"deleted": []}
ret["changes"]["deleted"].append(val)
ret["comment"] = "\n".join(comments)
return ret
else:
ret["result"] = False
ret["comment"] = "Grain {} is not a valid list".format(name)
else:
ret["comment"] = "Grain {} does not exist".format(name)
return ret
def absent(name, destructive=False, delimiter=DEFAULT_TARGET_DELIM, force=False):
"""
.. versionadded:: 2014.7.0
Delete a grain from the grains config file
name
The grain name
destructive
If destructive is True, delete the entire grain. If
destructive is False, set the grain's value to None. Defaults to False.
force
If force is True, the existing grain will be overwritten
regardless of its existing or provided value type. Defaults to False
.. versionadded:: 2015.8.2
delimiter
A delimiter different from the default can be provided.
.. versionadded:: 2015.8.2
.. versionchanged:: 2015.8.2
This state now support nested grains and complex values. It is also more
conservative: if a grain has a value that is a list or a dict, it will
not be removed unless the `force` parameter is True.
.. code-block:: yaml
grain_name:
grains.absent
"""
_non_existent = object()
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
grain = __salt__["grains.get"](name, _non_existent)
if grain is None:
if __opts__["test"]:
ret["result"] = None
if destructive is True:
ret["comment"] = "Grain {} is set to be deleted".format(name)
ret["changes"] = {"deleted": name}
return ret
ret = __salt__["grains.set"](name, None, destructive=destructive, force=force)
if ret["result"]:
if destructive is True:
ret["comment"] = "Grain {} was deleted".format(name)
ret["changes"] = {"deleted": name}
ret["name"] = name
elif grain is not _non_existent:
if __opts__["test"]:
ret["result"] = None
if destructive is True:
ret["comment"] = "Grain {} is set to be deleted".format(name)
ret["changes"] = {"deleted": name}
else:
ret[
"comment"
] = "Value for grain {} is set to be deleted (None)".format(name)
ret["changes"] = {"grain": name, "value": None}
return ret
ret = __salt__["grains.set"](name, None, destructive=destructive, force=force)
if ret["result"]:
if destructive is True:
ret["comment"] = "Grain {} was deleted".format(name)
ret["changes"] = {"deleted": name}
else:
ret["comment"] = "Value for grain {} was set to None".format(name)
ret["changes"] = {"grain": name, "value": None}
ret["name"] = name
else:
ret["comment"] = "Grain {} does not exist".format(name)
return ret
def append(name, value, convert=False, delimiter=DEFAULT_TARGET_DELIM):
"""
.. versionadded:: 2014.7.0
Append a value to a list in the grains config file. The grain that is being
appended to (name) must exist before the new value can be added.
name
The grain name
value
The value to append
convert
If convert is True, convert non-list contents into a list.
If convert is False and the grain contains non-list contents, an error
is given. Defaults to False.
delimiter
A delimiter different from the default can be provided.
.. versionadded:: 2015.8.2
.. code-block:: yaml
grain_name:
grains.append:
- value: to_be_appended
"""
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
grain = __salt__["grains.get"](name, None)
# Check if bool(grain) is False or if the grain is specified in the minions
# grains. Grains can be set to a None value by omitting a value in the
# definition.
if grain or name in __grains__:
if isinstance(grain, list):
if value in grain:
ret[
"comment"
] = "Value {1} is already in the list for grain {0}".format(name, value)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Value {1} in grain {0} is set to be added".format(
name, value
)
ret["changes"] = {"added": value}
return ret
__salt__["grains.append"](name, value)
ret["comment"] = "Value {1} was added to grain {0}".format(name, value)
ret["changes"] = {"added": value}
else:
if convert is True:
if __opts__["test"]:
ret["result"] = None
ret["comment"] = (
"Grain {} is set to be converted "
"to list and value {} will be "
"added".format(name, value)
)
ret["changes"] = {"added": value}
return ret
grain = [] if grain is None else [grain]
grain.append(value)
__salt__["grains.setval"](name, grain)
ret["comment"] = "Value {1} was added to grain {0}".format(name, value)
ret["changes"] = {"added": value}
else:
ret["result"] = False
ret["comment"] = "Grain {} is not a valid list".format(name)
else:
ret["result"] = False
ret["comment"] = "Grain {} does not exist".format(name)
return ret
| saltstack/salt | salt/states/grains.py | Python | apache-2.0 | 15,945 | 0.001568 |
# encoding: utf-8
import os
import subprocess
import mongoengine as db
def generic_backend():
"""Allow Python to handle the details of load average discovery.
This is the fastest method, but may not be portable everywhere.
Testing on a Linux 2.6.35 Rackspace Cloud server: 17µsec.
"""
yield os.getloadavg()
def linux_backend():
"""Process the contents of /proc/loadavg.
This is the second-slowest method and is only viable on Linux hosts.
Testing on a Linux 2.6.35 Rackspace Cloud server: 40µsec.
"""
with open('/proc/loadavg', 'r') as fh:
yield [float(i) for i in fh.read().split(' ', 3)[:3]]
def posix_backend():
"""Process the output of the uptime command.
This is by far the slowest method, only to be used under dire circumstances.
Testing on a Linux 2.6.35 Rackspace Cloud server: 6.9msec.
TODO: Pass the subprocess call back up to the reactor to wait for data.
"""
yield [float(i) for i in subprocess.check_output(['uptime']).rpartition(': ')[2].strip().split(' ', 3)[:3]]
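# Registry mapping the configured backend name to its generator; None selects
# the generic os.getloadavg() implementation.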
_map = {'generic': generic_backend, 'linux': linux_backend, 'posix': posix_backend, None: generic_backend}
class LoadMixIn(object):
load = db.ListField(db.FloatField, verbose_name="Load Average", default=list)
class LoadExtension(object):
def __init__(self, config):
super(LoadExtension, self).__init__()
# TODO: Standard trifecta.
self.backend = _map[config.get('backend')]
@property
def mixin(self):
return LoadMixIn
def start(self):
pass
def stop(self):
pass
def __call__(self, rec):
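        # The backend is a generator: anything it yields that is not a list is
        # passed back up (presumably for the reactor to wait on); the final
        # three-element [1, 5, 15 minute] load list is stored on the record.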
for chunk in self.backend():
if type(chunk) != list:
yield chunk
rec.load = chunk
| marrow/monitor.collector | marrow/monitor/collector/ext/load.py | Python | mit | 1,855 | 0.012412 |
"""GIFImage by Matthew Roe"""
import Image
import pygame
from pygame.locals import *
import time
class GIFImage(object):
def __init__(self, filename):
self.filename = filename
self.image = Image.open(filename)
self.frames = []
self.get_frames()
self.cur = 0
self.ptime = time.time()
self.running = True
self.breakpoint = len(self.frames)-1
self.startpoint = 0
self.reversed = False
def get_rect(self):
return pygame.rect.Rect((0,0), self.image.size)
def get_frames(self):
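        # Walk every frame of the GIF, tracking tile offsets and per-frame
        # palettes, and build a list of (pygame.Surface, duration) pairs.
        # Frames that only cover part of the image are composited over the
        # previously decoded frames (the "cons" case below).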
image = self.image
pal = image.getpalette()
base_palette = []
for i in range(0, len(pal), 3):
rgb = pal[i:i+3]
base_palette.append(rgb)
all_tiles = []
try:
while 1:
if not image.tile:
image.seek(0)
if image.tile:
all_tiles.append(image.tile[0][3][0])
image.seek(image.tell()+1)
except EOFError:
image.seek(0)
all_tiles = tuple(set(all_tiles))
try:
while 1:
try:
duration = image.info["duration"]
except:
duration = 100
                duration *= .001  # convert from milliseconds to seconds
cons = False
x0, y0, x1, y1 = (0, 0) + image.size
if image.tile:
tile = image.tile
else:
image.seek(0)
tile = image.tile
if len(tile) > 0:
x0, y0, x1, y1 = tile[0][1]
if all_tiles:
if all_tiles in ((6,), (7,)):
cons = True
pal = image.getpalette()
palette = []
for i in range(0, len(pal), 3):
rgb = pal[i:i+3]
palette.append(rgb)
elif all_tiles in ((7, 8), (8, 7)):
pal = image.getpalette()
palette = []
for i in range(0, len(pal), 3):
rgb = pal[i:i+3]
palette.append(rgb)
else:
palette = base_palette
else:
palette = base_palette
pi = pygame.image.fromstring(image.tostring(), image.size, image.mode)
pi.set_palette(palette)
if "transparency" in image.info:
pi.set_colorkey(image.info["transparency"])
pi2 = pygame.Surface(image.size, SRCALPHA)
if cons:
for i in self.frames:
pi2.blit(i[0], (0,0))
pi2.blit(pi, (x0, y0), (x0, y0, x1-x0, y1-y0))
self.frames.append([pi2, duration])
image.seek(image.tell()+1)
except EOFError:
pass
def render(self, screen, pos):
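        # Advance to the next (or previous, when reversed) frame once the
        # current frame's duration has elapsed, wrapping around the configured
        # start/break points, then blit the current frame.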
if self.running:
if time.time() - self.ptime > self.frames[self.cur][1]:
if self.reversed:
self.cur -= 1
if self.cur < self.startpoint:
self.cur = self.breakpoint
else:
self.cur += 1
if self.cur > self.breakpoint:
self.cur = self.startpoint
self.ptime = time.time()
screen.blit(self.frames[self.cur][0], pos)
def seek(self, num):
self.cur = num
if self.cur < 0:
self.cur = 0
if self.cur >= len(self.frames):
self.cur = len(self.frames)-1
def set_bounds(self, start, end):
if start < 0:
start = 0
if start >= len(self.frames):
start = len(self.frames) - 1
if end < 0:
end = 0
if end >= len(self.frames):
end = len(self.frames) - 1
if end < start:
end = start
self.startpoint = start
self.breakpoint = end
def pause(self):
self.running = False
def play(self):
self.running = True
def rewind(self):
self.seek(0)
def fastforward(self):
self.seek(self.length()-1)
def get_height(self):
return self.image.size[1]
def get_width(self):
return self.image.size[0]
def get_size(self):
return self.image.size
def length(self):
return len(self.frames)
def reverse(self):
self.reversed = not self.reversed
def reset(self):
self.cur = 0
self.ptime = time.time()
self.reversed = False
def copy(self):
new = GIFImage(self.filename)
new.running = self.running
new.breakpoint = self.breakpoint
new.startpoint = self.startpoint
new.cur = self.cur
new.ptime = self.ptime
new.reversed = self.reversed
return new
##def main():
## pygame.init()
## screen = pygame.display.set_mode((640, 480))
##
## hulk = GIFImage("hulk.gif")
## football = GIFImage("football.gif")
## hulk2 = hulk.copy()
## hulk2.reverse()
## hulk3 = hulk.copy()
## hulk3.set_bounds(0, 2)
## spiderman = GIFImage("spiderman7.gif")
##
## while 1:
## for event in pygame.event.get():
## if event.type == QUIT:
## pygame.quit()
## return
##
## screen.fill((255,255,255))
## hulk.render(screen, (50, 0))
## hulk2.render(screen, (50, 150))
## hulk3.render(screen, (50, 300))
## football.render(screen, (200, 50))
## spiderman.render(screen, (200, 150))
## pygame.display.flip()
##
##if __name__ == "__main__":
## main() | drfreemayn/ml-testing | sex-dice/GIFImage.py | Python | gpl-2.0 | 5,891 | 0.006451 |
#!/usr/bin/env python3
# bank_account.py
#
# Simple Bank Account class example.
#
# AMJ
# 2017-04-01
from random import randint
class BankAccount:
def __init__ (self, account_holder, has_overdraft):
self.account_number = self.generate_account_number ()
self.account_holder = account_holder
self.has_overdraft = has_overdraft
self.__balance = 0.0
self.is_active = True
@property
def balance (self):
return self.__balance
def deposit (self, deposit_amount):
try:
if deposit_amount > 0:
self.__balance += deposit_amount
except TypeError:
pass
def withdraw (self, withdraw_amount):
try:
            # Allow the withdrawal when the balance covers it or the account
            # has an overdraft facility.
            if withdraw_amount <= self.__balance or self.has_overdraft:
self.__balance -= withdraw_amount
except TypeError:
pass
def deactivate (self):
self.is_active = False
def activate (self):
self.is_active = True
def generate_account_number (self):
s = ''
for i in range (9):
s += str (randint (0, 9))
return s
def __str__ (self):
return "Account: {:} Holder: {:} Balance: {:}".format (self.account_number, self.account_holder, self.balance)
| TonyJenkins/cfs2160-python | 04classes/Bank/bank_account.py | Python | unlicense | 1,282 | 0.0117 |
import django
from django.db import models
from django.db.models.sql.query import LOOKUP_SEP
from django.db.models.deletion import Collector
# from django.db.models.related import RelatedObject
from django.db.models.fields.related import ForeignObjectRel as RelatedObject
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.encoding import force_unicode, smart_unicode, smart_str
from django.utils.translation import ungettext
from django.core.urlresolvers import reverse
from django.conf import settings
from django.forms import Media
from django.utils.translation import get_language
import datetime
import decimal
if 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
from django.contrib.staticfiles.templatetags.staticfiles import static
else:
from django.templatetags.static import static
try:
import json
except ImportError:
from django.utils import simplejson as json
try:
from django.utils.timezone import template_localtime as tz_localtime
except ImportError:
from django.utils.timezone import localtime as tz_localtime
try:
from django.contrib.auth import get_user_model
User = get_user_model()
username_field = User.USERNAME_FIELD
except Exception:
from django.contrib.auth.models import User
username_field = 'username'
def xstatic(*tags):
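    # Resolve dotted vendor tags into static file URLs, picking the
    # dev/production/cdn variant based on DEBUG and STATIC_USE_CDN and
    # substituting the active language code where required.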
from vendors import vendors
node = vendors
fs = []
lang = get_language()
for tag in tags:
try:
for p in tag.split('.'):
node = node[p]
except Exception, e:
if tag.startswith('xadmin'):
file_type = tag.split('.')[-1]
if file_type in ('css', 'js'):
node = "xadmin/%s/%s" % (file_type, tag)
else:
raise e
else:
raise e
if type(node) in (str, unicode):
files = node
else:
mode = 'dev'
if not settings.DEBUG:
mode = getattr(settings, 'STATIC_USE_CDN',
False) and 'cdn' or 'production'
if mode == 'cdn' and mode not in node:
mode = 'production'
if mode == 'production' and mode not in node:
mode = 'dev'
files = node[mode]
files = type(files) in (list, tuple) and files or [files, ]
fs.extend([f % {'lang': lang.replace('_', '-')} for f in files])
return [f.startswith('http://') and f or static(f) for f in fs]
def vendor(*tags):
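    # Build a django Media object from the resolved files, adding .js entries
    # as scripts and .css entries under the 'screen' media type.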
media = Media()
for tag in tags:
file_type = tag.split('.')[-1]
files = xstatic(tag)
if file_type == 'js':
media.add_js(files)
elif file_type == 'css':
media.add_css({'screen': files})
return media
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field_by_name(field_name)[0]
if ((hasattr(field, 'rel') and
isinstance(field.rel, models.ManyToManyRel)) or
(isinstance(field, models.fields.related.ForeignObjectRel) and
not field.field.unique)):
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and false
if key.endswith('__isnull') and type(value) == str:
if value.lower() in ('', 'false'):
value = False
else:
value = True
return value
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' characters. Similar to urllib.quote, except that the
quoting is slightly different so that it doesn't get automatically
unquoted by the Web browser.
"""
if not isinstance(s, basestring):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
if not isinstance(s, basestring):
return s
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
for field in opts['fields']:
# type checking feels dirty, but it seems like the best way here
if type(field) == tuple:
field_names.extend(field)
else:
field_names.append(field)
return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogenous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = '%s.delete_%s' % (opts.app_label, opts.model_name)
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe(u'<span class="label label-info">%s:</span> <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return mark_safe(u'<span class="label label-info">%s:</span> %s' %
(escape(capfirst(opts.verbose_name)),
escape(obj)))
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class NestedObjects(Collector):
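    # Collector that also records parent -> child edges so the objects to be
    # deleted can be rendered as a nested list; protected objects are gathered
    # instead of aborting the collection.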
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source_attr=None, **kwargs):
for obj in objs:
if source_attr and hasattr(obj, source_attr):
self.add_edge(getattr(obj, source_attr), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError, e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_unicode(opts.verbose_name),
'verbose_name_plural': force_unicode(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def is_rel_field(name,model):
if hasattr(name,'split') and name.find("__")>0:
parts = name.split("__")
if parts[0] in model._meta.get_all_field_names():
return True
return False
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = opts.get_field(name)
except models.FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and hasattr(model_admin, name) and
not name == '__str__' and not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
if is_rel_field(name,obj):
parts = name.split("__")
rel_name,sub_rel_name = parts[0],"__".join(parts[1:])
rel_obj = getattr(obj,rel_name)
if rel_obj is not None:
return lookup_field(sub_rel_name,rel_obj,model_admin)
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable or the
name of an object attributes, as well as a genuine fields. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = model._meta.get_field_by_name(name)[0]
if isinstance(field, RelatedObject):
label = field.opts.verbose_name
else:
label = field.verbose_name
except models.FieldDoesNotExist:
if name == "__unicode__":
label = force_unicode(model._meta.verbose_name)
attr = unicode
elif name == "__str__":
label = smart_str(model._meta.verbose_name)
attr = str
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
elif is_rel_field(name,model):
parts = name.split("__")
rel_name,name = parts[0],"__".join(parts[1:])
field = model._meta.get_field_by_name(rel_name)[0]
if isinstance(field, RelatedObject):
label = field.opts.verbose_name
else:
label = field.verbose_name
rel_model = field.rel.to
rel_label = label_for_field(name, rel_model, model_admin=model_admin, return_attr=return_attr)
if return_attr:
rel_label,attr = rel_label
return ("%s %s"%(label,rel_label), attr)
else:
return "%s %s"%(label,rel_label)
else:
message = "Unable to lookup '%s' on %s" % (
name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
try:
help_text = model._meta.get_field_by_name(name)[0].help_text
except models.FieldDoesNotExist:
help_text = ""
return smart_unicode(help_text)
def admin_urlname(value, arg):
return 'xadmin:%s_%s_%s' % (value.app_label, value.model_name, arg)
def boolean_icon(field_val):
return mark_safe(u'<i class="%s" alt="%s"></i>' % (
{True: 'fa fa-check-circle text-success', False: 'fa fa-times-circle text-error', None: 'fa fa-question-circle muted'}[field_val], field_val))
def display_for_field(value, field):
from xadmin.views.list import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(tz_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
elif isinstance(field.rel, models.ManyToManyRel):
return ', '.join([smart_unicode(obj) for obj in value.all()])
else:
return smart_unicode(value)
def display_for_value(value, boolean=False):
from xadmin.views.list import EMPTY_CHANGELIST_VALUE
if boolean:
return boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(tz_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_unicode(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if isinstance(field, models.fields.related.ForeignObjectRel):
return field.model
elif getattr(field, 'rel'): # or isinstance?
return field.rel.to
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field, model, direct, m2m = parent._meta.get_field_by_name(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
if direct:
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field_by_name(piece)[0])
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a `limit_choices_to` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'limit_choices_to', None))
if not limit_choices_to:
return models.Q() # empty Q
elif isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
def sortkeypicker(keynames):
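    # Return a key function for sorted(): a leading '-' on a key name sorts
    # that (numeric) key in descending order by negating its value.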
negate = set()
for i, k in enumerate(keynames):
if k[:1] == '-':
keynames[i] = k[1:]
negate.add(k[1:])
def getit(adict):
composite = [adict[k] for k in keynames]
for i, (k, v) in enumerate(zip(keynames, composite)):
if k in negate:
composite[i] = -v
return composite
return getit
| pobear/django-xadmin | xadmin/util.py | Python | bsd-3-clause | 19,558 | 0.001534 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import functools
import itertools
import math
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.test_session(use_gpu=True):
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = self.evaluate(rgb)
self.assertAllClose(rgb_tf, rgb_np)
class RGBToYIQTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yiq, split0))
split2 = list(map(image_ops.yiq_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class RGBToYUVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yuv, split0))
split2 = list(map(image_ops.yuv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
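    # NumPy reference implementation using the 0.2989/0.5870/0.1140 luminance
    # weights, used to validate image_ops.rgb_to_grayscale below.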
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testShapeInference(self):
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.test_session(use_gpu=True):
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.test_session(use_gpu=True):
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.test_session(use_gpu=True):
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.test_session(use_gpu=True):
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
def test_adjust_gamma_one(self):
"""Same image should be returned for gamma equal to one"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=1)
y_tf = self.evaluate(y)
y_np = x_np
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_less_zero(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number."
try:
image_ops.adjust_gamma(x, gamma=-1)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def test_adjust_gamma_less_zero_tensor(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = constant_op.constant(-1.0, dtype=dtypes.float32)
image = image_ops.adjust_gamma(x, gamma=y)
err_msg = "Gamma should be a non-negative real number."
try:
self.evaluate(image)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def test_adjust_gamma_zero(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=0)
y_tf = self.evaluate(y)
dtype = x.dtype.as_numpy_dtype
y_np = np.array([dtypes.dtype_range[dtype][1]] * x_np.size)
y_np = y_np.reshape((8, 8))
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_less_one(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half"""
with self.cached_session():
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
y = image_ops.adjust_gamma(x_np, gamma=0.5)
y_tf = np.trunc(y.eval())
y_np = np.array(
[[0, 31, 45, 55, 63, 71, 78, 84], [
90, 95, 100, 105, 110, 115, 119, 123
], [127, 131, 135, 139, 142, 146, 149, 153], [
156, 159, 162, 165, 168, 171, 174, 177
], [180, 183, 186, 188, 191, 194, 196, 199], [
201, 204, 206, 209, 211, 214, 216, 218
], [221, 223, 225, 228, 230, 232, 234, 236],
[238, 241, 243, 245, 247, 249, 251, 253]],
dtype=np.float32)
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_greater_one(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two"""
with self.cached_session():
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
y = image_ops.adjust_gamma(x_np, gamma=2)
y_tf = np.trunc(y.eval())
y_np = np.array(
[[0, 0, 0, 0, 1, 1, 2, 3], [4, 5, 6, 7, 9, 10, 12, 14], [
16, 18, 20, 22, 25, 27, 30, 33
], [36, 39, 42, 45, 49, 52, 56, 60], [64, 68, 72, 76, 81, 85, 90, 95],
[100, 105, 110, 116, 121, 127, 132, 138], [
144, 150, 156, 163, 169, 176, 182, 189
], [196, 203, 211, 218, 225, 233, 241, 249]],
dtype=np.float32)
self.assertAllClose(y_tf, y_np, 1e-6)
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
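    # colorsys-based reference: shift each pixel's hue by delta_h (mod 1.0)
    # and convert back to RGB, used to validate adjust_hue on random inputs.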
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = self.evaluate(y)
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
fused = False
with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class FlipImageBenchmark(test.Benchmark):
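  # Each benchmark runs warmup_rounds untimed iterations, then times
  # benchmark_rounds iterations and reports the mean step time per device.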
def _benchmarkFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.flip_left_right(inputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):
image_shape = [16, 299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: "
"%.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkFlipLeftRightCpu1(self):
self._benchmarkFlipLeftRight("/cpu:0", 1)
def benchmarkFlipLeftRightCpuAll(self):
self._benchmarkFlipLeftRight("/cpu:0", None)
def benchmarkFlipLeftRightGpu(self):
self._benchmarkFlipLeftRight(test.gpu_device_name(), None)
def benchmarkRandomFlipLeftRightCpu1(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", 1)
def benchmarkRandomFlipLeftRightCpuAll(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", None)
def benchmarkRandomFlipLeftRightGpu(self):
self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)
def benchmarkBatchedRandomFlipLeftRightCpu1(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1)
def benchmarkBatchedRandomFlipLeftRightCpuAll(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None)
def benchmarkBatchedRandomFlipLeftRightGpu(self):
self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
sess.run(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchSaturation(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjust_saturation(self, image, saturation_factor):
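    # Convert to float32, apply the fused adjust_saturation kernel, and
    # convert back to the original dtype.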
image = ops.convert_to_tensor(image, name="image")
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
saturation_adjusted_image = gen_image_ops.adjust_saturation(
flt_image, saturation_factor)
return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)
def testHalfSaturationFused(self):
x_shape = [2, 2, 3]
x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturationFused(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.test_session(use_gpu=True):
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = self._adjust_saturation(x_np, scale).eval()
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
def testInvolutionLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
self.assertTrue(y.op.name.startswith("flip_left_right"))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipLeftRightWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [3, 2, 1]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
# 100 trials, each containing batch_size elements
# Mean: 50 * batch_size
# Std Dev: ~5 * sqrt(batch_size)
# Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))
# = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680
six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)
self.assertGreaterEqual(count_flipped, six_sigma)
self.assertGreaterEqual(count_unflipped, six_sigma)
def testInvolutionUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
self.assertTrue(y.op.name.startswith("flip_up_down"))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipUpDownWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
# 100 trials, each containing batch_size elements
# Mean: 50 * batch_size
# Std Dev: ~5 * sqrt(batch_size)
# Six Sigma: 50 * batch_size - (5 * 6 * sqrt(batch_size))
# = 50 * batch_size - 30 * sqrt(batch_size) = 800 - 30 * 4 = 680
six_sigma = 50 * batch_size - 30 * np.sqrt(batch_size)
self.assertGreaterEqual(count_flipped, six_sigma)
self.assertGreaterEqual(count_unflipped, six_sigma)
def testInvolutionTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(x_tf)
self.assertTrue(y.op.name.startswith("transpose_image"))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
dtype=np.uint8).reshape([2, 3, 2, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testPartialShapes(self):
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims_3 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_dims_4 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_unknown_batch = array_ops.placeholder(
dtypes.uint8, shape=[None, 64, 64, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
    # Ops that support 3D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose_image, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertEqual(3, transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims_3 = op(p_unknown_dims_3)
self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegexp(ValueError, "must be > 0"):
op(p_zero_dim)
    # Ops that support 4D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose_image, image_ops.rot90
]:
transformed_unknown_dims_4 = op(p_unknown_dims_4)
self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
transformed_unknown_batch = op(p_unknown_batch)
self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
with self.assertRaisesRegexp(ValueError,
"must be at least three-dimensional"):
op(p_wrong_rank)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.test_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90GroupOrderWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.test_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.test_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
def testRot90NumpyEquivalenceWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.test_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
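        # axes=(1, 2) rotates the height/width dimensions, so each element of
        # the NHWC batch is rotated the same way as a single image.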
y_np = np.rot90(image, k=k, axes=(1, 2))
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
    y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
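    # Reference implementation: interpolate each pixel toward the per-image,
    # per-channel spatial mean, i.e. mean + contrast_factor * (x - mean),
    # with the mean taken over the height and width axes.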
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
def testContrastFactorShape(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
with self.assertRaisesRegexp(
ValueError, 'Shape must be rank 0 but is rank 1'):
image_ops.adjust_contrast(x_np, [2.0])
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, 1e-6)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
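    # Note that 255 + 10 = 265 below: the expected float output is not
    # clipped, so values may leave the nominal [0, 255] range.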
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
def _NumpyPerImageWhitening(self, x):
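    # Reference implementation of per_image_standardization: subtract the
    # mean and divide by the standard deviation, floored at
    # 1 / sqrt(num_pixels) so a uniform image does not cause a division by
    # (near) zero.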
num_pixels = np.prod(x.shape)
x2 = np.square(x).astype(np.float32)
mn = np.mean(x)
vr = np.mean(x2) - (mn * mn)
stddev = max(math.sqrt(vr), 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
def testBasic(self):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.per_image_standardization(x)
self.assertTrue(y.op.name.startswith("per_image_standardization"))
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.test_session(use_gpu=True):
whiten_np = self.evaluate(whiten)
self.assertFalse(np.any(np.isnan(whiten_np)))
def testBatchWhitening(self):
imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
with self.test_session(use_gpu=True):
imgs = constant_op.constant(imgs_np)
whiten = image_ops.per_image_standardization(imgs)
whiten_tf = self.evaluate(whiten)
for w_tf, w_np in zip(whiten_tf, whiten_np):
self.assertAllClose(w_tf, w_np, atol=1e-4)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against a generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"assertion failed:",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
    test_config = (
        ([-1, 0, 3, 3], "offset_height must be >= 0"),
        ([0, -1, 3, 3], "offset_width must be >= 0"),
        ([0, 0, 0, 3], "target_height must be > 0"),
        ([0, 0, 3, 0], "target_width must be > 0"),
        ([2, 0, 3, 3], "height must be >= target + offset"),
        ([0, 2, 3, 3], "width must be >= target + offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.name.startswith("crop_to_bounding_box"))
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
self.assertEqual(y.op.name, x.op.name)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
x_shape = [2, 4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
[[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testCropping2(self):
# Test case for 10315
x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = array_ops.placeholder(shape=x_shape, dtype=dtypes.int32)
y = image_ops.central_crop(x, 0.33)
y_tf = y.eval(feed_dict={x: x_np})
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testShapeInference(self):
# Test no-op fraction=1.0, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
    # Test fraction=0.5, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# Test no-op fraction=1.0, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
self._assertShapeInference([5, None, None, None], 1.0,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 1.0,
[None, None, None, None])
    # Test fraction=0.5, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
self._assertShapeInference([5, None, None, None], 0.5,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 0.5,
[None, None, None, None])
def testErrorOnInvalidCentralCropFractionValues(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
def testErrorOnInvalidShapes(self):
x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.5)
def testNameScope(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
y = image_ops.central_crop(x_np, 1.0)
self.assertTrue(y.op.name.startswith("central_crop"))
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testInt64(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
with self.test_session(use_gpu=True):
self.assertAllClose(y, self.evaluate(y_tf))
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
    # The original error message does not contain backslashes. However, they
    # are added by either the assert op or the runtime. If this behavior
    # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = ((-1, 0, 4, 4, "offset_height must be >= 0"),
(0, -1, 4, 4, "offset_width must be >= 0"),
(2, 0, 4, 4, "height must be <= target - offset"),
(0, 2, 4, 4, "width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(x, x_shape, *config_item)
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
with self.test_session(use_gpu=True):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# min_object_covered as tensor
min_object_covered_placeholder = array_ops.placeholder(dtypes.float32)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered_placeholder,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = y.eval(feed_dict={
min_object_covered_placeholder: min_object_covered
})
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
    # a stricter bound. Or, ideally, the sample_distorted_bounding_box Op should
    # be fixed to not use rejection sampling and to generate correctly uniform
# aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
    # For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print("area_ratio_hist ", area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
# Create an object with 1's in a region with area A and require that
# the total pixel values >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
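    # With height=40 and width=50 this is (3/40, 2/50, 13/40, 12/50)
    # = (0.075, 0.04, 0.325, 0.24).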
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShape(self):
with self.test_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=array_ops.placeholder(dtypes.float32),
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
def testDefaultMinObjectCovered(self):
# By default min_object_covered=0.1 if not provided
with self.test_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
class ResizeImagesTest(test_util.TensorFlowTestCase):
OPTIONS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, opt, nptype):
if (opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width], opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
with self.test_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.OPTIONS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, new_size, opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
with self.test_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, new_size, self.OPTIONS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, 4.0],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [None, 4],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, None],
image_ops.ResizeMethod.BILINEAR)
def testReturnDtype(self):
    target_shapes = [
        [6, 4],
        [3, 2],
        [array_ops.placeholder(dtypes.int32),
         array_ops.placeholder(dtypes.int32)],
    ]
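    # Per the assertions below, the input dtype is preserved only for
    # NEAREST_NEIGHBOR or when the target size equals the input size (a
    # no-op); every other method returns float32.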
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for opt in self.OPTIONS:
for target_shape in target_shapes:
y = image_ops.resize_images(image, target_shape, opt)
if (opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.cached_session() as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for opt in self.OPTIONS:
if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
opt)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
def testResizeUpAlignCornersFalse(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
for nptype in self.TYPES:
for opt in [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.AREA
]:
with self.test_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], opt, align_corners=False)
resized = self.evaluate(y)
expected = np.array(expected_data[opt]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpAlignCornersTrue(self):
img_shape = [1, 3, 2, 1]
data = [6, 3, 3, 6, 6, 9]
target_height = 5
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
6.5, 7.5, 6.0, 7.0, 8.0, 9.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
9.0, 9.0, 6.0, 6.0, 9.0, 9.0
]
# TODO(b/37749740): Improve alignment of ResizeMethod.AREA when
# align_corners=True.
expected_data[image_ops.ResizeMethod.AREA] = [
6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
3.0, 6.0, 6.0, 6.0, 6.0, 9.0
]
for nptype in self.TYPES:
for opt in [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.AREA
]:
with self.test_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], opt, align_corners=True)
resized = self.evaluate(y)
expected = np.array(expected_data[opt]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [
128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [
128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
]
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethod.BICUBIC)
resized = self.evaluate(y)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = self.evaluate(out_op)
with self.test_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.BILINEAR,
align_corners=align_corners)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
img_shape = [1, 3, 2, 1]
with self.test_session(use_gpu=True):
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize_images"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
target_max = [max_h, max_w]
x_tensor = x
feed_dict = {}
y = image_ops.resize_images(x_tensor, target_max,
preserve_aspect_ratio=preserve_aspect_ratio)
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertResizeEqual(self, x, x_shape, y, y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self, x, x_shape, target_shape,
y_shape, preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],
preserve_aspect_ratio=False)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
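    # The expected shape implies scaling by min(75 / 100, 50 / 100) = 0.5,
    # giving [50, 50, 10] while preserving the aspect ratio.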
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImageWithPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_pad(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e: # pylint: disable=broad-except
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 3, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Pad even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCrop(self):
# Crop even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3, 6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4, 8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [3, 4, 5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
x_shape = [8, 2, 1]
y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 2, 3, 6, 7, 0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0, 0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
# The original error message does not contain back slashes. However, they
# are added by either the assert op or the runtime. If this behavior
    # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
"target_height must be > 0")
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
"target_width must be > 0")
def testNameScope(self):
image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
def _SimpleColorRamp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.test_session(use_gpu=True) as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
with self.test_session(use_gpu=True) as sess:
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = sess.run([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testCropAndDecodeJpeg(self):
with self.cached_session() as sess:
# Encode it, then decode it, then encode it
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
[h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
for crop_window in crop_windows:
# Explicit two stages: decode + crop.
image1 = image_ops.decode_jpeg(jpeg0)
y, x, h, w = crop_window
image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)
# Combined decode+crop.
image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
# Combined decode+crop should have the same shape inference
self.assertAllEqual(image1_crop.get_shape().as_list(),
image2.get_shape().as_list())
# CropAndDecode should be equal to DecodeJpeg+Crop.
image1_crop, image2 = sess.run([image1_crop, image2])
self.assertAllEqual(image1_crop, image2)
def testCropAndDecodeJpegWithInvalidCropWindow(self):
with self.cached_session() as sess:
# Encode it, then decode it, then encode it
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
# Invalid crop windows.
crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
[11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
[0, 0, h + 1, w], [0, 0, h, w + 1]]
for crop_window in crop_windows:
result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
lambda e: "Invalid JPEG data or crop window" in str(e)):
sess.run(result)
def testSynthetic(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.test_session(use_gpu=True) as sess:
      # Compare decoding with both dct_method=INTEGER_FAST and
# default. They should be the same.
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = sess.run([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
def testShape(self):
with self.test_session(use_gpu=True) as sess:
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
def testExtractJpegShape(self):
# Read a real jpeg and verify shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.test_session(use_gpu=True) as sess:
jpeg = io_ops.read_file(path)
# Extract shape without decoding.
[image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
self.assertEqual(image_shape.tolist(), [256, 128, 3])
def testExtractJpegShapeforCmyk(self):
# Read a cmyk jpeg image, and verify its shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1_cmyk.jpg")
with self.test_session(use_gpu=True) as sess:
jpeg = io_ops.read_file(path)
[image_shape] = sess.run([image_ops.extract_jpeg_shape(jpeg)])
# Cmyk jpeg image has 4 channels.
self.assertEqual(image_shape.tolist(), [256, 128, 4])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = "tensorflow/core/lib/png/testdata/"
inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"),
(3, "lena_palette.png"), (4, "lena_palette_trns.png"))
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.test_session(use_gpu=True) as sess:
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = sess.run([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, self.evaluate(image1))
def testSynthetic(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.test_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.test_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testShape(self):
with self.test_session(use_gpu=True):
png = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def _testValid(self, filename):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
with self.test_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = sess.run([gif0, image0])
self.assertEqual(image0.shape, shape)
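      # Each frame of the test GIF is expected to contain a single white band
      # of STRIDE pixels that advances with the frame index: it first sweeps
      # across the WIDTH columns and, once start >= WIDTH, wraps around and
      # sweeps down the rows instead.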
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
print(frame_idx)
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testValid(self):
self._testValid("scan.gif")
self._testValid("optimized.gif")
def testShape(self):
with self.test_session(use_gpu=True) as sess:
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.test_session(use_gpu=True):
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y.eval(), y_np, atol=1e-5)
if output_dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
]:
y_saturate = image_ops.convert_image_dtype(
image, output_dtype, saturate=True)
self.assertTrue(y_saturate.dtype == output_dtype)
self.assertAllClose(y_saturate.eval(), y_np, atol=1e-5)
def testNoConvert(self):
# Make sure converting to the same data type creates only an identity op
with self.test_session(use_gpu=True):
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEquals(y.op.type, "Identity")
self.assertEquals(y.op.inputs[0], image)
def testConvertBetweenInteger(self):
    # Make sure converting between integer types scales appropriately
with self.test_session(use_gpu=True):
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])
def testConvertBetweenFloat(self):
    # Make sure converting between float types does nothing interesting
with self.test_session(use_gpu=True):
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.test_session(use_gpu=True):
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
def testConvertBetweenInt16AndInt8(self):
with self.test_session(use_gpu=True):
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
Note that these may be float-numbers so we only test
for approximate equality within some narrow error-bound.
"""
# Create a TensorFlow session.
with self.test_session(use_gpu=True):
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = self.evaluate(y)
# Assert that the results are as expected within
# some small error-bound in case they are float-values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
# Sum for all axis.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
# Only sum for the last 3 axis.
sum_axis = (1, 2, 3)
else:
# This should not occur in this test-code.
pass
tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
np.sum(np.abs(dif2), axis=sum_axis)
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
def testTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2], [4, 7]]
    # Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18], [29, 47]]
    # Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120], [193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var)
# Scale the pixel-values by a float. This scales the total variation as well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
class FormatTest(test_util.TensorFlowTestCase):
def testFormats(self):
prefix = "tensorflow/core/lib"
paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
"gif/testdata/lena.gif")
decoders = {
"jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
"png": functools.partial(image_ops.decode_png, channels=3),
"gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
}
with self.cached_session():
for path in paths:
contents = io_ops.read_file(os.path.join(prefix, path)).eval()
images = {}
for name, decode in decoders.items():
image = decode(contents).eval()
self.assertEqual(image.ndim, 3)
for prev_name, prev in images.items():
print("path %s, names %s %s, shapes %s %s" %
(path, name, prev_name, image.shape, prev.shape))
self.assertAllEqual(image, prev)
images[name] = image
def testError(self):
path = "tensorflow/core/lib/gif/testdata/scan.gif"
with self.cached_session():
for decode in image_ops.decode_jpeg, image_ops.decode_png:
with self.assertRaisesOpError(r"Got 12 frames"):
decode(io_ops.read_file(path)).eval()
class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
def testSelectFromThreeClusters(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
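    # Boxes 0-2 form one overlapping cluster, boxes 3-4 a second, and box 5
    # stands alone, so greedy NMS keeps the highest-scoring box from each
    # cluster, yielding indices [3, 0, 5] in descending score order.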
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold).eval()
self.assertAllClose(selected_indices, [3, 0, 5])
def testInvalidShape(self):
# The boxes should be 2D of shape [num_boxes, 4].
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 2 but is rank 1"):
boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
with self.assertRaisesRegexp(ValueError, "Dimension must be 4 but is 3"):
boxes = constant_op.constant([[0.0, 0.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
    # The boxes are of shape [num_boxes, 4], and the scores are
    # of shape [num_boxes]. So an error will be thrown.
with self.assertRaisesRegexp(ValueError,
"Dimensions must be equal, but are 1 and 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9, 0.75])
selected_indices = image_ops.non_max_suppression(boxes, scores, 3, 0.5)
# The scores should be 1D of shape [num_boxes].
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 1 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([[0.9]])
image_ops.non_max_suppression(boxes, scores, 3, 0.5)
    # The max_output_size should be a scalar (0-D).
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 0 but is rank 1"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, [3], 0.5)
    # The iou_threshold should be a scalar (0-D).
with self.assertRaisesRegexp(ValueError,
"Shape must be rank 0 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
image_ops.non_max_suppression(boxes, scores, 3, [[0.5]])
def testDataTypes(self):
# Test case for GitHub issue 20199.
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
# Note: There are multiple versions of non_max_suppression v2, v3, v4.
# gen_image_ops.non_max_suppression_v2:
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = gen_image_ops.non_max_suppression_v2(
boxes, scores, max_output_size, iou_threshold).eval()
self.assertAllClose(selected_indices, [3, 0, 5])
# image_ops.non_max_suppression = gen_image_ops.non_max_suppression_v3.
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold).eval()
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v4.
score_threshold = float('-inf')
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices, _ = gen_image_ops.non_max_suppression_v4(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase):
def testSelectFromThreeClusters(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices_padded, num_valid_padded = \
image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices_padded.eval(), [3, 0, 5, 0, 0])
self.assertEqual(num_valid_padded.eval(), 3)
self.assertAllClose(selected_indices.eval(), [3, 0, 5])
self.assertEqual(num_valid.eval(), 3)
def testSelectFromContinuousOverLap(self):
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices.eval(), [0, 2, 4])
self.assertEqual(num_valid.eval(), 3)
class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
def testSelectOneFromThree(self):
overlaps_np = [
[1.0, 0.7, 0.2],
[0.7, 1.0, 0.0],
[0.2, 0.0, 1.0],
]
scores_np = [0.7, 0.9, 0.1]
    max_output_size_np = 3
overlaps = constant_op.constant(overlaps_np)
scores = constant_op.constant(scores_np)
    max_output_size = constant_op.constant(max_output_size_np)
overlap_threshold = 0.6
score_threshold = 0.4
selected_indices = image_ops.non_max_suppression_with_overlaps(
overlaps, scores, max_output_size, overlap_threshold, score_threshold)
with self.cached_session():
self.assertAllClose(selected_indices.eval(), [1])
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
"""Tests utility function used by ssim() and psnr()."""
def testWrongDims(self):
img = array_ops.placeholder(dtype=dtypes.float32)
img_np = np.array((2, 2))
with self.test_session(use_gpu=True) as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img: img_np})
def testShapeMismatch(self):
img1 = array_ops.placeholder(dtype=dtypes.float32)
img2 = array_ops.placeholder(dtype=dtypes.float32)
img1_np = np.array([1, 2, 2, 1])
img2_np = np.array([1, 3, 3, 1])
with self.test_session(use_gpu=True) as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img1, img2)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img1: img1_np, img2: img2_np})
class PSNRTest(test_util.TensorFlowTestCase):
"""Tests for PSNR."""
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/psnr/testdata", filename))
im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE")
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = sess.run([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.test_session(use_gpu=True) as sess:
q20 = self._LoadTestImage(sess, "cat_q20.jpg")
q72 = self._LoadTestImage(sess, "cat_q72.jpg")
q95 = self._LoadTestImage(sess, "cat_q95.jpg")
return q20, q72, q95
def _PSNR_NumPy(self, orig, target, max_value):
"""Numpy implementation of PSNR."""
mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))
return 20 * np.log10(max_value) - 10 * np.log10(mse)
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testPSNRSingleImage(self):
image1 = self._RandomImage((8, 8, 1), 1)
image2 = self._RandomImage((8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.test_session(use_gpu=True):
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testPSNRMultiImage(self):
image1 = self._RandomImage((10, 8, 8, 1), 1)
image2 = self._RandomImage((10, 8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.test_session(use_gpu=True):
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_image1, tf_image2, 1, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testGoldenPSNR(self):
q20, q72, q95 = self._LoadTestImages()
# Verify NumPy implementation first.
# Golden values are generated using GNU Octave's psnr() function.
psnr1 = self._PSNR_NumPy(q20, q72, 1)
self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype))
psnr2 = self._PSNR_NumPy(q20, q95, 1)
self.assertNear(29.994, psnr2, 0.001)
psnr3 = self._PSNR_NumPy(q72, q95, 1)
self.assertNear(35.302, psnr3, 0.001)
# Test TensorFlow implementation.
with self.test_session(use_gpu=True):
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
tf_psnr1 = image_ops.psnr(tf_q20, tf_q72, 1, "psnr1").eval()
tf_psnr2 = image_ops.psnr(tf_q20, tf_q95, 1, "psnr2").eval()
tf_psnr3 = image_ops.psnr(tf_q72, tf_q95, 1, "psnr3").eval()
self.assertAllClose(psnr1, tf_psnr1, atol=0.001)
self.assertAllClose(psnr2, tf_psnr2, atol=0.001)
self.assertAllClose(psnr3, tf_psnr3, atol=0.001)
def testInfinity(self):
q20, _, _ = self._LoadTestImages()
psnr = self._PSNR_NumPy(q20, q20, 1)
with self.test_session(use_gpu=True):
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_psnr = image_ops.psnr(tf_q20, tf_q20, 1, "psnr").eval()
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testInt(self):
img1 = self._RandomImage((10, 8, 8, 1), 255)
img2 = self._RandomImage((10, 8, 8, 1), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
psnr_uint8 = image_ops.psnr(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
psnr_float32 = image_ops.psnr(img1, img2, 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(
psnr_uint8.eval(), self.evaluate(psnr_float32), atol=0.001)
class SSIMTest(test_util.TensorFlowTestCase):
"""Tests for SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_ssim = np.asarray([[1.000000, 0.230880, 0.231153],
[0.230880, 1.000000, 0.996828],
[0.231153, 0.996828, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = sess.run([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.test_session(use_gpu=True) as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against values produced by Matlab."""
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3)]
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
ssim = image_ops.ssim(*ph, max_val=1.0)
with self.test_session(use_gpu=True):
scores = [ssim.eval(dict(zip(ph, t)))
for t in itertools.combinations_with_replacement(img, 2)]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testBatch(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
ssim = image_ops.ssim(constant_op.constant(img1),
constant_op.constant(img2), 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBroadcast(self):
img = self._LoadTestImages()[:2]
expected = self._ssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
ssim = image_ops.ssim(img1, img2, 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testNegative(self):
"""Tests against negative SSIM index."""
step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
img1 = np.tile(step, (16, 1))
img2 = np.fliplr(img1)
img1 = img1.reshape((1, 16, 16, 1))
img2 = img2.reshape((1, 16, 16, 1))
ssim = image_ops.ssim(constant_op.constant(img1),
constant_op.constant(img2), 255)
with self.test_session(use_gpu=True):
self.assertLess(ssim.eval(), 0)
def testInt(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim(img1, img2, 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(
ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001)
class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
"""Tests for MS-SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_msssim = np.asarray([[1.000000, 0.091016, 0.091025],
[0.091016, 1.000000, 0.999567],
[0.091025, 0.999567, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = sess.run([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.test_session(use_gpu=True) as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against MS-SSIM computed with Matlab implementation.
For color images, MS-SSIM scores are averaged over color channels.
"""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3)]
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
msssim = image_ops.ssim_multiscale(*ph, max_val=1.0)
with self.test_session(use_gpu=True):
scores = [msssim.eval(dict(zip(ph, t)))
for t in itertools.combinations_with_replacement(img, 2)]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testUnweightedIsDifferentiable(self):
img = self._LoadTestImages()
ph = [array_ops.placeholder(dtype=dtypes.float32) for _ in range(2)]
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
scaled_ph = [x * scalar for x in ph]
msssim = image_ops.ssim_multiscale(*scaled_ph, max_val=1.0,
power_factors=(1, 1, 1, 1, 1))
grads = gradients.gradients(msssim, scalar)
with self.test_session(use_gpu=True) as sess:
np_grads = sess.run(grads, feed_dict={ph[0]: img[0], ph[1]: img[1]})
self.assertTrue(np.isfinite(np_grads).all())
def testBatch(self):
"""Tests MS-SSIM computed in batch."""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
msssim = image_ops.ssim_multiscale(constant_op.constant(img1),
constant_op.constant(img2), 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(msssim), 1e-4)
def testBroadcast(self):
"""Tests MS-SSIM broadcasting."""
img = self._LoadTestImages()[:2]
expected = self._msssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
score_tensor = image_ops.ssim_multiscale(img1, img2, 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)
def testRange(self):
"""Tests against low MS-SSIM score.
MS-SSIM is a geometric mean of SSIM and CS scores of various scales.
    If any of the values is negative so that the geometric mean is not
well-defined, then treat the MS-SSIM score as zero.
"""
with self.test_session(use_gpu=True) as sess:
img1 = self._LoadTestImage(sess, "checkerboard1.png")
img2 = self._LoadTestImage(sess, "checkerboard3.png")
images = [img1, img2, np.zeros_like(img1),
np.full_like(img1, fill_value=255)]
images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
msssim_ops = [image_ops.ssim_multiscale(x, y, 1.0)
for x, y in itertools.combinations(images, 2)]
msssim = sess.run(msssim_ops)
msssim = np.squeeze(msssim)
self.assertTrue(np.all(msssim >= 0.0))
self.assertTrue(np.all(msssim <= 1.0))
def testInt(self):
img1 = self._RandomImage((1, 180, 240, 3), 255)
img2 = self._RandomImage((1, 180, 240, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim_multiscale(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim_multiscale(img1, img2, 1.0)
with self.test_session(use_gpu=True):
self.assertAllClose(
ssim_uint8.eval(), self.evaluate(ssim_float32), atol=0.001)
class ImageGradientsTest(test_util.TensorFlowTestCase):
def testImageGradients(self):
shape = [1, 2, 4, 1]
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
img = array_ops.reshape(img, shape)
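    # image_gradients computes forward differences, dy[i, j] = img[i+1, j] -
    # img[i, j] and dx[i, j] = img[i, j+1] - img[i, j], padding the last row
    # of dy and the last column of dx with zeros so each output keeps the
    # input shape.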
expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)
expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)
dy, dx = image_ops.image_gradients(img)
with self.cached_session():
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsMultiChannelBatch(self):
batch = [[[[1, 2], [2, 5], [3, 3]],
[[8, 4], [5, 1], [9, 8]]],
[[[5, 3], [7, 9], [1, 6]],
[[1, 2], [6, 3], [6, 3]]]]
expected_dy = [[[[7, 2], [3, -4], [6, 5]],
[[0, 0], [0, 0], [0, 0]]],
[[[-4, -1], [-1, -6], [5, -3]],
[[0, 0], [0, 0], [0, 0]]]]
expected_dx = [[[[1, 3], [1, -2], [0, 0]],
[[-3, -3], [4, 7], [0, 0]]],
[[[2, 6], [-6, -3], [0, 0]],
[[5, 1], [0, 0], [0, 0]]]]
batch = constant_op.constant(batch)
assert batch.get_shape().as_list() == [2, 2, 3, 2]
dy, dx = image_ops.image_gradients(batch)
with self.test_session(use_gpu=True):
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsBadShape(self):
# [2 x 4] image but missing batch and depth dimensions.
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
with self.assertRaises(ValueError):
image_ops.image_gradients(img)
class SobelEdgesTest(test_util.TensorFlowTestCase):
def testSobelEdges1x2x3x1(self):
img = constant_op.constant([[1, 3, 6], [4, 1, 5]],
dtype=dtypes.float32, shape=[1, 2, 3, 1])
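    # sobel_edges returns a tensor shaped [batch, height, width, channels, 2],
    # with the vertical (dy) and horizontal (dx) filter responses stacked
    # along the last dimension.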
expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
[[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
sobel = image_ops.sobel_edges(img)
with self.test_session(use_gpu=True):
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected, actual_sobel)
def testSobelEdges5x3x4x2(self):
batch_size = 5
plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],
[1, 3, 4, 1])
two_channel = np.concatenate([plane, plane], axis=3)
batch = np.concatenate([two_channel] * batch_size, axis=0)
img = constant_op.constant(batch, dtype=dtypes.float32,
shape=[batch_size, 3, 4, 2])
expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],
[[6, 0], [0, 6], [-6, 10], [-6, 0]],
[[0, 0], [0, 0], [0, 10], [0, 0]]],
[1, 3, 4, 1, 2])
expected_two_channel = np.concatenate(
[expected_plane, expected_plane], axis=3)
expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0)
sobel = image_ops.sobel_edges(img)
with self.test_session(use_gpu=True):
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected_batch, actual_sobel)
class DecodeImageTest(test_util.TensorFlowTestCase):
def testJpegUint16(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.uint16)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testPngUint16(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testGifUint16(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.uint16)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpUint16(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.uint16)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testJpegFloat32(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.float32)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testPngFloat32(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testGifFloat32(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.float32)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpFloat32(self):
with self.test_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.float32)
image0, image1 = sess.run([image0, image1])
self.assertAllEqual(image0, image1)
if __name__ == "__main__":
googletest.main()
| brchiu/tensorflow | tensorflow/python/ops/image_ops_test.py | Python | apache-2.0 | 165,524 | 0.007812 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import itertools
from dashboard.pinpoint.models import change as change_module
from dashboard.pinpoint.models import evaluators
from dashboard.pinpoint.models.tasks import find_isolate
from dashboard.pinpoint.models.tasks import performance_bisection
from dashboard.pinpoint.models.tasks import read_value
from dashboard.pinpoint.models.tasks import run_test
class Serializer(evaluators.DispatchByTaskType):
"""Serializes a task graph associated with a job.
This Serializer follows the same API contract of an Evaluator, which applies
specific transformations based on the type of a task in the graph.
The end state of the context argument is a mapping with the following schema:
{
'comparison_mode': <string>
'metric': <string>
'quests': [<string>]
'state': [
{
'attempts': [
{
'executions': [
{
'completed': <boolean>
'exception': <string>
'details': [
{
'key': <string>
'value': <string>
'url': <string>
}
]
}
]
}
]
'change': { ... }
'comparisons': {
'next': <string|None>
'prev': <string|None>
}
'result_values': [
<float>
]
}
]
}
NOTE: The 'quests' and 'executions' in the schema are legacy names, which
  refer to the previous quest abstractions from which the tasks and evaluators
  are derived. We keep the names in the schema to ensure that we are
backwards-compatible with what the consumers of the data expect (i.e. the Web
UI).
"""
def __init__(self):
super(Serializer, self).__init__({
'find_isolate':
evaluators.SequenceEvaluator(
[find_isolate.Serializer(), TaskTransformer]),
'run_test':
evaluators.SequenceEvaluator(
[run_test.Serializer(), TaskTransformer]),
'read_value':
evaluators.SequenceEvaluator(
[read_value.Serializer(), TaskTransformer]),
'find_culprit':
evaluators.SequenceEvaluator(
[performance_bisection.Serializer(), AnalysisTransformer]),
})
def __call__(self, task, event, context):
# First we delegate to the task-specific serializers, and have the
# domain-aware transformers canonicalise the data in the context. We
# then do a dictionary merge following a simple protocol for editing a
# single context. This way the transformers can output a canonical set
# of transformations to build up the (global) context.
local_context = {}
super(Serializer, self).__call__(task, event, local_context)
# What we expect to see in the local context is data in the following
# form:
#
# {
# # The 'state' key is required to identify to which change and which
# # state we should be performing the actions.
# 'state': {
# 'change': {...}
# 'quest': <string>
#
# # In the quest-based system, we end up with different "execution"
# # details, which come in "quest" order. In the task-based
    #     # evaluation model, we use the 'index' in the 'add_details'
# # sub-object to identify the index in the details.
# 'add_execution': {
# 'add_details': {
# 'index': <int>
# ...
# }
# ...
# }
#
# # This allows us to accumulate the resulting values we encounter
# # associated with the change.
# 'append_result_values': [<float>]
#
# # This allows us to set the comparison result for this change in
# # context of other changes.
# 'set_comparison': {
# 'next': <string|None>,
# 'prev': <string|None>,
# }
# }
#
# # If we see the 'order_changes' key in the local context, then
# # that means we can sort the states according to the changes as they
# # appear in the embedded 'changes' list.
# 'order_changes': {
# 'changes': [..]
# }
#
# # If we see the 'set_parameters' key in the local context, then
# # we can set the overall parameters we're looking to compare and
# # convey in the results.
# 'set_parameters': {
# 'comparison_mode': <string>
# 'metric': <string>
# }
# }
#
# At this point we process the context to update the global context
# following the protocol defined above.
if 'state' in local_context:
modification = local_context['state']
states = context.setdefault('state', [])
quests = context.setdefault('quests', [])
# We need to find the existing state which matches the quest and the
# change. If we don't find one, we create the first state entry for that.
state_index = None
change = modification.get('change')
for index, state in enumerate(states):
if state.get('change') == change:
state_index = index
break
if state_index is None:
states.append({'attempts': [{'executions': []}], 'change': change})
state_index = len(states) - 1
quest = modification.get('quest')
try:
quest_index = quests.index(quest)
except ValueError:
quests.append(quest)
quest_index = len(quests) - 1
add_execution = modification.get('add_execution')
append_result_values = modification.get('append_result_values')
attempt_index = modification.get('index', 0)
state = states[state_index]
if add_execution:
attempts = state['attempts']
while len(attempts) < attempt_index + 1:
attempts.append({'executions': []})
executions = state['attempts'][attempt_index]['executions']
while len(executions) < quest_index + 1:
executions.append(None)
executions[quest_index] = dict(add_execution)
if append_result_values:
state.setdefault('result_values', []).extend(append_result_values)
if 'order_changes' in local_context:
# Here, we'll sort the states according to their order of appearance in
# the 'order_changes' list.
states = context.get('state', [])
if states:
state_changes = {
change_module.ReconstituteChange(state.get('change'))
for state in states
}
order_changes = local_context.get('order_changes', {})
all_changes = order_changes.get('changes', [])
comparisons = order_changes.get('comparisons', [])
result_values = order_changes.get('result_values', [])
change_index = {
change: index for index, change in enumerate(
known_change for known_change in all_changes
if known_change in state_changes)
}
ordered_states = [None] * len(states)
for state in states:
index = change_index.get(
change_module.ReconstituteChange(state.get('change')))
if index is not None:
ordered_states[index] = state
# Merge in the comparisons as they appear for the ordered_states.
for state, comparison, result in itertools.izip_longest(
ordered_states, comparisons or [], result_values or []):
if state is None:
continue
if comparison is not None:
state['comparisons'] = comparison
state['result_values'] = result or []
context['state'] = ordered_states
context['difference_count'] = len(order_changes.get('culprits', []))
# At this point set the default comparisons between two adjacent states
# which don't have an associated comparison yet to 'pending'.
states = context.get('state', [])
for index, state in enumerate(states):
comparisons = state.get('comparisons')
if comparisons is None:
state['comparisons'] = {
'prev': None if index == 0 else 'pending',
'next': None if index + 1 == len(states) else 'pending',
}
if 'set_parameters' in local_context:
modification = local_context.get('set_parameters')
context['comparison_mode'] = modification.get('comparison_mode')
context['metric'] = modification.get('metric')
TASK_TYPE_QUEST_MAPPING = {
'find_isolate': 'Build',
'run_test': 'Test',
'read_value': 'Get results',
}
def TaskTransformer(task, _, context):
"""Takes the form:
{
<task id> : {
...
}
}
And turns it into:
{
'state': {
'change': {...}
'quest': <string>
'index': <int>
'add_execution': {
...
}
}
}
"""
if not context:
return None
input_data = context.get(task.id)
if not input_data:
return None
result = {
'state': {
'change': task.payload.get('change'),
'quest': TASK_TYPE_QUEST_MAPPING.get(task.task_type),
'index': task.payload.get('index', 0),
'add_execution': input_data,
}
}
context.clear()
context.update(result)
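# Example (illustrative sketch with hypothetical values): for a 'find_isolate'
# task whose accumulator entry holds its execution details, TaskTransformer
# rewrites the accumulator in place.
#
#   context = {'task_1': {'completed': True, 'details': []}}
#   # With task.id == 'task_1', task.task_type == 'find_isolate' and
#   # task.payload == {'change': {'commit': 'abc123'}}, the call
#   # TaskTransformer(task, None, context) leaves:
#   # context == {'state': {'change': {'commit': 'abc123'},
#   #                       'quest': 'Build',
#   #                       'index': 0,
#   #                       'add_execution': {'completed': True, 'details': []}}}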
def AnalysisTransformer(task, _, context):
"""Takes the form:
{
<task id> : {
...
}
}
And turns it into:
{
'set_parameters': {
'comparison_mode': ...
'metric': ...
}
'order_changes': [
<change>, ...
]
}
"""
if not context:
return None
task_data = context.get(task.id)
if not task_data:
return None
result = {
'set_parameters': {
'comparison_mode': task_data.get('comparison_mode'),
'metric': task_data.get('metric'),
},
'order_changes': {
'changes': task_data.get('changes', []),
'comparisons': task_data.get('comparisons', []),
'culprits': task_data.get('culprits', []),
'result_values': task_data.get('result_values', []),
}
}
context.clear()
context.update(result)
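# Example (illustrative sketch): AnalysisTransformer performs the analogous
# rewrite for the analysis task, e.g. an accumulator entry such as
#   {'comparison_mode': 'performance', 'metric': 'some_metric',
#    'changes': [...], 'comparisons': [...], 'culprits': [], 'result_values': []}
# is replaced by the {'set_parameters': ..., 'order_changes': ...} structure
# described in the docstring above.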
| endlessm/chromium-browser | third_party/catapult/dashboard/dashboard/pinpoint/models/evaluators/job_serializer.py | Python | bsd-3-clause | 10,578 | 0.005105 |
import re
from tkinter import *
import tkinter.messagebox as tkMessageBox
from idlelib.editor import EditorWindow
from idlelib import iomenu
class OutputWindow(EditorWindow):
"""An editor window that can serve as an output file.
Also the future base class for the Python shell window.
This class has no input facilities.
"""
def __init__(self, *args):
EditorWindow.__init__(self, *args)
self.text.bind("<<goto-file-line>>", self.goto_file_line)
# Customize EditorWindow
def ispythonsource(self, filename):
# No colorization needed
return 0
def short_title(self):
return "Output"
def maybesave(self):
# Override base class method -- don't ask any questions
if self.get_saved():
return "yes"
else:
return "no"
# Act as output file
def write(self, s, tags=(), mark="insert"):
        if isinstance(s, bytes):
s = s.decode(iomenu.encoding, "replace")
self.text.insert(mark, s, tags)
self.text.see(mark)
self.text.update()
return len(s)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
pass
# Our own right-button menu
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Go to file/line", "<<goto-file-line>>", None),
]
file_line_pats = [
# order of patterns matters
r'file "([^"]*)", line (\d+)',
r'([^\s]+)\((\d+)\)',
r'^(\s*\S.*?):\s*(\d+):', # Win filename, maybe starting with spaces
r'([^\s]+):\s*(\d+):', # filename or path, ltrim
r'^\s*(\S.*?):\s*(\d+):', # Win abs path with embedded spaces, ltrim
]
file_line_progs = None
def goto_file_line(self, event=None):
if self.file_line_progs is None:
l = []
for pat in self.file_line_pats:
l.append(re.compile(pat, re.IGNORECASE))
self.file_line_progs = l
# x, y = self.event.x, self.event.y
# self.text.mark_set("insert", "@%d,%d" % (x, y))
line = self.text.get("insert linestart", "insert lineend")
result = self._file_line_helper(line)
if not result:
# Try the previous line. This is handy e.g. in tracebacks,
# where you tend to right-click on the displayed source line
line = self.text.get("insert -1line linestart",
"insert -1line lineend")
result = self._file_line_helper(line)
if not result:
tkMessageBox.showerror(
"No special line",
"The line you point at doesn't look like "
"a valid file name followed by a line number.",
parent=self.text)
return
filename, lineno = result
edit = self.flist.open(filename)
edit.gotoline(lineno)
def _file_line_helper(self, line):
for prog in self.file_line_progs:
match = prog.search(line)
if match:
filename, lineno = match.group(1, 2)
try:
f = open(filename, "r")
f.close()
break
except OSError:
continue
else:
return None
try:
return filename, int(lineno)
except TypeError:
return None
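# Example (illustrative sketch, hypothetical file names): because write(),
# writelines() and flush() follow the file protocol, an OutputWindow can stand
# in for an output stream, assuming an existing idlelib FileList `flist`:
#
#   owin = OutputWindow(flist)
#   print("hello from IDLE", file=owin)       # text lands in the Text widget
#   owin.write('File "spam.py", line 12\n')   # right-click -> "Go to file/line"
#
# The file_line_pats above match lines such as 'File "spam.py", line 12' or
# 'spam.py:12: error: ...'.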
# These classes are currently not used but might come in handy
class OnDemandOutputWindow:
tagdefs = {
# XXX Should use IdlePrefs.ColorPrefs
"stdout": {"foreground": "blue"},
"stderr": {"foreground": "#007700"},
}
def __init__(self, flist):
self.flist = flist
self.owin = None
def write(self, s, tags, mark):
if not self.owin:
self.setup()
self.owin.write(s, tags, mark)
def setup(self):
self.owin = owin = OutputWindow(self.flist)
text = owin.text
for tag, cnf in self.tagdefs.items():
if cnf:
text.tag_configure(tag, **cnf)
text.tag_raise('sel')
self.write = self.owin.write
| yotchang4s/cafebabepy | src/main/python/idlelib/outwin.py | Python | bsd-3-clause | 4,385 | 0.000456 |
import lxml
from utils import State
from .people import NCPersonScraper
from .bills import NCBillScraper
# from .committees import NCCommitteeScraper
class NorthCarolina(State):
scrapers = {
"people": NCPersonScraper,
# 'committees': NCCommitteeScraper,
"bills": NCBillScraper,
}
legislative_sessions = [
{
"_scraped_name": "1985-1986 Session",
"classification": "primary",
"identifier": "1985",
"name": "1985-1986 Session",
"start_date": "1985-02-05",
"end_date": "1986-07-18",
},
{
"_scraped_name": "1986 Special Session",
"classification": "special",
"identifier": "1985E1",
"name": "1986 Special Session",
"start_date": "1986-02-18",
"end_date": "1986-06-05",
},
{
"_scraped_name": "1987-1988 Session",
"classification": "primary",
"identifier": "1987",
"name": "1987-1988 Session",
"start_date": "1987-02-09",
"end_date": "1988-07-12",
},
{
"_scraped_name": "1989-1990 Session",
"classification": "primary",
"identifier": "1989",
"name": "1989-1990 Session",
"start_date": "1989-01-11",
"end_date": "1990-07-28",
},
{
"_scraped_name": "1989 Special Session",
"classification": "special",
"identifier": "1989E1",
"name": "1989 Extra Session",
"start_date": "1989-12-07",
"end_date": "1989-12-07",
},
{
"_scraped_name": "1990 Special Session",
"classification": "special",
"identifier": "1989E2",
"name": "1990 Extra Session",
"start_date": "1990-03-06",
"end_date": "1990-03-06",
},
{
"_scraped_name": "1991-1992 Session",
"classification": "primary",
"identifier": "1991",
"name": "1991-1992 Session",
"start_date": "1991-01-30",
"end_date": "1992-07-25",
},
{
"_scraped_name": "1991 Special Session",
"classification": "special",
"identifier": "1991E1",
"name": "1991 Special Session",
"start_date": "1991-12-30",
"end_date": "1992-02-03",
},
{
"_scraped_name": "1993-1994 Session",
"classification": "primary",
"identifier": "1993",
"name": "1993-1994 Session",
"start_date": "1993-01-27",
"end_date": "1994-07-17",
},
{
"_scraped_name": "1994 Special Session",
"classification": "special",
"identifier": "1993E1",
"name": "1994 Special Session",
"start_date": "1994-02-08",
"end_date": "1994-03-26",
},
{
"_scraped_name": "1995-1996 Session",
"classification": "primary",
"identifier": "1995",
"name": "1995-1996 Session",
"start_date": "1995-01-25",
"end_date": "1996-06-21",
},
{
"_scraped_name": "1996 1st Special Session",
"classification": "special",
"identifier": "1995E1",
"name": "1996 Special Session 1",
"start_date": "1996-02-21",
"end_date": "1996-02-21",
},
{
"_scraped_name": "1996 2nd Special Session",
"classification": "special",
"identifier": "1995E2",
"name": "1996 Special Session 2",
"start_date": "1996-07-08",
"end_date": "1996-08-03",
},
{
"_scraped_name": "1997-1998 Session",
"classification": "primary",
"identifier": "1997",
"name": "1997-1998 Session",
"start_date": "1997-01-29",
"end_date": "1998-10-29",
},
{
"_scraped_name": "1998 Special Session",
"classification": "special",
"identifier": "1997E1",
"name": "1998 Special Session",
"start_date": "1998-03-24",
"end_date": "1998-05-11",
},
{
"_scraped_name": "1999-2000 Session",
"classification": "primary",
"identifier": "1999",
"name": "1999-2000 Session",
"start_date": "1999-01-27",
"end_date": "2000-07-13",
},
{
"_scraped_name": "1999 Special Session",
"classification": "special",
"identifier": "1999E1",
"name": "1999 Special Session",
"start_date": "1999-12-15",
"end_date": "1999-12-16",
},
{
"_scraped_name": "2000 Special Session",
"classification": "special",
"identifier": "1999E2",
"name": "2000 Special Session",
"start_date": "2000-04-05",
"end_date": "2000-04-05",
},
{
"_scraped_name": "2001-2002 Session",
"classification": "primary",
"identifier": "2001",
"name": "2001-2002 Session",
"start_date": "2001-01-24",
"end_date": "2002-11-13",
},
{
"_scraped_name": "2002 Extra Session",
"classification": "special",
"identifier": "2001E1",
"name": "2002 Extra Session on Redistricting",
"start_date": "2002-05-14",
"end_date": "2002-11-26",
},
{
"_scraped_name": "2003-2004 Session",
"classification": "primary",
"identifier": "2003",
"name": "2003-2004 Session",
"start_date": "2003-01-29",
"end_date": "2004-07-18",
},
{
"_scraped_name": "2003 Extra Session",
"classification": "special",
"identifier": "2003E1",
"name": "2003 Extra Session on Redistricting",
"start_date": "2003-11-24",
"end_date": "2003-11-25",
},
{
"_scraped_name": "2003 Extra Session on Economic Development Issues",
"classification": "special",
"identifier": "2003E2",
"name": "2003 Extra Session on Economic Development Issues",
"start_date": "2003-12-09",
"end_date": "2003-12-10",
},
{
"_scraped_name": "2004 Extra Session",
"classification": "special",
"identifier": "2003E3",
"name": "2004 Extra Session on Economic Development Issues",
"start_date": "2004-11-04",
"end_date": "2004-11-04",
},
{
"_scraped_name": "2005-2006 Session",
"classification": "primary",
"identifier": "2005",
"name": "2005-2006 Session",
"start_date": "2005-01-26",
"end_date": "2006-07-28",
},
{
"_scraped_name": "2007-2008 Session",
"classification": "primary",
"identifier": "2007",
"name": "2007-2008 Session",
"start_date": "2007-01-24",
"end_date": "2008-07-18",
},
{
"_scraped_name": "2007 Extra Session",
"classification": "special",
"identifier": "2007E1",
"name": "2007 Extra Session",
"start_date": "2007-09-10",
"end_date": "2007-09-11",
},
{
"_scraped_name": "2008 Extra Session",
"classification": "special",
"identifier": "2007E2",
"name": "2008 Extra Session",
"start_date": "2008-03-20",
"end_date": "2008-03-20",
},
{
"_scraped_name": "2009-2010 Session",
"classification": "primary",
"identifier": "2009",
"name": "2009-2010 Session",
"start_date": "2009-01-28",
"end_date": "2010-07-10",
},
{
"_scraped_name": "2011-2012 Session",
"classification": "primary",
"identifier": "2011",
"name": "2011-2012 Session",
"start_date": "2011-01-26",
"end_date": "2012-07-03",
},
{
"_scraped_name": "2013-2014 Session",
"classification": "primary",
"identifier": "2013",
"name": "2013-2014 Session",
"start_date": "2013-01-30",
"end_date": "2014-08-20",
},
{
"_scraped_name": "2015-2016 Session",
"classification": "primary",
"identifier": "2015",
"name": "2015-2016 Session",
"start_date": "2015-01-30",
"end_date": "2016-07-01",
},
{
"_scraped_name": "2016 First Extra Session",
"classification": "special",
"identifier": "2015E1",
"name": "2016 Extra Session 1",
"start_date": "2016-02-18",
"end_date": "2016-02-23",
},
{
"_scraped_name": "2016 Second Extra Session",
"classification": "special",
"identifier": "2015E2",
"name": "2016 Extra Session 2",
"start_date": "2016-03-23",
"end_date": "2016-03-23",
},
{
"_scraped_name": "2016 Third Extra Session",
"classification": "special",
"identifier": "2015E3",
"name": "2016 Extra Session 3",
"start_date": "2016-12-13",
"end_date": "2016-12-15",
},
{
"_scraped_name": "2016 Fourth Extra Session",
"classification": "special",
"identifier": "2015E4",
"name": "2016 Extra Session 4",
"start_date": "2016-12-14",
"end_date": "2016-12-19",
},
{
"_scraped_name": "2016 Fifth Extra Session",
"classification": "special",
"identifier": "2015E5",
"name": "2016 Extra Session 5",
"start_date": "2016-12-21",
"end_date": "2016-12-21",
},
{
"_scraped_name": "2017-2018 Session",
"classification": "primary",
"identifier": "2017",
"name": "2017-2018 Session",
"start_date": "2017-01-11",
"end_date": "2018-08-01",
},
{
"_scraped_name": "2018 First Extra Session",
"classification": "special",
"identifier": "2017E1",
"name": "2018 Extra Session 1",
"start_date": "2018-07-24",
"end_date": "2018-08-04",
},
{
"_scraped_name": "2018 Second Extra Session",
"classification": "special",
"identifier": "2017E2",
"name": "2018 Extra Session 2",
"start_date": "2018-08-24",
"end_date": "2018-08-27",
},
{
"_scraped_name": "2018 Third Extra Session",
"classification": "special",
"identifier": "2017E3",
"name": "2018 Extra Session 3",
"start_date": "2018-10-02",
"end_date": "2018-10-16",
},
{
"_scraped_name": "2019-2020 Session",
"classification": "primary",
"identifier": "2019",
"name": "2019-2020 Session",
"start_date": "2019-01-03",
"end_date": "2020-08-01",
},
]
ignored_scraped_sessions = []
def get_session_list(self):
from utils.lxmlize import url_xpath
# This is the URL that populates the session `<select>` in the
# state homepage header navigation
return url_xpath(
"https://webservices.ncleg.net/sessionselectlist/false", "//option/text()"
)
def extract_text(self, doc, data):
doc = lxml.html.fromstring(data)
text = " ".join(
[x.text_content() for x in doc.xpath('//p[starts-with(@class, "a")]')]
)
return text
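# Example (illustrative sketch with hypothetical markup): extract_text() keeps
# only paragraphs whose class attribute starts with "a", so for
#   data = b'<p class="a3">AN ACT ...</p><p class="x">boilerplate</p>'
# it would return 'AN ACT ...'.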
| sunlightlabs/openstates | scrapers/nc/__init__.py | Python | gpl-3.0 | 12,386 | 0.000242 |
import re
import urllib
import time
import sys
import types
import datetime
import commands
import xml.dom.minidom
from config import panda_config
from pandalogger.LogWrapper import LogWrapper
from pandalogger.PandaLogger import PandaLogger
_log = PandaLogger().getLogger('broker_util')
# curl class
class _Curl:
# constructor
def __init__(self,useProxy=False):
# path to curl
self.path = 'curl --user-agent "dqcurl" -m 180'
# verification of the host certificate
self.verifyHost = False
# use proxy
if useProxy and panda_config.httpProxy != '':
self.path = 'env http_proxy=%s %s' % (panda_config.httpProxy,self.path)
# GET method
def get(self,url,data={}):
# make command
com = '%s --silent --get' % self.path
if not self.verifyHost:
com += ' --insecure'
# data
for key,value in data.iteritems():
com += ' --data "%s"' % urllib.urlencode({key:value})
com += ' %s' % url
# execute
_log.debug(com)
ret = commands.getstatusoutput(com)
_log.debug(ret)
return ret
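# Example (illustrative sketch, hypothetical URL): _Curl.get() shells out to
# the curl binary instead of using an HTTP library, e.g.
#   _Curl().get('http://lrc.example.org/lrc/PoolFileCatalog', {'lfns': 'f1 f2'})
# builds and runs roughly
#   curl --user-agent "dqcurl" -m 180 --silent --get --insecure \
#        --data "lfns=f1+f2" http://lrc.example.org/lrc/PoolFileCatalog
# and returns the (exit status, stdout) pair from commands.getstatusoutput().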
# get default storage
def _getDefaultStorage(baseURL,sePath=None,seProdPath={}):
_log.debug('_getDefaultStorage (%s %s %s)' % (baseURL,sePath,seProdPath))
# use se+seprodpath when baseURL=''
if baseURL=='':
# get token
match = re.search('^token:([^:]+):',sePath)
if match == None:
_log.error("could not get token from %s" % sePath)
return ""
token = match.group(1)
# get corresponding path
if not seProdPath.has_key(token):
_log.error("could not find path for % in %s" % (token,seProdPath))
return ""
# set se+seprodpath
out = sePath+seProdPath[token]
# append /
if not out.endswith('/'):
out += '/'
_log.debug(out)
else:
# check port to set proxy
useProxy = False
if panda_config.httpProxy != '':
pMatch = re.search('http://[^:/]+:*(\d+)/',baseURL)
if pMatch == None:
# default port
useProxy = True
elif pMatch.group(1) == '80':
# standard port
useProxy = True
# instantiate curl
curl = _Curl(useProxy)
# get default storage
url = baseURL + 'storages/default'
status,out = curl.get(url)
_log.debug(out)
if status != 0:
_log.error("could not get default storage from %s:%s" % (baseURL,status))
return ""
# parse
match = re.search('^[^/]+://[^/]+(/.+)$',out)
if match == None:
_log.error("could not parse string : %s" % out)
return ""
return match.group(1)
# get PoolFileCatalog
def _getPoolFileCatalog(lfns,dq2url):
_log.debug('_getPoolFileCatalog')
# check port to set proxy
useProxy = False
if panda_config.httpProxy != '':
pMatch = re.search('http://[^:/]+:*(\d+)/',dq2url)
if pMatch == None:
# default port
useProxy = True
elif pMatch.group(1) == '80':
# standard port
useProxy = True
# instantiate curl
curl = _Curl(useProxy)
# get PoolFileCatalog
iLFN = 0
outXML =''
strLFNs = ''
if not dq2url.endswith('_'):
url = dq2url + '/lrc/PoolFileCatalog'
else:
# NDGF LRC
url = dq2url + 'lrc/PoolFileCatalog'
for lfn in lfns:
iLFN += 1
# make argument
strLFNs += '%s ' % lfn
if iLFN % 40 == 0 or iLFN == len(lfns):
# get PoolFileCatalog
strLFNs = strLFNs.rstrip()
data = {'lfns':strLFNs}
# avoid too long argument
strLFNs = ''
# execute
status,out = curl.get(url,data)
_log.debug(status)
# sleep
time.sleep(2)
if status != 0:
_log.error("_getPoolFileCatalog : %s %s %s" % (dq2url,status,out))
return status
if status != 0 or out.startswith('Error'):
continue
if not out.startswith('<?xml'):
continue
# append
outXML += out
# remove redundant trailer and header
th = \
"""
</POOLFILECATALOG><\?xml version="1.0" encoding="UTF-8" standalone="no" \?>
<!-- Edited By POOL -->
<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">
<POOLFILECATALOG>
"""
outXML = re.sub(th,'',outXML)
outXML = re.sub("""\s*<META name="fsize" type="string"/>""",'',outXML)
outXML = re.sub("""\s*<META name="md5sum" type="string"/>""",'',outXML)
outXML = re.sub("""\s*<META name="lastmodified" type="string"/>""",'',outXML)
outXML = re.sub("""\s*<META name="archival" type="string"/>""",'',outXML)
outXML = re.sub("""\s*<META name="permanent" type="string"/>""",'',outXML)
outXML = re.sub("""\s*<META name="adler32" type="string"/>""",'',outXML)
# return XML
return outXML
# get files from MySQL
def _getPFNFromMySQL(lfns,dq2url):
_log.debug('_getPFNFromMySQL')
import MySQLdb
comment = ' /* broker_util._getPFNFromMySQL */'
outStr = ''
# parse connection string
match = re.search('^mysql://([^:]+):([^@]+)@([^/:]+):(\d+)/(.+)$',dq2url)
if match == None:
return outStr
# parameters for DB connection
connStr = "mysql -h %s -u %s -p%s -P %s %s"
dbhost = match.group(3)
dbuser = match.group(1)
dbpswd = match.group(2)
dbport = int(match.group(4))
dbname = match.group(5)
connStr = "mysql -h %s -u %s -p%s -P %s %s" % (dbhost,dbuser,dbpswd,dbport,dbname)
try:
_log.debug(connStr)
# connect
dbConn = MySQLdb.connect(db=dbname,host=dbhost,port=dbport,user=dbuser,passwd=dbpswd)
# make cursor
dbCur = dbConn.cursor()
# query files
iLFN = 0
strLFNs = ''
for lfn in lfns:
iLFN += 1
# make argument
strLFNs += " lfname='%s' OR " % lfn
if iLFN % 40 == 0 or iLFN == len(lfns):
# get PoolFileCatalog
strLFNs = strLFNs[:-3]
# construct SQL
sql = 'SELECT lfname FROM t_lfn WHERE %s' % strLFNs
# reset
strLFNs = ''
# execute
_log.debug(sql)
dbCur.execute(sql+comment)
res = dbCur.fetchall()
_log.debug(res)
# append LFNs
if res != None and len(res) != 0:
for resLFN in res:
outStr += '%s ' % resLFN
# close cursor
dbCur.close()
# close connection
dbConn.close()
except:
type, value, traceBack = sys.exc_info()
_log.error("_getPFNFromMySQL : %s %s %s" % (dq2url,type,value))
return -1
# return
return outStr
# get files from LFC
def _getPFNFromLFC(lfns,dq2url,guids,storageName,scopeList=[],tmpLog=None):
if tmpLog == None:
        tmpLog = LogWrapper(_log,None)
tmpLog.debug('_getPFNFromLFC %s %s / %s LFNs:%s %s' % (dq2url,str(storageName),
len(lfns),str(lfns[:3]),str(scopeList[:3])))
outStr = ''
# check paramter
if guids == [] or storageName == [] or (len(lfns) != len(guids)):
tmpLog.debug('_getPFNFromLFC done with empty list')
return outStr
# check scopeList
if not scopeList in [None,[]] and len(lfns) != len(scopeList):
tmpLog.warning('_getPFNFromLFC wrong scopeList %s %s %s %s' % (dq2url,str(storageName),
str(lfns),str(scopeList)))
tmpLog.error('_getPFNFromLFC failed')
return outStr
# loop over all LFNs
iLFN = 0
nLFN = 1000
strFiles = ''
outStr = ''
for iLFN in range(len(lfns)):
if scopeList != []:
strFiles += '%s %s %s\n' % (lfns[iLFN],guids[iLFN],scopeList[iLFN])
else:
strFiles += '%s %s\n' % (lfns[iLFN],guids[iLFN])
# bulk operation
if (iLFN+1) % nLFN == 0 or (iLFN+1) >= len(lfns):
# write to file
inFileName = '%s/lfcin.%s' % (panda_config.logdir,commands.getoutput('uuidgen'))
ifile = open(inFileName,'w')
ifile.write(strFiles)
ifile.close()
# construct commands
strStorage = ''
for storage in storageName:
strStorage += '%s,' % storage
strStorage = strStorage[:-1]
com = 'cd %s > /dev/null 2>&1; export HOME=%s; ' % (panda_config.home_dir_cwd,panda_config.home_dir_cwd)
com+= 'unset LD_LIBRARY_PATH; unset PYTHONPATH; export PATH=/usr/local/bin:/bin:/usr/bin; '
com+= 'source %s; %s/python -Wignore %s/LFCclient.py -f %s -l %s -s %s' % \
(panda_config.glite_source,panda_config.native_python32,panda_config.lfcClient_dir,
inFileName,dq2url,strStorage)
tmpLog.debug(com)
# exeute
status,output = commands.getstatusoutput(com)
tmpLog.debug(status)
if status == 0:
outStr += output
else:
tmpLog.error("_getPFNFromLFC : %s %s %s" % (dq2url,status,output))
# send message to logger
try:
# make message
message = 'LFC access : %s %s %s' % (dq2url,status,output)
# get logger
_pandaLogger = PandaLogger()
_pandaLogger.lock()
_pandaLogger.setParams({'Type':'broker_util'})
logger = _pandaLogger.getHttpLogger(panda_config.loggername)
# add message
logger.error(message)
# release HTTP handler
_pandaLogger.release()
except:
pass
tmpLog.error('_getPFNFromLFC failed')
return status
# reset
strFiles = ''
tmpLog.debug('_getPFNFromLFC done')
# return
return outStr
# get files from LRC
def getFilesFromLRC(files,url,guids=[],storageName=[],terminateWhenFailed=False,getPFN=False,
scopeList=[]):
tmpLog = LogWrapper(_log,None)
tmpLog.debug('getFilesFromLRC "%s" %s' % (url,str(storageName)))
# get PFC
outSTR = ''
if url.startswith('mysql://'):
# from MySQL
outSTR = _getPFNFromMySQL(files,url)
# get PFN
if getPFN:
outPFN = {}
# FIXME
tmpLog.debug('RetPFN:%s ' % str(outPFN))
return outPFN
elif url.startswith('http://'):
# from HTTP I/F
outSTR = _getPoolFileCatalog(files,url)
# get PFN
if getPFN:
outPFN = {}
try:
if not outSTR in ['',None]:
root = xml.dom.minidom.parseString(outSTR)
fileNodes = root.getElementsByTagName('File')
for file in fileNodes:
# get PFN and LFN nodes
physical = file.getElementsByTagName('physical')[0]
pfnNode = physical.getElementsByTagName('pfn')[0]
logical = file.getElementsByTagName('logical')[0]
lfnNode = logical.getElementsByTagName('lfn')[0]
# convert UTF8 to Raw
pfn = str(pfnNode.getAttribute('name'))
lfn = str(lfnNode.getAttribute('name'))
# assign
if not outPFN.has_key(lfn):
outPFN[lfn] = []
outPFN[lfn].append(pfn)
except:
type, value, traceBack = sys.exc_info()
tmpLog.error(outSTR)
tmpLog.error("could not parse XML - %s %s" % (type, value))
tmpLog.debug('RetPFN:%s ' % str(outPFN))
return outPFN
elif url.startswith('lfc://') or url.startswith('rucio://'):
# from LFC
timeStart = datetime.datetime.utcnow()
outSTR = _getPFNFromLFC(files,url,guids,storageName,scopeList=scopeList,tmpLog=tmpLog)
regTime = datetime.datetime.utcnow() - timeStart
tmpLog.debug('file lookup for %s LFNs from %s took %s.%03d sec' % (len(files),url,regTime.seconds,
regTime.microseconds/1000))
# get PFN
if getPFN:
outPFN = {}
try:
if not outSTR in ['',None]:
tmpItems = outSTR.split('LFCRet :')
tmpItems.remove('')
# loop over all returns
for tmpItem in tmpItems:
exec "tmpLFNmap = %s" % tmpItem
for tmpLFN,tmpPFN in tmpLFNmap.iteritems():
outPFN[tmpLFN] = tmpPFN
except:
type, value, traceBack = sys.exc_info()
tmpLog.error(outSTR)
tmpLog.error("could not parse LFC ret - %s %s" % (type, value))
tmpLog.debug('RetPFN:%s files' % len(outPFN))
return outPFN
# check return
if not isinstance(outSTR,types.StringType):
if terminateWhenFailed:
return None
# set empty string
outSTR = ''
# collect OK Files
okFiles = []
for file in files:
if re.search(file,outSTR) != None:
okFiles.append(file)
tmpLog.debug('Ret:%s / %s files' % (str(okFiles[:3]),len(okFiles)))
return okFiles
# get # of files from LRC
def getNFilesFromLRC(files,url):
_log.debug('getNFilesFromLRC')
# get okFiles
okFiles = getFilesFromLRC(files,url)
nFiles = len(okFiles)
_log.debug('Ret:%s ' % nFiles)
return nFiles
# get list of missing LFNs from LRC
def getMissLFNsFromLRC(files,url,guids=[],storageName=[],scopeList=[]):
_log.debug('getMissLFNsFromLRC')
# get OF files
okFiles = getFilesFromLRC(files,url,guids,storageName,scopeList=scopeList)
# collect missing files
missFiles = []
for file in files:
if not file in okFiles:
missFiles.append(file)
_log.debug('Ret:%s ' % str(missFiles))
return missFiles
# get list of missing and tape LFNs
def getMissAndTapeLFNs(files,url,guids=[],storageName=[],scopeList=[],tapeSePath=[]):
# get OF files
okPFNs = getFilesFromLRC(files,url,guids,storageName,scopeList=scopeList,getPFN=True)
# collect missing files
missFiles = []
for file in files:
if not file in okPFNs:
missFiles.append(file)
# get tape files
tapeFiles = set()
for tmpLFN,tmpPFNs in okPFNs.iteritems():
isTape = False
for tmpPFN in tmpPFNs:
for sePath in tapeSePath:
if re.search(sePath,tmpPFN) != None:
isTape = True
break
if isTape:
break
if isTape:
tapeFiles.add(tmpLFN)
_log.debug('Ret:{0} {1}'.format(str(missFiles),str(tapeFiles)))
return missFiles,tapeFiles
# extract list of se hosts from schedconfig
def getSEfromSched(seStr):
tmpSE = []
if seStr != None:
for tmpSrcSiteSE in seStr.split(','):
# extract host
match = re.search('.+://([^:/]+):*\d*/*',tmpSrcSiteSE)
if match != None:
tmpSE.append(match.group(1))
# sort
tmpSE.sort()
# return
return tmpSE
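# Example (illustrative sketch, hypothetical endpoints): getSEfromSched()
# reduces a schedconfig 'se' string to the sorted list of SE hostnames, e.g.
#   getSEfromSched('token:ATLASDATADISK:srm://se01.example.org:8446/srm/managerv2?SFN=,'
#                  'token:ATLASSCRATCHDISK:srm://se02.example.org:8446/srm/managerv2?SFN=')
# returns ['se01.example.org', 'se02.example.org'].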
| RRCKI/panda-server | pandaserver/brokerage/broker_util.py | Python | apache-2.0 | 16,126 | 0.014015 |
"""Generate a schema wrapper from a schema"""
import copy
import os
import sys
import json
from os.path import abspath, join, dirname
import textwrap
from urllib import request
import m2r
# import schemapi from here
sys.path.insert(0, abspath(dirname(__file__)))
from schemapi import codegen
from schemapi.codegen import CodeSnippet
from schemapi.utils import get_valid_identifier, SchemaInfo, indent_arglist
class SchemaGenerator(codegen.SchemaGenerator):
def _process_description(self, description):
description = m2r.convert(description)
description = description.replace(m2r.prolog, '')
description = description.replace(":raw-html-m2r:", ":raw-html:")
description = description.replace(r'\ ,', ',')
description = description.replace(r'\ ', ' ')
return description.strip()
def schema_class(*args, **kwargs):
return SchemaGenerator(*args, **kwargs).schema_class()
SCHEMA_URL_TEMPLATE = ('https://vega.github.io/schema/'
'{library}/{version}.json')
SCHEMA_VERSION = {
'vega': {
'v2': 'v2.6.5',
'v3': 'v3.3.1'
},
'vega-lite': {
'v1': 'v1.3.1',
'v2': 'v2.4.3'
}
}
BASE_SCHEMA = """
class {basename}(SchemaBase):
@classmethod
def _default_wrapper_classes(cls):
return {basename}.__subclasses__()
"""
LOAD_SCHEMA = '''
import pkgutil
import json
def load_schema():
"""Load the json schema associated with this module's functions"""
return json.loads(pkgutil.get_data(__name__, '{schemafile}').decode('utf-8'))
'''
CHANNEL_MIXINS = """
class FieldChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
if self.shorthand is Undefined:
kwds = {}
elif isinstance(self.shorthand, (tuple, list)):
# If given a list of shorthands, then transform it to a list of classes
kwds = self._kwds.copy()
kwds.pop('shorthand')
return [self.__class__(shorthand, **kwds).to_dict()
for shorthand in self.shorthand]
elif isinstance(self.shorthand, six.string_types):
kwds = parse_shorthand(self.shorthand, data=context.get('data', None))
type_defined = self._kwds.get('type', Undefined) is not Undefined
if not (type_defined or 'type' in kwds):
if isinstance(context.get('data', None), pd.DataFrame):
raise ValueError("{0} encoding field is specified without a type; "
"the type cannot be inferred because it does not "
"match any column in the data.".format(self.shorthand))
else:
raise ValueError("{0} encoding field is specified without a type; "
"the type cannot be automacially inferred because "
"the data is not specified as a pandas.DataFrame."
"".format(self.shorthand))
else:
# shorthand is not a string; we pass the definition to field
if self.field is not Undefined:
raise ValueError("both shorthand and field specified in {0}"
"".format(self.__class__.__name__))
# field is a RepeatSpec or similar; cannot infer type
kwds = {'field': self.shorthand}
# set shorthand to Undefined, because it's not part of the schema
self.shorthand = Undefined
self._kwds.update({k: v for k, v in kwds.items()
if self._kwds.get(k, Undefined) is Undefined})
return super(FieldChannelMixin, self).to_dict(
validate=validate,
ignore=ignore,
context=context
)
class ValueChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
condition = getattr(self, 'condition', Undefined)
copy = self # don't copy unless we need to
if condition is not Undefined:
if isinstance(condition, core.SchemaBase):
pass
elif 'field' in condition and 'type' not in condition:
kwds = parse_shorthand(condition['field'], context.get('data', None))
copy = self.copy()
copy.condition.update(kwds)
return super(ValueChannelMixin, copy).to_dict(validate=validate,
ignore=ignore,
context=context)
"""
class FieldSchemaGenerator(SchemaGenerator):
schema_class_template = textwrap.dedent('''
class {classname}(FieldChannelMixin, core.{basename}):
"""{docstring}"""
_class_is_valid_at_instantiation = False
{init_code}
''')
class ValueSchemaGenerator(SchemaGenerator):
schema_class_template = textwrap.dedent('''
class {classname}(ValueChannelMixin, core.{basename}):
"""{docstring}"""
_class_is_valid_at_instantiation = False
{init_code}
''')
HEADER = """\
# -*- coding: utf-8 -*-
#
# The contents of this file are automatically written by
# tools/generate_schema_wrapper.py. Do not modify directly.
"""
def schema_url(library, version):
version = SCHEMA_VERSION[library][version]
return SCHEMA_URL_TEMPLATE.format(library=library, version=version)
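# Example: schema_url() resolves the pinned version before templating, e.g.
# schema_url('vega-lite', 'v2') returns
# 'https://vega.github.io/schema/vega-lite/v2.4.3.json'.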
def download_schemafile(library, version, schemapath):
url = schema_url(library, version)
filename = os.path.join(schemapath, '{library}-schema.json'.format(library=library))
request.urlretrieve(url, filename)
return filename
def copy_schemapi_util():
"""
Copy the schemapi utility and its test file into altair/utils/
"""
# copy the schemapi utility file
source_path = abspath(join(dirname(__file__), 'schemapi', 'schemapi.py'))
destination_path = abspath(join(dirname(__file__), '..', 'altair',
'utils', 'schemapi.py'))
print("Copying\n {0}\n -> {1}".format(source_path, destination_path))
with open(source_path, 'r', encoding='utf8') as source:
with open(destination_path, 'w', encoding='utf8') as dest:
dest.write(HEADER)
dest.writelines(source.readlines())
# Copy the schemapi test file
source_path = abspath(join(dirname(__file__), 'schemapi',
'tests', 'test_schemapi.py'))
destination_path = abspath(join(dirname(__file__), '..', 'altair',
'utils', 'tests', 'test_schemapi.py'))
print("Copying\n {0}\n -> {1}".format(source_path, destination_path))
with open(source_path, 'r', encoding='utf8') as source:
with open(destination_path, 'w', encoding='utf8') as dest:
dest.write(HEADER)
dest.writelines(source.readlines())
def generate_vegalite_schema_wrapper(schema_file):
"""Generate a schema wrapper at the given path."""
# TODO: generate simple tests for each wrapper
basename = 'VegaLiteSchema'
with open(schema_file, encoding='utf8') as f:
rootschema = json.load(f)
contents = [HEADER,
"from altair.utils.schemapi import SchemaBase, Undefined",
LOAD_SCHEMA.format(schemafile='vega-lite-schema.json')]
contents.append(BASE_SCHEMA.format(basename=basename))
contents.append(schema_class('Root', schema=rootschema, basename=basename,
schemarepr=CodeSnippet('load_schema()')))
for name in rootschema['definitions']:
defschema = {'$ref': '#/definitions/' + name}
defschema_repr = {'$ref': '#/definitions/' + name}
contents.append(schema_class(get_valid_identifier(name),
schema=defschema, schemarepr=defschema_repr,
rootschema=rootschema, basename=basename,
rootschemarepr=CodeSnippet("Root._schema")))
contents.append('') # end with newline
return '\n'.join(contents)
def generate_vega_schema_wrapper(schema_file):
"""Generate a schema wrapper at the given path."""
# TODO: generate simple tests for each wrapper
basename = 'VegaSchema'
with open(schema_file, encoding='utf8') as f:
rootschema = json.load(f)
contents = [HEADER,
"from altair.utils.schemapi import SchemaBase, Undefined",
LOAD_SCHEMA.format(schemafile='vega-schema.json')]
contents.append(BASE_SCHEMA.format(basename=basename))
contents.append(schema_class('Root', schema=rootschema, basename=basename,
schemarepr=CodeSnippet('load_schema()')))
for deflist in ['defs', 'refs']:
for name in rootschema[deflist]:
defschema = {'$ref': '#/{0}/{1}'.format(deflist, name)}
defschema_repr = {'$ref': '#/{0}/{1}'.format(deflist,name)}
contents.append(schema_class(get_valid_identifier(name),
schema=defschema, schemarepr=defschema_repr,
rootschema=rootschema, basename=basename,
rootschemarepr=CodeSnippet("Root._schema")))
contents.append('') # end with newline
return '\n'.join(contents)
def generate_vegalite_channel_wrappers(schemafile, imports=None,
encoding_def='Encoding'):
# TODO: generate __all__ for top of file
with open(schemafile, encoding='utf8') as f:
schema = json.load(f)
if imports is None:
imports = ["import six",
"from . import core",
"import pandas as pd",
"from altair.utils.schemapi import Undefined",
"from altair.utils import parse_shorthand"]
contents = [HEADER]
contents.extend(imports)
contents.append('')
contents.append(CHANNEL_MIXINS)
encoding = SchemaInfo(schema['definitions'][encoding_def],
rootschema=schema)
for prop, propschema in encoding.properties.items():
if propschema.is_reference():
definitions = [propschema.ref]
elif propschema.is_anyOf():
definitions = [s.ref for s in propschema.anyOf if s.is_reference()]
else:
raise ValueError("either $ref or anyOf expected")
for definition in definitions:
defschema = {'$ref': definition}
basename = definition.split('/')[-1]
classname = prop.title()
if 'Value' in basename:
Generator = ValueSchemaGenerator
classname += 'Value'
nodefault = ['value']
else:
Generator = FieldSchemaGenerator
nodefault = []
defschema = copy.deepcopy(schema['definitions'][basename])
defschema['properties']['shorthand'] = {'type': 'string',
'description': 'shorthand for field, aggregate, and type'}
defschema['required'] = ['shorthand']
gen = Generator(classname=classname, basename=basename,
schema=defschema, rootschema=schema,
nodefault=nodefault)
contents.append(gen.schema_class())
return '\n'.join(contents)
MARK_METHOD = '''
def mark_{mark}({def_arglist}):
"""Set the chart's mark to '{mark}'
For information on additional arguments, see ``alt.MarkDef``
"""
kwds = dict({dict_arglist})
copy = self.copy(deep=True, ignore=['data'])
if any(val is not Undefined for val in kwds.values()):
copy.mark = core.MarkDef(type="{mark}", **kwds)
else:
copy.mark = "{mark}"
return copy
'''
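# Example (illustrative sketch): for mark='bar' the template above expands to
# roughly the method below; the real argument list is derived from the MarkDef
# schema, so the keyword names shown here are placeholders.
#
#   def mark_bar(self, align=Undefined, color=Undefined, opacity=Undefined, **kwds):
#       """Set the chart's mark to 'bar' ..."""
#       kwds = dict(align=align, color=color, opacity=opacity, **kwds)
#       copy = self.copy(deep=True, ignore=['data'])
#       if any(val is not Undefined for val in kwds.values()):
#           copy.mark = core.MarkDef(type="bar", **kwds)
#       else:
#           copy.mark = "bar"
#       return copy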
def generate_vegalite_mark_mixin(schemafile, mark_enum='Mark',
mark_def='MarkDef'):
with open(schemafile, encoding='utf8') as f:
schema = json.load(f)
marks = schema['definitions'][mark_enum]['enum']
info = SchemaInfo({'$ref': '#/definitions/' + mark_def},
rootschema=schema)
# adapted from SchemaInfo.init_code
nonkeyword, required, kwds, invalid_kwds, additional = codegen._get_args(info)
required -= {'type'}
kwds -= {'type'}
def_args = ['self'] + ['{0}=Undefined'.format(p)
for p in (sorted(required) + sorted(kwds))]
dict_args = ['{0}={0}'.format(p)
for p in (sorted(required) + sorted(kwds))]
if additional or invalid_kwds:
def_args.append('**kwds')
dict_args.append('**kwds')
imports = ["from altair.utils.schemapi import Undefined",
"from . import core"]
code = ["class MarkMethodMixin(object):",
' """A mixin class that defines mark methods"""']
for mark in marks:
# TODO: only include args relevant to given type?
mark_method = MARK_METHOD.format(mark=mark,
def_arglist=indent_arglist(def_args, indent_level=10 + len(mark)),
dict_arglist=indent_arglist(dict_args, indent_level=16))
code.append('\n '.join(mark_method.splitlines()))
return imports, '\n'.join(code)
CONFIG_METHOD = """
@use_signature(core.{classname})
def {method}(self, *args, **kwargs):
copy = self.copy()
copy.config = core.{classname}(*args, **kwargs)
return copy
"""
CONFIG_PROP_METHOD = """
@use_signature(core.{classname})
def configure_{prop}(self, *args, **kwargs):
copy = self.copy(deep=False)
if copy.config is Undefined:
copy.config = core.Config()
else:
copy.config = copy.config.copy(deep=False)
copy.config["{prop}"] = core.{classname}(*args, **kwargs)
return copy
"""
def generate_vegalite_config_mixin(schemafile):
imports = ["from . import core",
"from altair.utils import use_signature"]
code = ["class ConfigMethodMixin(object):",
' """A mixin class that defines config methods"""']
with open(schemafile, encoding='utf8') as f:
schema = json.load(f)
info = SchemaInfo({'$ref': '#/definitions/Config'},
rootschema=schema)
# configure() method
method = CONFIG_METHOD.format(classname='Config', method='configure')
code.append('\n '.join(method.splitlines()))
# configure_prop() methods
for prop, prop_info in info.properties.items():
classname = prop_info.refname
if classname and classname.endswith('Config'):
method = CONFIG_PROP_METHOD.format(classname=classname,
prop=prop)
code.append('\n '.join(method.splitlines()))
return imports, '\n'.join(code)
def vegalite_main():
library = 'vega-lite'
encoding_defs = {'v1': 'Encoding', 'v2': 'EncodingWithFacet'}
for version in ['v1', 'v2']:
path = abspath(join(dirname(__file__), '..',
'altair', 'vegalite', version))
schemapath = os.path.join(path, 'schema')
schemafile = download_schemafile(library=library,
version=version,
schemapath=schemapath)
# Generate __init__.py file
outfile = join(schemapath, '__init__.py')
print("Writing {0}".format(outfile))
with open(outfile, 'w', encoding='utf8') as f:
f.write("# flake8: noqa\n")
f.write("from .core import *\nfrom .channels import *\n")
f.write("SCHEMA_VERSION = {0!r}\n"
"".format(SCHEMA_VERSION[library][version]))
f.write("SCHEMA_URL = {0!r}\n"
"".format(schema_url(library, version)))
# Generate the core schema wrappers
outfile = join(schemapath, 'core.py')
print("Generating\n {0}\n ->{1}".format(schemafile, outfile))
file_contents = generate_vegalite_schema_wrapper(schemafile)
with open(outfile, 'w', encoding='utf8') as f:
f.write(file_contents)
# Generate the channel wrappers
outfile = join(schemapath, 'channels.py')
print("Generating\n {0}\n ->{1}".format(schemafile, outfile))
code = generate_vegalite_channel_wrappers(schemafile, encoding_def=encoding_defs[version])
with open(outfile, 'w', encoding='utf8') as f:
f.write(code)
if version != 'v1':
# generate the mark mixin
outfile = join(schemapath, 'mixins.py')
print("Generating\n {0}\n ->{1}".format(schemafile, outfile))
mark_imports, mark_mixin = generate_vegalite_mark_mixin(schemafile)
config_imports, config_mixin = generate_vegalite_config_mixin(schemafile)
imports = sorted(set(mark_imports + config_imports))
with open(outfile, 'w', encoding='utf8') as f:
f.write(HEADER)
f.write('\n'.join(imports))
f.write('\n\n\n')
f.write(mark_mixin)
f.write('\n\n\n')
f.write(config_mixin)
def vega_main():
library = 'vega'
for version in ['v2', 'v3']:
path = abspath(join(dirname(__file__), '..',
'altair', 'vega', version))
schemapath = os.path.join(path, 'schema')
schemafile = download_schemafile(library=library,
version=version,
schemapath=schemapath)
# Generate __init__.py file
outfile = join(schemapath, '__init__.py')
print("Writing {0}".format(outfile))
with open(outfile, 'w', encoding='utf8') as f:
f.write("# flake8: noqa\n")
f.write("from .core import *\n\n")
f.write("SCHEMA_VERSION = {0!r}\n"
"".format(SCHEMA_VERSION[library][version]))
f.write("SCHEMA_URL = {0!r}\n"
"".format(schema_url(library, version)))
# Generate the core schema wrappers
outfile = join(schemapath, 'core.py')
print("Generating\n {0}\n ->{1}".format(schemafile, outfile))
file_contents = generate_vega_schema_wrapper(schemafile)
with open(outfile, 'w', encoding='utf8') as f:
f.write(file_contents)
if __name__ == '__main__':
copy_schemapi_util()
vegalite_main()
vega_main()
| ellisonbg/altair | tools/generate_schema_wrapper.py | Python | bsd-3-clause | 18,648 | 0.001448 |
from MQTT_UI import Ui_MainWindow #Generated by Qt Designer
from PyQt4 import QtCore, QtGui #for gui
import paho.mqtt.client as mqtt #for mqtt
import sys #for exit
class StartQT4(QtGui.QMainWindow):
client1 = mqtt.Client() #for raspberry pi
client2 = mqtt.Client() #for simple mqtt test
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
#<Buttons>######################################################################################
self.ui.pushButton_1.clicked.connect(self.pushButton_1)
self.ui.pushButton_2.clicked.connect(self.pushButton_2)
self.ui.pushButton_3.clicked.connect(self.pushButton_3)
self.ui.pushButton_4.clicked.connect(self.pushButton_4)
self.ui.pushButton_5.clicked.connect(self.pushButton_5)
self.ui.pushButton_6.clicked.connect(self.pushButton_6)
self.ui.pushButton_7.clicked.connect(self.pushButton_7)
self.ui.pushButton_8.clicked.connect(self.pushButton_8)
self.ui.pushButton_9.clicked.connect(self.pushButton_9)
self.ui.pushButton_10.clicked.connect(self.pushButton_10)
self.ui.pushButton_11.clicked.connect(self.pushButton_11)
self.ui.pushButton_12.clicked.connect(self.pushButton_12)
self.ui.pushButton_13.clicked.connect(self.pushButton_13)
self.ui.pushButton_14.clicked.connect(self.pushButton_14)
self.ui.pushButton_15.clicked.connect(self.pushButton_15)
self.ui.pushButton_16.clicked.connect(self.pushButton_16)
self.ui.pushButton_17.clicked.connect(self.pushButton_17)
self.ui.pushButton_18.clicked.connect(self.pushButton_18)
self.ui.pushButton_19.clicked.connect(self.pushButton_19)
self.ui.pushButton_20.clicked.connect(self.pushButton_20)
self.ui.pushButton_21.clicked.connect(self.pushButton_21)
self.ui.pushButton_22.clicked.connect(self.pushButton_22)
self.ui.pushButton_23.clicked.connect(self.pushButton_23)
self.ui.pushButton_24.clicked.connect(self.pushButton_24)
#</Buttons>#####################################################################################
#<MQTT Calbacks>####################################################################################
# Called when client1 is connected
def on_connect1(client, userdata, flags, rc):
print("Client 1: Connected with result code " + str(rc)) #'0' means ok
self.client1.subscribe(str(self.ui.lineEdit_4.text()))
# Called when a message has been received on a topic that the client subscribes to.
def on_message1(client, userdata, msg):
print('Client: 1, ' + 'Topic: ' + msg.topic + ', Payload: ' + str(msg.payload))
self.updateDisplay(str(msg.payload))
# Called when the client disconnects from the broker.
def on_disconnect1(client, userdata, rc):
if rc != 0:
print("Client 1: Unexpected disconnection.")
else:
print("Client 1: Clean disconnect.")
# Called when client2 is connected
def on_connect2(client, userdata, flags, rc):
print("Client 2: Connected with result code " + str(rc)) #'0' means ok
# Called when a message has been received on a topic that the client subscribes to.
def on_message2(client, userdata, msg):
print('Client: 2, ' + 'Topic: ' + msg.topic + ', Payload: ' + str(msg.payload))
self.ui.lineEdit_9.setText(str(msg.payload).split('\'')[1])
# Called when the client disconnects from the broker.
def on_disconnect2(client, userdata, rc):
if rc != 0:
print("Client 2: Unexpected disconnection.")
else:
print("Client 2: Clean disconnect.")
        # assigning each client to its own callback function.
        # (a callback is a function that will be called automatically when an event occurs)
self.client1.on_connect = on_connect1
self.client1.on_disconnect = on_disconnect1
self.client1.on_message = on_message1
self.client2.on_connect = on_connect2
self.client2.on_disconnect = on_disconnect2
self.client2.on_message = on_message2
# </MQTT Calbacks>###################################################################################
###<button_methods>####
def pushButton_1(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'A')
def pushButton_2(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'B')
def pushButton_3(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'C')
def pushButton_4(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'D')
def pushButton_5(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'E')
def pushButton_6(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'F')
def pushButton_7(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'G')
def pushButton_8(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'H')
def pushButton_9(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'I')
def pushButton_10(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'J')
def pushButton_11(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'K')
def pushButton_12(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'L')
def pushButton_13(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'M')
def pushButton_14(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'N')
def pushButton_15(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'O')
def pushButton_16(self):
self.client1.publish(str(self.ui.lineEdit_3.text()), 'P')
def pushButton_17(self):
self.client1.loop_start() # creates a new thread for client1
self.client1.connect(str(self.ui.lineEdit_1.text()), int(self.ui.lineEdit_2.text()))
def pushButton_18(self):
self.client1.reconnect()
def pushButton_19(self):
self.client2.loop_start() # creates a new thread for client2
self.client2.connect(str(self.ui.lineEdit_5.text()), int(self.ui.lineEdit_6.text()))
def pushButton_20(self):
self.client2.disconnect()
def pushButton_21(self):
self.client2.reconnect()
def pushButton_22(self):
self.client2.publish(str(self.ui.lineEdit_8.text()), str(self.ui.lineEdit_10.text()))
def pushButton_23(self):
self.client2.subscribe(str(self.ui.lineEdit_7.text()))
def pushButton_24(self):
self.client2.unsubscribe(str(self.ui.lineEdit_7.text()))
###</buttons_methods>###
def updateDisplay(self, pl): #assumes data is csv formatted
payload = str(pl)
payload = payload.split('\'')
payload = payload[1].split(',')
self.ui.progressBar_1.setValue(float(payload[0]))
self.ui.progressBar_2.setValue(float(payload[1]))
self.ui.progressBar_3.setValue(float(payload[2]))
self.ui.progressBar_4.setValue(float(payload[3]))
self.ui.progressBar_5.setValue(float(payload[4]))
self.ui.progressBar_6.setValue(float(payload[5]))
self.ui.progressBar_7.setValue(float(payload[6]))
self.ui.progressBar_8.setValue(float(payload[7]))
self.ui.progressBar_9.setValue(float(payload[8]))
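# Example (illustrative sketch, hypothetical broker and topic): updateDisplay()
# expects a 9-field CSV payload, one value per progress bar, so a matching
# publisher on the Raspberry Pi side could look like:
#
#   import paho.mqtt.client as mqtt
#   sensors = mqtt.Client()
#   sensors.connect("broker.example.org", 1883)
#   sensors.publish("foodcomputer/sensors", "21.5,55.0,6.1,1.2,400,35,78,12,3")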
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
MainWindow = StartQT4()
MainWindow.show()
    sys.exit(app.exec_())
| EEEManchester/Food-Computer | Software/MQTT Test GUI/MQTT_GUI/main.py | Python | mit | 7,742 | 0.005425 |
import unittest
from pyramid.compat import PY3
class Test_InstancePropertyMixin(unittest.TestCase):
def _makeOne(self):
cls = self._getTargetClass()
class Foo(cls):
pass
return Foo()
def _getTargetClass(self):
from pyramid.util import InstancePropertyMixin
return InstancePropertyMixin
def test_callable(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker)
foo.bar = 1
self.assertEqual(1, foo.worker)
foo.bar = 2
self.assertEqual(2, foo.worker)
def test_callable_with_name(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker, name='x')
foo.bar = 1
self.assertEqual(1, foo.x)
foo.bar = 2
self.assertEqual(2, foo.x)
def test_callable_with_reify(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker, reify=True)
foo.bar = 1
self.assertEqual(1, foo.worker)
foo.bar = 2
self.assertEqual(1, foo.worker)
def test_callable_with_name_reify(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(worker, name='x')
foo.set_property(worker, name='y', reify=True)
foo.bar = 1
self.assertEqual(1, foo.y)
self.assertEqual(1, foo.x)
foo.bar = 2
self.assertEqual(2, foo.x)
self.assertEqual(1, foo.y)
def test_property_without_name(self):
def worker(obj): pass
foo = self._makeOne()
self.assertRaises(ValueError, foo.set_property, property(worker))
def test_property_with_name(self):
def worker(obj):
return obj.bar
foo = self._makeOne()
foo.set_property(property(worker), name='x')
foo.bar = 1
self.assertEqual(1, foo.x)
foo.bar = 2
self.assertEqual(2, foo.x)
def test_property_with_reify(self):
def worker(obj): pass
foo = self._makeOne()
self.assertRaises(ValueError, foo.set_property,
property(worker), name='x', reify=True)
def test_override_property(self):
def worker(obj): pass
foo = self._makeOne()
foo.set_property(worker, name='x')
def doit():
foo.x = 1
self.assertRaises(AttributeError, doit)
def test_override_reify(self):
def worker(obj): pass
foo = self._makeOne()
foo.set_property(worker, name='x', reify=True)
foo.x = 1
self.assertEqual(1, foo.x)
foo.x = 2
self.assertEqual(2, foo.x)
def test_reset_property(self):
foo = self._makeOne()
foo.set_property(lambda _: 1, name='x')
self.assertEqual(1, foo.x)
foo.set_property(lambda _: 2, name='x')
self.assertEqual(2, foo.x)
def test_reset_reify(self):
""" This is questionable behavior, but may as well get notified
if it changes."""
foo = self._makeOne()
foo.set_property(lambda _: 1, name='x', reify=True)
self.assertEqual(1, foo.x)
foo.set_property(lambda _: 2, name='x', reify=True)
self.assertEqual(1, foo.x)
def test__make_property(self):
from pyramid.decorator import reify
cls = self._getTargetClass()
name, fn = cls._make_property(lambda x: 1, name='x', reify=True)
self.assertEqual(name, 'x')
self.assertTrue(isinstance(fn, reify))
def test__set_properties_with_iterable(self):
foo = self._makeOne()
x = foo._make_property(lambda _: 1, name='x', reify=True)
y = foo._make_property(lambda _: 2, name='y')
foo._set_properties([x, y])
self.assertEqual(1, foo.x)
self.assertEqual(2, foo.y)
def test__set_properties_with_dict(self):
foo = self._makeOne()
x_name, x_fn = foo._make_property(lambda _: 1, name='x', reify=True)
y_name, y_fn = foo._make_property(lambda _: 2, name='y')
foo._set_properties({x_name: x_fn, y_name: y_fn})
self.assertEqual(1, foo.x)
self.assertEqual(2, foo.y)
def test__set_extensions(self):
inst = self._makeOne()
def foo(self, result):
return result
n, bar = inst._make_property(lambda _: 'bar', name='bar')
class Extensions(object):
def __init__(self):
self.methods = {'foo':foo}
self.descriptors = {'bar':bar}
extensions = Extensions()
inst._set_extensions(extensions)
self.assertEqual(inst.bar, 'bar')
self.assertEqual(inst.foo('abc'), 'abc')
class Test_WeakOrderedSet(unittest.TestCase):
def _makeOne(self):
from pyramid.config import WeakOrderedSet
return WeakOrderedSet()
def test_ctor(self):
wos = self._makeOne()
self.assertEqual(len(wos), 0)
self.assertEqual(wos.last, None)
def test_add_item(self):
wos = self._makeOne()
reg = Dummy()
wos.add(reg)
self.assertEqual(list(wos), [reg])
self.assertTrue(reg in wos)
self.assertEqual(wos.last, reg)
def test_add_multiple_items(self):
wos = self._makeOne()
reg1 = Dummy()
reg2 = Dummy()
wos.add(reg1)
wos.add(reg2)
self.assertEqual(len(wos), 2)
self.assertEqual(list(wos), [reg1, reg2])
self.assertTrue(reg1 in wos)
self.assertTrue(reg2 in wos)
self.assertEqual(wos.last, reg2)
def test_add_duplicate_items(self):
wos = self._makeOne()
reg = Dummy()
wos.add(reg)
wos.add(reg)
self.assertEqual(len(wos), 1)
self.assertEqual(list(wos), [reg])
self.assertTrue(reg in wos)
self.assertEqual(wos.last, reg)
def test_weakref_removal(self):
wos = self._makeOne()
reg = Dummy()
wos.add(reg)
wos.remove(reg)
self.assertEqual(len(wos), 0)
self.assertEqual(list(wos), [])
self.assertEqual(wos.last, None)
def test_last_updated(self):
wos = self._makeOne()
reg = Dummy()
reg2 = Dummy()
wos.add(reg)
wos.add(reg2)
wos.remove(reg2)
self.assertEqual(len(wos), 1)
self.assertEqual(list(wos), [reg])
self.assertEqual(wos.last, reg)
def test_empty(self):
wos = self._makeOne()
reg = Dummy()
reg2 = Dummy()
wos.add(reg)
wos.add(reg2)
wos.empty()
self.assertEqual(len(wos), 0)
self.assertEqual(list(wos), [])
self.assertEqual(wos.last, None)
class Test_object_description(unittest.TestCase):
def _callFUT(self, object):
from pyramid.util import object_description
return object_description(object)
def test_string(self):
self.assertEqual(self._callFUT('abc'), 'abc')
def test_int(self):
self.assertEqual(self._callFUT(1), '1')
def test_bool(self):
self.assertEqual(self._callFUT(True), 'True')
def test_None(self):
self.assertEqual(self._callFUT(None), 'None')
def test_float(self):
self.assertEqual(self._callFUT(1.2), '1.2')
def test_tuple(self):
self.assertEqual(self._callFUT(('a', 'b')), "('a', 'b')")
def test_set(self):
if PY3: # pragma: no cover
self.assertEqual(self._callFUT(set(['a'])), "{'a'}")
else: # pragma: no cover
self.assertEqual(self._callFUT(set(['a'])), "set(['a'])")
def test_list(self):
self.assertEqual(self._callFUT(['a']), "['a']")
def test_dict(self):
self.assertEqual(self._callFUT({'a':1}), "{'a': 1}")
def test_nomodule(self):
o = object()
self.assertEqual(self._callFUT(o), 'object %s' % str(o))
def test_module(self):
import pyramid
self.assertEqual(self._callFUT(pyramid), 'module pyramid')
def test_method(self):
self.assertEqual(
self._callFUT(self.test_method),
'method test_method of class pyramid.tests.test_util.'
'Test_object_description')
def test_class(self):
self.assertEqual(
self._callFUT(self.__class__),
'class pyramid.tests.test_util.Test_object_description')
def test_function(self):
self.assertEqual(
self._callFUT(dummyfunc),
'function pyramid.tests.test_util.dummyfunc')
def test_instance(self):
inst = Dummy()
self.assertEqual(
self._callFUT(inst),
"object %s" % str(inst))
def test_shortened_repr(self):
inst = ['1'] * 1000
self.assertEqual(
self._callFUT(inst),
str(inst)[:100] + ' ... ]')
class TestTopologicalSorter(unittest.TestCase):
def _makeOne(self, *arg, **kw):
from pyramid.util import TopologicalSorter
return TopologicalSorter(*arg, **kw)
def test_remove(self):
inst = self._makeOne()
inst.names.append('name')
inst.name2val['name'] = 1
inst.req_after.add('name')
inst.req_before.add('name')
inst.name2after['name'] = ('bob',)
inst.name2before['name'] = ('fred',)
inst.order.append(('bob', 'name'))
inst.order.append(('name', 'fred'))
inst.remove('name')
self.assertFalse(inst.names)
self.assertFalse(inst.req_before)
self.assertFalse(inst.req_after)
self.assertFalse(inst.name2before)
self.assertFalse(inst.name2after)
self.assertFalse(inst.name2val)
self.assertFalse(inst.order)
def test_add(self):
from pyramid.util import LAST
sorter = self._makeOne()
sorter.add('name', 'factory')
self.assertEqual(sorter.names, ['name'])
self.assertEqual(sorter.name2val,
{'name':'factory'})
self.assertEqual(sorter.order, [('name', LAST)])
sorter.add('name2', 'factory2')
self.assertEqual(sorter.names, ['name', 'name2'])
self.assertEqual(sorter.name2val,
{'name':'factory', 'name2':'factory2'})
self.assertEqual(sorter.order,
[('name', LAST), ('name2', LAST)])
sorter.add('name3', 'factory3', before='name2')
self.assertEqual(sorter.names,
['name', 'name2', 'name3'])
self.assertEqual(sorter.name2val,
{'name':'factory', 'name2':'factory2',
'name3':'factory3'})
self.assertEqual(sorter.order,
[('name', LAST), ('name2', LAST),
('name3', 'name2')])
def test_sorted_ordering_1(self):
sorter = self._makeOne()
sorter.add('name1', 'factory1')
sorter.add('name2', 'factory2')
self.assertEqual(sorter.sorted(),
[
('name1', 'factory1'),
('name2', 'factory2'),
])
def test_sorted_ordering_2(self):
from pyramid.util import FIRST
sorter = self._makeOne()
sorter.add('name1', 'factory1')
sorter.add('name2', 'factory2', after=FIRST)
self.assertEqual(sorter.sorted(),
[
('name2', 'factory2'),
('name1', 'factory1'),
])
def test_sorted_ordering_3(self):
from pyramid.util import FIRST
sorter = self._makeOne()
add = sorter.add
add('auth', 'auth_factory', after='browserid')
add('dbt', 'dbt_factory')
add('retry', 'retry_factory', before='txnmgr', after='exceptionview')
add('browserid', 'browserid_factory')
add('txnmgr', 'txnmgr_factory', after='exceptionview')
add('exceptionview', 'excview_factory', after=FIRST)
self.assertEqual(sorter.sorted(),
[
('exceptionview', 'excview_factory'),
('retry', 'retry_factory'),
('txnmgr', 'txnmgr_factory'),
('dbt', 'dbt_factory'),
('browserid', 'browserid_factory'),
('auth', 'auth_factory'),
])
def test_sorted_ordering_4(self):
from pyramid.util import FIRST
sorter = self._makeOne()
add = sorter.add
add('exceptionview', 'excview_factory', after=FIRST)
add('auth', 'auth_factory', after='browserid')
add('retry', 'retry_factory', before='txnmgr', after='exceptionview')
add('browserid', 'browserid_factory')
add('txnmgr', 'txnmgr_factory', after='exceptionview')
add('dbt', 'dbt_factory')
self.assertEqual(sorter.sorted(),
[
('exceptionview', 'excview_factory'),
('retry', 'retry_factory'),
('txnmgr', 'txnmgr_factory'),
('browserid', 'browserid_factory'),
('auth', 'auth_factory'),
('dbt', 'dbt_factory'),
])
def test_sorted_ordering_5(self):
from pyramid.util import LAST, FIRST
sorter = self._makeOne()
add = sorter.add
add('exceptionview', 'excview_factory')
add('auth', 'auth_factory', after=FIRST)
add('retry', 'retry_factory', before='txnmgr', after='exceptionview')
add('browserid', 'browserid_factory', after=FIRST)
add('txnmgr', 'txnmgr_factory', after='exceptionview', before=LAST)
add('dbt', 'dbt_factory')
self.assertEqual(sorter.sorted(),
[
('browserid', 'browserid_factory'),
('auth', 'auth_factory'),
('exceptionview', 'excview_factory'),
('retry', 'retry_factory'),
('txnmgr', 'txnmgr_factory'),
('dbt', 'dbt_factory'),
])
def test_sorted_ordering_missing_before_partial(self):
from pyramid.exceptions import ConfigurationError
sorter = self._makeOne()
add = sorter.add
add('dbt', 'dbt_factory')
add('auth', 'auth_factory', after='browserid')
add('retry', 'retry_factory', before='txnmgr', after='exceptionview')
add('browserid', 'browserid_factory')
self.assertRaises(ConfigurationError, sorter.sorted)
def test_sorted_ordering_missing_after_partial(self):
from pyramid.exceptions import ConfigurationError
sorter = self._makeOne()
add = sorter.add
add('dbt', 'dbt_factory')
add('auth', 'auth_factory', after='txnmgr')
add('retry', 'retry_factory', before='dbt', after='exceptionview')
add('browserid', 'browserid_factory')
self.assertRaises(ConfigurationError, sorter.sorted)
def test_sorted_ordering_missing_before_and_after_partials(self):
from pyramid.exceptions import ConfigurationError
sorter = self._makeOne()
add = sorter.add
add('dbt', 'dbt_factory')
add('auth', 'auth_factory', after='browserid')
add('retry', 'retry_factory', before='foo', after='txnmgr')
add('browserid', 'browserid_factory')
self.assertRaises(ConfigurationError, sorter.sorted)
def test_sorted_ordering_missing_before_partial_with_fallback(self):
from pyramid.util import LAST
sorter = self._makeOne()
add = sorter.add
add('exceptionview', 'excview_factory', before=LAST)
add('auth', 'auth_factory', after='browserid')
add('retry', 'retry_factory', before=('txnmgr', LAST),
after='exceptionview')
add('browserid', 'browserid_factory')
add('dbt', 'dbt_factory')
self.assertEqual(sorter.sorted(),
[
('exceptionview', 'excview_factory'),
('retry', 'retry_factory'),
('browserid', 'browserid_factory'),
('auth', 'auth_factory'),
('dbt', 'dbt_factory'),
])
def test_sorted_ordering_missing_after_partial_with_fallback(self):
from pyramid.util import FIRST
sorter = self._makeOne()
add = sorter.add
add('exceptionview', 'excview_factory', after=FIRST)
add('auth', 'auth_factory', after=('txnmgr','browserid'))
add('retry', 'retry_factory', after='exceptionview')
add('browserid', 'browserid_factory')
add('dbt', 'dbt_factory')
self.assertEqual(sorter.sorted(),
[
('exceptionview', 'excview_factory'),
('retry', 'retry_factory'),
('browserid', 'browserid_factory'),
('auth', 'auth_factory'),
('dbt', 'dbt_factory'),
])
def test_sorted_ordering_with_partial_fallbacks(self):
from pyramid.util import LAST
sorter = self._makeOne()
add = sorter.add
add('exceptionview', 'excview_factory', before=('wontbethere', LAST))
add('retry', 'retry_factory', after='exceptionview')
add('browserid', 'browserid_factory', before=('wont2', 'exceptionview'))
self.assertEqual(sorter.sorted(),
[
('browserid', 'browserid_factory'),
('exceptionview', 'excview_factory'),
('retry', 'retry_factory'),
])
def test_sorted_ordering_with_multiple_matching_fallbacks(self):
from pyramid.util import LAST
sorter = self._makeOne()
add = sorter.add
add('exceptionview', 'excview_factory', before=LAST)
add('retry', 'retry_factory', after='exceptionview')
add('browserid', 'browserid_factory', before=('retry', 'exceptionview'))
self.assertEqual(sorter.sorted(),
[
('browserid', 'browserid_factory'),
('exceptionview', 'excview_factory'),
('retry', 'retry_factory'),
])
def test_sorted_ordering_with_missing_fallbacks(self):
from pyramid.exceptions import ConfigurationError
from pyramid.util import LAST
sorter = self._makeOne()
add = sorter.add
add('exceptionview', 'excview_factory', before=LAST)
add('retry', 'retry_factory', after='exceptionview')
add('browserid', 'browserid_factory', before=('txnmgr', 'auth'))
self.assertRaises(ConfigurationError, sorter.sorted)
def test_sorted_ordering_conflict_direct(self):
from pyramid.exceptions import CyclicDependencyError
sorter = self._makeOne()
add = sorter.add
add('browserid', 'browserid_factory')
add('auth', 'auth_factory', before='browserid', after='browserid')
self.assertRaises(CyclicDependencyError, sorter.sorted)
def test_sorted_ordering_conflict_indirect(self):
from pyramid.exceptions import CyclicDependencyError
sorter = self._makeOne()
add = sorter.add
add('browserid', 'browserid_factory')
add('auth', 'auth_factory', before='browserid')
add('dbt', 'dbt_factory', after='browserid', before='auth')
self.assertRaises(CyclicDependencyError, sorter.sorted)
class TestSentinel(unittest.TestCase):
def test_repr(self):
from pyramid.util import Sentinel
r = repr(Sentinel('ABC'))
self.assertEqual(r, 'ABC')
class TestActionInfo(unittest.TestCase):
def _getTargetClass(self):
from pyramid.util import ActionInfo
return ActionInfo
def _makeOne(self, filename, lineno, function, linerepr):
return self._getTargetClass()(filename, lineno, function, linerepr)
def test_class_conforms(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import IActionInfo
verifyClass(IActionInfo, self._getTargetClass())
def test_instance_conforms(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IActionInfo
verifyObject(IActionInfo, self._makeOne('f', 0, 'f', 'f'))
def test_ctor(self):
inst = self._makeOne('filename', 10, 'function', 'src')
self.assertEqual(inst.file, 'filename')
self.assertEqual(inst.line, 10)
self.assertEqual(inst.function, 'function')
self.assertEqual(inst.src, 'src')
def test___str__(self):
inst = self._makeOne('filename', 0, 'function', ' linerepr ')
self.assertEqual(str(inst),
"Line 0 of file filename:\n linerepr ")
def dummyfunc(): pass
class Dummy(object):
pass
| danielpronych/pyramid-doxygen | pyramid/tests/test_util.py | Python | bsd-2-clause | 21,474 | 0.001537 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
config = {
"suite_definitions": {
"gaiatest_desktop": {
"options": [
"--restart",
"--timeout=%(timeout)s",
"--type=%(type)s",
"--testvars=%(testvars)s",
"--profile=%(profile)s",
"--symbols-path=%(symbols_path)s",
"--gecko-log=%(gecko_log)s",
"--xml-output=%(xml_output)s",
"--html-output=%(html_output)s",
"--log-raw=%(raw_log_file)s",
"--binary=%(binary)s",
"--address=%(address)s",
"--total-chunks=%(total_chunks)s",
"--this-chunk=%(this_chunk)s"
],
"run_filename": "",
"testsdir": ""
},
"gaiatest_emulator": {
"options": [
"--restart",
"--timeout=%(timeout)s",
"--type=%(type)s",
"--testvars=%(testvars)s",
"--profile=%(profile)s",
"--symbols-path=%(symbols_path)s",
"--xml-output=%(xml_output)s",
"--html-output=%(html_output)s",
"--log-raw=%(raw_log_file)s",
"--logcat-dir=%(logcat_dir)s",
"--emulator=%(emulator)s",
"--homedir=%(homedir)s"
],
"run_filename": "",
"testsdir": ""
},
"marionette_desktop": {
"options": [
"--type=%(type)s",
"--log-raw=%(raw_log_file)s",
"--binary=%(binary)s",
"--address=%(address)s",
"--symbols-path=%(symbols_path)s"
],
"run_filename": "",
"testsdir": ""
},
"marionette_emulator": {
"options": [
"--type=%(type)s",
"--log-raw=%(raw_log_file)s",
"--logcat-dir=%(logcat_dir)s",
"--emulator=%(emulator)s",
"--homedir=%(homedir)s",
"--symbols-path=%(symbols_path)s"
],
"run_filename": "",
"testsdir": ""
},
"webapi_desktop": {
"options": [],
"run_filename": "",
"testsdir": ""
},
"webapi_emulator": {
"options": [
"--type=%(type)s",
"--log-raw=%(raw_log_file)s",
"--symbols-path=%(symbols_path)s",
"--logcat-dir=%(logcat_dir)s",
"--emulator=%(emulator)s",
"--homedir=%(homedir)s"
],
"run_filename": "",
"testsdir": ""
}
}
} | vladikoff/fxa-mochitest | tests/config/mozharness/marionette.py | Python | mpl-2.0 | 2,925 | 0.000342 |
# This is a Python module containing functions to parse and analyze ncf components
# This module is designed to run on the latest major versions of the most popular
# server OSes (Debian, Red Hat/CentOS, Ubuntu, SLES, ...)
# At the time of writing (November 2013) these are Debian 7, Red Hat/CentOS 6,
# Ubuntu 12.04 LTS, SLES 11, ...
# The version of Python in all of these is >= 2.6, which is therefore what this
# module must support
import re
import subprocess
import json
import os.path
# Verbose output
VERBOSE = 0
dirs = [ "10_ncf_internals", "20_cfe_basics", "30_generic_methods", "40_it_ops_knowledge", "50_techniques", "60_services" ]
tags = {}
tags["common"] = ["bundle_name", "bundle_args"]
tags["generic_method"] = ["name", "class_prefix", "class_parameter", "class_parameter_id"]
tags["technique"] = ["name", "description", "version"]
def get_root_dir():
return os.path.realpath(os.path.dirname(__file__) + "/../")
# This method emulates the behavior of subprocess.check_output, which is not
# available in Python 2.6, the oldest version we aim to support.
def check_output(command):
if VERBOSE == 1:
print "VERBOSE: About to run command '" + " ".join(command) + "'"
process = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
retcode = process.poll()
if retcode != 0:
if VERBOSE == 1:
print "VERBOSE: Exception triggered, Command returned error code " + retcode
raise subprocess.CalledProcessError(retcode, command, output=output[0])
if VERBOSE == 1:
print "VERBOSE: Command output: '" + output[0] + "'"
return output[0]
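# Illustrative example of the helper above (the command shown is the one used
# later in this module; output and exit status depend on the local install):
#   version_info = check_output(["cf-promises", "--version"])
#   # a non-zero exit status raises subprocess.CalledProcessError instead of returning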
def get_all_generic_methods_filenames():
return get_all_generic_methods_filenames_in_dir(get_root_dir() + "/tree/30_generic_methods")
def get_all_generic_methods_filenames_in_dir(dir):
return get_all_cf_filenames_under_dir(dir)
def get_all_techniques_filenames():
return get_all_cf_filenames_under_dir(get_root_dir() + "/tree/50_techniques")
def get_all_cf_filenames_under_dir(dir):
filenames = []
filenames_add = filenames.append
for root, dirs, files in os.walk(dir):
for file in files:
if not file.startswith("_") and file.endswith(".cf"):
filenames_add(os.path.join(root, file))
return filenames
def parse_technique_metadata(technique_content):
return parse_bundlefile_metadata(technique_content, "technique")
def parse_generic_method_metadata(technique_content):
return parse_bundlefile_metadata(technique_content, "generic_method")
def parse_bundlefile_metadata(content, bundle_type):
res = {}
for line in content.splitlines():
for tag in tags[bundle_type]:
match = re.match("^\s*#\s*@" + tag + "\s+(.*)$", line)
if match :
res[tag] = match.group(1)
match = re.match("[^#]*bundle\s+agent\s+([^(]+)\(?([^)]*)\)?.*$", line)
if match:
res['bundle_name'] = match.group(1)
res['bundle_args'] = []
if len(match.group(2)):
res['bundle_args'] += [x.strip() for x in match.group(2).split(',')]
# Any tags should come before the "bundle agent" declaration
break
    # The tag "class_parameter_id" is a magic tag: its value is built from class_parameter and the list of args
if "class_parameter_id" in tags[bundle_type]:
try:
res['class_parameter_id'] = res['bundle_args'].index(res['class_parameter'])+1
except:
res['class_parameter_id'] = 0
raise Exception("The class_parameter name \"" + res['class_parameter'] + "\" does not seem to match any of the bundle's parameters")
expected_tags = tags[bundle_type] + tags["common"]
if sorted(res.keys()) != sorted(expected_tags):
missing_keys = [mkey for mkey in expected_tags if mkey not in set(res.keys())]
raise Exception("One or more metadata tags not found before the bundle agent declaration (" + ", ".join(missing_keys) + ")")
return res
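# Illustrative example (the bundle header below is hypothetical): parsing a
# generic method file whose header reads
#     # @name Package install
#     # @class_prefix package_install
#     # @class_parameter package_name
#     bundle agent package_install(package_name)
# with bundle_type "generic_method" would return
#     {'name': 'Package install', 'class_prefix': 'package_install',
#      'class_parameter': 'package_name', 'class_parameter_id': 1,
#      'bundle_name': 'package_install', 'bundle_args': ['package_name']}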
def parse_technique_methods(technique_file):
res = []
# Check file exists
if not os.path.exists(technique_file):
raise Exception("No such file: " + technique_file)
out = check_output(["cf-promises", "-pjson", "-f", technique_file])
promises = json.loads(out)
# Sanity check: if more than one bundle, this is a weird file and I'm quitting
bundle_count = 0
for bundle in promises['bundles']:
if bundle['bundleType'] == "agent":
bundle_count += 1
if bundle_count > 1:
raise Exception("There is not exactly one bundle in this file, aborting")
# Sanity check: the bundle must be of type agent
if promises['bundles'][0]['bundleType'] != 'agent':
        raise Exception("This bundle is not a bundle agent, aborting")
methods_promises = [promiseType for promiseType in promises['bundles'][0]['promiseTypes'] if promiseType['name']=="methods"]
methods = []
if len(methods_promises) >= 1:
methods = methods_promises[0]['contexts']
for context in methods:
class_context = context['name']
for method in context['promises']:
method_name = None
args = None
promiser = method['promiser']
for attribute in method['attributes']:
if attribute['lval'] == 'usebundle':
if attribute['rval']['type'] == 'functionCall':
method_name = attribute['rval']['name']
args = [arg['value'] for arg in attribute['rval']['arguments']]
if attribute['rval']['type'] == 'string':
method_name = attribute['rval']['value']
if args:
res.append({'class_context': class_context, 'method_name': method_name, 'args': args})
else:
res.append({'class_context': class_context, 'method_name': method_name})
return res
def get_all_generic_methods_metadata():
all_metadata = {}
filenames = get_all_generic_methods_filenames()
for file in filenames:
content = open(file).read()
try:
metadata = parse_generic_method_metadata(content)
all_metadata[metadata['bundle_name']] = metadata
except Exception:
continue # skip this file, it doesn't have the right tags in - yuk!
return all_metadata
def get_all_techniques_metadata(include_methods_calls = True):
all_metadata = {}
filenames = get_all_techniques_filenames()
for file in filenames:
content = open(file).read()
try:
metadata = parse_technique_metadata(content)
all_metadata[metadata['bundle_name']] = metadata
if include_methods_calls:
method_calls = parse_technique_methods(file)
all_metadata[metadata['bundle_name']]['method_calls'] = method_calls
except Exception as e:
print "ERROR: Exception triggered, Unable to parse file " + file
print e
continue # skip this file, it doesn't have the right tags in - yuk!
return all_metadata
| ncharles/ncf | tools/ncf.py | Python | gpl-3.0 | 6,835 | 0.016971 |
# _*_ encoding: utf-8 _*_
"""Demonstrate doubly-linked list in python."""
from linked_list import Node
class DoublyLinked(object):
"""Implement a doubly-linked list from a singly-linked list."""
def __init__(self, val=None):
"""Initialize the list."""
self.head = object()
self._mark = self.head
if val:
self.insert(val)
def size(self):
"""Return the length of the list."""
counter = 0
current_node = self.head
while current_node is not self._mark:
counter += 1
current_node = current_node.get_next()
return counter
    def search(self, val):
        """Return the node containing 'val' if it exists, else raise IndexError."""
        current_node = self.head
        while current_node is not self._mark and current_node.get_data() is not val:
            current_node = current_node.get_next()
        if current_node is self._mark:
            raise IndexError
        return current_node
def insert(self, val):
"""Insert value at head of list."""
if isinstance(val, list):
for item in val[::-1]:
new_node = DoubleNode(item, self.head, self._mark)
try:
self.head.set_previous(new_node)
except AttributeError:
pass
self.head = new_node
else:
new_node = DoubleNode(val, self.head, self._mark)
try:
self.head.set_previous(new_node)
except AttributeError:
pass
self.head = new_node
def display(self):
"""Print list represented as Python tuple literal."""
output = """"""
current_node = self.head
while current_node is not self._mark:
output += '{}, '.format(current_node.get_data())
current_node = current_node.get_next()
printable = '(' + output[:-2] + ')'
print(printable)
return printable
def pop(self):
"""Pop the first value off the head of the list and return it."""
item = self.head
if item is self._mark:
raise IndexError
else:
self.head = item.get_next()
try:
self.head.set_previous(self._mark)
except AttributeError:
pass
return item.get_data()
def append(self, val):
"""Append the given item to the tail of the list."""
cur = self.head
if cur == self._mark:
new_node = DoubleNode(val, self._mark)
self.head = new_node
else:
new_node = DoubleNode(val, self._mark)
while cur.next_node != self._mark:
cur = cur.next_node
cur.set_next(new_node)
new_node.set_previous(cur)
def shift(self):
"""Remove and returns the last value from the tail of the list."""
cur = self.head
if cur == self._mark:
raise IndexError
else:
while cur.next_node != self._mark:
cur = cur.next_node
try:
cur.prev_node.next_node = self._mark
except AttributeError:
raise IndexError
return cur.get_data()
    def remove(self, value):
        """Remove the first occurrence of value in the list."""
        previous_node = None
        current_node = self.head
        while current_node is not self._mark and current_node.get_data() != value:
            previous_node = current_node
            current_node = current_node.get_next()
        if current_node is self._mark:
            print('Not Found')
        elif previous_node is None:
            # The match is at the head.
            self.head = current_node.get_next()
        else:
            previous_node.set_next(current_node.get_next())
class DoubleNode(object):
"""Double Node constructor for doubly linked list."""
def __init__(self, data=None, next_node=None, prev_node=None):
"""Initialize the double node."""
self.data = data
self.prev_node = prev_node
self.next_node = next_node
def set_previous(self, prev):
"""Set previous node."""
self.prev_node = prev
def get_data(self):
"""Get data for node."""
return self.data
def get_next(self):
"""Retrieve next node in list."""
return self.next_node
def set_next(self, next_node):
"""Set next node in list."""
self.next_node = next_node
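# Illustrative usage of the classes above (values are arbitrary examples):
#   dll = DoublyLinked([1, 2, 3])   # builds the list (1, 2, 3)
#   dll.append(4)                   # list becomes (1, 2, 3, 4)
#   dll.pop()                       # returns 1, list becomes (2, 3, 4)
#   dll.shift()                     # returns 4, list becomes (2, 3)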
| palindromed/data-structures2 | src/doubly_linked.py | Python | mit | 4,501 | 0 |
"""This module contains classes for handling matrices in a linear algebra setting.
The primary objects are the `Matrix` and `Cov`. These objects overload most numerical
operators to autoalign the elements based on row and column names."""
from .mat_handler import Matrix, Cov, Jco, concat, save_coo
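# Illustrative sketch of the autoalignment described above (the constructor
# signature shown is an assumption; see mat_handler for the exact API):
#   import numpy as np
#   a = Matrix(x=np.ones((2, 2)), row_names=['p1', 'p2'], col_names=['o1', 'o2'])
#   b = Matrix(x=np.ones((2, 2)), row_names=['p2', 'p1'], col_names=['o1', 'o2'])
#   c = a + b  # rows/columns are matched by name before the elements are added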
| jtwhite79/pyemu | pyemu/mat/__init__.py | Python | bsd-3-clause | 301 | 0.006645 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
# +++your code here+++
count = 0
for word in words:
if len(word) >= 2 and word[0] == word[-1]:
count += 1
return count
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
  # Split the words into those starting with 'x' and the rest, sort each
  # group, then combine with the 'x' words first.
  x_words = sorted(word for word in words if word.startswith('x'))
  other_words = sorted(word for word in words if not word.startswith('x'))
  return x_words + other_words
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
  # Sort by the last element of each tuple, extracted with a key function.
  return sorted(tuples, key=lambda t: t[-1])
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'match_ends'
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print
print 'front_x'
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print
print 'sort_last'
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
| missyjcat/pythonexercises | basic/list1.py | Python | apache-2.0 | 3,070 | 0.011726 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark_cassandra.util import as_java_object, as_java_array
from pyspark.streaming.dstream import DStream
from pyspark_cassandra.conf import WriteConf
from pyspark_cassandra.util import helper
from pyspark.serializers import AutoBatchedSerializer, PickleSerializer
def saveToCassandra(dstream, keyspace, table, columns=None, row_format=None, keyed=None,
write_conf=None, **write_conf_kwargs):
ctx = dstream._ssc._sc
gw = ctx._gateway
# create write config as map
write_conf = WriteConf.build(write_conf, **write_conf_kwargs)
write_conf = as_java_object(gw, write_conf.settings())
# convert the columns to a string array
columns = as_java_array(gw, "String", columns) if columns else None
return helper(ctx).saveToCassandra(dstream._jdstream, keyspace, table, columns, row_format,
keyed, write_conf)
def joinWithCassandraTable(dstream, keyspace, table, selected_columns=None, join_columns=None):
"""Joins a DStream (a stream of RDDs) with a Cassandra table
Arguments:
@param dstream(DStream)
The DStream to join. Equals to self when invoking joinWithCassandraTable on a monkey
patched RDD.
@param keyspace(string):
The keyspace to join on.
@param table(string):
The CQL table to join on.
@param selected_columns(string):
The columns to select from the Cassandra table.
@param join_columns(string):
The columns used to join on from the Cassandra table.
"""
ssc = dstream._ssc
ctx = ssc._sc
gw = ctx._gateway
selected_columns = as_java_array(gw, "String", selected_columns) if selected_columns else None
join_columns = as_java_array(gw, "String", join_columns) if join_columns else None
h = helper(ctx)
dstream = h.joinWithCassandraTable(dstream._jdstream, keyspace, table, selected_columns,
join_columns)
dstream = h.pickleRows(dstream)
dstream = h.javaDStream(dstream)
return DStream(dstream, ssc, AutoBatchedSerializer(PickleSerializer()))
# Monkey patch the default python DStream so that data in it can be stored to and joined with
# Cassandra tables
DStream.saveToCassandra = saveToCassandra
DStream.joinWithCassandraTable = joinWithCassandraTable
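# Illustrative usage of the patched DStream methods (keyspace, table and column
# names below are hypothetical):
#   dstream.saveToCassandra("my_keyspace", "my_table")
#   joined = dstream.joinWithCassandraTable("my_keyspace", "my_table",
#                                           selected_columns=["name"],
#                                           join_columns=["id"])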
| TargetHolding/pyspark-cassandra | python/pyspark_cassandra/streaming.py | Python | apache-2.0 | 2,902 | 0.002757 |
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FeedbackList(ListResource):
def __init__(self, version, account_sid, message_sid):
"""
Initialize the FeedbackList
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param message_sid: The message_sid
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackList
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackList
"""
super(FeedbackList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'message_sid': message_sid,
}
self._uri = '/Accounts/{account_sid}/Messages/{message_sid}/Feedback.json'.format(**self._solution)
def create(self, outcome=values.unset):
"""
Create a new FeedbackInstance
:param FeedbackInstance.Outcome outcome: The outcome
:returns: Newly created FeedbackInstance
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
"""
data = values.of({
'Outcome': outcome,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
message_sid=self._solution['message_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackList>'
class FeedbackPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the FeedbackPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The account_sid
:param message_sid: The message_sid
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackPage
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackPage
"""
super(FeedbackPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FeedbackInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
"""
return FeedbackInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
message_sid=self._solution['message_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackPage>'
class FeedbackInstance(InstanceResource):
class Outcome(object):
CONFIRMED = "confirmed"
UMCONFIRMED = "umconfirmed"
def __init__(self, version, payload, account_sid, message_sid):
"""
Initialize the FeedbackInstance
:returns: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
:rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackInstance
"""
super(FeedbackInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'message_sid': payload['message_sid'],
'outcome': payload['outcome'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'uri': payload['uri'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'message_sid': message_sid,
}
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def message_sid(self):
"""
:returns: The message_sid
:rtype: unicode
"""
return self._properties['message_sid']
@property
def outcome(self):
"""
:returns: The outcome
:rtype: FeedbackInstance.Outcome
"""
return self._properties['outcome']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def uri(self):
"""
:returns: The uri
:rtype: unicode
"""
return self._properties['uri']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.FeedbackInstance>'
| angadpc/Alexa-Project- | twilio/rest/api/v2010/account/message/feedback.py | Python | mit | 5,676 | 0.001409 |
##
# Copyright (c) 2009-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twistedcaldav.ical import Component as iComponent
from twistedcaldav.vcard import Component as vComponent
__all__ = [
"CalendarFilter",
"AddressFilter",
]
class CalendarFilter(object):
"""
Abstract class that defines an iCalendar filter/merge object
"""
def __init__(self):
pass
def filter(self, ical):
"""
Filter the supplied iCalendar object using the request information.
@param ical: iCalendar object
@type ical: L{Component}
@return: L{Component} for the filtered calendar data
"""
raise NotImplementedError
def merge(self, icalnew, icalold):
"""
Merge the old iCalendar object into the new iCalendar data using the request information.
@param icalnew: new iCalendar object to merge data into
@type icalnew: L{Component}
@param icalold: old iCalendar data to merge data from
@type icalold: L{Component}
"""
raise NotImplementedError
def validCalendar(self, ical):
# If we were passed a string, parse it out as a Component
if isinstance(ical, str):
try:
ical = iComponent.fromString(ical)
except ValueError:
raise ValueError("Not a calendar: %r" % (ical,))
if ical is None or ical.name() != "VCALENDAR":
raise ValueError("Not a calendar: %r" % (ical,))
return ical
class AddressFilter(object):
"""
Abstract class that defines a vCard filter/merge object
"""
def __init__(self):
pass
def filter(self, vcard):
"""
Filter the supplied vCard object using the request information.
@param vcard: iCalendar object
@type vcard: L{Component}
@return: L{Component} for the filtered vcard data
"""
raise NotImplementedError
def merge(self, vcardnew, vcardold):
"""
Merge the old vcard object into the new vcard data using the request information.
@param vcardnew: new vcard object to merge data into
@type vcardnew: L{Component}
@param vcardold: old vcard data to merge data from
@type vcardold: L{Component}
"""
raise NotImplementedError
def validAddress(self, vcard):
# If we were passed a string, parse it out as a Component
if isinstance(vcard, str):
try:
vcard = vComponent.fromString(vcard)
except ValueError:
raise ValueError("Not a vcard: %r" % (vcard,))
if vcard is None or vcard.name() != "VCARD":
raise ValueError("Not a vcard: %r" % (vcard,))
return vcard
| macosforge/ccs-calendarserver | twistedcaldav/datafilters/filter.py | Python | apache-2.0 | 3,318 | 0.000603 |
# coding: utf8
from __future__ import unicode_literals
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .norm_exceptions import NORM_EXCEPTIONS
from .tag_map import TAG_MAP
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .morph_rules import MORPH_RULES
from .lemmatizer import LEMMA_RULES, LEMMA_INDEX, LEMMA_EXC, LOOKUP
from .syntax_iterators import SYNTAX_ITERATORS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups
def _return_en(_):
return 'en'
class EnglishDefaults(Language.Defaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters.update(LEX_ATTRS)
lex_attr_getters[LANG] = _return_en
lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM],
BASE_NORMS, NORM_EXCEPTIONS)
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
tag_map = TAG_MAP
stop_words = STOP_WORDS
morph_rules = MORPH_RULES
lemma_rules = LEMMA_RULES
lemma_index = LEMMA_INDEX
lemma_exc = LEMMA_EXC
lemma_lookup = LOOKUP
syntax_iterators = SYNTAX_ITERATORS
class English(Language):
lang = 'en'
Defaults = EnglishDefaults
__all__ = ['English']
| aikramer2/spaCy | spacy/lang/en/__init__.py | Python | mit | 1,389 | 0.00216 |
import mykde
class ActionPackage(mykde.ActionPackage):
author = 'Victor Varvaryuk <victor.varvariuc@gmail.com>'
version = 2
description = """
TODO:
xnview - unpack to ~/apps/ and create .desktop file in Graphics category
clip2net
galaxy icons libreoffice, enter key behavior in calc
"""
| warvariuc/mykde | packages/__init__.py | Python | bsd-3-clause | 301 | 0 |
# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutronclient._i18n import _
from neutronclient.common import exceptions
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.neutron.v2_0.qos import rule as qos_rule
BANDWIDTH_LIMIT_RULE_RESOURCE = 'bandwidth_limit_rule'
def add_bandwidth_limit_arguments(parser):
parser.add_argument(
'--max-kbps',
help=_('max bandwidth in kbps.'))
parser.add_argument(
'--max-burst-kbps',
help=_('max burst bandwidth in kbps.'))
def update_bandwidth_limit_args2body(parsed_args, body):
max_kbps = parsed_args.max_kbps
max_burst_kbps = parsed_args.max_burst_kbps
if not (max_kbps or max_burst_kbps):
raise exceptions.CommandError(_("Must provide max_kbps"
" or max_burst_kbps option."))
neutronv20.update_dict(parsed_args, body,
['max_kbps', 'max_burst_kbps', 'tenant_id'])
class CreateQoSBandwidthLimitRule(qos_rule.QosRuleMixin,
neutronv20.CreateCommand):
"""Create a qos bandwidth limit rule."""
resource = BANDWIDTH_LIMIT_RULE_RESOURCE
def add_known_arguments(self, parser):
super(CreateQoSBandwidthLimitRule, self).add_known_arguments(parser)
add_bandwidth_limit_arguments(parser)
def args2body(self, parsed_args):
body = {}
update_bandwidth_limit_args2body(parsed_args, body)
return {self.resource: body}
class ListQoSBandwidthLimitRules(qos_rule.QosRuleMixin,
neutronv20.ListCommand):
"""List all qos bandwidth limit rules belonging to the specified policy."""
resource = BANDWIDTH_LIMIT_RULE_RESOURCE
_formatters = {}
pagination_support = True
sorting_support = True
class ShowQoSBandwidthLimitRule(qos_rule.QosRuleMixin, neutronv20.ShowCommand):
"""Show information about the given qos bandwidth limit rule."""
resource = BANDWIDTH_LIMIT_RULE_RESOURCE
allow_names = False
class UpdateQoSBandwidthLimitRule(qos_rule.QosRuleMixin,
neutronv20.UpdateCommand):
"""Update the given qos bandwidth limit rule."""
resource = BANDWIDTH_LIMIT_RULE_RESOURCE
allow_names = False
def add_known_arguments(self, parser):
super(UpdateQoSBandwidthLimitRule, self).add_known_arguments(parser)
add_bandwidth_limit_arguments(parser)
def args2body(self, parsed_args):
body = {}
update_bandwidth_limit_args2body(parsed_args, body)
return {self.resource: body}
class DeleteQoSBandwidthLimitRule(qos_rule.QosRuleMixin,
neutronv20.DeleteCommand):
"""Delete a given qos bandwidth limit rule."""
resource = BANDWIDTH_LIMIT_RULE_RESOURCE
allow_names = False
| eayunstack/python-neutronclient | neutronclient/neutron/v2_0/qos/bandwidth_limit_rule.py | Python | apache-2.0 | 3,455 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2019 Edgewall Software
# Copyright (C) 2007 Eli Carter <retracile@gmail.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import sys
import trac.env
from trac.ticket.default_workflow import load_workflow_config_snippet
def main():
"""Rewrite the ticket-workflow section of the config; and change all
'assigned' tickets to 'accepted'.
"""
if len(sys.argv) != 2:
print("Usage: %s path_to_trac_environment" % sys.argv[0])
sys.exit(1)
tracdir = sys.argv[1]
trac_env = trac.env.open_environment(tracdir)
# Update the config...
old_workflow = trac_env.config.options('ticket-workflow')
for name, value in old_workflow:
trac_env.config.remove('ticket-workflow', name)
load_workflow_config_snippet(trac_env.config, 'basic-workflow.ini')
trac_env.config.save()
# Update the ticket statuses...
trac_env.db_transaction("""
UPDATE ticket SET status = 'accepted' WHERE status = 'assigned'
""")
if __name__ == '__main__':
main()
| rbaumg/trac | contrib/workflow/migrate_original_to_basic.py | Python | bsd-3-clause | 1,456 | 0.000687 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pylons import tmpl_context as c
from datetime import datetime
import urllib2
from ming.orm.ormsession import ThreadLocalORMSession
from ming.orm import session
from ming import schema
from nose.tools import raises, assert_equal, assert_in
from forgetracker.model import Ticket, TicketAttachment
from forgetracker.tests.unit import TrackerTestWithModel
from forgetracker.import_support import ResettableStream
from allura.model import Feed, Post, User
from allura.lib import helpers as h
from allura.tests import decorators as td
class TestTicketModel(TrackerTestWithModel):
def test_that_label_counts_are_local_to_tool(self):
"""Test that label queries return only artifacts from the specified
tool.
"""
# create a ticket in two different tools, with the same label
from allura.tests import decorators as td
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
def _test_ticket():
return Ticket(ticket_num=1, summary="ticket1", labels=["mylabel"])
@td.with_tool('test', 'Tickets', 'bugs2', username='test-user')
def _test_ticket2():
return Ticket(ticket_num=2, summary="ticket2", labels=["mylabel"])
# create and save the tickets
t1 = _test_ticket()
t2 = _test_ticket2()
ThreadLocalORMSession.flush_all()
# test label query results
label_count1 = t1.artifacts_labeled_with(
"mylabel", t1.app_config).count()
label_count2 = t2.artifacts_labeled_with(
"mylabel", t2.app_config).count()
assert 1 == label_count1 == label_count2
def test_that_it_has_ordered_custom_fields(self):
custom_fields = dict(my_field='my value')
Ticket(summary='my ticket', custom_fields=custom_fields, ticket_num=3)
ThreadLocalORMSession.flush_all()
ticket = Ticket.query.get(summary='my ticket')
assert ticket.custom_fields == dict(my_field='my value')
@raises(schema.Invalid)
def test_ticket_num_required(self):
Ticket(summary='my ticket')
def test_ticket_num_required2(self):
t = Ticket(summary='my ticket', ticket_num=12)
try:
t.ticket_num = None
except schema.Invalid:
pass
else:
raise AssertionError('Expected schema.Invalid to be thrown')
def test_activity_extras(self):
t = Ticket(summary='my ticket', ticket_num=12)
assert_in('allura_id', t.activity_extras)
assert_equal(t.activity_extras['summary'], t.summary)
def test_private_ticket(self):
from allura.model import ProjectRole
from allura.model import ACE, DENY_ALL
from allura.lib.security import Credentials, has_access
from allura.websetup import bootstrap
admin = c.user
creator = bootstrap.create_user('Not a Project Admin')
developer = bootstrap.create_user('Project Developer')
observer = bootstrap.create_user('Random Non-Project User')
anon = User(_id=None, username='*anonymous',
display_name='Anonymous')
t = Ticket(summary='my ticket', ticket_num=3,
reported_by_id=creator._id)
assert creator == t.reported_by
role_admin = ProjectRole.by_name('Admin')._id
role_developer = ProjectRole.by_name('Developer')._id
role_creator = ProjectRole.by_user(t.reported_by, upsert=True)._id
ProjectRole.by_user(
developer, upsert=True).roles.append(role_developer)
ThreadLocalORMSession.flush_all()
cred = Credentials.get().clear()
t.private = True
assert_equal(t.acl, [
ACE.allow(role_developer, 'save_searches'),
ACE.allow(role_developer, 'read'),
ACE.allow(role_developer, 'create'),
ACE.allow(role_developer, 'update'),
ACE.allow(role_developer, 'unmoderated_post'),
ACE.allow(role_developer, 'post'),
ACE.allow(role_developer, 'moderate'),
ACE.allow(role_developer, 'delete'),
ACE.allow(role_creator, 'read'),
ACE.allow(role_creator, 'post'),
ACE.allow(role_creator, 'create'),
ACE.allow(role_creator, 'unmoderated_post'),
DENY_ALL])
assert has_access(t, 'read', user=admin)()
assert has_access(t, 'create', user=admin)()
assert has_access(t, 'update', user=admin)()
assert has_access(t, 'read', user=creator)()
assert has_access(t, 'post', user=creator)()
assert has_access(t, 'unmoderated_post', user=creator)()
assert has_access(t, 'create', user=creator)()
assert not has_access(t, 'update', user=creator)()
assert has_access(t, 'read', user=developer)()
assert has_access(t, 'create', user=developer)()
assert has_access(t, 'update', user=developer)()
assert not has_access(t, 'read', user=observer)()
assert not has_access(t, 'create', user=observer)()
assert not has_access(t, 'update', user=observer)()
assert not has_access(t, 'read', user=anon)()
assert not has_access(t, 'create', user=anon)()
assert not has_access(t, 'update', user=anon)()
t.private = False
assert t.acl == []
assert has_access(t, 'read', user=admin)()
assert has_access(t, 'create', user=admin)()
assert has_access(t, 'update', user=admin)()
assert has_access(t, 'read', user=developer)()
assert has_access(t, 'create', user=developer)()
assert has_access(t, 'update', user=developer)()
assert has_access(t, 'read', user=creator)()
assert has_access(t, 'unmoderated_post', user=creator)()
assert has_access(t, 'create', user=creator)()
assert not has_access(t, 'update', user=creator)()
assert has_access(t, 'read', user=observer)()
assert has_access(t, 'read', user=anon)()
def test_feed(self):
t = Ticket(
app_config_id=c.app.config._id,
ticket_num=1,
summary='test ticket',
description='test description',
created_date=datetime(2012, 10, 29, 9, 57, 21, 465000))
assert_equal(t.created_date, datetime(2012, 10, 29, 9, 57, 21, 465000))
f = Feed.post(
t,
title=t.summary,
description=t.description,
pubdate=t.created_date)
assert_equal(f.pubdate, datetime(2012, 10, 29, 9, 57, 21, 465000))
assert_equal(f.title, 'test ticket')
assert_equal(f.description,
'<div class="markdown_content"><p>test description</p></div>')
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
@td.with_tool('test', 'Tickets', 'bugs2', username='test-user')
def test_ticket_move(self):
app1 = c.project.app_instance('bugs')
app2 = c.project.app_instance('bugs2')
with h.push_context(c.project._id, app_config_id=app1.config._id):
ticket = Ticket.new()
ticket.summary = 'test ticket'
ticket.description = 'test description'
ticket.assigned_to_id = User.by_username('test-user')._id
ticket.discussion_thread.add_post(text='test comment')
assert_equal(
Ticket.query.find({'app_config_id': app1.config._id}).count(), 1)
assert_equal(
Ticket.query.find({'app_config_id': app2.config._id}).count(), 0)
assert_equal(
Post.query.find(dict(thread_id=ticket.discussion_thread._id)).count(), 1)
t = ticket.move(app2.config)
assert_equal(
Ticket.query.find({'app_config_id': app1.config._id}).count(), 0)
assert_equal(
Ticket.query.find({'app_config_id': app2.config._id}).count(), 1)
assert_equal(t.summary, 'test ticket')
assert_equal(t.description, 'test description')
assert_equal(t.assigned_to.username, 'test-user')
assert_equal(t.url(), '/p/test/bugs2/1/')
post = Post.query.find(dict(thread_id=ticket.discussion_thread._id,
text={'$ne': 'test comment'})).first()
assert post is not None, 'No comment about ticket moving'
message = 'Ticket moved from /p/test/bugs/1/'
assert_equal(post.text, message)
post = Post.query.find(dict(text='test comment')).first()
assert_equal(post.thread.discussion_id, app2.config.discussion_id)
assert_equal(post.thread.app_config_id, app2.config._id)
assert_equal(post.app_config_id, app2.config._id)
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
@td.with_tool('test', 'Tickets', 'bugs2', username='test-user')
def test_ticket_move_with_different_custom_fields(self):
app1 = c.project.app_instance('bugs')
app2 = c.project.app_instance('bugs2')
app1.globals.custom_fields.extend([
{'name': '_test', 'type': 'string', 'label': 'Test field'},
{'name': '_test2', 'type': 'string', 'label': 'Test field 2'}])
app2.globals.custom_fields.append(
{'name': '_test', 'type': 'string', 'label': 'Test field'})
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
with h.push_context(c.project._id, app_config_id=app1.config._id):
ticket = Ticket.new()
ticket.summary = 'test ticket'
ticket.description = 'test description'
ticket.custom_fields['_test'] = 'test val'
ticket.custom_fields['_test2'] = 'test val 2'
t = ticket.move(app2.config)
assert_equal(t.summary, 'test ticket')
assert_equal(t.description, 'test description')
assert_equal(t.custom_fields['_test'], 'test val')
post = Post.query.find(
dict(thread_id=ticket.discussion_thread._id)).first()
assert post is not None, 'No comment about ticket moving'
message = 'Ticket moved from /p/test/bugs/1/'
message += '\n\nCan\'t be converted:\n'
message += '\n- **_test2**: test val 2'
assert_equal(post.text, message)
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
@td.with_tool('test', 'Tickets', 'bugs2', username='test-user')
def test_ticket_move_with_users_not_in_project(self):
app1 = c.project.app_instance('bugs')
app2 = c.project.app_instance('bugs2')
app1.globals.custom_fields.extend([
{'name': '_user_field', 'type': 'user', 'label': 'User field'},
{'name': '_user_field_2', 'type': 'user', 'label': 'User field 2'}])
app2.globals.custom_fields.extend([
{'name': '_user_field', 'type': 'user', 'label': 'User field'},
{'name': '_user_field_2', 'type': 'user', 'label': 'User field 2'}])
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
from allura.websetup import bootstrap
bootstrap.create_user('test-user-0')
with h.push_context(c.project._id, app_config_id=app1.config._id):
ticket = Ticket.new()
ticket.summary = 'test ticket'
ticket.description = 'test description'
ticket.custom_fields['_user_field'] = 'test-user' # in project
# not in project
ticket.custom_fields['_user_field_2'] = 'test-user-0'
# not in project
ticket.assigned_to_id = User.by_username('test-user-0')._id
t = ticket.move(app2.config)
assert_equal(t.assigned_to_id, None)
assert_equal(t.custom_fields['_user_field'], 'test-user')
assert_equal(t.custom_fields['_user_field_2'], '')
post = Post.query.find(
dict(thread_id=ticket.discussion_thread._id)).first()
assert post is not None, 'No comment about ticket moving'
message = 'Ticket moved from /p/test/bugs/1/'
message += '\n\nCan\'t be converted:\n'
message += '\n- **_user_field_2**: test-user-0 (user not in project)'
message += '\n- **assigned_to**: test-user-0 (user not in project)'
assert_equal(post.text, message)
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
def test_attach_with_resettable_stream(self):
with h.push_context(c.project._id, app_config_id=c.app.config._id):
ticket = Ticket.new()
ticket.summary = 'test ticket'
ticket.description = 'test description'
assert_equal(len(ticket.attachments), 0)
f = urllib2.urlopen('file://%s' % __file__)
TicketAttachment.save_attachment(
'test_ticket_model.py', ResettableStream(f),
artifact_id=ticket._id)
ThreadLocalORMSession.flush_all()
# need to refetch since attachments are cached
session(ticket).expunge(ticket)
ticket = Ticket.query.get(_id=ticket._id)
assert_equal(len(ticket.attachments), 1)
assert_equal(ticket.attachments[0].filename, 'test_ticket_model.py')
def test_json_parents(self):
ticket = Ticket.new()
json_keys = ticket.__json__().keys()
assert_in('related_artifacts', json_keys) # from Artifact
assert_in('votes_up', json_keys) # VotableArtifact
assert_in('ticket_num', json_keys) # Ticket
assert ticket.__json__()['assigned_to'] is None
| apache/incubator-allura | ForgeTracker/forgetracker/tests/unit/test_ticket_model.py | Python | apache-2.0 | 14,297 | 0.00028 |
import RPi.GPIO as GPIO
import time
buzzer_pin = 27
notes = {
'B0' : 31,
'C1' : 33, 'CS1' : 35,
'D1' : 37, 'DS1' : 39,
'EB1' : 39,
'E1' : 41,
'F1' : 44, 'FS1' : 46,
'G1' : 49, 'GS1' : 52,
'A1' : 55, 'AS1' : 58,
'BB1' : 58,
'B1' : 62,
'C2' : 65, 'CS2' : 69,
'D2' : 73, 'DS2' : 78,
'EB2' : 78,
'E2' : 82,
'F2' : 87, 'FS2' : 93,
'G2' : 98, 'GS2' : 104,
'A2' : 110, 'AS2' : 117,
'BB2' : 123,
'B2' : 123,
'C3' : 131, 'CS3' : 139,
'D3' : 147, 'DS3' : 156,
'EB3' : 156,
'E3' : 165,
'F3' : 175, 'FS3' : 185,
'G3' : 196, 'GS3' : 208,
'A3' : 220, 'AS3' : 233,
'BB3' : 233,
'B3' : 247,
'C4' : 262, 'CS4' : 277,
'D4' : 294, 'DS4' : 311,
'EB4' : 311,
'E4' : 330,
'F4' : 349, 'FS4' : 370,
'G4' : 392, 'GS4' : 415,
'A4' : 440, 'AS4' : 466,
'BB4' : 466,
'B4' : 494,
'C5' : 523, 'CS5' : 554,
'D5' : 587, 'DS5' : 622,
'EB5' : 622,
'E5' : 659,
'F5' : 698, 'FS5' : 740,
'G5' : 784, 'GS5' : 831,
'A5' : 880, 'AS5' : 932,
'BB5' : 932,
'B5' : 988,
'C6' : 1047, 'CS6' : 1109,
'D6' : 1175, 'DS6' : 1245,
'EB6' : 1245,
'E6' : 1319,
'F6' : 1397, 'FS6' : 1480,
'G6' : 1568, 'GS6' : 1661,
'A6' : 1760, 'AS6' : 1865,
'BB6' : 1865,
'B6' : 1976,
'C7' : 2093, 'CS7' : 2217,
'D7' : 2349, 'DS7' : 2489,
'EB7' : 2489,
'E7' : 2637,
'F7' : 2794, 'FS7' : 2960,
'G7' : 3136, 'GS7' : 3322,
'A7' : 3520, 'AS7' : 3729,
'BB7' : 3729,
'B7' : 3951,
'C8' : 4186, 'CS8' : 4435,
'D8' : 4699, 'DS8' : 4978
}
anmeldung = [
notes['F4'],notes['A4'],notes['G4'],notes['C4'],
]
anmeldung_tempo = [
0.25,0.25,0.25,0.35,
]
def buzz(frequency, length): #create the function "buzz" and feed it the pitch and duration
if(frequency==0):
time.sleep(length)
return
period = 1.0 / frequency #in physics, the period (sec/cyc) is the inverse of the frequency (cyc/sec)
  delayValue = period / 2  #calculate the time for half of the wave
numCycles = int(length * frequency) #the number of waves to produce is the duration times the frequency
for i in range(numCycles): #start a loop from 0 to the variable "cycles" calculated above
GPIO.output(buzzer_pin, True) #set pin 27 to high
time.sleep(delayValue) #wait with pin 27 high
GPIO.output(buzzer_pin, False) #set pin 27 to low
time.sleep(delayValue) #wait with pin 27 low
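# Worked example (illustrative): buzz(440, 0.5) uses period = 1/440 s (~2.27 ms),
# delayValue of ~1.14 ms, and numCycles = int(0.5 * 440) = 220 on/off cycles.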
def setup():
GPIO.setmode(GPIO.BCM)
GPIO.setup(buzzer_pin, GPIO.IN)
GPIO.setup(buzzer_pin, GPIO.OUT)
def destroy():
GPIO.cleanup() # Release resource
def play(melody,tempo,pause,pace=0.800):
for i in range(0, len(melody)): # Play song
noteDuration = tempo[i]*pace
buzz(melody[i],noteDuration) # Change the frequency along the song note
pauseBetweenNotes = noteDuration * pause
time.sleep(pauseBetweenNotes)
if __name__ == '__main__': # Program start from here
try:
setup()
print "Anmeldung"
play(anmeldung, anmeldung_tempo, 0.50, 1.5000)
time.sleep(2)
destroy()
except KeyboardInterrupt: # When 'Ctrl+C' is pressed, the child program destroy() will be executed.
destroy()
| lesscomplex/HomeSec | lock/buzz_anm.py | Python | agpl-3.0 | 2,986 | 0.081045 |
#!/usr/bin/env python
"""
books.py
reads book records from input files in several formats and prints them filtered and sorted
features
- iterates through records without holding the entire dataset in memory, allowing for large datasets
- uses SQLite for storage and retrieval
"""
import os
import argparse
import sqlite3
from book_list.book_list_file_reader import BookListFileReader
from book_list.book_list import BookList
# Config
curdir = dir_path = os.path.dirname(os.path.realpath(__file__))
SQLITE3_DB_FILE = curdir + '/db/booklist.sqlite3'
file_import_list = {
'csv': curdir + '/code-test-source-files/csv',
'pipe': curdir + '/code-test-source-files/pipe',
'slash': curdir + '/code-test-source-files/slash',
}
# Command line parsing
parser = argparse.ArgumentParser(
prog='Read multiple formats of book data and display them filtered and sorted.'
)
parser.add_argument('--filter', action='store', default=None,
help='show a subset of books, looks for the argument as a substring of any of the fields')
parser.add_argument('--year', action='store_true', default=False,
help="sort the books by year, ascending instead of default sort")
parser.add_argument('--reverse', action='store_true', default=False,
help='reverse sort')
args = parser.parse_args()
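# Example invocation (illustrative; the filter string is arbitrary):
#   python books.py --filter heinlein --year --reverse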
# Read files and populate book list
sqlite3_connection = sqlite3.Connection(SQLITE3_DB_FILE);
book_list = BookList(sqlite3_connection)
for parse_type, file_path in file_import_list.iteritems():
reader = BookListFileReader(file_path, parse_type)
while True:
row = reader.get_result()
if row is None:
break
book_list.insert_record(row)
# Make query based on command line arguments
book_list.query_book_list(filter=args.filter, year=args.year, reverse=args.reverse)
# Output
while True:
row = book_list.get_record()
if row == None:
break
print("{}, {}, {}, {}".format(*row))
| danieltalsky/gp-code-test | books.py | Python | unlicense | 1,974 | 0.003546 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
def validate(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from ERPNext")
delete_page_cache('home')
def setup_items(self):
for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
doc.save()
self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image))
| mhbu50/erpnext | erpnext/portal/doctype/homepage/homepage.py | Python | gpl-3.0 | 801 | 0.021223 |
def choppedRO(t,period=2e-3,RO_onoff=[0,.5],Trap_onoff=[.5,1]):
'''
period: time in ms
RO_onoff: tuple containing [on,off] as a percentage of period
Trap_onoff: tuple containing [on,off] as a percentage of period
'''
D2_switch(t,0)
vODT_switch(t,0)
D2_switch(t+RO_onoff[0]*period,1)
D2_switch(t+RO_onoff[1]*period,0)
vODT_switch(t+Trap_onoff[0]*period,1)
vODT_switch(t+Trap_onoff[1]*period,0)
return t+period | QuantumQuadrate/CsPyController | python/exp_functional_waveforms/hybridChop.py | Python | lgpl-3.0 | 469 | 0.036247 |
# Copyright (c) 2016 Lee Cannon
# Licensed under the MIT License, see included LICENSE File
from collections import Counter
from .filter import at_trigrams, with_words
def count_trigrams(interactions: list, minimum: int = 1, n: int = None, include_unknown: bool = False) -> list:
"""Returns the n most common trigrams in the interactions given.
:param interactions: The interactions to check.
:type interactions: list
    :param minimum: Ignore trigrams that occur minimum times or fewer. Defaults to 1
:type minimum: int
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param include_unknown: Determines if the interactions with unknown trigrams should be included. Default False
:type include_unknown: bool
:return: The list of most common trigrams in the interactions given.
:rtype: list
"""
# The below (if not interaction.trigram == 'OWN') ignores unknown trigrams
if not include_unknown:
trigram_list = [interaction.trigram for interaction in interactions if not interaction.trigram == 'OWN']
else:
trigram_list = [interaction.trigram for interaction in interactions]
return [trigram for trigram in Counter(trigram_list).most_common(n=n) if trigram[1] > minimum]
def count_words(interactions: list, minimum: int = 1, n: int = None, additional_words_to_ignore: list=None) -> list:
"""Returns the n most common words in the interactions given.
:param interactions: The interactions to check.
:type interactions: list
    :param minimum: Ignore words that occur no more than minimum times. Defaults to 1
:type minimum: int
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param additional_words_to_ignore: List of additional words to ignore
:type additional_words_to_ignore: list
:return: The list of most common words in the interactions given.
:rtype: list
"""
if additional_words_to_ignore is None:
additional_words_to_ignore = []
word_list = [word for interaction in interactions for word in set(interaction.title_words)
if word not in additional_words_to_ignore]
counts = Counter(word_list).most_common(n=n)
counts = [count for count in counts if count[1] > minimum]
return counts
def count_interactions(interactions: list):
return len(interactions)
def count_words_at_trigrams(interactions: list, trigrams: list, n: int = None, minimum: int = 2,
additional_words_to_ignore: list = None) -> list:
"""Returns the list of most common words at the given trigram in order. Ignores words where the number of
occurrences is less than the minimum.
Example of returned list:
| [['modnet', 1234],
| ['password', 123],
| ['outlook', 34],
| ['network', 4]]
:param interactions: The list of interactions to check.
:type interactions: list
:param trigrams: The list of trigrams to check.
:type trigrams: list
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param minimum: Ignores words where the number of occurrences is less than the minimum. Defaults to 2.
:type minimum: int
:param additional_words_to_ignore: List of additional words to ignore
:type additional_words_to_ignore: list
:return: The list of most common words at the given trigram.
:rtype: list
"""
if additional_words_to_ignore is None:
additional_words_to_ignore = []
return [word for word in count_words(at_trigrams(interactions, trigrams), n=n)
if word[1] >= minimum and word[0] not in additional_words_to_ignore]
def count_trigram_with_words(interactions: list, words: list, n: int = None, minimum: int = 2) -> list:
"""Returns the list of most common trigrams for occurrences of the given word in order. Ignores trigrams where the
number of occurrences is less than the minimum.
Example of returned list:
| [['ABW', 1234],
| ['NOW', 123],
| ['YOR', 34],
| ['BRC', 4]]
:param interactions: The list of interactions to check.
:type interactions: list
:param words: The list of words to check.
:type words: list
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param minimum: Ignores words where the number of occurrences is less than the minimum. Defaults to 2.
:type minimum: int
:return: The list of most common words at the given trigram.
:rtype: list
"""
return [trigram for trigram in count_trigrams(with_words(interactions, words), n=n)
if trigram[1] >= minimum]
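# --- Illustrative usage sketch (added; not part of the original module) ---
# The counting helpers above only rely on the `.trigram` and `.title_words`
# attributes of each interaction, so a tiny stand-in object is enough to try
# them out. `Interaction` below is a hypothetical substitute for the real
# interaction class; run with `python -m trending.count` so the relative
# import of `.filter` still resolves.
if __name__ == '__main__':
    from collections import namedtuple
    Interaction = namedtuple('Interaction', ['trigram', 'title_words'])
    sample = [
        Interaction('ABW', ['modnet', 'password']),
        Interaction('ABW', ['password', 'reset']),
        Interaction('NOW', ['outlook']),
    ]
    print(count_trigrams(sample))      # [('ABW', 2)] -- 'NOW' occurs only once
    print(count_words(sample))         # [('password', 2)]
    print(count_interactions(sample))  # 3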
| leecannon/trending | trending/count.py | Python | mit | 4,854 | 0.004738 |
# "Smart" parser for handling libmagic signature results. Specifically, this implements
# support for binwalk's custom libmagic signature extensions (keyword tags, string processing,
# false positive detection, etc).
import re
import binwalk.core.module
from binwalk.core.compat import *
from binwalk.core.common import get_quoted_strings, MathExpression
class Tag(object):
TAG_DELIM_START = "{"
TAG_DELIM_END = "}"
TAG_ARG_SEPERATOR = ":"
def __init__(self, **kwargs):
self.name = None
self.keyword = None
self.type = None
self.handler = None
self.tag = None
self.default = None
for (k,v) in iterator(kwargs):
setattr(self, k, v)
if self.type == int:
self.default = 0
elif self.type == str:
self.default = ''
if self.keyword is not None:
self.tag = self.TAG_DELIM_START + self.keyword
if self.type is None:
self.tag += self.TAG_DELIM_END
else:
self.tag += self.TAG_ARG_SEPERATOR
if self.handler is None:
if self.type == int:
self.handler = 'get_math_arg'
elif self.type == str:
self.handler = 'get_keyword_arg'
class Signature(object):
'''
Class for parsing smart signature tags in libmagic result strings.
    This class is intended for internal use only, but the list of supported 'smart keyword' tags that may be used
    in magic files is available via the Signature.TAGS list:
        from binwalk.core.smart import Signature
        for tag in Signature.TAGS:
            print tag.keyword
'''
TAGS = [
Tag(name='raw-string', keyword='raw-string', type=str, handler='parse_raw_string'),
Tag(name='string-len', keyword='string-len', type=str, handler='parse_string_len'),
Tag(name='math', keyword='math', type=int, handler='parse_math'),
Tag(name='one-of-many', keyword='one-of-many', handler='one_of_many'),
Tag(name='display-once', keyword='display-once', handler='display_once'),
Tag(name='jump', keyword='jump-to-offset', type=int),
Tag(name='name', keyword='file-name', type=str),
Tag(name='size', keyword='file-size', type=int),
Tag(name='adjust', keyword='offset-adjust', type=int),
Tag(name='delay', keyword='extract-delay', type=str),
Tag(name='year', keyword='file-year', type=str),
Tag(name='epoch', keyword='file-epoch', type=int),
Tag(name='raw-size', keyword='raw-string-length', type=int),
Tag(name='raw-replace', keyword='raw-replace'),
Tag(name='string-len-replace', keyword='string-len'),
]
def __init__(self, filter, ignore_smart_signatures=False):
'''
Class constructor.
@filter - Instance of the MagicFilter class.
@ignore_smart_signatures - Set to True to ignore smart signature keywords.
Returns None.
'''
self.filter = filter
self.last_one_of_many = None
self.valid_once_already_seen = set()
self.ignore_smart_signatures = ignore_smart_signatures
def parse(self, data):
'''
Parse a given data string for smart signature keywords. If any are found, interpret them and strip them.
@data - String to parse, as returned by libmagic.
Returns a dictionary of parsed values.
'''
results = {}
self.valid = True
self.display = True
if data:
for tag in self.TAGS:
if tag.handler is not None:
(d, arg) = getattr(self, tag.handler)(data, tag)
if not self.ignore_smart_signatures:
data = d
if isinstance(arg, type(False)) and arg == False and not self.ignore_smart_signatures:
self.valid = False
elif tag.type is not None:
if self.ignore_smart_signatures:
results[tag.name] = tag.default
else:
results[tag.name] = arg
if self.ignore_smart_signatures:
results['description'] = data
else:
results['description'] = self.strip_tags(data)
else:
self.valid = False
results['valid'] = self.valid
results['display'] = self.display
return binwalk.core.module.Result(**results)
def tag_lookup(self, keyword):
for tag in self.TAGS:
if tag.keyword == keyword:
return tag
return None
def is_valid(self, data):
'''
Validates that result data does not contain smart keywords in file-supplied strings.
@data - Data string to validate.
Returns True if data is OK.
Returns False if data is not OK.
'''
# All strings printed from the target file should be placed in strings, else there is
# no way to distinguish between intended keywords and unintended keywords. Get all the
# quoted strings.
quoted_data = get_quoted_strings(data)
# Check to see if there was any quoted data, and if so, if it contained the keyword starting delimiter
if quoted_data and Tag.TAG_DELIM_START in quoted_data:
# If so, check to see if the quoted data contains any of our keywords.
# If any keywords are found inside of quoted data, consider the keywords invalid.
for tag in self.TAGS:
if tag.tag in quoted_data:
return False
return True
def safe_string(self, data):
'''
Strips out quoted data (i.e., data taken directly from a file).
'''
quoted_string = get_quoted_strings(data)
if quoted_string:
data = data.replace('"' + quoted_string + '"', "")
return data
def display_once(self, data, tag):
'''
Determines if a given data string should be printed if {display-once} was specified.
@data - String result data.
Returns False if the string result should not be displayed.
Returns True if the string result should be displayed.
'''
if self.filter.valid_result(data):
signature = data.split(',')[0]
if signature in self.valid_once_already_seen:
self.display = False
return (data, False)
elif tag.tag in data:
self.valid_once_already_seen.add(signature)
return (data, True)
return (data, True)
def one_of_many(self, data, tag):
'''
Determines if a given data string is one result of many.
@data - String result data.
Returns False if the string result is one of many and should not be displayed.
Returns True if the string result is not one of many and should be displayed.
'''
if self.filter.valid_result(data):
if self.last_one_of_many is not None and data.startswith(self.last_one_of_many):
self.display = False
elif tag.tag in data:
# Only match on the data before the first comma, as that is typically unique and static
self.last_one_of_many = data.split(',')[0]
else:
self.last_one_of_many = None
return (data, True)
def get_keyword_arg(self, data, tag):
'''
Retrieves the argument for keywords that specify arguments.
@data - String result data, as returned by libmagic.
        @tag - Tag object for the keyword whose argument is requested.
Returns the argument string value on success.
Returns a blank string on failure.
'''
arg = ''
safe_data = self.safe_string(data)
if tag.tag in safe_data:
arg = safe_data.split(tag.tag)[1].split(tag.TAG_DELIM_END)[0]
return (data, arg)
def get_math_arg(self, data, tag):
'''
        Retrieves the argument for keywords that specify mathematical expressions as arguments.
@data - String result data, as returned by libmagic.
        @tag - Tag object for the keyword whose argument should be evaluated.
Returns the resulting calculated value.
'''
value = 0
(data, arg) = self.get_keyword_arg(data, tag)
if arg:
value = MathExpression(arg).value
if value is None:
value = 0
self.valid = False
return (data, value)
def parse_math(self, data, tag):
'''
Replace math keywords with the requested values.
@data - String result data.
Returns the modified string result data.
'''
while tag.tag in self.safe_string(data):
(data, arg) = self.get_keyword_arg(data, tag)
v = '%s%s%s' % (tag.tag, arg, tag.TAG_DELIM_END)
(data, math_value) = self.get_math_arg(data, tag)
data = data.replace(v, "%d" % math_value)
return (data, None)
def parse_raw_string(self, data, raw_str_tag):
'''
Process strings that aren't NULL byte terminated, but for which we know the string length.
This should be called prior to any other smart parsing functions.
@data - String to parse.
Returns a parsed string.
'''
if self.is_valid(data):
raw_str_length_tag = self.tag_lookup('raw-string-length')
raw_replace_tag = self.tag_lookup('raw-replace')
# Get the raw string keyword arg
(data, raw_string) = self.get_keyword_arg(data, raw_str_tag)
# Was a raw string keyword specified?
if raw_string:
# Get the raw string length arg
(data, raw_size) = self.get_math_arg(data, raw_str_length_tag)
# Replace all instances of raw-replace in data with raw_string[:raw_size]
# Also strip out everything after the raw-string keyword, including the keyword itself.
# Failure to do so may (will) result in non-printable characters and this string will be
# marked as invalid when it shouldn't be.
data = data[:data.find(raw_str_tag.tag)].replace(raw_replace_tag.tag, '"' + raw_string[:raw_size] + '"')
return (data, True)
def parse_string_len(self, data, str_len_tag):
'''
Process {string-len} macros.
@data - String to parse.
Returns parsed data string.
'''
if not self.ignore_smart_signatures and self.is_valid(data):
str_len_replace_tag = self.tag_lookup('string-len-replace')
# Get the raw string keyword arg
(data, raw_string) = self.get_keyword_arg(data, str_len_tag)
# Was a string-len keyword specified?
if raw_string:
# Get the string length
try:
string_length = '%d' % len(raw_string)
except KeyboardInterrupt as e:
raise e
except Exception:
string_length = '0'
# Strip out *everything* after the string-len keyword, including the keyword itself.
# Failure to do so can potentially allow keyword injection from a maliciously created file.
data = data.split(str_len_tag.tag)[0].replace(str_len_replace_tag.tag, string_length)
return (data, True)
def strip_tags(self, data):
'''
Strips the smart tags from a result string.
@data - String result data.
Returns a sanitized string.
'''
if not self.ignore_smart_signatures:
for tag in self.TAGS:
start = data.find(tag.tag)
if start != -1:
end = data[start:].find(tag.TAG_DELIM_END)
if end != -1:
data = data.replace(data[start:start+end+1], "")
return data
| Tepira/binwalk | src/binwalk/core/smart.py | Python | mit | 12,128 | 0.002803 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "scripts"))
from Util import runTests
runTests() | ljx0305/ice | allTests.py | Python | gpl-2.0 | 476 | 0.006303 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^login/$', views.login, name='login'),
url(r'^register/$', views.register, name='register'),
url(r'^logout/$', views.logout, name='logout'),
url(r'^plaza/$', views.plaza, name='plaza'),
]
| huaiping/pandora | membership/urls.py | Python | mit | 324 | 0 |
# -*- coding: utf-8 -*-
"""UWEC Language Tools student corpus module
Provides functions for processing student corpus data.
"""
# Python 3 forward compatability imports.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Standard imports
import re
# Setup logger.
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
def punctuation_density(text, punctuation=r'[^\w\s]'):
"""Returns the punctuation density of the given text.
Arguments:
text (str): The input text.
punctuation (str): A regex pattern for matching punctuation characters.
            Defaults to r'[^\w\s]'.
Returns:
(float): The density of puntuation in the text.
"""
if len(text) == 0:
return 0
words = re.sub(r'\W', ' ', text).split()
puncs = float(sum([len(re.findall(punctuation, x)) for x in text]))
return (puncs / len(words)) if len(words) > 0 else 0.0
def capitalization_density(text):
"""Returns the word-starting capitalized character density of the given
text.
Arguments:
text (str): The input text.
Returns:
(float): The density of capitalized words in the text.
"""
if len(text) == 0:
return 0
words = re.sub(r'\W', ' ', text).split()
caps = float(sum([1 for x in words if re.match('[A-Z]', x)]))
return (caps / len(words)) if len(words) > 0 else 0.0
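# Worked examples for the two density helpers above (added for illustration;
# values hand-checked against the regexes used): "Hello, world!" contains two
# punctuation characters and two words, one of which starts with a capital.
#
# >>> punctuation_density("Hello, world!")
# 1.0
# >>> capitalization_density("Hello, world!")
# 0.5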
def clean_punctuation(text, punctuation=r',\.!\?:;'):
"""Returns text modified by removing whitespace before punctuation.
Arguments:
text (str): The input text.
punctuation (str): regex containing the punctuation to remove
whitespace before. Defaults to ',\.!\?:;'.
Returns:
(str): The modified text.
"""
# Straighten quotes, remove interior spaces.
text = re.sub(r'“ ?| ?”', '\"', text)
text = re.sub(r'‘ ?| ?’', '\'', text)
# Remove punctuation after quotes.
text = re.sub(r'([”"])\s*([{0}])'.format(punctuation), r'\2\1 ', text)
text = re.sub(r'([”"])\s*([{0}])'.format(punctuation), r'\1 ', text)
# Remove strings of punctuation.
text = re.sub(r'\b ?([{0}])[{0}\s]+'.format(punctuation), r'\1 ', text)
# Remove extra whitespace.
text = re.sub(r'\s+', r' ', text)
return text
def parse_parentheticals(text, lparen='\(', rparen='\)'):
"""Parses the given text and returns a tree of parentheticals.
Arguments:
text (str): The input text.
lparen (str): A regex for matching the left parenthetical delimiter.
rparen (str): A regex for matching the right parenthetical delimiter.
Returns:
(dict | [str]): A dictionary representing the parse tree or a list of
strings. Each node of the tree will have the following structure:
{'parens': (l, r), 'text': []}
where (l, r) are the parentheticals wrapping the text, and the list
contains raw text and subnodes. For example, the following string
'ab)c((d)ef)g()(hi'
will return:
{'parens': None,
'text': ['ab',
')',
'c',
{'parens': ('(', ')'),
'text': [{'parens': ('(', ')'), 'text': ['d']}, 'ef']},
'g',
{'parens': ('(', ')'), 'text': []},
{'parens': ('(', None), 'text': ['hi']}]}
Unmatched lparens will be interpretted as regular text. Unmatched
rparens will have None as their second parens tuple element. If there
are no parentheticals, a list of text will be returned.
"""
# Precompile regular expressions for ease of use.
n_regex = re.compile(r'([^{}{}]*)'.format(lparen, rparen))
l_regex = re.compile(r'({})'.format(lparen))
r_regex = re.compile(r'({})'.format(rparen))
# Build root of tree.
tree = {'parens': (None, None),
'text': []}
context = [tree]
rest = text
# Keep parsing until nothing is left.
while rest:
node = context[0]
# Match rparens.
m = r_regex.match(rest)
if m:
if node['parens'] == (None, None):
node['text'].append(m.group(1))
else:
node = context.pop(0)
node['parens'] = (node['parens'][0], m.group(1))
rest = rest[len(m.group(1)):]
continue
# Match lparens.
m = l_regex.match(rest)
if m:
new_node = {'parens': (m.group(1), None),
'text': []}
node['text'].append(new_node)
context.insert(0, new_node)
rest = rest[len(m.group(1)):]
continue
# Match text.
m = n_regex.match(rest)
if m:
node['text'].append(m.group(1))
rest = rest[len(m.group(1)):]
# Remove highest level tree if whole string is parenthetical.
if len(tree['text']) == 1:
tree = [tree['text'][0]]
return tree
def recombine_parentheticals(parse_tree, selector_function=None, sep=''):
"""Recombines text seperated by the seperate_parentheticals function by
using a selector function to determine which portions to keep or discard.
Arguments:
parse_tree (dict): A tree of parsed parentheticals
(See parse_parentheticals.)
selector_function ((str, str), str -> true): A function taking a pair
of parenthesis and a string, and returning whether to keep the
string or discard it. Allows for selective recombination of text.
Defaults to None (everything is kept.)
sep (str): The seperator to use when combining the text. Defaults to
''.
Returns:
(str): The resulting text.
Raises:
(ValueError): When unkown values are contained in parse_tree.
"""
# Set default selector test function if none is provided.
selector_function = selector_function or (lambda x, y: True)
# Reconstruct parse tree root for lists and strings.
if isinstance(parse_tree, list):
parse_tree = {'parens': (None, None), 'text': parse_tree}
elif isinstance(parse_tree, str) or isinstance(parse_tree, unicode):
parse_tree = {'parens': (None, None), 'text': [parse_tree]}
text = []
for item in parse_tree['text']:
if isinstance(item, str) or isinstance(item, unicode):
text.append(item)
elif isinstance(item, dict):
# Recreate text from rest of this node.
res = recombine_parentheticals(item,
selector_function=selector_function,
sep=sep)
# Append text if it passes selector test.
if selector_function(parse_tree['parens'], res):
text.append(res)
else:
raise ValueError('Unknown parse tree content.')
res = sep.join(text)
# Use selector test on the whole tree.
if selector_function(parse_tree['parens'], res):
l = parse_tree['parens'][0]
r = parse_tree['parens'][1]
return sep.join([x for x in [l, res, r] if x is not None])
return ''
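# Example usage sketch (added; not in the original module): combining the two
# functions above to drop any parenthesized text. Keeping only nodes whose
# enclosing left delimiter is None preserves just the top-level text.
#
# >>> tree = parse_parentheticals('a(b)c')
# >>> recombine_parentheticals(tree, lambda parens, text: parens[0] is None)
# The call returns 'ac' -- the parenthesized 'b' is dropped.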
| SkySchermer/uweclang | uweclang/plain/clean.py | Python | mit | 7,350 | 0.000954 |
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import threading
import paramiko
from touchdown.tests.fixtures.fixture import Fixture
class DummyServer(paramiko.ServerInterface):
def get_allowed_auths(self, username):
return "publickey,password"
def check_auth_password(self, username, password):
return paramiko.AUTH_SUCCESSFUL
def check_auth_publickey(self, username, key):
return paramiko.AUTH_SUCCESSFUL
def check_channel_request(self, kind, chanid):
return paramiko.OPEN_SUCCEEDED
def check_channel_exec_request(self, channel, command):
return True
def check_channel_shell_request(self, channel):
return True
def check_channel_pty_request(
self, channel, term, width, height, pixelwidth, pixelheight, modes
):
return True
class SshConnectionFixture(Fixture):
def __enter__(self):
self.listen_socket = socket.socket()
self.listen_socket.bind(("0.0.0.0", 0))
self.listen_socket.listen(1)
self.address, self.port = self.listen_socket.getsockname()
self.fixtures.push(lambda *exc_info: self.listen_socket.close())
self.event = threading.Event()
self.ssh_connection = self.workspace.add_ssh_connection(
name="test-ssh-connection", hostname=self.address, port=self.port
)
self.listen_thread = threading.Thread(target=self.server_thread)
self.listen_thread.daemon = True
self.listen_thread.start()
return self
def server_thread(self):
self.client_socket, addr = self.listen_socket.accept()
self.fixtures.push(lambda *exc_info: self.client_socket.close())
self.server_transport = paramiko.Transport(self.client_socket)
self.fixtures.push(lambda *exc_info: self.server_transport.close())
self.server_transport.add_server_key(
paramiko.RSAKey.from_private_key_file(
os.path.join(os.path.dirname(__file__), "..", "assets/id_rsa_test")
)
)
self.server = DummyServer()
self.server_transport.start_server(self.event, self.server)
| yaybu/touchdown | touchdown/tests/fixtures/ssh_connection.py | Python | apache-2.0 | 2,710 | 0.000369 |
"""
Django settings for eveggie project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (eveggie/config/settings/base.py - 3 = eveggie/)
APPS_DIR = ROOT_DIR.path('eveggie')
# Load operating system environment variables and then prepare to use them
env = environ.Env()
# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating System Environment variables have precedence over variables defined in the .env file,
# that is to say variables from the .env files will only be used if not defined
# as environment variables.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
]
# Apps specific for this project go here.
LOCAL_APPS = [
# custom users app
'eveggie.users.apps.UsersConfig',
'eveggie.restaurants.apps.RestaurantsConfig',
'eveggie.orders.apps.OrdersConfig',
'eveggie.reviews.apps.ReviewsConfig',
# Your stuff: custom apps go here
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'eveggie.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Felipe Holanda""", 'azul@eveggie.com'),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///eveggie'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD STORAGE SETTINGS
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'eveggie.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'eveggie.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| flp9001/eveggie | config/settings/base.py | Python | mit | 10,190 | 0.001865 |
class Solution:
def minCut(self, s: str) -> int:
cut = [0] * (len(s) + 1)
cut[0] = -1
ispal = []
for _ in range(len(s)):
ispal.append([False] * len(s))
for i in range(len(s)):
mincut = i
for j in range(i+1):
                # if s[j..i] is a palindrome
if s[i] == s[j] and (i-j <= 2 or ispal[j+1][i-1]):
ispal[j][i] = True
mincut = min(mincut, cut[j] + 1)
cut[i+1] = mincut
return cut[-1]
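# Added explanatory note (not in the original solution): cut[i+1] holds the
# minimum number of cuts for the prefix s[:i+1]. Whenever s[j..i] is a
# palindrome, that prefix can be split into s[:j] plus the palindrome s[j..i],
# giving the candidate cost cut[j] + 1; the cut[0] = -1 sentinel cancels the
# "+1" when the whole prefix is itself a palindrome. For s = "aabbaa" the whole
# string is a palindrome, so the script below prints 0.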
ob = Solution()
s = "aabbaa"
print(ob.minCut(s)) | shobhitmishra/CodingProblems | LeetCode/Session3/mincut.py | Python | mit | 600 | 0.005 |
#
#
#March 2014
#Adam Breznicky - TxDOT TPP - Mapping Group
#
#This is an independent script which requires a single parameter designating a directory.
#The script will walk through each subfolder and file within the designated directory, identifying the MXD files
#and re-sourcing the Comanche database connections to utilize the new 'Admin' prefix
#
#
#
#
#import modules
import arcpy, os
#variables
directory = ""
def re_source_admin():
#issue list
issues = []
#walk through each directory
for root, dirs, files in os.walk(directory):
#ignore file and personal geodatabases
specDir = root.split("\\")[-1]
dbsuffix = specDir.split(".")[-1]
if dbsuffix == "gdb" or dbsuffix == "mdb" or dbsuffix == "tbx":
pass
else:
for n in files:
#identify the mxds
if str(n).split(".")[-1] == "mxd":
print "working on: " + str(os.path.join(root, n))
map = arcpy.mapping.MapDocument(os.path.join(root, n))
dataframes = arcpy.mapping.ListDataFrames(map)
for df in dataframes:
layers = arcpy.mapping.ListLayers(map, "", df)
for lyr in layers:
try:
if "TPP_GIS.MCHAMB1." in lyr.dataSource:
print "lyr source: " + lyr.dataSource
newsource = lyr.dataSource.replace("TPP_GIS.MCHAMB1.", "TPP_GIS.APP_TPP_GIS_ADMIN.")
location = newsource.split("\\")[:-2]
locationFixed = "\\".join(location)
print locationFixed
newname = newsource.split("\\")[-1]
print newname
lyr.replaceDataSource(locationFixed, "SDE_WORKSPACE", newname)
print "lyr replaced: " + newsource
except:
if os.path.join(root, n) not in issues:
issues.append(os.path.join(root, n))
print lyr.name + " is not a feature layer"
tables = arcpy.mapping.ListTableViews(map, "", df)
for tbl in tables:
try:
if "TPP_GIS.MCHAMB1." in tbl.dataSource:
print "tbl source: " + tbl.dataSource
newsource = tbl.dataSource.replace("TPP_GIS.MCHAMB1.", "TPP_GIS.APP_TPP_GIS_ADMIN.")
location = newsource.split("\\")[:-2]
locationFixed = "\\".join(location)
print locationFixed
newname = newsource.split("\\")[-1]
print newname
tbl.replaceDataSource(locationFixed, "SDE_WORKSPACE", newname)
print "tbl replaced: " + newsource
except:
if os.path.join(root, n) not in issues:
issues.append(os.path.join(root, n))
print tbl.name + " is not a feature layer"
                    map.save()
    return issues
issues = re_source_admin()
print "success!"
print "the following MXDs contained issues with a layer having not a dataSource (e.g. a non-feature layer):"
for i in issues:
print str(i) | TxDOT/python | standalone/AdminPrefix_Resourcer_v1.py | Python | mit | 3,702 | 0.006753 |
# -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon.docstring
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Classes for docstring parsing and formatting.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import collections
import inspect
import re
# from six import string_types, u
# from six.moves import range
from .iterators import modify_iter
import sys
def _prepare_docstring(s, ignore=1):
# type: (unicode, int) -> List[unicode]
"""Convert a docstring into lines of parseable reST. Remove common leading
indentation, where the indentation of a given number of lines (usually just
one) is ignored.
Return the docstring as a list of lines usable for inserting into a docutils
ViewList (used as argument of nested_parse().) An empty line is added to
act as a separator between this docstring and following content.
"""
lines = s.expandtabs().splitlines()
# Find minimum indentation of any non-blank lines after ignored lines.
margin = sys.maxsize
for line in lines[ignore:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation from ignored lines.
for i in range(ignore):
if i < len(lines):
lines[i] = lines[i].lstrip()
if margin < sys.maxsize:
for i in range(ignore, len(lines)):
lines[i] = lines[i][margin:]
# Remove any leading blank lines.
while lines and not lines[0]:
lines.pop(0)
# make sure there is an empty line at the end
if lines and lines[-1]:
lines.append('')
return lines
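# Small worked example of the helper above (added for illustration): common
# indentation after the first line is stripped and a trailing blank line is
# appended so the result can be fed straight into the section parser.
#
# _prepare_docstring("Summary.\n\n    Indented detail.\n")
# -> ['Summary.', '', 'Indented detail.', '']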
_directive_regex = re.compile(r'\.\. \S+::')
_google_section_regex = re.compile(r'^(\s|\w)+:\s*$')
_google_typed_arg_regex = re.compile(r'\s*(.+?)\s*\(\s*(.*[^\s]+)\s*\)')
_single_colon_regex = re.compile(r'(?<!:):(?!:)')
_xref_regex = re.compile(r'(:(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:`.+?`)')
_bullet_list_regex = re.compile(r'^(\*|\+|\-)(\s+\S|\s*$)')
_enumerated_list_regex = re.compile(
r'^(?P<paren>\()?'
r'(\d+|#|[ivxlcdm]+|[IVXLCDM]+|[a-zA-Z])'
r'(?(paren)\)|\.)(\s+\S|\s*$)')
class GoogleDocstring(object):
"""Convert Google style docstrings to reStructuredText.
Parameters
----------
docstring : :obj:`str` or :obj:`list` of :obj:`str`
The docstring to parse, given either as a string or split into
individual lines.
Other Parameters
----------------
what : :obj:`str`, optional
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : :obj:`str`, optional
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
Example
-------
>>> from sphinx.ext.napoleon import Config
>>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
>>> docstring = '''One line summary.
...
... Extended description.
...
... Args:
... arg1(int): Description of `arg1`
... arg2(str): Description of `arg2`
... Returns:
... str: Description of return value.
... '''
>>> print(GoogleDocstring(docstring, config))
One line summary.
<BLANKLINE>
Extended description.
<BLANKLINE>
:param arg1: Description of `arg1`
:type arg1: int
:param arg2: Description of `arg2`
:type arg2: str
<BLANKLINE>
:returns: Description of return value.
:rtype: str
<BLANKLINE>
"""
def __init__(self, docstring=None, what='', name='',
obj=None, options=None):
if not what:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable): # type: ignore
what = 'function'
else:
what = 'object'
if docstring is None:
if obj is None:
raise "If docstring is None, obj may not be"
docstring = obj.__doc__
self._what = what
self._name = name
self._obj = obj
if isinstance(docstring, str):
docstring = _prepare_docstring(docstring)
self._lines = docstring
self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
self._parsed_lines = [] # type: List[unicode]
self._is_in_section = False
self._section_indent = 0
self._directive_sections = [] # type: List[unicode]
self._entry_sections = {
'args': self._parse_fields_section,
'attributes': self._parse_fields_section,
'returns': self._parse_fields_section,
'yields': self._parse_fields_section,
'example args': self._parse_fields_section,
} # type: Dict[unicode, Callable]
self._freeform_sections = {
'example': self._parse_generic_section,
'examples': self._parse_generic_section,
'example returns': self._parse_generic_section,
'note': self._parse_generic_section,
'references': self._parse_generic_section,
'see also': self._parse_generic_section,
'todo': self._parse_generic_section,
} # type: Dict[unicode, Callable]
self._sections = {
name: value
for name, value in [*self._entry_sections.items(), *self._freeform_sections.items()]
}
self._parsed_dicts = {
name: []
for name in self._entry_sections.keys()
}
self._parse()
def lines(self):
# type: () -> List[unicode]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
list(str)
The lines of the docstring in a list.
"""
return self._parsed_lines
def result(self):
# type: () -> List[unicode]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
list(str)
The lines of the docstring in a list.
"""
return {'sections': self._parsed_lines, **self._parsed_dicts}
def _consume_indented_block(self, indent=1):
# type: (int) -> List[unicode]
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
(not line or self._is_indented(line, indent))):
lines.append(next(self._line_iter)) # type: ignore
line = self._line_iter.peek()
return lines
def _consume_contiguous(self):
# type: () -> List[unicode]
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
not self._is_section_header()):
lines.append(next(self._line_iter)) # type: ignore
return lines
def _consume_empty(self):
# type: () -> List[unicode]
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
lines.append(next(self._line_iter)) # type: ignore
line = self._line_iter.peek()
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
line = next(self._line_iter) # type: ignore
before, colon, after = self._partition_field_on_colon(line)
_name, _type, _desc = before, '', after # type: unicode, unicode, unicode
if parse_type:
match = _google_typed_arg_regex.match(before) # type: ignore
if match:
_name = match.group(1)
_type = match.group(2)
_name = self._escape_args_and_kwargs(_name)
if prefer_type and not _type:
_type, _name = _name, _type
indent = self._get_indent(line) + 1
_descs = [_desc] + self._dedent(self._consume_indented_block(indent))
return _name, _type, _descs
def _consume_fields(self, parse_type=True, prefer_type=False):
# type: (bool, bool) -> List[Tuple[unicode, unicode, List[unicode]]]
self._consume_empty()
fields = []
while not self._is_section_break():
_name, _type, _desc = self._consume_field(parse_type, prefer_type)
if _name or _type or _desc:
fields.append((_name, _type, _desc,))
return fields
def _consume_section_header(self):
# type: () -> unicode
section = next(self._line_iter) # type: ignore
stripped_section = section.strip(':')
if stripped_section.lower() in self._sections:
section = stripped_section
return section
def _consume_to_end(self):
# type: () -> List[unicode]
lines = []
while self._line_iter.has_next():
lines.append(next(self._line_iter)) # type: ignore
return lines
def _consume_to_next_section(self):
# type: () -> List[unicode]
self._consume_empty()
lines = []
while not self._is_section_break():
lines.append(next(self._line_iter)) # type: ignore
return lines + self._consume_empty()
def _dedent(self, lines, full=False):
# type: (List[unicode], bool) -> List[unicode]
if full:
return [line.lstrip() for line in lines]
else:
min_indent = self._get_min_indent(lines)
return [line[min_indent:] for line in lines]
def _escape_args_and_kwargs(self, name):
# type: (unicode) -> unicode
if name[:2] == '**':
return r'\*\*' + name[2:]
elif name[:1] == '*':
return r'\*' + name[1:]
else:
return name
def _fix_field_desc(self, desc):
# type: (List[unicode]) -> List[unicode]
if self._is_list(desc):
desc = [u''] + desc
elif desc[0].endswith('::'):
desc_block = desc[1:]
indent = self._get_indent(desc[0])
block_indent = self._get_initial_indent(desc_block)
if block_indent > indent:
desc = [u''] + desc
else:
desc = ['', desc[0]] + self._indent(desc_block, 4)
return desc
def _get_current_indent(self, peek_ahead=0):
# type: (int) -> int
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
while line != self._line_iter.sentinel:
if line:
return self._get_indent(line)
peek_ahead += 1
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
return 0
def _get_indent(self, line):
# type: (unicode) -> int
for i, s in enumerate(line):
if not s.isspace():
return i
return len(line)
def _get_initial_indent(self, lines):
# type: (List[unicode]) -> int
for line in lines:
if line:
return self._get_indent(line)
return 0
def _get_min_indent(self, lines):
# type: (List[unicode]) -> int
min_indent = None
for line in lines:
if line:
indent = self._get_indent(line)
if min_indent is None:
min_indent = indent
elif indent < min_indent:
min_indent = indent
return min_indent or 0
def _indent(self, lines, n=4):
# type: (List[unicode], int) -> List[unicode]
return [(' ' * n) + line for line in lines]
def _is_indented(self, line, indent=1):
# type: (unicode, int) -> bool
for i, s in enumerate(line):
if i >= indent:
return True
elif not s.isspace():
return False
return False
def _is_list(self, lines):
# type: (List[unicode]) -> bool
if not lines:
return False
if _bullet_list_regex.match(lines[0]): # type: ignore
return True
if _enumerated_list_regex.match(lines[0]): # type: ignore
return True
if len(lines) < 2 or lines[0].endswith('::'):
return False
indent = self._get_indent(lines[0])
next_indent = indent
for line in lines[1:]:
if line:
next_indent = self._get_indent(line)
break
return next_indent > indent
def _is_section_header(self):
# type: () -> bool
section = self._line_iter.peek().lower()
match = _google_section_regex.match(section)
if match and section.strip(':') in self._sections:
header_indent = self._get_indent(section)
section_indent = self._get_current_indent(peek_ahead=1)
return section_indent > header_indent
elif self._directive_sections:
if _directive_regex.match(section):
for directive_section in self._directive_sections:
if section.startswith(directive_section):
return True
return False
def _is_section_break(self):
# type: () -> bool
line = self._line_iter.peek()
return (not self._line_iter.has_next() or
self._is_section_header() or
(self._is_in_section and
line and
not self._is_indented(line, self._section_indent)))
def _parse(self):
# type: () -> None
self._parsed_lines = self._consume_empty()
while self._line_iter.has_next():
if self._is_section_header():
try:
section = self._consume_section_header()
self._is_in_section = True
self._section_indent = self._get_current_indent()
if _directive_regex.match(section): # type: ignore
lines = [section] + self._consume_to_next_section()
else:
section_key = section.lower()
parse_section = self._sections[section_key]
if section_key in self._parsed_dicts:
self._parsed_dicts[section_key].extend(
parse_section())
else:
self._parsed_lines.append(
(section_key, parse_section()))
finally:
self._is_in_section = False
self._section_indent = 0
else:
if not self._parsed_lines:
self._parsed_lines.append(('text', self._consume_contiguous() + self._consume_empty()))
else:
self._parsed_lines.append(('text', self._consume_to_next_section()))
# Multiline docstrings often begin right after the """ and then continue
# with appropriate indentation at the next line break. The above algorithm
# splits a single text section into two. Merge them here if that happens.
if len(self._parsed_lines) >= 2:
first = self._parsed_lines[0]
second = self._parsed_lines[1]
if first[0] == 'text' and second[0] == 'text':
self._parsed_lines = self._parsed_lines[1:]
self._parsed_lines[0] = ('text', first[1] + second[1])
def _parse_fields_section(self):
# type: (unicode) -> List[unicode]
fields = self._consume_fields()
# type: (List[Tuple[unicode, unicode, List[unicode]]], unicode, unicode) -> List[unicode] # NOQA
lines = []
for _name, _type, _desc in fields:
_desc = self._strip_empty(_desc)
if any(_desc):
_desc = self._fix_field_desc(_desc)
lines.append((_name, _type, _desc))
return lines
def _parse_generic_section(self):
# type: (unicode, bool) -> List[unicode]
lines = self._strip_empty(self._consume_to_next_section())
lines = self._dedent(lines)
if lines:
return lines
else:
return ['']
def _partition_field_on_colon(self, line):
# type: (unicode) -> Tuple[unicode, unicode, unicode]
before_colon = []
after_colon = []
colon = ''
found_colon = False
for i, source in enumerate(_xref_regex.split(line)): # type: ignore
if found_colon:
after_colon.append(source)
else:
m = _single_colon_regex.search(source)
if (i % 2) == 0 and m:
found_colon = True
colon = source[m.start(): m.end()]
before_colon.append(source[:m.start()])
after_colon.append(source[m.end():])
else:
before_colon.append(source)
return ("".join(before_colon).strip(),
colon,
"".join(after_colon).strip())
def _strip_empty(self, lines):
# type: (List[unicode]) -> List[unicode]
if lines:
start = -1
for i, line in enumerate(lines):
if line:
start = i
break
if start == -1:
lines = []
end = -1
for i in reversed(range(len(lines))):
line = lines[i]
if line:
end = i
break
if start > 0 or end + 1 < len(lines):
lines = lines[start:end + 1]
return lines
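# Usage sketch (added; not part of the original module). Unlike the upstream
# Sphinx class this was adapted from, GoogleDocstring here returns structured
# data: free-form text comes back through lines() / result()['sections'],
# while entry sections such as Args are collected as
# (name, type, description-lines) tuples.
#
# parsed = GoogleDocstring("Summary.\n\nArgs:\n    x (int): An argument.\n")
# parsed.result()['args'] -> [('x', 'int', ['An argument.'])]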
| ajbouh/tfi | src/tfi/parse/docstring.py | Python | mit | 17,890 | 0.000671 |
"""
Copyright 2015 Sai Gopal
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from Logger import Logger
from ProfileLookup import ProfileLookup
from RedisConn import RedisConn
class SaslSenderDomainPolicy:
"""
This class provides saslsender domain rate limiting
"""
key = 'sasl_username'
prefix = 'SaslSenderPolicy_'
quota = {}
def __init__(self, parsed_config):
self.parsed_config = parsed_config
self.Enforce = parsed_config.getboolean('SaslSenderDomainPolicy', 'Enforce')
self.RejectMessage = parsed_config.get('SaslSenderDomainPolicy', 'RejectMessage')
self.ProfileLookupObj = ProfileLookup.create_profile_lookup('SaslSenderDomainPolicy', parsed_config)
self.ProfileCacheTTL = parsed_config.getint('SaslSenderDomainPolicy', 'ProfileCacheTime')
for i in parsed_config.items('SaslSenderDomainPolicy-Profiles'):
limits = i[1].split(',')
profile = i[0].lower()
SaslSenderDomainPolicy.quota[profile] = (int(limits[0]), int(limits[1]))
self.value = self.profile = self.error = None
def check_quota(self, message, redis_pipe):
self.error = False
try:
self.value = message.data[self.key].split('@')[1].lower()
self.profile = self.ProfileLookupObj.lookup(self.value, self.ProfileCacheTTL)
RedisConn.LUA_CALL_CHECK_LIMIT(keys=[SaslSenderDomainPolicy.prefix + self.value],
args=[SaslSenderDomainPolicy.quota[self.profile][0]], client=redis_pipe)
except IndexError:
self.error = True
self.message = message
RedisConn.LUA_CALL_DO_NOTHING_SLAVE(keys=[], args=[], client=redis_pipe)
def update_quota(self, redis_pipe):
if self.error:
RedisConn.LUA_CALL_DO_NOTHING_MASTER(keys=[], args=[], client=redis_pipe)
else:
RedisConn.LUA_CALL_INCR(keys=[SaslSenderDomainPolicy.prefix + self.value],
args=[SaslSenderDomainPolicy.quota[self.profile][1]], client=redis_pipe)
def log_quota(self, accept, redis_val=None):
if accept:
if self.error:
Logger.log(
'SaslSenderDomainPolicy Unable To Spilt SaslSender(%s) Action: accept' % (
self.message.data[self.key]))
else:
Logger.log('SaslSenderDomainPolicy SaslSenderDomain: %s Quota: (%s/%s) Profile: %s Action: accept' % (
self.value, str(int(redis_val)), str(SaslSenderDomainPolicy.quota[self.profile][0]), self.profile))
else:
Logger.log('SaslSenderDomainPolicy SaslSenderDomain: %s Quota: Exceeded Profile: %s Action: reject' % (
self.value, self.profile))
| EnduranceIndia/ratelimitd | Policies/SaslSenderDomainPolicy.py | Python | apache-2.0 | 3,282 | 0.004875 |
#!/usr/bin/env python
"Load data, create the validation split, optionally scale data, train a linear model, evaluate"
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Normalizer, PolynomialFeatures
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, StandardScaler, RobustScaler
from sklearn.linear_model import LogisticRegression as LR
from sklearn.metrics import roc_auc_score as AUC
from sklearn.metrics import accuracy_score as accuracy
#
def train_and_evaluate( y_train, x_train, y_val, x_val ):
lr = LR()
lr.fit( x_train, y_train )
p = lr.predict_proba( x_val )
p_bin = lr.predict( x_val )
acc = accuracy( y_val, p_bin )
auc = AUC( y_val, p[:,1] )
return ( auc, acc )
def transform_train_and_evaluate( transformer ):
global x_train, x_val, y_train
x_train_new = transformer.fit_transform( x_train )
x_val_new = transformer.transform( x_val )
return train_and_evaluate( y_train, x_train_new, y_val, x_val_new )
#
input_file = 'data/orig/numerai_training_data.csv'
d = pd.read_csv( input_file )
# indices for validation examples
iv = d.validation == 1
val = d[iv].copy()
train = d[~iv].copy()
# no need for validation flag anymore
train.drop( 'validation', axis = 1 , inplace = True )
val.drop( 'validation', axis = 1 , inplace = True )
# encode the categorical variable as one-hot, drop the original column afterwards
train_dummies = pd.get_dummies( train.c1 )
train_num = pd.concat(( train.drop( 'c1', axis = 1 ), train_dummies ), axis = 1 )
val_dummies = pd.get_dummies( val.c1 )
val_num = pd.concat(( val.drop( 'c1', axis = 1 ), val_dummies ), axis = 1 )
#
y_train = train_num.target.values
y_val = val_num.target.values
x_train = train_num.drop( 'target', axis = 1 )
x_val = val_num.drop( 'target', axis = 1 )
# train, predict, evaluate
auc, acc = train_and_evaluate( y_train, x_train, y_val, x_val )
print "No transformation"
print "AUC: {:.2%}, accuracy: {:.2%} \n".format( auc, acc )
# try different transformations for X
transformers = [ MaxAbsScaler(), MinMaxScaler(), RobustScaler(), StandardScaler(),
Normalizer( norm = 'l1' ), Normalizer( norm = 'l2' ), Normalizer( norm = 'max' ),
PolynomialFeatures() ]
poly_scaled = Pipeline([ ( 'poly', PolynomialFeatures()), ( 'scaler', MinMaxScaler()) ])
transformers.append( poly_scaled )
for transformer in transformers:
print transformer
auc, acc = transform_train_and_evaluate( transformer )
print "AUC: {:.2%}, accuracy: {:.2%} \n".format( auc, acc )
"""
No transformation
AUC: 52.67%, accuracy: 52.74%
MaxAbsScaler(copy=True)
AUC: 53.52%, accuracy: 52.46%
MinMaxScaler(copy=True, feature_range=(0, 1))
AUC: 53.52%, accuracy: 52.48%
RobustScaler(copy=True, with_centering=True, with_scaling=True)
AUC: 53.52%, accuracy: 52.45%
StandardScaler(copy=True, with_mean=True, with_std=True)
AUC: 53.52%, accuracy: 52.42%
Normalizer(copy=True, norm='l1')
AUC: 53.16%, accuracy: 53.19%
Normalizer(copy=True, norm='l2')
AUC: 52.92%, accuracy: 53.20%
Normalizer(copy=True, norm='max')
AUC: 53.02%, accuracy: 52.66%
PolynomialFeatures(degree=2, include_bias=True, interaction_only=False)
AUC: 53.25%, accuracy: 52.61%
Pipeline(steps=[
('poly', PolynomialFeatures(degree=2, include_bias=True, interaction_only=False)),
('scaler', MinMaxScaler(copy=True, feature_range=(0, 1)))])
AUC: 53.62%, accuracy: 53.04%
""" | zygmuntz/numer.ai | validate_lr.py | Python | bsd-3-clause | 3,382 | 0.041987 |
from flask import request, jsonify
from sql_classes import UrlList, Acl, UserGroup, User, Role
def _node_base_and_rest(path):
"""
Returns a tuple: (the substring of a path after the last nodeSeparator, the preceding path before it)
If 'base' includes its own baseSeparator - return only a string after it
So if a path is 'OU=Group,OU=Dept,OU=Company', the tuple result would be ('OU=Group,OU=Dept', 'Company')
"""
node_separator = ','
base_separator = '='
node_base = path[path.rfind(node_separator) + 1:]
if path.find(node_separator) != -1:
node_preceding = path[:len(path) - len(node_base) - 1]
else:
node_preceding = ''
return (node_preceding, node_base[node_base.find(base_separator) + 1:])
def _place_user_onto_tree(user, usertree, user_groups):
"""
Places a 'user' object on a 'usertree' object according to user's pathField string key
"""
curr_node = usertree
# Decompose 'OU=Group,OU=Dept,OU=Company' into ('OU=Group,OU=Dept', 'Company')
preceding, base = _node_base_and_rest(user['distinguishedName'])
full_node_path = ''
# Place all path groups onto a tree starting from the outermost
while base != '':
node_found = False
full_node_path = 'OU=' + base + (',' if full_node_path != '' else '') + full_node_path
# Search for corresponding base element on current hierarchy level
for obj in curr_node:
if obj.get('text') == None:
continue
if obj['text'] == base:
node_found = True
curr_node = obj['children']
break
# Create a new group node
if not node_found:
curr_node.append({
'id': 'usergroup_' + str(user_groups[full_node_path]),
'text': base,
'objectType': 'UserGroup',
'children': []
})
curr_node = curr_node[len(curr_node) - 1]['children']
preceding, base = _node_base_and_rest(preceding)
curr_node.append({
'id': 'user_' + str(user['id']),
'text': user['cn'],
'leaf': True,
'iconCls': 'x-fa fa-user' if user['status'] == 1 else 'x-fa fa-user-times',
'objectType': 'User'
})
def _sort_tree(subtree, sort_field):
"""
Sorts a subtree node by a sortField key of each element
"""
# Sort eval function, first by group property, then by text
subtree['children'] = sorted(
subtree['children'],
key=lambda obj: (1 if obj.get('children') == None else 0, obj[sort_field]))
for tree_elem in subtree['children']:
if tree_elem.get('children') != None:
_sort_tree(tree_elem, sort_field)
def _collapse_terminal_nodes(subtree):
"""
    Collapses tree nodes which don't contain subgroups, just tree leaves
"""
subtree_has_group_nodes = False
for tree_elem in subtree['children']:
if tree_elem.get('children') != None:
subtree_has_group_nodes = True
_collapse_terminal_nodes(tree_elem)
subtree['expanded'] = subtree_has_group_nodes
def _expand_all_nodes(subtree):
"""
Expand all level nodes
"""
for tree_elem in subtree['children']:
if tree_elem.get('children') != None:
_expand_all_nodes(tree_elem)
subtree['expanded'] = True
def _get_user_tree(current_user_properties, Session):
"""
Build user tree
"""
current_user_permissions = current_user_properties['user_permissions']
session = Session()
# Get all groups
query_result = session.query(UserGroup.id, UserGroup.distinguishedName).all()
user_groups = {}
for query_result_row in query_result:
user_groups[query_result_row.distinguishedName] = query_result_row.id
# Get all users if ViewUsers permission present
if next((item for item in current_user_permissions if item['permissionName'] == 'ViewUsers'), None) != None:
query_result = session.query(
User.id.label('user_id'), User.cn, User.status, UserGroup.id.label('usergroup_id'),
UserGroup.distinguishedName).join(UserGroup).filter(User.hidden == 0).all()
# Get just the requester otherwise
else:
query_result = session.query(
User.id.label('user_id'), User.cn, User.status, UserGroup.id.label('usergroup_id'),
UserGroup.distinguishedName).join(UserGroup).\
filter(User.id == current_user_properties['user_object']['id'], User.hidden == 0).all()
Session.remove()
# Future tree
user_tree = []
# Place each user on a tree
for query_result_row in query_result:
user_object = {
'id': query_result_row.user_id,
'distinguishedName': query_result_row.distinguishedName,
'status': query_result_row.status,
'cn': query_result_row.cn
}
_place_user_onto_tree(user_object, user_tree, user_groups)
user_tree = {
'id': 'usergroup_0',
'objectType': 'UserGroup',
'text': 'Пользователи',
'children': user_tree
}
# Sort tree elements
_sort_tree(user_tree, 'text')
# Collapse/expand tree nodes
if next((item for item in current_user_permissions if item['permissionName'] == 'ViewUsers'), None) != None:
_collapse_terminal_nodes(user_tree)
else:
_expand_all_nodes(user_tree)
return user_tree
def _get_url_lists(Session):
"""
Get URL lists
"""
session = Session()
# Get all urllists from DB
query_result = session.query(UrlList.id, UrlList.name, UrlList.whitelist).all()
Session.remove()
urllist_list = []
# Making a list of them
for query_result_row in query_result:
url_list_object = {
'id': 'urllist_' + str(query_result_row.id),
'text': query_result_row.name,
'leaf': True,
'iconCls': 'x-fa fa-unlock' if query_result_row.whitelist else 'x-fa fa-lock',
'objectType': 'UrlList'
}
urllist_list.append(url_list_object)
url_lists = {
'id': 'urllists',
'objectType': 'UrlLists',
'text': 'Списки URL',
'iconCls': 'x-fa fa-cog',
'children': urllist_list
}
# Sort tree elements
_sort_tree(url_lists, 'text')
return url_lists
def _get_acls(Session):
"""
Get ACLs
"""
session = Session()
# Get all access control lists from DB
query_result = session.query(Acl.id, Acl.name).all()
Session.remove()
acl_list = []
# Making a list of them
for query_result_row in query_result:
acl_object = {
'id': 'acl_' + str(query_result_row.id),
'text': query_result_row.name,
'leaf': True,
'iconCls': 'x-fa fa-filter',
'objectType': 'AclContents'
}
acl_list.append(acl_object)
acls = {
'id': 'acls',
'objectType': 'Acls',
'text': 'Списки доступа',
'iconCls': 'x-fa fa-cog',
'children': acl_list
}
# Sort tree elements
_sort_tree(acls, 'text')
return acls
def _get_roles(Session):
"""
Get user roles
"""
session = Session()
# Get all roles from DB
query_result = session.query(Role.id, Role.name).all()
Session.remove()
roles_list = []
# Making a list of them
for query_result_row in query_result:
role_object = {
'id': 'role_' + str(query_result_row.id),
'text': query_result_row.name,
'leaf': True,
'iconCls': 'x-fa fa-key',
'objectType': 'Role'
}
roles_list.append(role_object)
roles = {
'id': 'roles',
'objectType': 'Roles',
'text': 'Роли',
'iconCls': 'x-fa fa-cog',
'children': roles_list
}
# Sorting tree elements
_sort_tree(roles, 'text')
return roles
def select_tree(current_user_properties, node_name, Session):
url_lists_node = None
acls_node = None
roles_node = None
users_node = None
current_user_permissions = current_user_properties['user_permissions']
if next((item for item in current_user_permissions if item['permissionName'] == 'ViewSettings'), None) != None:
if node_name in ['root', 'urllists']:
url_lists_node = _get_url_lists(Session)
if node_name in ['root', 'acls']:
acls_node = _get_acls(Session)
if next((item for item in current_user_permissions if item['permissionName'] == 'ViewPermissions'), None) != None:
if node_name in ['root', 'roles']:
roles_node = _get_roles(Session)
if node_name in ['root']:
users_node = _get_user_tree(current_user_properties, Session)
if node_name == 'root':
children_list = []
if url_lists_node is not None:
children_list.append(url_lists_node)
if acls_node is not None:
children_list.append(acls_node)
if roles_node is not None:
children_list.append(roles_node)
if users_node is not None:
children_list.append(users_node)
result = {
'success': True,
'children': children_list
}
elif node_name == 'urllists':
if next((item for item in current_user_permissions if item['permissionName'] == 'ViewSettings'), None) != None:
result = {
'success': True,
'children': url_lists_node['children']
}
else:
return Response('Forbidden', 403)
elif node_name == 'acls':
if next((item for item in current_user_permissions if item['permissionName'] == 'ViewSettings'), None) != None:
result = {
'success': True,
'children': acls_node['children']
}
else:
return Response('Forbidden', 403)
elif node_name == 'roles':
if next((item for item in current_user_permissions if item['permissionName'] == 'ViewPermissions'), None) != None:
result = {
'success': True,
'children': roles_node['children']
}
else:
return Response('Forbidden', 403)
return jsonify(result)
| Aclz/Tentacles | python3/app/backend/maintree.py | Python | gpl-2.0 | 10,439 | 0.003172 |
"""
GatewayScanner is an abstraction for searching for KNX/IP devices on the local network.
* It walks through all network interfaces
* and sends UDP multicast search requests
* it returns the first found device
"""
from __future__ import annotations
import asyncio
from functools import partial
import logging
from typing import TYPE_CHECKING
import netifaces
from xknx.knxip import (
DIB,
HPAI,
DIBDeviceInformation,
DIBServiceFamily,
DIBSuppSVCFamilies,
KNXIPFrame,
KNXIPServiceType,
SearchRequest,
SearchResponse,
)
from xknx.telegram import IndividualAddress
from .transport import UDPTransport
if TYPE_CHECKING:
from xknx.xknx import XKNX
logger = logging.getLogger("xknx.log")
class GatewayDescriptor:
"""Used to return information about the discovered gateways."""
def __init__(
self,
ip_addr: str,
port: int,
local_ip: str = "",
local_interface: str = "",
name: str = "UNKNOWN",
supports_routing: bool = False,
supports_tunnelling: bool = False,
supports_tunnelling_tcp: bool = False,
individual_address: IndividualAddress | None = None,
):
"""Initialize GatewayDescriptor class."""
self.name = name
self.ip_addr = ip_addr
self.port = port
self.local_interface = local_interface
self.local_ip = local_ip
self.supports_routing = supports_routing
self.supports_tunnelling = supports_tunnelling
self.supports_tunnelling_tcp = supports_tunnelling_tcp
self.individual_address = individual_address
def parse_dibs(self, dibs: list[DIB]) -> None:
"""Parse DIBs for gateway information."""
for dib in dibs:
if isinstance(dib, DIBSuppSVCFamilies):
self.supports_routing = dib.supports(DIBServiceFamily.ROUTING)
if dib.supports(DIBServiceFamily.TUNNELING):
self.supports_tunnelling = True
self.supports_tunnelling_tcp = dib.supports(
DIBServiceFamily.TUNNELING, version=2
)
continue
if isinstance(dib, DIBDeviceInformation):
self.name = dib.name
self.individual_address = dib.individual_address
continue
def __repr__(self) -> str:
"""Return object as representation string."""
return (
"GatewayDescriptor(\n"
f" name={self.name},\n"
f" ip_addr={self.ip_addr},\n"
f" port={self.port},\n"
f" local_interface={self.local_interface},\n"
f" local_ip={self.local_ip},\n"
f" supports_routing={self.supports_routing},\n"
f" supports_tunnelling={self.supports_tunnelling},\n"
f" supports_tunnelling_tcp={self.supports_tunnelling_tcp},\n"
f" individual_address={self.individual_address}\n"
")"
)
def __str__(self) -> str:
"""Return object as readable string."""
return f"{self.individual_address} - {self.name} @ {self.ip_addr}:{self.port}"
class GatewayScanFilter:
"""Filter to limit gateway scan attempts.
If `tunnelling` and `routing` are set it is treated as AND.
KNX/IP devices that don't support `tunnelling` or `routing` aren't matched.
"""
def __init__(
self,
name: str | None = None,
tunnelling: bool | None = None,
tunnelling_tcp: bool | None = None,
routing: bool | None = None,
):
"""Initialize GatewayScanFilter class."""
self.name = name
self.tunnelling = tunnelling
self.tunnelling_tcp = tunnelling_tcp
self.routing = routing
def match(self, gateway: GatewayDescriptor) -> bool:
"""Check whether the device is a gateway and given GatewayDescriptor matches the filter."""
if self.name is not None and self.name != gateway.name:
return False
if (
self.tunnelling is not None
and self.tunnelling != gateway.supports_tunnelling
):
return False
if (
self.tunnelling_tcp is not None
and self.tunnelling_tcp != gateway.supports_tunnelling_tcp
):
return False
if self.routing is not None and self.routing != gateway.supports_routing:
return False
return (
gateway.supports_tunnelling
or gateway.supports_tunnelling_tcp
or gateway.supports_routing
)
class GatewayScanner:
"""Class for searching KNX/IP devices."""
def __init__(
self,
xknx: XKNX,
timeout_in_seconds: float = 4.0,
stop_on_found: int | None = 1,
scan_filter: GatewayScanFilter = GatewayScanFilter(),
):
"""Initialize GatewayScanner class."""
self.xknx = xknx
self.timeout_in_seconds = timeout_in_seconds
self.stop_on_found = stop_on_found
self.scan_filter = scan_filter
self.found_gateways: list[GatewayDescriptor] = []
self._udp_transports: list[UDPTransport] = []
self._response_received_event = asyncio.Event()
self._count_upper_bound = 0
"""Clean value of self.stop_on_found, computed when ``scan`` is called."""
async def scan(self) -> list[GatewayDescriptor]:
"""Scan and return a list of GatewayDescriptors on success."""
if self.stop_on_found is None:
self._count_upper_bound = 0
else:
self._count_upper_bound = max(0, self.stop_on_found)
await self._send_search_requests()
try:
await asyncio.wait_for(
self._response_received_event.wait(),
timeout=self.timeout_in_seconds,
)
except asyncio.TimeoutError:
pass
finally:
self._stop()
return self.found_gateways
def _stop(self) -> None:
"""Stop tearing down udp_transport."""
for udp_transport in self._udp_transports:
udp_transport.stop()
async def _send_search_requests(self) -> None:
"""Find all interfaces with active IPv4 connection to search for gateways."""
for interface in netifaces.interfaces():
try:
af_inet = netifaces.ifaddresses(interface)[netifaces.AF_INET]
ip_addr = af_inet[0]["addr"]
except KeyError:
logger.debug("No IPv4 address found on %s", interface)
continue
except ValueError as err:
# rare case when an interface disappears during search initialisation
logger.debug("Invalid interface %s: %s", interface, err)
continue
else:
await self._search_interface(interface, ip_addr)
async def _search_interface(self, interface: str, ip_addr: str) -> None:
"""Send a search request on a specific interface."""
logger.debug("Searching on %s / %s", interface, ip_addr)
udp_transport = UDPTransport(
self.xknx,
(ip_addr, 0),
(self.xknx.multicast_group, self.xknx.multicast_port),
multicast=True,
)
udp_transport.register_callback(
partial(self._response_rec_callback, interface=interface),
[KNXIPServiceType.SEARCH_RESPONSE],
)
await udp_transport.connect()
self._udp_transports.append(udp_transport)
discovery_endpoint = HPAI(
ip_addr=self.xknx.multicast_group, port=self.xknx.multicast_port
)
search_request = SearchRequest(self.xknx, discovery_endpoint=discovery_endpoint)
udp_transport.send(KNXIPFrame.init_from_body(search_request))
def _response_rec_callback(
self,
knx_ip_frame: KNXIPFrame,
source: HPAI,
udp_transport: UDPTransport,
interface: str = "",
) -> None:
"""Verify and handle knxipframe. Callback from internal udp_transport."""
if not isinstance(knx_ip_frame.body, SearchResponse):
logger.warning("Could not understand knxipframe")
return
gateway = GatewayDescriptor(
ip_addr=knx_ip_frame.body.control_endpoint.ip_addr,
port=knx_ip_frame.body.control_endpoint.port,
local_ip=udp_transport.local_addr[0],
local_interface=interface,
)
gateway.parse_dibs(knx_ip_frame.body.dibs)
logger.debug("Found KNX/IP device at %s: %s", source, repr(gateway))
self._add_found_gateway(gateway)
def _add_found_gateway(self, gateway: GatewayDescriptor) -> None:
if self.scan_filter.match(gateway) and not any(
_gateway.individual_address == gateway.individual_address
for _gateway in self.found_gateways
):
self.found_gateways.append(gateway)
if 0 < self._count_upper_bound <= len(self.found_gateways):
self._response_received_event.set()
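# Example usage (illustrative sketch only; it assumes an already created XKNX
# instance and a running asyncio event loop):
#
#     scanner = GatewayScanner(
#         xknx,
#         scan_filter=GatewayScanFilter(tunnelling=True),
#     )
#     gateways = await scanner.scan()
#     for gateway in gateways:
#         print(gateway)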
| XKNX/xknx | xknx/io/gateway_scanner.py | Python | mit | 9,132 | 0.000986 |
"""Offer state listening automation rules."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any
import voluptuous as vol
from homeassistant import exceptions
from homeassistant.const import CONF_ATTRIBUTE, CONF_FOR, CONF_PLATFORM, MATCH_ALL
from homeassistant.core import CALLBACK_TYPE, HassJob, HomeAssistant, State, callback
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.event import (
Event,
async_track_same_state,
async_track_state_change_event,
process_state_match,
)
# mypy: allow-incomplete-defs, allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
CONF_ENTITY_ID = "entity_id"
CONF_FROM = "from"
CONF_TO = "to"
BASE_SCHEMA = {
vol.Required(CONF_PLATFORM): "state",
vol.Required(CONF_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_FOR): cv.positive_time_period_template,
vol.Optional(CONF_ATTRIBUTE): cv.match_all,
}
TRIGGER_STATE_SCHEMA = vol.Schema(
{
**BASE_SCHEMA,
# These are str on purpose. Want to catch YAML conversions
vol.Optional(CONF_FROM): vol.Any(str, [str]),
vol.Optional(CONF_TO): vol.Any(str, [str]),
}
)
TRIGGER_ATTRIBUTE_SCHEMA = vol.Schema(
{
**BASE_SCHEMA,
vol.Optional(CONF_FROM): cv.match_all,
vol.Optional(CONF_TO): cv.match_all,
}
)
def TRIGGER_SCHEMA(value: Any) -> dict: # pylint: disable=invalid-name
"""Validate trigger."""
if not isinstance(value, dict):
raise vol.Invalid("Expected a dictionary")
# We use this approach instead of vol.Any because
# this gives better error messages.
if CONF_ATTRIBUTE in value:
return TRIGGER_ATTRIBUTE_SCHEMA(value)
return TRIGGER_STATE_SCHEMA(value)
async def async_attach_trigger(
hass: HomeAssistant,
config,
action,
automation_info,
*,
platform_type: str = "state",
) -> CALLBACK_TYPE:
"""Listen for state changes based on configuration."""
entity_id = config.get(CONF_ENTITY_ID)
from_state = config.get(CONF_FROM, MATCH_ALL)
to_state = config.get(CONF_TO, MATCH_ALL)
time_delta = config.get(CONF_FOR)
template.attach(hass, time_delta)
match_all = from_state == MATCH_ALL and to_state == MATCH_ALL
unsub_track_same = {}
period: dict[str, timedelta] = {}
match_from_state = process_state_match(from_state)
match_to_state = process_state_match(to_state)
attribute = config.get(CONF_ATTRIBUTE)
job = HassJob(action)
trigger_id = automation_info.get("trigger_id") if automation_info else None
_variables = {}
if automation_info:
_variables = automation_info.get("variables") or {}
@callback
def state_automation_listener(event: Event):
"""Listen for state changes and calls action."""
entity: str = event.data["entity_id"]
from_s: State | None = event.data.get("old_state")
to_s: State | None = event.data.get("new_state")
if from_s is None:
old_value = None
elif attribute is None:
old_value = from_s.state
else:
old_value = from_s.attributes.get(attribute)
if to_s is None:
new_value = None
elif attribute is None:
new_value = to_s.state
else:
new_value = to_s.attributes.get(attribute)
# When we listen for state changes with `match_all`, we
# will trigger even if just an attribute changes. When
# we listen to just an attribute, we should ignore all
# other attribute changes.
if attribute is not None and old_value == new_value:
return
if (
not match_from_state(old_value)
or not match_to_state(new_value)
or (not match_all and old_value == new_value)
):
return
@callback
def call_action():
"""Call action with right context."""
hass.async_run_hass_job(
job,
{
"trigger": {
"platform": platform_type,
"entity_id": entity,
"from_state": from_s,
"to_state": to_s,
"for": time_delta if not time_delta else period[entity],
"attribute": attribute,
"description": f"state of {entity}",
"id": trigger_id,
}
},
event.context,
)
if not time_delta:
call_action()
return
trigger_info = {
"trigger": {
"platform": "state",
"entity_id": entity,
"from_state": from_s,
"to_state": to_s,
}
}
variables = {**_variables, **trigger_info}
try:
period[entity] = cv.positive_time_period(
template.render_complex(time_delta, variables)
)
except (exceptions.TemplateError, vol.Invalid) as ex:
_LOGGER.error(
"Error rendering '%s' for template: %s", automation_info["name"], ex
)
return
def _check_same_state(_, _2, new_st: State):
if new_st is None:
return False
if attribute is None:
cur_value = new_st.state
else:
cur_value = new_st.attributes.get(attribute)
if CONF_FROM in config and CONF_TO not in config:
return cur_value != old_value
return cur_value == new_value
unsub_track_same[entity] = async_track_same_state(
hass,
period[entity],
call_action,
_check_same_state,
entity_ids=entity,
)
unsub = async_track_state_change_event(hass, entity_id, state_automation_listener)
@callback
def async_remove():
"""Remove state listeners async."""
unsub()
for async_remove in unsub_track_same.values():
async_remove()
unsub_track_same.clear()
return async_remove
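# For reference, a trigger handled by this module is configured in YAML along
# these lines (illustrative sketch only; the entity id and values are made up):
#
#   trigger:
#     platform: state
#     entity_id: binary_sensor.front_door
#     from: "off"
#     to: "on"
#     for: "00:01:30"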
| w1ll1am23/home-assistant | homeassistant/components/homeassistant/triggers/state.py | Python | apache-2.0 | 6,291 | 0.000795 |
from .mtproto_plain_sender import MtProtoPlainSender
from .authenticator import do_authentication
from .mtproto_sender import MtProtoSender
from .connection import Connection, ConnectionMode
| andr-04/Telethon | telethon/network/__init__.py | Python | mit | 191 | 0 |
url = "https://skyzh.github.io/social-network-site/1.html"
html_path = "wordcount/test/data/social.html"
devel = False
| SkyZH/ddcm-word-count | wordcount/test/const.py | Python | bsd-3-clause | 119 | 0 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == "__main__":
from build import *
addroot()
import pytools.build as b
b.build()
b.run('qtfract')
| rboman/progs | apps/fractal/cpp_qt/run.py | Python | apache-2.0 | 179 | 0 |
# Generated by Django 2.1.7 on 2019-04-11 06:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_auto_20180526_1702'),
]
operations = [
migrations.AlterField(
model_name='article',
name='image',
field=models.URLField(blank=True, verbose_name='image'),
),
migrations.AlterField(
model_name='blogcover',
name='image',
field=models.URLField(blank=True, verbose_name='image'),
),
]
| flavoi/diventi | diventi/blog/migrations/0008_auto_20190411_0806.py | Python | apache-2.0 | 574 | 0 |
from genetic import *
from image import *
from snn import *
import math, random
import numpy
def convert_binary(data, w, h, t):
ans = [[0 for x in xrange(w)] for x in xrange(h)]
for x in xrange(h):
for y in xrange(w):
if data[x][y] > t:
ans[x][y] = 1
else:
ans[x][y] = 0
return ans
def convert_mat(data, w, h, thresh):
ans = [[0 for x in xrange(w)] for x in xrange(h)]
for x in xrange(h):
for y in xrange(w):
if data[x][y] > thresh[x][y]:
ans[x][y] = 1
else:
ans[x][y] = 0
return ans
def shenon_entropy(data, w, h):
black, white = 0,0
for x in xrange(h):
for y in xrange(w):
if data[x][y]:
white += 1
else:
black += 1
total = w*h
prob_white = white / (total*1.0)
prob_black = black / (total*1.0)
formula = - (prob_black * math.log(prob_black,2) + prob_white * math.log(prob_white, 2))
return formula
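# Worked example (sketch): for a binarised image that is half white and half
# black, prob_white = prob_black = 0.5 and the entropy is
#   -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit,
# while a 75%/25% split gives roughly 0.811. Note that a completely uniform
# image (all black or all white) makes one probability zero, and math.log(0)
# raises a ValueError.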
def fitness(population, data, w, h):
fitness_dict = {}
for x in population:
ans = convert_binary(data, w, h, x)
entropy = shenon_entropy(ans, w, h)
if entropy in fitness_dict:
entropy = entropy + random.random()/1000
fitness_dict[entropy] = x
# imagewrite(ans, w, h)
# print entropy, x
return fitness_dict
def fitness_mat(population, data, w, h):
fitness_dict = {}
for x in population:
ans = convert_mat(data, w, h, x)
entropy = shenon_entropy( ans , w, h)
if entropy in fitness_dict:
entropy = entropy + random.random()/1000
fitness_dict[entropy] = x
return fitness_dict
def fitness_weight(population, w, h, t, ind):
fitness_dict = {}
for y, x in enumerate(population):
ans = convert_binary(x, w, h, t)
entropy = shenon_entropy(ans, w, h)
if entropy in fitness_dict:
entropy = entropy + random.random()/1000
fitness_dict[entropy] = ind[y]
return fitness_dict
# read image
pixel, w, h = imageread('test8.jpg')
# convert to snn
pixel_mat = snn_response(pixel, w, h, 10, 0.05)
d=3
def weight(x1,y1,x,y):
w = 10*math.exp(- ( ( math.pow((x1-x),2)+math.pow((y1-y),2) + math.pow(pixel[x1][y1]-pixel[x][y],2 )/d ) ) )
return w
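# Note: the weight above is a Gaussian-style similarity kernel. It decays with
# the squared spatial distance between the two pixels and with their squared
# intensity difference (scaled down by d), so nearby pixels with similar grey
# values get weights close to 10 while distant or dissimilar pixels get
# weights close to 0.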
def second_layer_locality():
second_layer = [[0 for x in xrange(w+2)] for x in xrange(h+2)]
for x in xrange(1,h-1):
for y in xrange(1,w-1):
temp = {}
for i in xrange(x-1, x+1):
for j in xrange(y-1, y+1):
temp[pixel_mat[i][j]] = weight(x,y,i,j)
second_layer[x][y] = response_time(temp)
second_layer = numpy.delete(second_layer, (0), axis=0)
second_layer = numpy.delete(second_layer, len(second_layer)-1, axis=0)
second_layer = numpy.delete(second_layer, (0), axis=1)
second_layer = numpy.delete(second_layer, len(second_layer[0])-1, axis=1)
return second_layer
def second_layer(w_mat):
second_layer = [[0 for x in xrange(w+2)] for x in xrange(h+2)]
for x in xrange(h):
for y in xrange(w):
second_layer[x][y] = response_time({pixel_mat[x][y]:w_mat[x][y]})
return second_layer
def median_filter(mat, w, h):
for x in xrange(h):
for y in xrange(w):
if mat[x][y] < numpy.median(mat):
mat[x][y]=0
return mat
# ==================== SNN Weight ====================
# print "Started "
# population1 = init_three_dim_mat(1, 3,3, 9)
# print population1
# ==================== SNN Matrix ====================
print "Starting GA ..."
population1 = init_mat(10,w,h)
print "Population created ..."
t = 5.0
final = []
for x in xrange(16):
print "Performing Iteration :", x+1
sl = []
for pop in population1:
temp = second_layer(pop)
sl.append(temp)
a = fitness_weight(sl, w, h, t , population1)
population1, m, max = crossover_mat( a, w, h )
print "Maximum fitness for this generation :",max
print "======================================"
sl = second_layer(m)
ans = convert_binary(sl, w, h, t)
final = sl
if x % 4 == 0:
imagesave(ans, w, h, 't6test8gen ' + str(x) + ' fit ' )
imagewrite(ans, w, h)
# print len(final)
# x = median_filter(final, w, h)
# print 'shannon entropy : ',shenon_entropy( x , w, h)
# imagewrite(x, w, h)
# if x % 5 == 0:
# # ans = convert_mat(pixel_mat, w, h, m)
# imagesave(ans, w, h, 'gen ' + str(x) + ' fit ' )
# for x in xrange(11):
# # a = fitness_mat(population1, pixel_mat, w, h)
# a = fitness_mat(population1, second_layer, w, h)
# population1, m, max = crossover_mat( a, w, h )
# print max
# if x % 5 == 0:
# # ans = convert_mat(pixel_mat, w, h, m)
# ans = convert_mat(second_layer, w, h, m)
# imagewrite(ans, w, h)
# imagesave(ans, w, h, 'gen ' + str(x) + ' fit ' )
# print "==========="
# ==================== SNN Int =======================
# imagewrite(ans, w, h)
# initialize population
# population1 = init_num(8)
# for x in xrange(11):
# a = fitness(population1, second_layer, w, h)
# population1, m, max = crossover_num( a )
# if x % 5 == 0:
# ans = convert_binary(second_layer, w, h, m)
# imagewrite(ans, w, h)
# imagesave(ans, w, h, 'gen ' + str(x) + ' fit ' + str(m))
# print "==========="
# print max | harshkothari410/snn-image-segmentation | imageSeg.py | Python | mit | 4,893 | 0.02943 |
# -----------------------------------------------------------
# compares the creation of sorted lists using the python
# bisect module, and the "usual" way
#
# (C) 2015 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email frank.hofmann@efho.de
# -----------------------------------------------------------
# import standard modules
import bisect, random, time
def sortListDefault():
# define empty list, and fill with 200000 randomized integers
sortedNumbers = []
for element in range(200000):
# choose a number between 0 and 1000
newNumber = random.randint(0, 1000)
# add number to list
#print ("adding %i to list ... " %newNumber)
sortedNumbers.append(newNumber)
# sort the list in-place
sortedNumbers.sort()
return
def sortListBisect():
# define empty list, and fill with 200000 randomized integers
sortedNumbers = []
for element in range(200000):
# choose a number between 0 and 1000
newNumber = random.randint(0, 1000)
#print ("adding %i to list ... " %newNumber)
# insert into sorted list
bisect.insort(sortedNumbers, newNumber)
return
# evaluate default sort
startTime1 = time.time()
listPosition = sortListDefault()
endTime1 = time.time()
# calculate and output interval time
seconds = endTime1 - startTime1
print ("default sort took %.8f seconds" % seconds)
# evaluate bisect sort
startTime1 = time.time()
listPosition = sortListBisect()
endTime1 = time.time()
# calculate and output interval time
seconds = endTime1 - startTime1
print ("bisect sort took %.8f seconds" % seconds)
| plasmashadow/training-python | time/sorted-list.py | Python | gpl-2.0 | 1,566 | 0.02235 |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Performs per host Linux Bridge configuration for Quantum.
# Based on the structure of the OpenVSwitch agent in the
# Quantum OpenVSwitch Plugin.
# @author: Sumit Naiksatam, Cisco Systems, Inc.
import os
import sys
import time
import eventlet
from oslo.config import cfg
import pyudev
from quantum.agent.linux import ip_lib
from quantum.agent.linux import utils
from quantum.agent import rpc as agent_rpc
from quantum.agent import securitygroups_rpc as sg_rpc
from quantum.common import config as logging_config
from quantum.common import constants
from quantum.common import topics
from quantum.common import utils as q_utils
from quantum import context
from quantum.openstack.common import log as logging
from quantum.openstack.common import loopingcall
from quantum.openstack.common.rpc import common as rpc_common
from quantum.openstack.common.rpc import dispatcher
from quantum.plugins.linuxbridge.common import config # noqa
from quantum.plugins.linuxbridge.common import constants as lconst
LOG = logging.getLogger(__name__)
BRIDGE_NAME_PREFIX = "brq"
TAP_INTERFACE_PREFIX = "tap"
BRIDGE_FS = "/sys/devices/virtual/net/"
BRIDGE_NAME_PLACEHOLDER = "bridge_name"
BRIDGE_INTERFACES_FS = BRIDGE_FS + BRIDGE_NAME_PLACEHOLDER + "/brif/"
DEVICE_NAME_PLACEHOLDER = "device_name"
BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + DEVICE_NAME_PLACEHOLDER + "/brport"
class LinuxBridgeManager:
def __init__(self, interface_mappings, root_helper):
self.interface_mappings = interface_mappings
self.root_helper = root_helper
self.ip = ip_lib.IPWrapper(self.root_helper)
self.udev = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(self.udev)
monitor.filter_by('net')
def device_exists(self, device):
"""Check if ethernet device exists."""
try:
utils.execute(['ip', 'link', 'show', 'dev', device],
root_helper=self.root_helper)
except RuntimeError:
return False
return True
def interface_exists_on_bridge(self, bridge, interface):
directory = '/sys/class/net/%s/brif' % bridge
for filename in os.listdir(directory):
if filename == interface:
return True
return False
def get_bridge_name(self, network_id):
if not network_id:
LOG.warning(_("Invalid Network ID, will lead to incorrect bridge"
"name"))
bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11]
return bridge_name
def get_subinterface_name(self, physical_interface, vlan_id):
if not vlan_id:
LOG.warning(_("Invalid VLAN ID, will lead to incorrect "
"subinterface name"))
subinterface_name = '%s.%s' % (physical_interface, vlan_id)
return subinterface_name
def get_tap_device_name(self, interface_id):
if not interface_id:
LOG.warning(_("Invalid Interface ID, will lead to incorrect "
"tap device name"))
tap_device_name = TAP_INTERFACE_PREFIX + interface_id[0:11]
return tap_device_name
def get_all_quantum_bridges(self):
quantum_bridge_list = []
bridge_list = os.listdir(BRIDGE_FS)
for bridge in bridge_list:
if bridge.startswith(BRIDGE_NAME_PREFIX):
quantum_bridge_list.append(bridge)
return quantum_bridge_list
def get_interfaces_on_bridge(self, bridge_name):
if self.device_exists(bridge_name):
bridge_interface_path = BRIDGE_INTERFACES_FS.replace(
BRIDGE_NAME_PLACEHOLDER, bridge_name)
return os.listdir(bridge_interface_path)
def get_bridge_for_tap_device(self, tap_device_name):
bridges = self.get_all_quantum_bridges()
for bridge in bridges:
interfaces = self.get_interfaces_on_bridge(bridge)
if tap_device_name in interfaces:
return bridge
return None
def is_device_on_bridge(self, device_name):
if not device_name:
return False
else:
bridge_port_path = BRIDGE_PORT_FS_FOR_DEVICE.replace(
DEVICE_NAME_PLACEHOLDER, device_name)
return os.path.exists(bridge_port_path)
def ensure_vlan_bridge(self, network_id, physical_interface, vlan_id):
"""Create a vlan and bridge unless they already exist."""
interface = self.ensure_vlan(physical_interface, vlan_id)
bridge_name = self.get_bridge_name(network_id)
if self.ensure_bridge(bridge_name, interface):
return interface
def get_interface_details(self, interface):
device = self.ip.device(interface)
ips = device.addr.list(scope='global')
# Update default gateway if necessary
gateway = device.route.get_gateway(scope='global')
return ips, gateway
def ensure_flat_bridge(self, network_id, physical_interface):
"""Create a non-vlan bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
ips, gateway = self.get_interface_details(physical_interface)
if self.ensure_bridge(bridge_name, physical_interface, ips, gateway):
return physical_interface
def ensure_local_bridge(self, network_id):
"""Create a local bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
return self.ensure_bridge(bridge_name)
def ensure_vlan(self, physical_interface, vlan_id):
"""Create a vlan unless it already exists."""
interface = self.get_subinterface_name(physical_interface, vlan_id)
if not self.device_exists(interface):
LOG.debug(_("Creating subinterface %(interface)s for "
"VLAN %(vlan_id)s on interface "
"%(physical_interface)s"),
{'interface': interface, 'vlan_id': vlan_id,
'physical_interface': physical_interface})
if utils.execute(['ip', 'link', 'add', 'link',
physical_interface,
'name', interface, 'type', 'vlan', 'id',
vlan_id], root_helper=self.root_helper):
return
if utils.execute(['ip', 'link', 'set',
interface, 'up'], root_helper=self.root_helper):
return
LOG.debug(_("Done creating subinterface %s"), interface)
return interface
def update_interface_ip_details(self, destination, source, ips,
gateway):
if ips or gateway:
dst_device = self.ip.device(destination)
src_device = self.ip.device(source)
# Append IP's to bridge if necessary
if ips:
for ip in ips:
dst_device.addr.add(ip_version=ip['ip_version'],
cidr=ip['cidr'],
broadcast=ip['broadcast'])
if gateway:
# Ensure that the gateway can be updated by changing the metric
metric = 100
if 'metric' in gateway:
metric = gateway['metric'] - 1
dst_device.route.add_gateway(gateway=gateway['gateway'],
metric=metric)
src_device.route.delete_gateway(gateway=gateway['gateway'])
# Remove IP's from interface
if ips:
for ip in ips:
src_device.addr.delete(ip_version=ip['ip_version'],
cidr=ip['cidr'])
def ensure_bridge(self, bridge_name, interface=None, ips=None,
gateway=None):
"""Create a bridge unless it already exists."""
if not self.device_exists(bridge_name):
LOG.debug(_("Starting bridge %(bridge_name)s for subinterface "
"%(interface)s"),
{'bridge_name': bridge_name, 'interface': interface})
if utils.execute(['brctl', 'addbr', bridge_name],
root_helper=self.root_helper):
return
if utils.execute(['brctl', 'setfd', bridge_name,
str(0)], root_helper=self.root_helper):
return
if utils.execute(['brctl', 'stp', bridge_name,
'off'], root_helper=self.root_helper):
return
if utils.execute(['ip', 'link', 'set', bridge_name,
'up'], root_helper=self.root_helper):
return
LOG.debug(_("Done starting bridge %(bridge_name)s for "
"subinterface %(interface)s"),
{'bridge_name': bridge_name, 'interface': interface})
if not interface:
return bridge_name
# Update IP info if necessary
self.update_interface_ip_details(bridge_name, interface, ips, gateway)
# Check if the interface is part of the bridge
if not self.interface_exists_on_bridge(bridge_name, interface):
try:
utils.execute(['brctl', 'addif', bridge_name, interface],
root_helper=self.root_helper)
except Exception as e:
LOG.error(_("Unable to add %(interface)s to %(bridge_name)s! "
"Exception: %(e)s"),
{'interface': interface, 'bridge_name': bridge_name,
'e': e})
return
return bridge_name
def ensure_physical_in_bridge(self, network_id,
network_type,
physical_network,
segmentation_id):
physical_interface = self.interface_mappings.get(physical_network)
if not physical_interface:
LOG.error(_("No mapping for physical network %s"),
physical_network)
return
if network_type == lconst.TYPE_FLAT:
return self.ensure_flat_bridge(network_id, physical_interface)
elif network_type == lconst.TYPE_VLAN:
return self.ensure_vlan_bridge(network_id, physical_interface,
segmentation_id)
else:
LOG.error(_("Unknown network_type %(network_type)s for network "
"%(network_id)s."), {network_type: network_type,
network_id: network_id})
def add_tap_interface(self, network_id, network_type, physical_network,
segmentation_id, tap_device_name):
"""Add tap interface.
If a VIF has been plugged into a network, this function will
add the corresponding tap device to the relevant bridge.
"""
if not self.device_exists(tap_device_name):
LOG.debug(_("Tap device: %s does not exist on "
"this host, skipped"), tap_device_name)
return False
bridge_name = self.get_bridge_name(network_id)
if network_type == lconst.TYPE_LOCAL:
self.ensure_local_bridge(network_id)
elif not self.ensure_physical_in_bridge(network_id,
network_type,
physical_network,
segmentation_id):
return False
# Check if device needs to be added to bridge
tap_device_in_bridge = self.get_bridge_for_tap_device(tap_device_name)
if not tap_device_in_bridge:
data = {'tap_device_name': tap_device_name,
'bridge_name': bridge_name}
msg = _("Adding device %(tap_device_name)s to bridge "
"%(bridge_name)s") % data
LOG.debug(msg)
if utils.execute(['brctl', 'addif', bridge_name, tap_device_name],
root_helper=self.root_helper):
return False
else:
data = {'tap_device_name': tap_device_name,
'bridge_name': bridge_name}
msg = _("%(tap_device_name)s already exists on bridge "
"%(bridge_name)s") % data
LOG.debug(msg)
return True
def add_interface(self, network_id, network_type, physical_network,
segmentation_id, port_id):
tap_device_name = self.get_tap_device_name(port_id)
return self.add_tap_interface(network_id, network_type,
physical_network, segmentation_id,
tap_device_name)
def delete_vlan_bridge(self, bridge_name):
if self.device_exists(bridge_name):
interfaces_on_bridge = self.get_interfaces_on_bridge(bridge_name)
for interface in interfaces_on_bridge:
self.remove_interface(bridge_name, interface)
for physical_interface in self.interface_mappings.itervalues():
if physical_interface == interface:
# This is a flat network => return IP's from bridge to
# interface
ips, gateway = self.get_interface_details(bridge_name)
self.update_interface_ip_details(interface,
bridge_name,
ips, gateway)
else:
if interface.startswith(physical_interface):
self.delete_vlan(interface)
LOG.debug(_("Deleting bridge %s"), bridge_name)
if utils.execute(['ip', 'link', 'set', bridge_name, 'down'],
root_helper=self.root_helper):
return
if utils.execute(['brctl', 'delbr', bridge_name],
root_helper=self.root_helper):
return
LOG.debug(_("Done deleting bridge %s"), bridge_name)
else:
LOG.error(_("Cannot delete bridge %s, does not exist"),
bridge_name)
def remove_interface(self, bridge_name, interface_name):
if self.device_exists(bridge_name):
if not self.is_device_on_bridge(interface_name):
return True
LOG.debug(_("Removing device %(interface_name)s from bridge "
"%(bridge_name)s"),
{'interface_name': interface_name,
'bridge_name': bridge_name})
if utils.execute(['brctl', 'delif', bridge_name, interface_name],
root_helper=self.root_helper):
return False
LOG.debug(_("Done removing device %(interface_name)s from bridge "
"%(bridge_name)s"),
{'interface_name': interface_name,
'bridge_name': bridge_name})
return True
else:
LOG.debug(_("Cannot remove device %(interface_name)s bridge "
"%(bridge_name)s does not exist"),
{'interface_name': interface_name,
'bridge_name': bridge_name})
return False
def delete_vlan(self, interface):
if self.device_exists(interface):
LOG.debug(_("Deleting subinterface %s for vlan"), interface)
if utils.execute(['ip', 'link', 'set', interface, 'down'],
root_helper=self.root_helper):
return
if utils.execute(['ip', 'link', 'delete', interface],
root_helper=self.root_helper):
return
LOG.debug(_("Done deleting subinterface %s"), interface)
def update_devices(self, registered_devices):
devices = self.udev_get_tap_devices()
if devices == registered_devices:
return
added = devices - registered_devices
removed = registered_devices - devices
return {'current': devices,
'added': added,
'removed': removed}
def udev_get_tap_devices(self):
devices = set()
for device in self.udev.list_devices(subsystem='net'):
name = self.udev_get_name(device)
if self.is_tap_device(name):
devices.add(name)
return devices
def is_tap_device(self, name):
return name.startswith(TAP_INTERFACE_PREFIX)
def udev_get_name(self, device):
return device.sys_name
class LinuxBridgeRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
RPC_API_VERSION = '1.1'
def __init__(self, context, agent):
self.context = context
self.agent = agent
self.sg_agent = agent
def network_delete(self, context, **kwargs):
LOG.debug(_("network_delete received"))
network_id = kwargs.get('network_id')
bridge_name = self.agent.br_mgr.get_bridge_name(network_id)
LOG.debug(_("Delete %s"), bridge_name)
self.agent.br_mgr.delete_vlan_bridge(bridge_name)
def port_update(self, context, **kwargs):
LOG.debug(_("port_update received"))
# Check port exists on node
port = kwargs.get('port')
tap_device_name = self.agent.br_mgr.get_tap_device_name(port['id'])
devices = self.agent.br_mgr.udev_get_tap_devices()
if tap_device_name not in devices:
return
if 'security_groups' in port:
self.sg_agent.refresh_firewall()
try:
if port['admin_state_up']:
network_type = kwargs.get('network_type')
if network_type:
segmentation_id = kwargs.get('segmentation_id')
else:
# compatibility with pre-Havana RPC vlan_id encoding
vlan_id = kwargs.get('vlan_id')
(network_type,
segmentation_id) = lconst.interpret_vlan_id(vlan_id)
physical_network = kwargs.get('physical_network')
# create the networking for the port
self.agent.br_mgr.add_interface(port['network_id'],
network_type,
physical_network,
segmentation_id,
port['id'])
# update plugin about port status
self.agent.plugin_rpc.update_device_up(self.context,
tap_device_name,
self.agent.agent_id)
else:
bridge_name = self.agent.br_mgr.get_bridge_name(
port['network_id'])
self.agent.br_mgr.remove_interface(bridge_name,
tap_device_name)
# update plugin about port status
self.agent.plugin_rpc.update_device_down(self.context,
tap_device_name,
self.agent.agent_id)
except rpc_common.Timeout:
LOG.error(_("RPC timeout while updating port %s"), port['id'])
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return dispatcher.RpcDispatcher([self])
class LinuxBridgePluginApi(agent_rpc.PluginApi,
sg_rpc.SecurityGroupServerRpcApiMixin):
pass
class LinuxBridgeQuantumAgentRPC(sg_rpc.SecurityGroupAgentRpcMixin):
def __init__(self, interface_mappings, polling_interval,
root_helper):
self.polling_interval = polling_interval
self.root_helper = root_helper
self.setup_linux_bridge(interface_mappings)
self.agent_state = {
'binary': 'quantum-linuxbridge-agent',
'host': cfg.CONF.host,
'topic': constants.L2_AGENT_TOPIC,
'configurations': interface_mappings,
'agent_type': constants.AGENT_TYPE_LINUXBRIDGE,
'start_flag': True}
self.setup_rpc(interface_mappings.values())
self.init_firewall()
def _report_state(self):
try:
devices = len(self.br_mgr.udev_get_tap_devices())
self.agent_state.get('configurations')['devices'] = devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception("Failed reporting state!")
def setup_rpc(self, physical_interfaces):
if physical_interfaces:
mac = utils.get_interface_mac(physical_interfaces[0])
else:
devices = ip_lib.IPWrapper(self.root_helper).get_devices(True)
if devices:
mac = utils.get_interface_mac(devices[0].name)
else:
LOG.error(_("Unable to obtain MAC address for unique ID. "
"Agent terminated!"))
exit(1)
self.agent_id = '%s%s' % ('lb', (mac.replace(":", "")))
LOG.info(_("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.plugin_rpc = LinuxBridgePluginApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
self.context = context.get_admin_context_without_session()
# Handle updates from service
self.callbacks = LinuxBridgeRpcCallbacks(self.context,
self)
self.dispatcher = self.callbacks.create_rpc_dispatcher()
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.dispatcher,
self.topic,
consumers)
report_interval = cfg.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def setup_linux_bridge(self, interface_mappings):
self.br_mgr = LinuxBridgeManager(interface_mappings, self.root_helper)
def remove_port_binding(self, network_id, interface_id):
bridge_name = self.br_mgr.get_bridge_name(network_id)
tap_device_name = self.br_mgr.get_tap_device_name(interface_id)
return self.br_mgr.remove_interface(bridge_name, tap_device_name)
def process_network_devices(self, device_info):
resync_a = False
resync_b = False
if 'added' in device_info:
resync_a = self.treat_devices_added(device_info['added'])
if 'removed' in device_info:
resync_b = self.treat_devices_removed(device_info['removed'])
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def treat_devices_added(self, devices):
resync = False
self.prepare_devices_filter(devices)
for device in devices:
LOG.debug(_("Port %s added"), device)
try:
details = self.plugin_rpc.get_device_details(self.context,
device,
self.agent_id)
except Exception as e:
LOG.debug(_("Unable to get port details for "
"%(device)s: %(e)s"),
{'device': device, 'e': e})
resync = True
continue
if 'port_id' in details:
LOG.info(_("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': details})
if details['admin_state_up']:
# create the networking for the port
network_type = details.get('network_type')
if network_type:
segmentation_id = details.get('segmentation_id')
else:
# compatibility with pre-Havana RPC vlan_id encoding
vlan_id = details.get('vlan_id')
(network_type,
segmentation_id) = lconst.interpret_vlan_id(vlan_id)
self.br_mgr.add_interface(details['network_id'],
network_type,
details['physical_network'],
segmentation_id,
details['port_id'])
else:
self.remove_port_binding(details['network_id'],
details['port_id'])
else:
LOG.info(_("Device %s not defined on plugin"), device)
return resync
def treat_devices_removed(self, devices):
resync = False
self.remove_devices_filter(devices)
for device in devices:
LOG.info(_("Attachment %s removed"), device)
try:
details = self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id)
except Exception as e:
LOG.debug(_("port_removed failed for %(device)s: %(e)s"),
{'device': device, 'e': e})
                resync = True
                continue
if details['exists']:
LOG.info(_("Port %s updated."), device)
# Nothing to do regarding local networking
else:
LOG.debug(_("Device %s not defined on plugin"), device)
return resync
def daemon_loop(self):
sync = True
devices = set()
LOG.info(_("LinuxBridge Agent RPC Daemon Started!"))
while True:
start = time.time()
if sync:
LOG.info(_("Agent out of sync with plugin!"))
devices.clear()
sync = False
device_info = {}
try:
device_info = self.br_mgr.update_devices(devices)
except Exception:
LOG.exception(_("Update devices failed"))
sync = True
try:
# notify plugin about device deltas
if device_info:
LOG.debug(_("Agent loop has new devices!"))
# If treat devices fails - indicates must resync with
# plugin
sync = self.process_network_devices(device_info)
devices = device_info['current']
except Exception:
LOG.exception(_("Error in agent loop. Devices info: %s"),
device_info)
sync = True
# sleep till end of polling interval
elapsed = (time.time() - start)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug(_("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!"),
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
def main():
eventlet.monkey_patch()
cfg.CONF(project='quantum')
logging_config.setup_logging(cfg.CONF)
try:
interface_mappings = q_utils.parse_mappings(
cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
except ValueError as e:
LOG.error(_("Parsing physical_interface_mappings failed: %s."
" Agent terminated!"), e)
sys.exit(1)
LOG.info(_("Interface mappings: %s"), interface_mappings)
polling_interval = cfg.CONF.AGENT.polling_interval
root_helper = cfg.CONF.AGENT.root_helper
plugin = LinuxBridgeQuantumAgentRPC(interface_mappings,
polling_interval,
root_helper)
LOG.info(_("Agent initialized successfully, now running... "))
plugin.daemon_loop()
sys.exit(0)
if __name__ == "__main__":
main()
| linvictor88/vse-lbaas-driver | quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py | Python | apache-2.0 | 29,895 | 0 |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config
from lib import num_split
table = None
mode = None
next_func = None
row_css = None
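# Typical usage from a page handler is a sketch like the following (it assumes
# the surrounding Check_MK web code provides the global `html` object and the
# `_()` translation helper, just as the functions below do; the table id and
# cell contents are made up):
#
#   table.begin("my_hosts", _("Hosts"), searchable=True, sortable=True)
#   for host_name, state in host_rows:
#       table.row()
#       table.cell(_("Hostname"), host_name)
#       table.cell(_("State"), state)
#   table.end()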
def begin(table_id=None, title=None, **kwargs):
global table, mode, next_func
# Use our pagename as table id if none is specified
if table_id == None:
table_id = html.myfile
try:
limit = config.table_row_limit
except:
limit = None
limit = kwargs.get('limit', limit)
if html.var('limit') == 'none' or kwargs.get("output_format", "html") != "html":
limit = None
table = {
"id" : table_id,
"title" : title,
"headers" : [],
"collect_headers" : False, # also: True, "finished"
"rows" : [],
"limit" : limit,
"omit_if_empty" : kwargs.get("omit_if_empty", False),
"omit_headers" : kwargs.get("omit_headers", False),
"searchable" : kwargs.get("searchable", True),
"sortable" : kwargs.get("sortable", True),
"next_header" : None,
"output_format" : kwargs.get("output_format", "html"), # possible: html, csv, fetch
}
if kwargs.get("empty_text"):
table["empty_text"] = kwargs["empty_text"]
else:
table["empty_text"] = _("No entries.")
if kwargs.get("help"):
table["help"] = kwargs["help"]
if kwargs.get("css"):
table["css"] = kwargs["css"]
html.plug()
mode = 'row'
next_func = None
def finish_previous():
global next_func
if next_func:
next_func(*next_args[0], **next_args[1])
next_func = None
def row(*posargs, **kwargs):
finish_previous()
global next_func, next_args
next_func = add_row
next_args = posargs, kwargs
def add_row(css=None, state=0, collect_headers=True, fixed=False):
if table["next_header"]:
table["rows"].append((table["next_header"], None, "header", True))
table["next_header"] = None
table["rows"].append(([], css, state, fixed))
if collect_headers:
if table["collect_headers"] == False:
table["collect_headers"] = True
elif table["collect_headers"] == True:
table["collect_headers"] = "finished"
elif not collect_headers and table["collect_headers"]:
table["collect_headers"] = False
# Intermediate title, shown as soon as there is a following row.
# We store the group headers in the list of rows, with css None
# and state set to "header"
def groupheader(title):
table["next_header"] = title
def cell(*posargs, **kwargs):
finish_previous()
global next_func, next_args
next_func = add_cell
next_args = posargs, kwargs
def add_cell(title="", text="", css=None, help=None, colspan=None, sortable=True):
if type(text) != unicode:
text = str(text)
htmlcode = text + html.drain()
if table["collect_headers"] == True:
# small helper to make sorting introducion easier. Cells which contain
# buttons are never sortable
if css and 'buttons' in css and sortable:
sortable = False
table["headers"].append((title, help, sortable))
table["rows"][-1][0].append((htmlcode, css, colspan))
def end():
global table
finish_previous()
html.unplug()
if not table:
return
# Output-Format "fetch" simply means that all data is being
# returned as Python-values to be rendered somewhere else.
if table["output_format"] == "fetch":
return table["headers"], table["rows"]
if table["output_format"] == "csv":
do_csv = True
csv_separator = html.var("csv_separator", ";")
else:
do_csv = False
if not table["rows"] and table["omit_if_empty"]:
table = None
return
#html.guitest_record_output("data_tables", table)
if table["title"] and not do_csv:
html.write("<h3>%s</h3>" % table["title"])
if table.get("help") and not do_csv:
html.help(table["help"])
if not table["rows"] and not do_csv:
html.write("<div class=info>%s</div>" % table["empty_text"])
table = None
return
table_id = table['id']
rows = table["rows"]
# Controls wether or not actions are available for a table
search_term = None
actions_enabled = (table["searchable"] or table["sortable"]) and not do_csv
if actions_enabled:
user_opts = config.user.load_file("tableoptions", {})
user_opts.setdefault(table_id, {})
table_opts = user_opts[table_id]
# Handle the initial visibility of the actions
actions_visible = user_opts[table_id].get('actions_visible', False)
if html.var('_%s_actions' % table_id):
actions_visible = html.var('_%s_actions' % table_id) == '1'
user_opts[table_id]['actions_visible'] = actions_visible
if html.var('_%s_reset' % table_id):
html.del_var('_%s_search' % table_id)
if 'search' in table_opts:
del table_opts['search'] # persist
if table["searchable"]:
# Search is always lower case -> case insensitive
search_term = html.get_unicode_input('_%s_search' % table_id, table_opts.get('search', '')).lower()
if search_term:
html.set_var('_%s_search' % table_id, search_term)
table_opts['search'] = search_term # persist
filtered_rows = []
for row, css, state, fixed in rows:
if state == "header" or fixed:
filtered_rows.append((row, css, state, fixed))
continue # skip filtering of headers or fixed rows
for cell_content, css_classes, colspan in row:
if search_term in cell_content.lower():
filtered_rows.append((row, css, state, fixed))
break # skip other cells when matched
rows = filtered_rows
if html.var('_%s_reset_sorting' % table_id):
html.del_var('_%s_sort' % table_id)
if 'sort' in table_opts:
del table_opts['sort'] # persist
if table["sortable"]:
# Now apply eventual sorting settings
sort = html.var('_%s_sort' % table_id, table_opts.get('sort'))
if sort != None:
html.set_var('_%s_sort' % table_id, sort)
table_opts['sort'] = sort # persist
sort_col, sort_reverse = map(int, sort.split(',', 1))
# remove and remind fixed rows, add to separate list
fixed_rows = []
for index, row in enumerate(rows[:]):
if row[3] == True:
rows.remove(row)
fixed_rows.append((index, row))
# Then use natural sorting to sort the list. Note: due to a
# change in the number of columns of a table in different software
# versions the cmp-function might fail. This is because the sorting
# column is persisted in a user file. So we ignore exceptions during
# sorting. This gives the user the chance to change the sorting and
# see the table in the first place.
try:
rows.sort(cmp=lambda a, b: cmp(num_split(a[0][sort_col][0]),
num_split(b[0][sort_col][0])),
reverse=sort_reverse==1)
except IndexError:
pass
# Now re-add the removed "fixed" rows to the list again
if fixed_rows:
for index, row in fixed_rows:
rows.insert(index, row)
num_rows_unlimited = len(rows)
num_cols = len(table["headers"])
# Apply limit after search / sorting etc.
limit = table['limit']
if limit is not None:
rows = rows[:limit]
if not do_csv:
html.write('<table class="data oddeven')
if "css" in table:
html.write(" %s" % table["css"])
html.write('">\n')
def render_headers():
if table["omit_headers"]:
return
if do_csv:
html.write(csv_separator.join([html.strip_tags(header) or "" for (header, help, sortable) in table["headers"]]) + "\n")
else:
html.write(" <tr>")
first_col = True
for nr, (header, help, sortable) in enumerate(table["headers"]):
text = header
if help:
header = '<span title="%s">%s</span>' % (html.attrencode(help), header)
if not table["sortable"] or not sortable:
html.write(" <th>")
else:
reverse = 0
sort = html.var('_%s_sort' % table_id)
if sort:
sort_col, sort_reverse = map(int, sort.split(',', 1))
if sort_col == nr:
reverse = sort_reverse == 0 and 1 or 0
html.write(" <th class=\"sort\" title=\"%s\" onclick=\"location.href='%s'\">" %
(_('Sort by %s') % text, html.makeactionuri([('_%s_sort' % table_id, '%d,%d' % (nr, reverse))])))
# Add the table action link
if first_col:
first_col = False
if actions_enabled:
if not header:
header = " " # Fixes layout problem with white triangle
if actions_visible:
state = '0'
help = _('Hide table actions')
img = 'table_actions_on'
else:
state = '1'
help = _('Display table actions')
img = 'table_actions_off'
html.write("<div class=\"toggle_actions\">")
html.icon_button(html.makeuri([('_%s_actions' % table_id, state)]),
help, img, cssclass = 'toggle_actions')
html.write("<span>%s</span>" % header)
html.write("</div>")
else:
html.write(header)
else:
html.write(header)
html.write("</th>\n")
html.write(" </tr>\n")
# If we have no group headers then paint the headers now
if table["rows"] and table["rows"][0][2] != "header":
render_headers()
if actions_enabled and actions_visible and not do_csv:
html.write('<tr class="data even0 actions"><td colspan=%d>' % num_cols)
if not html.in_form():
html.begin_form("%s_actions" % table_id)
if table["searchable"]:
html.write("<div class=search>")
html.text_input("_%s_search" % table_id)
html.button("_%s_submit" % table_id, _("Search"))
html.button("_%s_reset" % table_id, _("Reset search"))
html.set_focus("_%s_search" % table_id)
html.write("</div>\n")
if html.has_var('_%s_sort' % table_id):
html.write("<div class=sort>")
html.button("_%s_reset_sorting" % table_id, _("Reset sorting"))
html.write("</div>\n")
if not html.in_form():
html.begin_form("%s_actions" % table_id)
html.hidden_fields()
html.end_form()
html.write('</tr>')
odd = "even"
for nr, (row, css, state, fixed) in enumerate(rows):
if do_csv:
html.write(csv_separator.join([html.strip_tags(cell_content) for cell_content, css_classes, colspan in row ]))
html.write("\n")
else: # HTML output
# Intermediate header
if state == "header":
# Show the header only, if at least one (non-header) row follows
if nr < len(rows) - 1 and rows[nr+1][2] != "header":
html.write(' <tr class="groupheader"><td colspan=%d><br>%s</td></tr>' % (num_cols, row))
odd = "even"
render_headers()
continue
odd = odd == "odd" and "even" or "odd"
html.write(' <tr class="data %s%d' % (odd, state))
if css:
html.write(' %s' % css)
html.write('">\n')
for cell_content, css_classes, colspan in row:
colspan = colspan and (' colspan="%d"' % colspan) or ''
html.write(" <td%s%s>" % (css_classes and (" class='%s'" % css_classes) or "", colspan))
html.write(cell_content)
html.write("</td>\n")
html.write("</tr>\n")
if table["searchable"] and search_term and not rows and not do_csv:
html.write('<tr class="data odd0 no_match"><td colspan=%d>%s</td></tr>' %
(num_cols, _('Found no matching rows. Please try another search term.')))
if not do_csv:
html.write("</table>\n")
if limit is not None and num_rows_unlimited > limit and not do_csv:
html.message(_('This table is limited to show only %d of %d rows. '
'Click <a href="%s">here</a> to disable the limitation.') %
(limit, num_rows_unlimited, html.makeuri([('limit', 'none')])))
if actions_enabled and not do_csv:
config.user.save_file("tableoptions", user_opts)
table = None
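# A minimal sketch (not part of the original module) of the row structure consumed by the
# rendering code above. Each entry of table["rows"] is a 4-tuple (row, css, state, fixed):
# for data rows, `row` is a list of (cell_content, css_classes, colspan) cell tuples; for
# intermediate group headers (state == "header"), `row` is the title string rendered into
# the groupheader <tr>. `css` is an extra CSS class for the <tr>, `state` is the row state
# number, and `fixed` marks rows excluded from searching and sorting. Hypothetical values:
#
#     example_rows = [
#         ("Linux hosts", None, "header", False),  # group header: row is the title string
#         ([("host2", "name", None), ("3", "number", None)], None, 0, False),
#         ([("host10", "name", None), ("1", "number", None)], None, 0, False),
#     ]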
| ypid-bot/check_mk | web/htdocs/table.py | Python | gpl-2.0 | 15,132 | 0.005155 |
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
from apps.referencepool.models import *
import requests
import json
import os
__author__ = 'fki'
class Command(BaseCommand):
help = 'Harvest external resources to fill the Reference Pool'
def handle(self, *args, **options):
if args:
for a in args:
try:
func = getattr(self, '_harvest_' + a)
                except AttributeError:
                    self.stdout.write('No such Harvester')
                    continue
                func()
else:
self.stdout.write('Harvesting everything')
for f in dir(self):
if f.startswith('_harvest_'):
getattr(self, f)()
def _harvest_languages(self):
self.stdout.write('Harvesting Languages')
url = 'http://data.okfn.org/data/core/language-codes/r/language-codes.json'
result = json.loads((requests.get(url)).text)
for lang in result:
try:
l = Language(code=lang['alpha2'], title=lang['English'])
l.save()
except IntegrityError:
pass
self.stdout.write('Successfully Harvested Languages')
def _harvest_countries(self):
self.stdout.write('Harvesting Countries')
url = 'http://data.okfn.org/data/core/country-codes/r/country-codes.json'
result = json.loads((requests.get(url)).text)
country_class = DataClass.objects.get(title='Country')
for country in result:
try:
c = Individual(data_class=country_class, title=country['name'],
code=country['ISO3166-1-Alpha-3'])
c.save()
except IntegrityError:
pass
def _harvest_external_resources(self):
self.stdout.write('Harvesting External Resources')
result = self._file_to_json('../../resources/open-data-monitor.json')
for resource in result:
try:
name = result[resource]['col_1'].replace('_', '.').replace('-',
'.')
url = 'http://' + name
r = ExternalResource(title=name, url=url, api_url=url)
r.save()
except IntegrityError:
pass
def _file_to_json(self, rel_path):
dir = os.path.dirname(__file__)
abs_path = os.path.join(dir, rel_path)
with open(abs_path, "r") as file:
data = json.load(file)
return data
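# Usage sketch (illustrative; only the command name 'harvest' -- derived from this module's
# filename -- and Django's call_command are assumed real). Positional arguments are
# dispatched to the matching _harvest_* method; with no arguments every harvester runs:
#
#     python manage.py harvest languages countries
#
def _example_invocation():  # illustrative only, never called by Django
    from django.core.management import call_command
    call_command('harvest', 'languages')  # runs Command._harvest_languages
    call_command('harvest')               # runs every _harvest_* method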
| policycompass/policycompass-services | apps/referencepool/management/commands/harvest.py | Python | agpl-3.0 | 2,610 | 0.001149 |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all ad units in a publisher ad client.
To get ad clients, run get_all_ad_clients_for_publisher.py.
Tags: accounts.adunits.list
"""
__author__ = 'jalc@google.com (Jose Alcerreca)'
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'account_id',
help='The ID of the pub account on which the ad unit exists')
argparser.add_argument(
'ad_client_id',
help='The ID of the ad client on which the ad unit exists')
MAX_PAGE_SIZE = 50
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'adsensehost', 'v4.1', __doc__, __file__, parents=[argparser])
ad_client_id = flags.ad_client_id
account_id = flags.account_id
try:
# Retrieve ad unit list in pages and display data as we receive it.
request = service.accounts().adunits().list(adClientId=ad_client_id,
accountId=account_id,
maxResults=MAX_PAGE_SIZE)
while request is not None:
result = request.execute()
if 'items' in result:
ad_units = result['items']
for ad_unit in ad_units:
print ('Ad unit with ID "%s", code "%s", name "%s" and status "%s" '
'was found.' %
(ad_unit['id'], ad_unit['code'], ad_unit['name'],
ad_unit['status']))
request = service.accounts().adunits().list_next(request, result)
else:
print 'No ad units were found.'
break
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
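# Assumed command-line invocation (the two positional arguments are defined by the
# argparser above; the IDs shown are placeholders, and sample_tools.init also accepts
# the standard oauth2client authorization flags):
#
#     python get_all_ad_units_for_publisher.py pub-1234567890123456 ca-host-pub-1234567890123456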
| iLotus/googleads-adsensehost-examples | python/v4.x/get_all_ad_units_for_publisher.py | Python | apache-2.0 | 2,497 | 0.005607 |
import sys, os
from main import app
from flask_script import Manager, Server, Command, Option
from flask_security.utils import encrypt_password
from models import db, populate_db, StatusData, GrowthData, LifeData, GrowthDataAverages
import random
from datetime import date, datetime
import pandas
from tqdm import tqdm
from dateutil import parser
from sqlalchemy import desc
class ResetDB(Command):
"""Drops all tables and recreates them"""
def run(self, **kwargs):
db.drop_all()
db.create_all()
class PopulateDB(Command):
    """Fills in predefined data into DB"""
    option_list = (
        Option('--file', '-f', dest='user_data_file', default='scripts/user_data.csv'),
    )
def run(self, user_data_file, **kwargs):
        populate_db()
        print("Complete")
def parse_float(val):
    try:
        return float(val)
    except ValueError:
        if val in (".", ""):
            return None
        raise ValueError("Cannot parse float from %r" % val)
class ConvertAll(Command):
def run(self):
for animal in GrowthData.query.all():
animal.weight = animal.weight*0.453592 if isinstance(animal.weight, (int, float)) else None
animal.height = animal.height*2.54 if isinstance(animal.height, (int, float)) else None
animal.lifetime_adg = animal.lifetime_adg*0.453592 if isinstance(animal.lifetime_adg, (int, float)) else None
animal.monthly_adg = animal.monthly_adg*0.453592 if isinstance(animal.monthly_adg, (int, float)) else None
animal.monthly_height_change = (animal.monthly_height_change/10) * 25.4 if isinstance(animal.monthly_height_change, (int, float)) else None
db.session.commit()
print("GrowthData converted")
for animal in LifeData.query.all():
animal.bwt = animal.bwt*0.453592 if isinstance(animal.bwt, (int, float)) else None
db.session.commit()
print("LifeData converted")
class ProcessLifeData(Command):
option_list = (
Option('--file', '-f', dest='full_filename', default='data/lifeData.csv'),
)
def run(self, full_filename):
data = pandas.read_csv(full_filename)
data = data.dropna()
        # Drop the leading rows that are not data
data = data.ix[4:]
# Labels the columns as follows (so columns MUST BE IN THIS ORDER)
data.columns = ['FID', 'EID', 'Breed', 'DOB']
#app.logger.info(data)
for index, row in data.iterrows():
life = LifeData.query.filter_by(fid=row['FID']).first()
if life is None:
life = LifeData(fid=row['FID'], eid=row['EID'], breed=row['Breed'], dob=parser.parse(row['DOB']))
db.session.add(life)
else:
life.dob=parser.parse(row['DOB'])
life.breed=row['Breed']
life.eid=row['EID']
# Add won't happen without it
db.session.commit()
class ProcessGrowthData(Command):
option_list = (
Option('--file', '-f', dest='full_filename', default='data/growthData.xlsx'),
)
def run(self, full_filename):
data = pandas.read_excel(full_filename)
data = data.set_index('Index')
status_data, growth_data_old, growth_data_new = data.ix[:, :6], data.ix[:, 6:158], data.ix[:, 158:]
# print(growth_data_old.index)
for index, row in tqdm(status_data.iterrows()):
status = StatusData(fid=int(index), status=row['Status'], status_date=None if pandas.isnull(row['Date']) else row['Date'])
db.session.add(status)
life = LifeData.query.filter_by(fid=int(index)).first()
if life is None:
life = LifeData(fid=int(index), bwt=row['BWt'], dob=row['Birthdate'], breed=row['Brd'], estimate=True if type(row['Estimate']) is unicode else False)
db.session.add(life)
else:
if life.bwt is None:
life.bwt = row['BWt']
life.dob = row['Birthdate']
life.breed = row['Brd']
life.estimate = True if type(row['Estimate']) is unicode else False
db.session.commit()
growth_data_old.columns = pandas.MultiIndex.from_tuples([(c[:-1], c[-1]) for c in growth_data_old.columns])
for row_name, row in tqdm(growth_data_old.iterrows()):
row = row.where((pandas.notnull(row)), None)
for date_name, weight_data in row.unstack().iterrows():
weight = weight_data['W'] if type(weight_data['W']) == int or type(weight_data['W']) == float else None
date = weight_data['D'] if type(weight_data['D']) != pandas.tslib.NaTType else None
location = weight_data['L'] if type(weight_data['L']) != pandas.tslib.NaTType else None
height = weight_data['H'] if type(weight_data['H']) != pandas.tslib.NaTType else None
# print(row_name, weight, date, location, height)
if weight is None:
continue
measurement = GrowthData.new(fid=int(row_name), date=date, weight=weight, height=parse_float(height) if height is not None else height, location=location)
db.session.add(measurement)
# print("Adding weighing "+str(row_name)+", "+date_name+":", weight_data.get('D', date_name), weight_data['L'], weight_data['W'], weight_data['H'])
db.session.commit()
growth_data_new.columns = pandas.MultiIndex.from_tuples([(c[:-1], c[-1]) for c in growth_data_new.columns])
for row_name, row in tqdm(growth_data_new.iterrows()):
row = row.where((pandas.notnull(row)), None)
for date_name, weight_data in row.unstack().iterrows():
date = datetime.strptime(date_name, '%y%m%d').date()
weight = weight_data['W'] if type(weight_data['W']) == int or type(weight_data['W']) == float else None
location = weight_data['L'] if type(weight_data['L']) != pandas.tslib.NaTType else None
bcs = weight_data['C']
# print(type(bcs))
height = weight_data['H'] if type(weight_data['H']) != pandas.tslib.NaTType else None
#print(row_name, weight, date, location, height)
if weight is None:
continue
measurement = GrowthData.new(fid=int(row_name), bcs=parse_float(bcs) if bcs is not None else bcs, location=location, date=date, weight=weight, height=parse_float(height) if height is not None else height)
db.session.add(measurement)
# print("Adding weighing "+str(row_name)+", "+date_name+":", weight_data['C'], weight_data.get('D', date_name), weight_data['L'], weight_data['W'], weight_data['H'])
db.session.commit()
class CalculateGrowthAverageData(Command):
def run(self):
fids = db.session.query(GrowthData.fid).distinct()
for fid in tqdm(fids):
fid_data = db.session.query(GrowthData).filter(GrowthData.fid == fid.fid).order_by(desc(GrowthData.date)).all()
today = fid_data[0]
growth_averages = GrowthDataAverages.query.filter_by(fid=int(fid.fid)).first()
life_data = LifeData.query.filter_by(fid=int(fid.fid)).first()
if len(fid_data) > 1:
previous = fid_data[1]
time_dif = today.date - previous.date
time_dif = time_dif.days
monthly_weight_dif = float(today.weight - previous.weight)
monthly_adg = float(monthly_weight_dif/time_dif)
if previous.height is not None and today.height is not None:
monthly_height_dif = float(today.height - previous.height)
monthly_height_change = float(monthly_height_dif/time_dif)
else:
monthly_height_change = None
age = today.date - life_data.dob
age = age.days
lifetime_weight_dif = float(today.weight - life_data.bwt)
lifetime_adg = float(lifetime_weight_dif/age)
if growth_averages is None:
growth_averages = GrowthDataAverages(fid=int(fid.fid), most_recent_date=today.date, monthly_adg=monthly_adg, age=age, lifetime_adg=lifetime_adg, monthly_height_change=monthly_height_change)
db.session.add(growth_averages)
else:
growth_averages.most_recent_date = today.date
growth_averages.monthly_adg = monthly_adg
growth_averages.age = age
growth_averages.lifetime_adg = lifetime_adg
growth_averages.monthly_height_change = monthly_height_change
else:
time_dif = 0
db.session.commit()
class DisplayDB(Command):
def run(self, **kwargs):
from sqlalchemy import MetaData
from sqlalchemy_schemadisplay3 import create_schema_graph
connection = app.config['SQLALCHEMY_DATABASE_URI']
filename='dbschema.png'
graph = create_schema_graph(metadata=MetaData(connection),
show_datatypes=False, # The image would get nasty big if we'd show the datatypes
show_indexes=False, # ditto for indexes
rankdir='BT', # From left to right (instead of top to bottom)
font='Helvetica',
concentrate=False # Don't try to join the relation lines together
)
graph.write_png(filename) # write out the file
manager = Manager(app)
# Server commands context
#manager.add_command("secure", Server(ssl_context=context))
# Database Commands
manager.add_command("reset_db", ResetDB())
manager.add_command("populate_db", PopulateDB())
manager.add_command("display_db", DisplayDB())
manager.add_command("process_growth_data", ProcessGrowthData())
manager.add_command("process_life_data", ProcessLifeData())
manager.add_command("convert_all", ConvertAll())
manager.add_command("calculate_growth_averages", CalculateGrowthAverageData())
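# Typical workflow with the commands registered above (assumed ordering, based on the
# data dependencies between commands; the file paths are the defaults declared in the
# commands' option lists):
#
#     python manage.py reset_db
#     python manage.py process_life_data -f data/lifeData.csv
#     python manage.py process_growth_data -f data/growthData.xlsx
#     python manage.py calculate_growth_averages
#     python manage.py convert_all   # optional: convert stored imperial values to metric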
if __name__ == "__main__":
manager.run()
| ElBell/VTDairyDB | manage.py | Python | gpl-3.0 | 10,171 | 0.005309 |
# coding: utf-8
# Copyright 2015 rpaas authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from rpaas import plan, storage
class MongoDBStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage = storage.MongoDBStorage()
self.storage.db[self.storage.quota_collection].remove()
self.storage.db[self.storage.plans_collection].remove()
self.storage.db[self.storage.plans_collection].insert(
{"_id": "small",
"description": "some cool plan",
"config": {"serviceofferingid": "abcdef123456"}}
)
self.storage.db[self.storage.plans_collection].insert(
{"_id": "huge",
"description": "some cool huge plan",
"config": {"serviceofferingid": "abcdef123459"}}
)
def test_set_team_quota(self):
q = self.storage.set_team_quota("myteam", 8)
used, quota = self.storage.find_team_quota("myteam")
self.assertEqual([], used)
self.assertEqual(8, quota)
self.assertEqual(used, q["used"])
self.assertEqual(quota, q["quota"])
def test_list_plans(self):
plans = self.storage.list_plans()
expected = [
{"name": "small", "description": "some cool plan",
"config": {"serviceofferingid": "abcdef123456"}},
{"name": "huge", "description": "some cool huge plan",
"config": {"serviceofferingid": "abcdef123459"}},
]
self.assertEqual(expected, [p.to_dict() for p in plans])
def test_find_plan(self):
plan = self.storage.find_plan("small")
expected = {"name": "small", "description": "some cool plan",
"config": {"serviceofferingid": "abcdef123456"}}
self.assertEqual(expected, plan.to_dict())
with self.assertRaises(storage.PlanNotFoundError):
self.storage.find_plan("something that doesn't exist")
def test_store_plan(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
got_plan = self.storage.find_plan(p.name)
self.assertEqual(p.to_dict(), got_plan.to_dict())
def test_store_plan_duplicate(self):
p = plan.Plan(name="small", description="small thing",
config={"serviceofferingid": "abcdef123"})
with self.assertRaises(storage.DuplicateError):
self.storage.store_plan(p)
def test_update_plan(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
self.storage.update_plan(p.name, description="wat?",
config={"serviceofferingid": "abcdef123459"})
p = self.storage.find_plan(p.name)
self.assertEqual("super_huge", p.name)
self.assertEqual("wat?", p.description)
self.assertEqual({"serviceofferingid": "abcdef123459"}, p.config)
def test_update_plan_partial(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
self.storage.update_plan(p.name, config={"serviceofferingid": "abcdef123459"})
p = self.storage.find_plan(p.name)
self.assertEqual("super_huge", p.name)
self.assertEqual("very huge thing", p.description)
self.assertEqual({"serviceofferingid": "abcdef123459"}, p.config)
def test_update_plan_not_found(self):
with self.assertRaises(storage.PlanNotFoundError):
self.storage.update_plan("my_plan", description="woot")
def test_delete_plan(self):
p = plan.Plan(name="super_huge", description="very huge thing",
config={"serviceofferingid": "abcdef123"})
self.storage.store_plan(p)
self.storage.delete_plan(p.name)
with self.assertRaises(storage.PlanNotFoundError):
self.storage.find_plan(p.name)
def test_delete_plan_not_found(self):
with self.assertRaises(storage.PlanNotFoundError):
self.storage.delete_plan("super_huge")
def test_instance_metadata_storage(self):
self.storage.store_instance_metadata("myinstance", plan="small")
inst_metadata = self.storage.find_instance_metadata("myinstance")
self.assertEqual({"_id": "myinstance",
"plan": "small"}, inst_metadata)
self.storage.store_instance_metadata("myinstance", plan="medium")
inst_metadata = self.storage.find_instance_metadata("myinstance")
self.assertEqual({"_id": "myinstance", "plan": "medium"}, inst_metadata)
self.storage.remove_instance_metadata("myinstance")
inst_metadata = self.storage.find_instance_metadata("myinstance")
self.assertIsNone(inst_metadata)
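# Running these tests requires a reachable MongoDB instance, since setUp() instantiates
# storage.MongoDBStorage() directly (connection details presumably come from that module's
# defaults or environment). Assumed invocation from the repository root:
#
#     python -m unittest tests.test_storage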
| vfiebig/rpaas | tests/test_storage.py | Python | bsd-3-clause | 5,053 | 0.000396 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/vest/shared_vest_s03.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","vest_s03")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
 | anhstudios/swganh | data/scripts/templates/object/tangible/wearables/vest/shared_vest_s03.py | Python | mit | 450 | 0.046667 |
import urllib
from urlparse import urlparse
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATICFILES_URL setting, and serves those files.
"""
def __init__(self, application, media_dir=None):
self.application = application
if media_dir:
self.media_dir = media_dir
else:
self.media_dir = self.get_media_dir()
self.media_url = urlparse(self.get_media_url())
if settings.DEBUG:
utils.check_settings()
super(StaticFilesHandler, self).__init__()
def get_media_dir(self):
return settings.STATICFILES_ROOT
def get_media_url(self):
return settings.STATICFILES_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the media_url
* the request's path isn't under the media path (or equal)
* settings.DEBUG isn't True
"""
return (self.media_url[2] != path and
path.startswith(self.media_url[2]) and not self.media_url[1])
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
The passed URL is assumed to begin with ``media_url``. If the
resultant file path is outside the media directory, then a ValueError
is raised.
"""
# Remove ``media_url``.
relative_url = url[len(self.media_url[2]):]
return urllib.url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(environ['PATH_INFO']):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
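# Minimal usage sketch (not part of Django itself): wrap a WSGI application so that
# requests under STATICFILES_URL are served by this handler during development. Only
# WSGIHandler and StaticFilesHandler are taken from the code above; the assignment name
# is illustrative:
#
#     application = StaticFilesHandler(WSGIHandler())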
| writefaruq/lionface-app | django/contrib/staticfiles/handlers.py | Python | bsd-3-clause | 2,733 | 0.001098 |
import json
import hashlib
import uuid
import datetime
from valley.exceptions import ValidationException
from kev.utils import get_doc_type
from kev.query import SortingParam
class DocDB(object):
db_class = None
indexer_class = None
backend_id = None
doc_id_string = '{doc_id}:id:{backend_id}:{class_name}'
index_id_string = ''
def save(self, doc_obj):
raise NotImplementedError
def delete(self, doc_obj):
raise NotImplementedError
def get(self, doc_obj, doc_id):
raise NotImplementedError
def parse_id(self, doc_id):
try:
return doc_id.split(':')[0]
except TypeError:
return doc_id.decode().split(':')[0]
def create_pk(self, doc_obj,doc):
doc = doc.copy()
doc['_date'] = str(datetime.datetime.now())
doc['_uuid'] = str(uuid.uuid4())
hash_pk = hashlib.md5(bytes(json.dumps(doc),'utf-8')).hexdigest()[:10]
doc_obj.set_pk(self.doc_id_string.format(doc_id=hash_pk,
backend_id=self.backend_id, class_name=doc_obj.get_class_name()))
return doc_obj
def check_unique(self, doc_obj, key, value):
obj = doc_obj.objects().filter({key: value})
if len(obj) == 0:
return True
if hasattr(doc_obj, '_id') and len(obj) == 1:
if doc_obj._id == obj[0]._id:
return True
raise ValidationException(
'There is already a {key} with the value of {value}'
.format(key=key, value=value))
def prep_doc(self, doc_obj):
"""
        This method validates the document, gets the Python value, checks unique
        indexes, gets the DB value, and then returns the prepared doc dict.
        Useful for save and backup functions.
@param doc_obj:
@return:
"""
doc = doc_obj._data.copy()
for key, prop in list(doc_obj._base_properties.items()):
prop.validate(doc.get(key), key)
raw_value = prop.get_python_value(doc.get(key))
if prop.unique:
self.check_unique(doc_obj, key, raw_value)
value = prop.get_db_value(raw_value)
doc[key] = value
doc['_doc_type'] = get_doc_type(doc_obj.__class__)
return doc
def _save(self, doc_obj):
doc = self.prep_doc(doc_obj)
if '_id' not in doc:
self.create_pk(doc_obj,doc)
doc['_id'] = doc_obj._id
return (doc_obj, doc)
def get_id_list(self, filters_list):
l = self.parse_filters(filters_list)
if len(l) == 1:
return self._indexer.smembers(l[0])
else:
return self._indexer.sinter(*l)
def parse_filters(self, filters):
s = set()
for f in filters:
if '*' in f:
s.update(self._indexer.scan_iter(f))
else:
s.add(f)
if not s:
return filters
return list(s)
def sort(self, sortingp_list, docs_list, doc_class):
for sortingp in sortingp_list:
if sortingp.key not in doc_class._base_properties:
raise ValueError("Field '%s' doesn't exists in a document" % sortingp.key)
sorted_list = list(docs_list)
        # Check whether the list can be sorted by several attributes in a single pass
if SortingParam.needs_multiple_passes(sortingp_list):
for sortingp in sortingp_list:
sorted_list = sorted(sorted_list, key=lambda x: getattr(x, sortingp.key),
reverse=sortingp.reverse)
else:
sorted_list = sorted(sorted_list, key=SortingParam.attr_sort(sortingp_list),
reverse=sortingp_list[0].reverse)
return sorted_list
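# Illustrative sketch of the primary-key format produced by create_pk() through
# doc_id_string and consumed by parse_id() (the backend_id 'redis' and the class name
# 'TestDocument' are hypothetical):
#
#     'a1b2c3d4e5:id:redis:TestDocument'
#     DocDB().parse_id('a1b2c3d4e5:id:redis:TestDocument')  # -> 'a1b2c3d4e5'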
| capless/kev | kev/backends/__init__.py | Python | gpl-3.0 | 3,810 | 0.003412 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""OpenMM Reporter for saving the positions of a molecular dynamics simulation
in the HDF5 format.
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
from mdtraj.formats.hdf5 import HDF5TrajectoryFile
from mdtraj.reporters.basereporter import _BaseReporter
##############################################################################
# Classes
##############################################################################
class HDF5Reporter(_BaseReporter):
"""HDF5Reporter stores a molecular dynamics trajectory in the HDF5 format.
This object supports saving all kinds of information from the simulation --
more than any other trajectory format. In addition to all of the options,
the topology of the system will also (of course) be stored in the file. All
of the information is compressed, so the size of the file is not much
different than DCD, despite the added flexibility.
Parameters
----------
file : str, or HDF5TrajectoryFile
Either an open HDF5TrajecoryFile object to write to, or a string
specifying the filename of a new HDF5 file to save the trajectory to.
reportInterval : int
The interval (in time steps) at which to write frames.
coordinates : bool
Whether to write the coordinates to the file.
time : bool
Whether to write the current time to the file.
cell : bool
Whether to write the current unit cell dimensions to the file.
potentialEnergy : bool
Whether to write the potential energy to the file.
kineticEnergy : bool
Whether to write the kinetic energy to the file.
temperature : bool
Whether to write the instantaneous temperature to the file.
velocities : bool
Whether to write the velocities to the file.
atomSubset : array_like, default=None
Only write a subset of the atoms, with these (zero based) indices
to the file. If None, *all* of the atoms will be written to disk.
enforcePeriodicBox: bool or None
Specifies whether particle positions should be translated so the
center of every molecule lies in the same periodic box. If None
(the default), it will automatically decide whether to translate
molecules based on whether the system being simulated uses periodic
boundary conditions.
Notes
-----
If you use the ``atomSubset`` option to write only a subset of the atoms
to disk, the ``kineticEnergy``, ``potentialEnergy``, and ``temperature``
fields will not change. They will still refer to the energy and temperature
of the *whole* system, and are not "subsetted" to only include the energy
of your subsystem.
Examples
--------
>>> simulation = Simulation(topology, system, integrator)
>>> h5_reporter = HDF5Reporter('traj.h5', 100)
>>> simulation.reporters.append(h5_reporter)
>>> simulation.step(10000)
    >>> traj = mdtraj.trajectory.load('traj.h5')
"""
@property
def backend(self):
return HDF5TrajectoryFile
def __init__(self, file, reportInterval, coordinates=True, time=True,
cell=True, potentialEnergy=True, kineticEnergy=True,
temperature=True, velocities=False, atomSubset=None,
enforcePeriodicBox=None):
"""Create a HDF5Reporter.
"""
super(HDF5Reporter, self).__init__(file, reportInterval,
coordinates, time, cell, potentialEnergy, kineticEnergy,
temperature, velocities, atomSubset,
enforcePeriodicBox)
| dwhswenson/mdtraj | mdtraj/reporters/hdf5reporter.py | Python | lgpl-2.1 | 4,749 | 0.000632 |
# -*- coding: utf-8 -*-
#
# Nagare documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 29 15:07:51 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
#import os
#import sys
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.githubpages', 'sphinxcontrib.mermaid']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Nagare'
copyright = u'2017, Net-ng'
author = u'Alain Poirier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.5'
# The full version, including alpha/beta/rc tags.
release = u'0.5.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'trac'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_style = 'theme.css'
html_favicon = '_static/favicon.ico'
html_logo = '_static/logo.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = dict(
logo_only=True,
collapse_navigation=True,
prev_next_buttons_location='bottom',
display_version=False,
sticky_navigation=False
)
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
html_show_copyright = False
html_show_sphinx = False
#html_use_index = False
html_show_sourcelink = False
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nagaredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Nagare.tex', u'Nagare Documentation',
u'Alain Poirier', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nagare', u'Nagare Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Nagare', u'Nagare Documentation',
author, 'Nagare', 'One line description of project.',
'Miscellaneous'),
]
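# With this configuration in place, the HTML documentation can be built with a standard
# Sphinx invocation (assuming this conf.py sits at the documentation source root):
#
#     sphinx-build -b html . _build/html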
| nagareproject/core | doc/conf.py | Python | bsd-3-clause | 5,554 | 0.001801 |
def main():
"""Instantiate a DockerStats object and collect stats."""
print('Docker Service Module')
if __name__ == '__main__':
main()
| gomex/docker-zabbix | docker_service/__init__.py | Python | gpl-3.0 | 148 | 0.006757 |