text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
#!/usr/bin/env python
"""Simple PNG Canvas for Python"""
__version__ = "0.8"
__author__ = "Rui Carmo (http://the.taoofmac.com)"
__copyright__ = "CC Attribution-NonCommercial-NoDerivs 2.0 Rui Carmo"
__contributors__ = ["http://collaboa.weed.rbse.com/repository/file/branches/pgsql/lib/spark_pr.rb"], ["Eli Bendersky"]
import zlib, struct
signature = struct.pack("8B", 137, 80, 78, 71, 13, 10, 26, 10)
# alpha blends two colors, using the alpha given by c2
def blend(c1, c2):
    """Alpha-composite RGBA colour c2 over RGB colour c1.

    Uses c2's alpha channel (0-0xFF); returns a plain [r, g, b] list.
    """
    alpha = c2[3]
    inverse = 0xFF - alpha
    return [(c1[channel] * inverse + c2[channel] * alpha) >> 8 for channel in range(3)]
# calculate a new alpha given a 0-0xFF intensity
def intensity(c, i):
    """Return colour c with its alpha scaled by intensity i (0-0xFF)."""
    red, green, blue, alpha = c[0], c[1], c[2], c[3]
    scaled_alpha = (alpha * i) >> 8
    return [red, green, blue, scaled_alpha]
# calculate perceptive grayscale value
def grayscale(c):
    """Perceptual (luma-weighted) grayscale value of an RGB colour."""
    # Rec. 601-style weights; summed in the same order as the classic
    # 0.3*R + 0.59*G + 0.11*B expression so float results are identical.
    weights = (0.3, 0.59, 0.11)
    return int(sum(channel * weight for channel, weight in zip(c, weights)))
# calculate gradient colors
def gradientList(start, end, steps):
    """Interpolate steps+1 RGBA colours from `start` to `end` (both inclusive).

    start/end are 4-element RGBA sequences; returns a list of 4-element lists.
    Raises ZeroDivisionError if steps == 0 (as the original did).
    """
    delta = [end[i] - start[i] for i in range(4)]
    grad = []
    for i in range(steps + 1):
        # Floor division keeps components integral: dump() packs them with
        # struct.pack("!3B"), which rejects floats. Under Python 2 the
        # original "/" was already floor division, so behaviour is unchanged
        # there; under Python 3 "/" would silently produce floats.
        grad.append([start[j] + (delta[j] * i) // steps for j in range(4)])
    return grad
class PNGCanvas:
    """Minimal in-memory canvas that renders to / loads from 8-bit RGB PNG.

    Pixels live in self.canvas[y][x] as [r, g, b] lists; drawing colours are
    RGBA lists whose alpha is blended into the stored RGB by point().

    NOTE(review): this class is Python-2 era code — it treats decompressed
    PNG data as str and uses chr()/ord() on it; verify before running on
    Python 3.
    """

    def __init__(self, width, height, bgcolor=[0xff,0xff,0xff,0xff], color=[0,0,0,0xff]):
        # NOTE(review): mutable list defaults are shared across calls; safe
        # only because they are never mutated in place here.
        self.canvas = []        # rows of [r, g, b] pixel lists
        self.width = width
        self.height = height
        self.color = color      # rgba
        bgcolor = bgcolor[0:3]  # we don't need alpha for background
        for i in range(height):
            self.canvas.append([bgcolor] * width)

    def point(self, x, y, color=None):
        """Alpha-blend `color` (RGBA; default self.color) into pixel (x, y).

        Out-of-bounds coordinates are silently clipped.
        """
        if x < 0 or y < 0 or x > self.width - 1 or y > self.height - 1:
            return
        if color == None:
            color = self.color
        self.canvas[y][x] = blend(self.canvas[y][x], color)

    def _rectHelper(self, x0, y0, x1, y1):
        """Normalise rectangle corners: ints, with x0 <= x1 and y0 <= y1."""
        x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
        if x0 > x1:
            x0, x1 = x1, x0
        if y0 > y1:
            y0, y1 = y1, y0
        return [x0, y0, x1, y1]

    def verticalGradient(self, x0, y0, x1, y1, start, end):
        """Fill the rectangle with a top-to-bottom gradient from start to end."""
        x0, y0, x1, y1 = self._rectHelper(x0, y0, x1, y1)
        # One gradient entry per row of the rectangle.
        grad = gradientList(start, end, y1 - y0)
        for x in range(x0, x1 + 1):
            for y in range(y0, y1 + 1):
                self.point(x, y, grad[y - y0])

    def rectangle(self, x0, y0, x1, y1):
        """Draw the rectangle outline in the current colour."""
        x0, y0, x1, y1 = self._rectHelper(x0, y0, x1, y1)
        self.polyline([[x0, y0], [x1, y0], [x1, y1], [x0, y1], [x0, y0]])

    def filledRectangle(self, x0, y0, x1, y1):
        """Fill the rectangle (inclusive corners) with the current colour."""
        x0, y0, x1, y1 = self._rectHelper(x0, y0, x1, y1)
        for x in range(x0, x1 + 1):
            for y in range(y0, y1 + 1):
                self.point(x, y, self.color)

    def copyRect(self, x0, y0, x1, y1, dx, dy, destination):
        """Copy the rectangle verbatim into `destination` at offset (dx, dy).

        No blending and no bounds check on the destination indices.
        """
        x0, y0, x1, y1 = self._rectHelper(x0, y0, x1, y1)
        for x in range(x0, x1 + 1):
            for y in range(y0, y1 + 1):
                destination.canvas[dy + y - y0][dx + x - x0] = self.canvas[y][x]

    def blendRect(self, x0, y0, x1, y1, dx, dy, destination, alpha=0xff):
        """Alpha-blend the rectangle into `destination` at offset (dx, dy)."""
        x0, y0, x1, y1 = self._rectHelper(x0, y0, x1, y1)
        for x in range(x0, x1 + 1):
            for y in range(y0, y1 + 1):
                # Promote the stored RGB to RGBA with the requested alpha.
                rgba = self.canvas[y][x] + [alpha]
                destination.point(dx + x - x0, dy + y - y0, rgba)

    # draw a line using Xiaolin Wu's antialiasing technique
    def line(self, x0, y0, x1, y1):
        """Draw an antialiased line in the current colour (Wu's algorithm)."""
        # clean params
        x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
        # Normalise so the line always runs top-to-bottom.
        if y0 > y1:
            y0, y1, x0, x1 = y1, y0, x1, x0
        dx = x1 - x0
        if dx < 0:
            sx = -1
        else:
            sx = 1
        dx *= sx
        dy = y1 - y0
        # 'easy' cases: horizontal, vertical and exact diagonal lines need
        # no antialiasing.
        if dy == 0:
            for x in range(x0, x1, sx):
                self.point(x, y0)
            return
        if dx == 0:
            for y in range(y0, y1):
                self.point(x0, y)
            self.point(x1, y1)
            return
        if dx == dy:
            for x in range(x0, x1, sx):
                self.point(x, y0)
                y0 = y0 + 1
            return
        # main loop
        self.point(x0, y0)
        # 16.16 fixed-point error accumulator.
        e_acc = 0
        if dy > dx: # vertical displacement
            # NOTE(review): Python-2 integer "/"; on Python 3 this becomes a
            # float and the fixed-point masking below misbehaves — verify.
            e = (dx << 16) / dy
            for i in range(y0, y1 - 1):
                e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
                # Accumulator wrapped: step one pixel sideways.
                if (e_acc <= e_acc_temp):
                    x0 = x0 + sx
                # Split coverage between the two pixels straddling the line.
                w = 0xFF - (e_acc >> 8)
                self.point(x0, y0, intensity(self.color, (w)))
                y0 = y0 + 1
                self.point(x0 + sx, y0, intensity(self.color, (0xFF - w)))
            self.point(x1, y1)
            return
        # horizontal displacement
        e = (dy << 16) / dx
        for i in range(x0, x1 - sx, sx):
            e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
            if (e_acc <= e_acc_temp):
                y0 = y0 + 1
            w = 0xFF - (e_acc >> 8)
            self.point(x0, y0, intensity(self.color, (w)))
            x0 = x0 + sx
            self.point(x0, y0 + 1, intensity(self.color, (0xFF - w)))
        self.point(x1, y1)

    def polyline(self, arr):
        """Draw connected line segments through the [x, y] points in `arr`."""
        for i in range(0, len(arr) - 1):
            self.line(arr[i][0], arr[i][1], arr[i + 1][0], arr[i + 1][1])

    def dump(self):
        """Serialise the canvas as a complete PNG byte stream.

        NOTE(review): joins chr(0) filter bytes with struct.pack output —
        this only works on Python 2, where both are str.
        """
        raw_list = []
        for y in range(self.height):
            raw_list.append(chr(0)) # filter type 0 (None)
            for x in range(self.width):
                raw_list.append(struct.pack("!3B", *self.canvas[y][x]))
        raw_data = ''.join(raw_list)
        # 8-bit image represented as RGB tuples
        # simple transparency, alpha is pure white
        return signature + \
            self.pack_chunk('IHDR', struct.pack("!2I5B", self.width, self.height, 8, 2, 0, 0, 0)) + \
            self.pack_chunk('tRNS', struct.pack("!6B", 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)) + \
            self.pack_chunk('IDAT', zlib.compress(raw_data, 9)) + \
            self.pack_chunk('IEND', '')

    def pack_chunk(self, tag, data):
        """Wrap `data` in a PNG chunk: length, tag, payload, CRC32."""
        to_check = tag + data
        return struct.pack("!I", len(data)) + to_check + struct.pack("!I", zlib.crc32(to_check) & 0xFFFFFFFF)

    def load(self, f):
        """Load an 8-bit RGB, non-interlaced PNG from file object `f`.

        Raises TypeError for any other PNG flavour; replaces self.canvas.
        """
        assert f.read(8) == signature
        self.canvas = []
        for tag, data in self.chunks(f):
            if tag == "IHDR":
                # NOTE: `filter` shadows the builtin, kept as-is here.
                ( width,
                  height,
                  bitdepth,
                  colortype,
                  compression, filter, interlace ) = struct.unpack("!2I5B", data)
                self.width = width
                self.height = height
                # Only 8-bit truecolour, no interlace/odd filters supported.
                if (bitdepth, colortype, compression, filter, interlace) != (8, 2, 0, 0, 0):
                    raise TypeError('Unsupported PNG format')
            # we ignore tRNS because we use pure white as alpha anyway
            elif tag == 'IDAT':
                raw_data = zlib.decompress(data)
                rows = []  # NOTE(review): unused accumulator, never read
                i = 0
                for y in range(height):
                    # Each scanline starts with one filter-type byte.
                    filtertype = ord(raw_data[i])
                    i = i + 1
                    cur = [ord(x) for x in raw_data[i:i + width * 3]]
                    # NOTE(review): passing prev=None for the first row
                    # crashes for filter types 2-4; the spec treats the
                    # missing prior row as zeros — verify with real files.
                    if y == 0:
                        rgb = self.defilter(cur, None, filtertype)
                    else:
                        rgb = self.defilter(cur, prev, filtertype)
                    # defilter mutates cur in place, so cur == rgb here.
                    prev = cur
                    i = i + width * 3
                    row = []
                    j = 0
                    for x in range(width):
                        pixel = rgb[j:j + 3]
                        row.append(pixel)
                        j = j + 3
                    self.canvas.append(row)

    def defilter(self, cur, prev, filtertype, bpp=3):
        """Reverse a PNG scanline filter in place and return `cur`.

        cur/prev are flat byte-value lists for the current/previous rows;
        bpp is bytes per pixel (3 for RGB).
        """
        if filtertype == 0: # No filter
            return cur
        elif filtertype == 1: # Sub
            xp = 0
            for xc in range(bpp, len(cur)):
                cur[xc] = (cur[xc] + cur[xp]) % 256
                xp = xp + 1
        elif filtertype == 2: # Up
            for xc in range(len(cur)):
                cur[xc] = (cur[xc] + prev[xc]) % 256
        elif filtertype == 3: # Average
            # NOTE(review): xp tracks xc itself (not xc - bpp), so the
            # "left" operand is the raw current byte — looks wrong vs the
            # PNG spec's Average filter; verify against reference images.
            # Also "/2" is Python-2 integer division.
            xp = 0
            for xc in range(len(cur)):
                cur[xc] = (cur[xc] + (cur[xp] + prev[xc]) / 2) % 256
                xp = xp + 1
        elif filtertype == 4: # Paeth
            xp = 0
            # First pixel has no left neighbour: degenerate to Up.
            for i in range(bpp):
                cur[i] = (cur[i] + prev[i]) % 256
            for xc in range(bpp, len(cur)):
                a = cur[xp]    # left (already reconstructed)
                b = prev[xc]   # above
                c = prev[xp]   # upper-left
                p = a + b - c
                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)
                # Paeth predictor: nearest of a/b/c, ties broken a, b, c.
                if pa <= pb and pa <= pc:
                    value = a
                elif pb <= pc:
                    value = b
                else:
                    value = c
                cur[xc] = (cur[xc] + value) % 256
                xp = xp + 1
        else:
            raise TypeError('Unrecognized scanline filter type')
        return cur

    def chunks(self, f):
        """Yield [tag, data] for each chunk in the PNG stream `f`.

        Stops silently at EOF / short read; raises IOError on CRC mismatch.
        """
        while 1:
            try:
                length = struct.unpack("!I", f.read(4))[0]
                tag = f.read(4)
                data = f.read(length)
                # NOTE(review): "!i" unpacks the CRC as *signed*, matching
                # Python 2's signed zlib.crc32; on Python 3 crc32 is
                # unsigned and this comparison can spuriously fail.
                crc = struct.unpack("!i", f.read(4))[0]
            except:
                # Bare except doubles as the EOF exit — any struct error
                # (including a truncated file) ends iteration.
                return
            if zlib.crc32(tag + data) != crc:
                raise IOError
            yield [tag, data]
if __name__ == '__main__':
    # Self-test: draw shapes, write a PNG, read it back, and re-write it.
    # print(...) and // are used instead of the original Python-2-only
    # `print` statements and `/` so this demo parses on both Python 2 and 3
    # (output and integer arithmetic are unchanged on Python 2).
    width = 128
    height = 64
    print("Creating Canvas...")
    c = PNGCanvas(width, height)
    c.color = [0xff, 0, 0, 0xff]
    c.rectangle(0, 0, width - 1, height - 1)
    print("Generating Gradient...")
    c.verticalGradient(1, 1, width - 2, height - 2, [0xff, 0, 0, 0xff], [0x20, 0, 0xff, 0x80])
    print("Drawing Lines...")
    c.color = [0, 0, 0, 0xff]
    c.line(0, 0, width - 1, height - 1)
    c.line(0, 0, width // 2, height - 1)
    c.line(0, 0, width - 1, height // 2)
    # Copy Rect to Self
    print("Copy Rect")
    c.copyRect(1, 1, width // 2 - 1, height // 2 - 1, 0, height // 2, c)
    # Blend Rect to Self
    print("Blend Rect")
    c.blendRect(1, 1, width // 2 - 1, height // 2 - 1, width // 2, 0, c)
    # Write test
    print("Writing to file...")
    f = open("test.png", "wb")
    f.write(c.dump())
    f.close()
    # Read test
    print("Reading from file...")
    f = open("test.png", "rb")
    c.load(f)
    f.close()
    # Write back
    print("Writing to new file...")
    f = open("recycle.png", "wb")
    f.write(c.dump())
    f.close()
| ybak/myblog | app/pngcanvas.py | Python | mit | 9,000 | 0.037889 |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import subprocess
import sys
import os.path as p
import glob
# Major/minor version of the running interpreter, e.g. (2, 7) or (3, 5).
PY_MAJOR, PY_MINOR = sys.version_info[ 0 : 2 ]
# Bail out early with a clear message on unsupported interpreters rather
# than failing later with an obscure error inside ycmd's build.
if not ( ( PY_MAJOR == 2 and PY_MINOR >= 6 ) or
         ( PY_MAJOR == 3 and PY_MINOR >= 3 ) or
         PY_MAJOR > 3 ):
  sys.exit( 'YouCompleteMe requires Python >= 2.6 or >= 3.3; '
            'your version of Python is ' + sys.version )

# Directory containing this script, and the legacy location of compiled
# YCM libraries from older installs (cleaned up by Main()).
DIR_OF_THIS_SCRIPT = p.dirname( p.abspath( __file__ ) )
DIR_OF_OLD_LIBS = p.join( DIR_OF_THIS_SCRIPT, 'python' )
def CheckCall( args, **kwargs ):
  """Run an external command; on failure exit with its return code."""
  status = subprocess.call( args, **kwargs )
  if status != 0:
    sys.exit( status )
def Main():
  """Build ycmd via its bundled build script, then purge stale YCM libs."""
  build_file = p.join( DIR_OF_THIS_SCRIPT, 'third_party', 'ycmd', 'build.py' )
  if not p.isfile( build_file ):
    sys.exit(
      'File {0} does not exist; you probably forgot to run:\n'
      '\tgit submodule update --init --recursive\n'.format( build_file ) )
  CheckCall( [ sys.executable, build_file ] + sys.argv[ 1: ] )
  # Remove old YCM libs if present so that YCM can start.
  stale_patterns = ( '*ycm_core.*', '*ycm_client_support.*', '*clang*.*' )
  for pattern in stale_patterns:
    for lib in glob.glob( p.join( DIR_OF_OLD_LIBS, pattern ) ):
      os.remove( lib )
# Run the build only when executed as a script, not when imported.
if __name__ == "__main__":
  Main()
| sunchuanleihit/vimrc | sources_non_forked/YouCompleteMe/install.py | Python | mit | 1,500 | 0.040667 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
float_or_none,
int_or_none,
unified_timestamp,
url_or_none,
)
class DctpTvIE(InfoExtractor):
    """Extractor for dctp.tv film pages (RTMP-delivered videos)."""

    _VALID_URL = r'https?://(?:www\.)?dctp\.tv/(?:#/)?filme/(?P<id>[^/?#&]+)'
    _TESTS = [{
        # 4x3
        'url': 'http://www.dctp.tv/filme/videoinstallation-fuer-eine-kaufhausfassade/',
        'info_dict': {
            'id': '95eaa4f33dad413aa17b4ee613cccc6c',
            'display_id': 'videoinstallation-fuer-eine-kaufhausfassade',
            'ext': 'flv',
            'title': 'Videoinstallation für eine Kaufhausfassade',
            'description': 'Kurzfilm',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 71.24,
            'timestamp': 1302172322,
            'upload_date': '20110407',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        # 16x9
        'url': 'http://www.dctp.tv/filme/sind-youtuber-die-besseren-lehrer/',
        'only_matching': True,
    }]
    # Root of dctp's versioned REST API on S3.
    _BASE_URL = 'http://dctp-ivms2-restapi.s3.amazonaws.com'

    def _real_extract(self, url):
        display_id = self._match_id(url)
        # The current API version determines the REST path prefix.
        version = self._download_json(
            '%s/version.json' % self._BASE_URL, display_id,
            'Downloading version JSON')
        restapi_base = '%s/%s/restapi' % (
            self._BASE_URL, version['version_name'])
        # Resolve the URL slug to an object id, then fetch its media record.
        info = self._download_json(
            '%s/slugs/%s.json' % (restapi_base, display_id), display_id,
            'Downloading video info JSON')
        media = self._download_json(
            '%s/media/%s.json' % (restapi_base, compat_str(info['object_id'])),
            display_id, 'Downloading media JSON')
        uuid = media['uuid']
        title = media['title']
        # The RTMP play path encodes the aspect ratio of the encode.
        ratio = '16x9' if media.get('is_wide') else '4x3'
        play_path = 'mp4:%s_dctp_0500_%s.m4v' % (uuid, ratio)
        # Prefer a CloudFront streaming endpoint from the live server list;
        # fall back to a known endpoint if the list is unavailable.
        servers = self._download_json(
            'http://www.dctp.tv/streaming_servers/', display_id,
            note='Downloading server list JSON', fatal=False)
        if servers:
            endpoint = next(
                server['endpoint']
                for server in servers
                if url_or_none(server.get('endpoint')) and
                'cloudfront' in server['endpoint'])
        else:
            endpoint = 'rtmpe://s2pqqn4u96e4j8.cloudfront.net/cfx/st/'
        # The rtmpdump "app" is everything after the host in the endpoint.
        app = self._search_regex(
            r'^rtmpe?://[^/]+/(?P<app>.*)$', endpoint, 'app')
        formats = [{
            'url': endpoint,
            'app': app,
            'play_path': play_path,
            'page_url': url,
            'player_url': 'http://svm-prod-dctptv-static.s3.amazonaws.com/dctptv-relaunch2012-110.swf',
            'ext': 'flv',
        }]
        # Collect well-formed thumbnail entries, skipping malformed items.
        thumbnails = []
        images = media.get('images')
        if isinstance(images, list):
            for image in images:
                if not isinstance(image, dict):
                    continue
                image_url = url_or_none(image.get('url'))
                if not image_url:
                    continue
                thumbnails.append({
                    'url': image_url,
                    'width': int_or_none(image.get('width')),
                    'height': int_or_none(image.get('height')),
                })
        return {
            'id': uuid,
            'display_id': display_id,
            'title': title,
            'alt_title': media.get('subtitle'),
            'description': media.get('description') or media.get('teaser'),
            'timestamp': unified_timestamp(media.get('created')),
            'duration': float_or_none(media.get('duration_in_ms'), scale=1000),
            'thumbnails': thumbnails,
            'formats': formats,
        }
| valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/dctp.py | Python | gpl-3.0 | 3,148 | 0.02987 |
"""Config flow for Network UPS Tools (NUT) integration."""
import logging
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_ALIAS,
CONF_BASE,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_RESOURCES,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import PyNUTData
from .const import (
DEFAULT_HOST,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
KEY_STATUS,
KEY_STATUS_DISPLAY,
SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
def _base_schema(discovery_info):
    """Generate base schema."""
    base_schema = {}
    # Host/port are only asked of the user when the device was not
    # auto-discovered (discovery already supplies them).
    if not discovery_info:
        base_schema[vol.Optional(CONF_HOST, default=DEFAULT_HOST)] = str
        base_schema[vol.Optional(CONF_PORT, default=DEFAULT_PORT)] = int
    base_schema[vol.Optional(CONF_USERNAME)] = str
    base_schema[vol.Optional(CONF_PASSWORD)] = str
    return vol.Schema(base_schema)
def _resource_schema_base(available_resources, selected_resources):
    """Resource selection schema."""
    # Offer only sensors that both exist on this UPS and are known to us.
    known_available_resources = {}
    for sensor_id, sensor_desc in SENSOR_TYPES.items():
        if sensor_id in available_resources:
            known_available_resources[sensor_id] = sensor_desc.name
    # The human-readable status sensor piggybacks on the raw status value.
    if KEY_STATUS in known_available_resources:
        known_available_resources[KEY_STATUS_DISPLAY] = SENSOR_TYPES[
            KEY_STATUS_DISPLAY
        ].name
    selector = cv.multi_select(known_available_resources)
    return {vol.Required(CONF_RESOURCES, default=selected_resources): selector}
def _ups_schema(ups_list):
    """UPS selection schema."""
    alias_field = vol.Required(CONF_ALIAS)
    return vol.Schema({alias_field: vol.In(ups_list)})
async def validate_input(hass: core.HomeAssistant, data):
    """Validate the user input allows us to connect.

    Data has the keys from _base_schema with values provided by the user.
    """
    nut_data = PyNUTData(
        data[CONF_HOST],
        data[CONF_PORT],
        data.get(CONF_ALIAS),
        data.get(CONF_USERNAME),
        data.get(CONF_PASSWORD),
    )
    # update() performs blocking network I/O, so run it in the executor.
    await hass.async_add_executor_job(nut_data.update)
    status = nut_data.status
    if not status:
        raise CannotConnect
    return {"ups_list": nut_data.ups_list, "available_resources": status}
def _format_host_port_alias(user_input):
    """Format a host, port, and alias so it can be used for comparison or display."""
    host, port = user_input[CONF_HOST], user_input[CONF_PORT]
    alias = user_input.get(CONF_ALIAS)
    # "alias@host:port" when an alias is set, otherwise "host:port".
    return f"{alias}@{host}:{port}" if alias else f"{host}:{port}"
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Network UPS Tools (NUT).

    Step order: user (host/port/credentials) -> ups (only when the server
    exposes several UPSes) -> resources (sensor selection) -> entry created.
    """

    VERSION = 1

    def __init__(self):
        """Initialize the nut config flow."""
        self.nut_config = {}          # accumulated user answers across steps
        self.available_resources = {} # status variables reported by the UPS
        self.discovery_info = {}      # zeroconf data, when discovered
        self.ups_list = None          # alias->name map when server has >1 UPS
        self.title = None

    async def async_step_zeroconf(self, discovery_info):
        """Prepare configuration for a discovered nut device."""
        self.discovery_info = discovery_info
        # NUT has no unique id; abort if any flow/entry is already in progress.
        await self._async_handle_discovery_without_unique_id()
        self.context["title_placeholders"] = {
            CONF_PORT: discovery_info.get(CONF_PORT, DEFAULT_PORT),
            CONF_HOST: discovery_info[CONF_HOST],
        }
        return await self.async_step_user()

    async def async_step_user(self, user_input=None):
        """Handle the user input."""
        errors = {}
        if user_input is not None:
            # Discovered devices supply host/port; the form omitted them.
            if self.discovery_info:
                user_input.update(
                    {
                        CONF_HOST: self.discovery_info[CONF_HOST],
                        CONF_PORT: self.discovery_info.get(CONF_PORT, DEFAULT_PORT),
                    }
                )
            info, errors = await self._async_validate_or_error(user_input)
            if not errors:
                self.nut_config.update(user_input)
                # More than one UPS behind this server: ask which one.
                if len(info["ups_list"]) > 1:
                    self.ups_list = info["ups_list"]
                    return await self.async_step_ups()
                if self._host_port_alias_already_configured(self.nut_config):
                    return self.async_abort(reason="already_configured")
                self.available_resources.update(info["available_resources"])
                return await self.async_step_resources()
        return self.async_show_form(
            step_id="user", data_schema=_base_schema(self.discovery_info), errors=errors
        )

    async def async_step_ups(self, user_input=None):
        """Handle the picking the ups."""
        errors = {}
        if user_input is not None:
            self.nut_config.update(user_input)
            if self._host_port_alias_already_configured(self.nut_config):
                return self.async_abort(reason="already_configured")
            # Re-validate with the chosen alias to get its resources.
            info, errors = await self._async_validate_or_error(self.nut_config)
            if not errors:
                self.available_resources.update(info["available_resources"])
                return await self.async_step_resources()
        return self.async_show_form(
            step_id="ups",
            data_schema=_ups_schema(self.ups_list),
            errors=errors,
        )

    async def async_step_resources(self, user_input=None):
        """Handle the picking the resources."""
        if user_input is None:
            return self.async_show_form(
                step_id="resources",
                data_schema=vol.Schema(
                    _resource_schema_base(self.available_resources, [])
                ),
            )
        self.nut_config.update(user_input)
        title = _format_host_port_alias(self.nut_config)
        return self.async_create_entry(title=title, data=self.nut_config)

    def _host_port_alias_already_configured(self, user_input):
        """See if we already have a nut entry matching user input configured."""
        existing_host_port_aliases = {
            _format_host_port_alias(entry.data)
            for entry in self._async_current_entries()
            if CONF_HOST in entry.data
        }
        return _format_host_port_alias(user_input) in existing_host_port_aliases

    async def _async_validate_or_error(self, config):
        # Returns (info, errors): exactly one of them is non-empty.
        errors = {}
        info = {}
        try:
            info = await validate_input(self.hass, config)
        except CannotConnect:
            errors[CONF_BASE] = "cannot_connect"
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception")
            errors[CONF_BASE] = "unknown"
        return info, errors

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle a option flow for nut."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Initialize options flow."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Handle options flow."""
        if user_input is not None:
            # Selection made: persist it as this entry's options.
            return self.async_create_entry(title="", data=user_input)
        # Pre-fill the form with the currently configured interval.
        scan_interval = self.config_entry.options.get(
            CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
        )
        base_schema = {
            # Poll interval is clamped to a sane 10-300 second range.
            vol.Optional(CONF_SCAN_INTERVAL, default=scan_interval): vol.All(
                vol.Coerce(int), vol.Clamp(min=10, max=300)
            )
        }
        return self.async_show_form(step_id="init", data_schema=vol.Schema(base_schema))
# Raised by validate_input when the NUT server is unreachable or reports
# no status; mapped to the "cannot_connect" form error.
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect."""
| aronsky/home-assistant | homeassistant/components/nut/config_flow.py | Python | apache-2.0 | 7,989 | 0.000876 |
"""
Tests of role and membership calculations.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.test import TestCase
from ..constants import role_kinds
from ..models import Classroom
from ..models import Facility
from ..models import FacilityUser
from ..models import KolibriAnonymousUser
from ..models import LearnerGroup
from .helpers import create_dummy_facility_data
from .helpers import create_superuser
def flatten(lst):
    """Recursively flatten arbitrarily nested lists into a single flat list.

    Only list instances are flattened; any other element (including tuples)
    is kept as-is. Returns a new list; `lst` is not modified.
    """
    # Iterative accumulation instead of the original head/tail recursion:
    # avoids the O(n^2) cost of repeated list concatenation and most of the
    # recursion depth (recursion remains only per nesting level).
    flat = []
    for item in lst:
        if isinstance(item, list):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
class RolesWithinFacilityTestCase(TestCase):
    """Admin/coach role resolution between users inside a single facility."""

    def setUp(self):
        # Dummy facility with classrooms, coaches, admins and learners.
        self.data = create_dummy_facility_data()

    def test_admin_has_admin_role_for_own_facility(self):
        admin = self.data["facility_admin"]
        facility = self.data["facility"]
        self.assertTrue(admin.has_role_for(role_kinds.ADMIN, facility))
        self.assertIn(role_kinds.ADMIN, admin.get_roles_for(facility))

    def test_coach_has_coach_role_for_own_classroom(self):
        coach0 = self.data["classroom_coaches"][0]
        classroom0 = self.data["classrooms"][0]
        self.assertTrue(coach0.has_role_for(role_kinds.COACH, classroom0))
        self.assertIn(role_kinds.COACH, coach0.get_roles_for(classroom0))

    def test_coach_has_no_coach_role_for_other_classroom(self):
        coach0 = self.data["classroom_coaches"][0]
        classroom1 = self.data["classrooms"][1]
        self.assertFalse(coach0.has_role_for(role_kinds.COACH, classroom1))
        self.assertNotIn(role_kinds.COACH, coach0.get_roles_for(classroom1))

    def test_coach_has_coach_role_for_learner_from_own_classroom(self):
        coach0 = self.data["classroom_coaches"][0]
        learner0 = self.data["learners_one_group"][0][0]
        self.assertTrue(coach0.has_role_for(role_kinds.COACH, learner0))
        self.assertIn(role_kinds.COACH, coach0.get_roles_for(learner0))

    def test_coach_has_no_coach_role_for_learner_from_other_classroom(self):
        coach0 = self.data["classroom_coaches"][0]
        learner1 = self.data["learners_one_group"][1][0]
        self.assertFalse(coach0.has_role_for(role_kinds.COACH, learner1))
        self.assertNotIn(role_kinds.COACH, coach0.get_roles_for(learner1))
class ImplicitMembershipTestCase(TestCase):
    """Facility admins implicitly get roles over all facility members."""

    def setUp(self):
        self.data = None  # not used; fixtures are built inline below
        self.facility = Facility.objects.create(name="My Facility")
        self.admin = FacilityUser.objects.create(
            username="admin", facility=self.facility
        )
        self.facility.add_admin(self.admin)
        # Learner is a facility member only — no explicit group/classroom.
        self.learner = FacilityUser.objects.create(
            username="learner", facility=self.facility
        )

    def test_has_admin_role_for_learner(self):
        self.assertTrue(self.admin.has_role_for(role_kinds.ADMIN, self.learner))

    def test_only_has_admin_role_for_learner(self):
        self.assertEqual(
            self.admin.get_roles_for(self.learner), set([role_kinds.ADMIN])
        )

    def test_admin_can_read_learner_object(self):
        self.assertTrue(self.admin.can_read(self.learner))

    def test_learner_is_in_list_of_readable_objects(self):
        self.assertIn(
            self.learner, self.admin.filter_readable(FacilityUser.objects.all())
        )
class ExplicitMembershipTestCase(TestCase):
    """Classroom admins get roles over learners via explicit group membership."""

    def setUp(self):
        self.facility = Facility.objects.create(name="My Facility")
        self.admin = FacilityUser.objects.create(
            username="admin", facility=self.facility
        )
        # Admin is scoped to the classroom, not the whole facility.
        self.classroom = Classroom.objects.create(name="Class", parent=self.facility)
        self.classroom.add_admin(self.admin)
        self.learner = FacilityUser.objects.create(
            username="learner", facility=self.facility
        )
        # Learner joins a group nested under the admin's classroom.
        self.group = LearnerGroup.objects.create(name="Group", parent=self.classroom)
        self.group.add_member(self.learner)

    def test_has_admin_role_for_learner(self):
        self.assertTrue(self.admin.has_role_for(role_kinds.ADMIN, self.learner))

    def test_only_has_admin_role_for_learner(self):
        self.assertEqual(
            self.admin.get_roles_for(self.learner), set([role_kinds.ADMIN])
        )

    def test_admin_can_read_learner_object(self):
        self.assertTrue(self.admin.can_read(self.learner))

    def test_learner_is_in_list_of_readable_objects(self):
        self.assertIn(
            self.learner, self.admin.filter_readable(FacilityUser.objects.all())
        )
class RolesAcrossFacilitiesTestCase(TestCase):
    """No roles should leak between users/collections of different facilities."""

    def setUp(self):
        # Two independent facilities with full fixture trees.
        self.data1 = create_dummy_facility_data()
        self.data2 = create_dummy_facility_data()

    def test_no_roles_between_users_across_facilities(self):
        users1 = self.data1["all_users"]
        users2 = self.data2["all_users"]
        for user1 in users1:
            for user2 in users2:
                # Superusers legitimately have roles everywhere; skip them.
                if not user1.is_superuser:
                    self.assertEqual(len(user1.get_roles_for(user2)), 0)

    def test_no_roles_for_collections_across_facilities(self):
        users1 = (
            self.data1["classroom_coaches"]
            + [self.data1["facility_admin"]]
            + list(self.data1["facility"].get_members())
        )
        collections2 = (
            [self.data2["facility"]]
            + self.data2["classrooms"]
            + flatten(self.data2["learnergroups"])
        )
        for user1 in users1:
            for collection2 in collections2:
                if not user1.is_superuser:
                    self.assertEqual(len(user1.get_roles_for(collection2)), 0)
class MembershipWithinFacilityTestCase(TestCase):
    """is_member_of/get_members agree at facility, classroom and group level."""

    def setUp(self):
        self.data = create_dummy_facility_data()
        self.anon_user = KolibriAnonymousUser()

    def test_facility_membership(self):
        # Every fixture user — learners, admins, coaches, superuser — is a
        # member of the facility.
        actual_members = flatten(
            self.data["learners_one_group"]
            + [self.data["learner_all_groups"]]
            + self.data["unattached_users"]
            + [self.data["facility_admin"]]
            + [self.data["facility_coach"]]
            + self.data["classroom_admins"]
            + self.data["classroom_coaches"]
            + [self.data["superuser"]]
        )
        returned_members = self.data["facility"].get_members()
        self.assertSetEqual(set(actual_members), set(returned_members))
        for user in actual_members:
            self.assertTrue(user.is_member_of(self.data["facility"]))
        self.assertFalse(self.anon_user.is_member_of(self.data["facility"]))

    def test_classroom_membership(self):
        for i, classroom in enumerate(self.data["classrooms"]):
            actual_members = flatten(
                self.data["learners_one_group"][i] + [self.data["learner_all_groups"]]
            )
            returned_members = classroom.get_members()
            self.assertSetEqual(set(actual_members), set(returned_members))
            # ensure that `is_member` is True for all users in the classroom
            for user in actual_members:
                self.assertTrue(user.is_member_of(classroom))
            # ensure that `is_member` is False for all users not in the classroom
            for user in set(self.data["all_users"]) - set(actual_members):
                self.assertFalse(user.is_member_of(classroom))
            self.assertFalse(self.anon_user.is_member_of(classroom))

    def test_learnergroup_membership(self):
        for i, classroom_users in enumerate(self.data["learners_one_group"]):
            for j, learnergroup_users in enumerate(classroom_users):
                learnergroup = self.data["learnergroups"][i][j]
                # Each group contains its dedicated learner plus the learner
                # who belongs to all groups.
                actual_members = [self.data["learners_one_group"][i][j]] + [
                    self.data["learner_all_groups"]
                ]
                returned_members = learnergroup.get_members()
                self.assertSetEqual(set(actual_members), set(returned_members))
                # ensure that `is_member` is True for all users in the learnergroup
                for user in actual_members:
                    self.assertTrue(user.is_member_of(learnergroup))
                # ensure that `is_member` is False for all users not in the learnergroup
                for user in set(self.data["all_users"]) - set(actual_members):
                    self.assertFalse(user.is_member_of(learnergroup))
class MembershipAcrossFacilitiesTestCase(TestCase):
    """Membership never crosses facility boundaries at any collection level."""

    def setUp(self):
        self.data1 = create_dummy_facility_data()
        self.data2 = create_dummy_facility_data()

    def test_users_are_not_members_of_other_facility(self):
        for user in self.data1["all_users"]:
            self.assertFalse(user.is_member_of(self.data2["facility"]))

    def test_users_are_not_members_of_other_facility_classroom(self):
        for user in self.data1["all_users"]:
            self.assertFalse(user.is_member_of(self.data2["classrooms"][0]))

    def test_users_are_not_members_of_other_facility_learnergroup(self):
        for user in self.data1["all_users"]:
            self.assertFalse(user.is_member_of(self.data2["learnergroups"][0][0]))
class SuperuserRolesTestCase(TestCase):
    """Superusers hold the admin role over everyone and everything."""

    def setUp(self):
        self.data = create_dummy_facility_data()
        self.superuser = self.data["superuser"]
        # A second superuser, to check superuser-over-superuser roles.
        self.superuser2 = create_superuser(self.data["facility"], username="superuser2")

    def test_superuser_has_admin_role_for_everyone(self):
        for user in self.data["all_users"]:
            self.assertTrue(self.superuser.has_role_for(role_kinds.ADMIN, user))

    def test_superuser_has_admin_role_for_all_collections(self):
        for coll in self.data["all_collections"]:
            self.assertTrue(self.superuser.has_role_for(role_kinds.ADMIN, coll))

    def test_superuser_has_admin_role_for_itself(self):
        self.assertTrue(self.superuser.has_role_for(role_kinds.ADMIN, self.superuser))

    def test_superuser_has_admin_role_for_other_superuser(self):
        self.assertTrue(self.superuser.has_role_for(role_kinds.ADMIN, self.superuser2))
class AnonymousUserRolesTestCase(TestCase):
    """Anonymous users hold no roles, and only superusers hold roles over them."""

    def setUp(self):
        self.data = create_dummy_facility_data()
        self.anon_user = KolibriAnonymousUser()

    def test_anon_user_has_no_admin_role_for_anyone(self):
        for user in self.data["all_users"]:
            self.assertFalse(self.anon_user.has_role_for(role_kinds.ADMIN, user))
            self.assertEqual(len(self.anon_user.get_roles_for(user)), 0)

    def test_anon_user_has_no_admin_role_for_any_collection(self):
        for coll in self.data["all_collections"]:
            self.assertFalse(self.anon_user.has_role_for(role_kinds.ADMIN, coll))
            self.assertEqual(len(self.anon_user.get_roles_for(coll)), 0)

    def test_nobody_but_superuser_has_roles_for_anon_user(self):
        for user in self.data["all_users"]:
            if not user.is_superuser:
                self.assertEqual(len(user.get_roles_for(self.anon_user)), 0)
| lyw07/kolibri | kolibri/core/auth/test/test_roles_and_membership.py | Python | mit | 10,992 | 0.001638 |
#!/usr/bin/env python
import socket
# Set admin server settings
UDP_IP = '' # Leave empty for Broadcast support
ADMIN_PORT = 48899
# Local settings of your Raspberry Pi, used for app discovery
INT_IP = '10.0.1.61'
INT_MAC = '111a02bf232b'
# Code Starts Here #
# Create the UDP socket used to answer Mi-Light/LimitlessLED app discovery
# probes, and bind it to the admin port (broadcast when UDP_IP is empty).
adminsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
adminsock.bind((UDP_IP, ADMIN_PORT))
# Serve discovery requests forever.
while True:
    admindata, adminaddr = adminsock.recvfrom(64)  # buffer size is 64 bytes
    # Did we get a message?
    # NOTE(review): recvfrom() returns bytes, never None, so this branch is
    # always taken and the trailing `break` is effectively unreachable.
    if admindata is not None:
        # print("admin command: ", str(admindata)) # Debugging
        # If the client app is syncing to a unit.
        # NOTE(review): on Python 3 str(admindata) yields "b'...'", but the
        # substring search still works on that representation.
        if str(admindata).find("Link_Wi-Fi") != -1:
            RETURN = INT_IP + ',' + INT_MAC + ','  # Return our IP/MAC
            # print("admin return: ", RETURN) # Debugging
            adminsock.sendto(bytes(RETURN, "utf-8"), adminaddr)  # Send Response
        else:
            adminsock.sendto(bytes('+ok', "utf-8"), adminaddr)  # Send OK for each packet we get
    else:
        break
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the optional 'signature' char field to users.ParentRelation."""

    dependencies = [
        ('users', '0004_auto_20150428_2142'),
    ]

    operations = [
        migrations.AddField(
            model_name='parentrelation',
            name='signature',
            # Optional free-text signature, max 255 chars; NULL allowed in DB.
            field=models.CharField(max_length=255, null=True, verbose_name='sig', blank=True),
            preserve_default=True,
        ),
    ]
| oskarm91/sis | apps/users/migrations/0005_parentrelation_signature.py | Python | bsd-3-clause | 488 | 0.002049 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
from osv import osv, fields
from tools.translate import _
from tools import ustr
import pooler
class res_config_configurable(osv.osv_memory):
    ''' Base classes for new-style configuration items
    Configuration items should inherit from this class, implement
    the execute method (and optionally the cancel one) and have
    their view inherit from the related res_config_view_base view.
    '''
    _name = 'res.config'
    _inherit = 'ir.wizard.screen'
    # 'logger' is shared (class attribute); '__logger' is name-mangled to this
    # class and logs under the 'res.config' channel.
    logger = logging.getLogger('res_config.actions')
    __logger = logging.getLogger(_name)
    def get_current_progress(self, cr, uid, context=None):
        '''Return a description the current progress of configuration:
        a tuple of (non_open_todos:int, total_todos: int)
        '''
        return (self.pool.get('ir.actions.todo')\
                .search_count(cr, uid, [('state','<>','open')], context),
                self.pool.get('ir.actions.todo')\
                .search_count(cr, uid, [], context))
    def _progress(self, cr, uid, context=None):
        # Percentage (rounded) of non-open todos; 100 when no todos exist.
        closed, total = self.get_current_progress(cr, uid, context=context)
        if total:
            return round(closed*100./total)
        return 100.
    _columns = dict(
        progress = fields.float('Configuration Progress', readonly=True),
        )
    _defaults = dict(
        progress = _progress,
        )
    def _next_action(self, cr, uid, context=None):
        # Return (as a browse record) the first 'open' ir.actions.todo the
        # current user may run.  Todos restricted to groups the user is not
        # part of are marked 'skip' and the search recurses to the next one.
        todos = self.pool.get('ir.actions.todo')
        active_todos = todos.search(cr, uid, [('state','=','open')],
                                    limit=1)
        if active_todos:
            todo_obj = todos.browse(cr, uid, active_todos[0], context=None)
            todo_groups = map(lambda x:x.id, todo_obj.groups_id)
            dont_skip_todo = True
            if todo_groups:
                # direct SQL membership test: user belongs to any todo group?
                cr.execute("select 1 from res_groups_users_rel where uid=%s and gid=ANY(%s)",(uid, todo_groups,))
                dont_skip_todo = bool(cr.fetchone())
            if dont_skip_todo:
                return todos.browse(cr, uid, active_todos[0], context=None)
            else:
                todos.write(cr, uid, active_todos[0], {'state':'skip'}, context=None)
                # NOTE(review): context is not forwarded to the recursive
                # call -- confirm that is intentional.
                return self._next_action(cr, uid)
        return None
    def _set_previous_todo(self, cr, uid, state, context=None):
        """ lookup the previous (which is still the next at this point)
        ir.actions.todo, set it to whatever state was provided.
        """
        # this is ultra brittle, but apart from storing the todo id
        # into the res.config view, I'm not sure how to get the
        # "previous" todo
        if context is None:
            context = {}
        if context.get('active_action_todo'):
            previous_todo = self.pool.get('ir.actions.todo').browse(cr, uid, context['active_action_todo'], context=context)
        else:
            previous_todo = self._next_action(cr, uid, context=context)
        if not previous_todo:
            self.__logger.warn(_("Couldn't find previous ir.actions.todo"))
            return
        previous_todo.write({'state':state})
    def _next(self, cr, uid, context=None):
        # Build the client-action dict for the next todo; once every todo is
        # done, fall back to the current user's home menu action.
        next = self._next_action(cr, uid)
        if next:
            action = next.action_id
            return {
                'view_mode': action.view_mode,
                'view_type': action.view_type,
                'view_id': action.view_id and [action.view_id.id] or False,
                'res_model': action.res_model,
                'type': action.type,
                'target': action.target,
                # remember which todo produced this action so that
                # _set_previous_todo can find it again later
                'context': {'active_action_todo': next.id},
                }
        self.logger.info('All configuration actions have been executed.')
        current_user_menu = self.pool.get('res.users')\
            .browse(cr, uid, uid).menu_id
        # return the action associated with the menu
        return self.pool.get(current_user_menu.type)\
            .read(cr, uid, current_user_menu.id)
    def start(self, cr, uid, ids, context=None):
        # Reopen todos flagged for restart ('always', or 'onskip' when the
        # todo was previously skipped/cancelled), then run the next one.
        ids2 = self.pool.get('ir.actions.todo').search(cr, uid, [], context=context)
        for todo in self.pool.get('ir.actions.todo').browse(cr, uid, ids2, context=context):
            if (todo.restart=='always') or (todo.restart=='onskip' and (todo.state in ('skip','cancel'))):
                todo.write({'state':'open'})
        return self.next(cr, uid, ids, context)
    def next(self, cr, uid, ids, context=None):
        """ Returns the next todo action to execute (using the default
        sort order)
        """
        return self._next(cr, uid, context=context)
    def execute(self, cr, uid, ids, context=None):
        """ Method called when the user clicks on the ``Next`` button.
        Execute *must* be overloaded unless ``action_next`` is overloaded
        (which is something you generally don't need to do).
        If ``execute`` returns an action dictionary, that action is executed
        rather than just going to the next configuration item.
        """
        raise NotImplementedError(
            'Configuration items need to implement execute')
    def cancel(self, cr, uid, ids, context=None):
        """ Method called when the user click on the ``Skip`` button.
        ``cancel`` should be overloaded instead of ``action_skip``. As with
        ``execute``, if it returns an action dictionary that action is
        executed in stead of the default (going to the next configuration item)
        The default implementation is a NOOP.
        ``cancel`` is also called by the default implementation of
        ``action_cancel``.
        """
        pass
    def action_next(self, cr, uid, ids, context=None):
        """ Action handler for the ``next`` event.
        Sets the status of the todo the event was sent from to
        ``done``, calls ``execute`` and -- unless ``execute`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        self._set_previous_todo(cr, uid, state='done', context=context)
        next = self.execute(cr, uid, ids, context=None)
        if next: return next
        return self.next(cr, uid, ids, context=context)
    def action_skip(self, cr, uid, ids, context=None):
        """ Action handler for the ``skip`` event.
        Sets the status of the todo the event was sent from to
        ``skip``, calls ``cancel`` and -- unless ``cancel`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        self._set_previous_todo(cr, uid, state='skip', context=context)
        next = self.cancel(cr, uid, ids, context=None)
        if next: return next
        return self.next(cr, uid, ids, context=context)
    def action_cancel(self, cr, uid, ids, context=None):
        """ Action handler for the ``cancel`` event. That event isn't
        generated by the res.config.view.base inheritable view, the
        inherited view has to overload one of the buttons (or add one
        more).
        Sets the status of the todo the event was sent from to
        ``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        self._set_previous_todo(cr, uid, state='cancel', context=context)
        next = self.cancel(cr, uid, ids, context=None)
        if next: return next
        return self.next(cr, uid, ids, context=context)
res_config_configurable()
class res_config_installer(osv.osv_memory):
    """ New-style configuration base specialized for addons selection
    and installation.
    Basic usage
    -----------
    Subclasses can simply define a number of _columns as
    fields.boolean objects. The keys (column names) should be the
    names of the addons to install (when selected). Upon action
    execution, selected boolean fields (and those only) will be
    interpreted as addons to install, and batch-installed.
    Additional addons
    -----------------
    It is also possible to require the installation of an additional
    addon set when a specific preset of addons has been marked for
    installation (in the basic usage only, additionals can't depend on
    one another).
    These additionals are defined through the ``_install_if``
    property. This property is a mapping of a collection of addons (by
    name) to a collection of addons (by name) [#]_, and if all the *key*
    addons are selected for installation, then the *value* ones will
    be selected as well. For example::
        _install_if = {
            ('sale','crm'): ['sale_crm'],
        }
    This will install the ``sale_crm`` addon if and only if both the
    ``sale`` and ``crm`` addons are selected for installation.
    You can define as many additionals as you wish, and additionals
    can overlap in key and value. For instance::
        _install_if = {
            ('sale','crm'): ['sale_crm'],
            ('sale','project'): ['project_mrp'],
        }
    will install both ``sale_crm`` and ``project_mrp`` if all of
    ``sale``, ``crm`` and ``project`` are selected for installation.
    Hook methods
    ------------
    Subclasses might also need to express dependencies more complex
    than that provided by additionals. In this case, it's possible to
    define methods of the form ``_if_%(name)s`` where ``name`` is the
    name of a boolean field. If the field is selected, then the
    corresponding module will be marked for installation *and* the
    hook method will be executed.
    Hook methods take the usual set of parameters (cr, uid, ids,
    context) and can return a collection of additional addons to
    install (if they return anything, otherwise they should not return
    anything, though returning any "falsy" value such as None or an
    empty collection will have the same effect).
    Complete control
    ----------------
    The last hook is to simply overload the ``modules_to_install``
    method, which implements all the mechanisms above. This method
    takes the usual set of parameters (cr, uid, ids, context) and
    returns a ``set`` of addons to install (addons selected by the
    above methods minus addons from the *basic* set which are already
    installed) [#]_ so an overloader can simply manipulate the ``set``
    returned by ``res_config_installer.modules_to_install`` to add or
    remove addons.
    Skipping the installer
    ----------------------
    Unless it is removed from the view, installers have a *skip*
    button which invokes ``action_skip`` (and the ``cancel`` hook from
    ``res.config``). Hooks and additionals *are not run* when skipping
    installation, even for already installed addons.
    Again, setup your hooks accordingly.
    .. [#] note that since a mapping key needs to be hashable, it's
           possible to use a tuple or a frozenset, but not a list or a
           regular set
    .. [#] because the already-installed modules are only pruned at
           the very end of ``modules_to_install``, additionals and
           hooks depending on them *are guaranteed to execute*. Setup
           your hooks accordingly.
    """
    _name = 'res.config.installer'
    _inherit = 'res.config'
    __logger = logging.getLogger(_name)
    # mapping {tuple-of-addon-names: list-of-addon-names}, see docstring
    _install_if = {}
    def _already_installed(self, cr, uid, context=None):
        """ For each module (boolean fields in a res.config.installer),
        check if it's already installed (either 'to install', 'to upgrade' or 'installed')
        and if it is, check it by default
        """
        modules = self.pool.get('ir.module.module')
        # candidate module names are all boolean columns of this installer
        selectable = [field for field in self._columns
                      if type(self._columns[field]) is fields.boolean]
        return modules.browse(
            cr, uid,
            modules.search(cr, uid,
                           [('name','in',selectable),
                            ('state','in',['to install', 'installed', 'to upgrade'])],
                           context=context),
            context=context)
    def modules_to_install(self, cr, uid, ids, context=None):
        """ selects all modules to install:
        * checked boolean fields
        * return values of hook methods. Hook methods are of the form
          ``_if_%(addon_name)s``, and are called if the corresponding
          addon is marked for installation. They take the arguments
          cr, uid, ids and context, and return an iterable of addon
          names
        * additionals, additionals are setup through the ``_install_if``
          class variable. ``_install_if`` is a dict of {iterable:iterable}
          where key and value are iterables of addon names.
          If all the addons in the key are selected for installation
          (warning: addons added through hooks don't count), then the
          addons in the value are added to the set of modules to install
        * not already installed
        """
        # base set: boolean columns the user actually ticked
        base = set(module_name
                   for installer in self.read(cr, uid, ids, context=context)
                   for module_name, to_install in installer.iteritems()
                   if module_name != 'id'
                   if type(self._columns[module_name]) is fields.boolean
                   if to_install)
        hooks_results = set()
        for module in base:
            hook = getattr(self, '_if_%s'%(module), None)
            if hook:
                hooks_results.update(hook(cr, uid, ids, context=None) or set())
        additionals = set(
            module for requirements, consequences \
                       in self._install_if.iteritems()
            if base.issuperset(requirements)
            for module in consequences)
        # prune already-installed modules only at the very end (see class
        # docstring: hooks/additionals still ran for them)
        return (base | hooks_results | additionals) - set(
            map(attrgetter('name'), self._already_installed(cr, uid, context)))
    def default_get(self, cr, uid, fields_list, context=None):
        ''' If an addon is already installed, check it by default
        '''
        defaults = super(res_config_installer, self).default_get(
            cr, uid, fields_list, context=context)
        return dict(defaults,
                    **dict.fromkeys(
                        map(attrgetter('name'),
                            self._already_installed(cr, uid, context=context)),
                        True))
    def fields_get(self, cr, uid, fields=None, context=None, write_access=True):
        """ If an addon is already installed, set it to readonly as
        res.config.installer doesn't handle uninstallations of already
        installed addons
        """
        fields = super(res_config_installer, self).fields_get(
            cr, uid, fields, context, write_access)
        for module in self._already_installed(cr, uid, context=context):
            if module.name not in fields:
                continue
            fields[module.name].update(
                readonly=True,
                help= ustr(fields[module.name].get('help', '')) +
                     _('\n\nThis addon is already installed on your system'))
        return fields
    def execute(self, cr, uid, ids, context=None):
        # Flag the selected addons 'to install' and restart the pool so the
        # module upgrade machinery actually installs them.
        modules = self.pool.get('ir.module.module')
        to_install = list(self.modules_to_install(
            cr, uid, ids, context=context))
        self.__logger.info('Selecting addons %s to install', to_install)
        modules.state_update(
            cr, uid,
            modules.search(cr, uid, [('name','in',to_install)]),
            'to install', ['uninstalled'], context=context)
        # Since we are about to restart the pool, the transaction _must_ be
        # committed now.
        cr.commit()
        new_db, self.pool = pooler.restart_pool(cr.dbname, update_module=True)
res_config_installer()
# Warning logged whenever a not-yet-migrated old-style wizard is executed.
DEPRECATION_MESSAGE = 'You are using an addon using old-style configuration '\
    'wizards (ir.actions.configuration.wizard). Old-style configuration '\
    'wizards have been deprecated.\n'\
    'The addon should be migrated to res.config objects.'
class ir_actions_configuration_wizard(osv.osv_memory):
    ''' Compatibility configuration wizard
    The old configuration wizard has been replaced by res.config, but in order
    not to break existing but not-yet-migrated addons, the old wizard was
    reintegrated and gutted.
    '''
    _name='ir.actions.configuration.wizard'
    _inherit = 'res.config'
    __logger = logging.getLogger(_name)
    def _next_action_note(self, cr, uid, ids, context=None):
        # Text shown to the user: the next todo's note when available,
        # otherwise a generic "continue"/"finished" message.
        next = self._next_action(cr, uid)
        if next:
            # if the next one is also an old-style extension, you never know...
            if next.note:
                return next.note
            return _("Click 'Continue' to configure the next addon...")
        return _("Your database is now fully configured.\n\n"\
                 "Click 'Continue' and enjoy your OpenERP experience...")
    _columns = {
        'note': fields.text('Next Wizard', readonly=True),
    }
    _defaults = {
        'note': _next_action_note,
    }
    def execute(self, cr, uid, ids, context=None):
        # Gutted on purpose: only logs the deprecation warning.
        self.__logger.warn(DEPRECATION_MESSAGE)
ir_actions_configuration_wizard()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| xrg/openerp-server | bin/addons/base/res/res_config.py | Python | agpl-3.0 | 18,280 | 0.002899 |
import os
import tempfile
from datetime import datetime
from listenbrainz_spark.tests import SparkTestCase
from listenbrainz_spark import utils, path, config
from pyspark.sql import Row
class UtilsTestCase(SparkTestCase):
    """Tests for the listenbrainz_spark.utils HDFS/DataFrame helpers."""
    # use path_ as prefix for all paths in this class.
    path_ = "/test"
    temp_path_ = "/temp"
    def tearDown(self):
        # Remove both scratch HDFS directories so tests stay independent.
        if utils.path_exists(self.path_):
            utils.delete_dir(self.path_, recursive=True)
        if utils.path_exists(self.temp_path_):
            utils.delete_dir(self.temp_path_, recursive=True)
    def test_append_dataframe(self):
        # append() must create the parquet on first use, then add rows.
        hdfs_path = self.path_ + '/test_df.parquet'
        df = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
        utils.append(df, hdfs_path)
        new_df = utils.read_files_from_HDFS(hdfs_path)
        self.assertEqual(new_df.count(), 1)
        df = utils.create_dataframe([Row(column1=3, column2=4)], schema=None)
        utils.append(df, hdfs_path)
        appended_df = utils.read_files_from_HDFS(hdfs_path)
        self.assertEqual(appended_df.count(), 2)
    def test_create_dataframe(self):
        # round-trip: create, save as parquet, read back.
        hdfs_path = self.path_ + '/test_df.parquet'
        df = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
        self.assertEqual(df.count(), 1)
        utils.save_parquet(df, hdfs_path)
        received_df = utils.read_files_from_HDFS(hdfs_path)
        self.assertEqual(received_df.count(), 1)
    def test_create_dir(self):
        utils.create_dir(self.path_)
        status = utils.path_exists(self.path_)
        self.assertTrue(status)
    def test_delete_dir(self):
        utils.create_dir(self.path_)
        utils.delete_dir(self.path_)
        status = utils.path_exists(self.path_)
        self.assertFalse(status)
    def test_get_listens(self):
        # Two monthly partitions; get_listens should read across the range.
        from_date = datetime(2019, 10, 1)
        to_date = datetime(2019, 11, 1)
        df = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
        dest_path = self.path_ + '/{}/{}.parquet'.format(from_date.year, from_date.month)
        utils.save_parquet(df, dest_path)
        df = utils.create_dataframe([Row(column1=3, column2=4)], schema=None)
        dest_path = self.path_ + '/{}/{}.parquet'.format(to_date.year, to_date.month)
        utils.save_parquet(df, dest_path)
        received_df = utils.get_listens(from_date, to_date, self.path_)
        self.assertEqual(received_df.count(), 2)
    def test_path_exists(self):
        utils.create_dir(self.path_)
        status = utils.path_exists(self.path_)
        self.assertTrue(status)
    def test_save_parquet(self):
        df = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
        utils.save_parquet(df, self.path_)
        received_df = utils.read_files_from_HDFS(self.path_)
        self.assertEqual(received_df.count(), 1)
    def test_upload_to_HDFS(self):
        # NOTE(review): the mkdtemp() directory is never removed -- consider
        # registering a cleanup for it.
        temp_file = tempfile.mkdtemp()
        local_path = os.path.join(temp_file, 'test_file.txt')
        with open(local_path, 'w') as f:
            f.write('test file')
        self.path_ = '/test/upload.parquet'
        utils.upload_to_HDFS(self.path_, local_path)
        status = utils.path_exists(self.path_)
        self.assertTrue(status)
    def test_rename(self):
        utils.create_dir(self.path_)
        test_exists = utils.path_exists(self.path_)
        self.assertTrue(test_exists)
        utils.rename(self.path_, self.temp_path_)
        test_exists = utils.path_exists(self.path_)
        self.assertFalse(test_exists)
        temp_exists = utils.path_exists(self.temp_path_)
        self.assertTrue(temp_exists)
        utils.delete_dir(self.temp_path_)
    def test_copy(self):
        # Test directories
        utils.create_dir(self.path_)
        utils.create_dir(os.path.join(self.path_, "a"))
        utils.create_dir(os.path.join(self.path_, "b"))
        # DataFrames to create parquets
        df_a = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
        df_b = utils.create_dataframe([Row(column1=3, column2=4)], schema=None)
        df_c = utils.create_dataframe([Row(column1=5, column2=6)], schema=None)
        # Save DataFrames in respective directories
        utils.save_parquet(df_a, os.path.join(self.path_, "a", "df_a.parquet"))
        utils.save_parquet(df_b, os.path.join(self.path_, "b", "df_b.parquet"))
        utils.save_parquet(df_c, os.path.join(self.path_, "df_c.parquet"))
        utils.copy(self.path_, self.temp_path_, overwrite=True)
        # Read copied DataFrame
        cp_df_a = utils.read_files_from_HDFS(os.path.join(self.temp_path_, "a", "df_a.parquet"))
        cp_df_b = utils.read_files_from_HDFS(os.path.join(self.temp_path_, "b", "df_b.parquet"))
        cp_df_c = utils.read_files_from_HDFS(os.path.join(self.temp_path_, "df_c.parquet"))
        # Check if both DataFrames are same
        self.assertListEqual(df_a.rdd.map(list).collect(), cp_df_a.rdd.map(list).collect())
        self.assertListEqual(df_b.rdd.map(list).collect(), cp_df_b.rdd.map(list).collect())
        self.assertListEqual(df_c.rdd.map(list).collect(), cp_df_c.rdd.map(list).collect())
| Freso/listenbrainz-server | listenbrainz_spark/utils/tests/test_init.py | Python | gpl-2.0 | 5,143 | 0.001556 |
# Created By: Virgil Dupras
# Created On: 2009-09-19
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from PyQt4.QtCore import Qt, SIGNAL, QMimeData, QByteArray
from PyQt4.QtGui import QPixmap
from hscommon.conflict import is_conflicted
from hscommon.util import dedupe, format_size, format_time
from hscommon.path import Path
from qtlib.tree_model import TreeNode, TreeModel
from core.fs_utils import smart_move
# Custom MIME type used to drag&drop lists of item paths between the trees.
MIME_PATHS = 'application/musicguru.paths'
# Root names used as the first element of a dragged path, identifying which
# tree (design board or ignore box) the item came from.
DESIGN_BOARD_NAME = '<design board>'
IGNORE_BOX_NAME = '<ignore box>'
class FSNode(TreeNode):
    """Base tree node wrapping a filesystem item (``ref``).
    Display data and icon name are computed lazily by the subclasses'
    _getData()/_getImageName() and cached until invalidate() is called.
    """
    def __init__(self, model, parent, ref, row):
        TreeNode.__init__(self, model, parent, row)
        self.ref = ref
        # lazily-filled caches, see the `data` and `imageName` properties
        self._data = None
        self._imageName = None
    def __repr__(self):
        return "<FSNode %s>" % self.ref.name
    def _getData(self):
        raise NotImplementedError()
    def _getImageName(self):
        raise NotImplementedError()
    def invalidate(self, with_subnodes=False):
        # Drop cached display values so they are recomputed on next access.
        if with_subnodes:
            for node in self.subnodes:
                node.invalidate(with_subnodes=True)
        self._data = None
        self._imageName = None
        TreeNode.invalidate(self)
    @property
    def data(self):
        if self._data is None:
            self._data = self._getData()
        return self._data
    @property
    def imageName(self):
        if self._imageName is None:
            self._imageName = self._getImageName()
        return self._imageName
class SongNode(FSNode):
    """Leaf node displaying a single song (one row per FSModel.HEADER)."""
    def _getData(self):
        song = self.ref
        return [
            song.name,
            song.original.parent_volume.name,
            0,
            format_size(song.size, 2, 2, False),
            format_time(song.duration, with_hours=False),
        ]
    def _getImageName(self):
        # conflicted songs get a distinctive icon
        return 'song_conflict' if is_conflicted(self.ref.name) else 'song'
    def _getChildren(self):
        return []
class FolderNode(FSNode):
    """Directory node; aggregates stats over all songs it contains."""
    def _getData(self):
        folder = self.ref
        # distinct source volumes of every song under this folder
        parent_volumes = dedupe(song.original.parent_volume for song in folder.iterallfiles())
        return [
            folder.name,
            ','.join(l.name for l in parent_volumes),
            folder.get_stat('filecount'),
            format_size(folder.get_stat('size'), 2, 2, False),
            format_time(folder.get_stat('duration')),
        ]
    def _getImageName(self):
        return 'folder_conflict' if self.ref.allconflicts else 'folder'
    def _createNode(self, ref, row):
        # sub-folders recurse; files become leaves
        if ref.is_container:
            return FolderNode(self.model, self, ref, row)
        else:
            return SongNode(self.model, self, ref, row)
    def _getChildren(self):
        return self.ref.dirs + self.ref.files
class DummyNode(FSNode):
    """Placeholder node: renders as an empty row with no icon or children."""
    def _getData(self):
        # one empty cell per column of FSModel.HEADER
        return ['', '', '', '', '']
    def _getImageName(self):
        return ''
    def _getChildren(self):
        return []
class FSModel(TreeModel):
    """Qt item model exposing a musicGuru folder tree, with drag&drop moves."""
    HEADER = ['Name', 'Location', 'Songs', 'Size (MB)', 'Time']
    def __init__(self, app, ref, name):
        self.app = app
        self.ref = ref
        self.name = name # the name is going to be the first item in the paths passed around in d&d
        TreeModel.__init__(self)
    def _createDummyNode(self, parent, row):
        return DummyNode(self, parent, None, row)
    def _createNode(self, ref, row):
        if ref.is_container:
            return FolderNode(self, None, ref, row)
        else:
            return SongNode(self, None, ref, row)
    def _getChildren(self):
        return self.ref.dirs
    def columnCount(self, parent):
        return len(self.HEADER)
    def data(self, index, role):
        if not index.isValid():
            return None
        node = index.internalPointer()
        if role == Qt.DisplayRole:
            return node.data[index.column()]
        elif role == Qt.DecorationRole:
            # only the Name column carries an icon
            if index.column() == 0:
                return QPixmap(":/{0}".format(node.imageName))
        elif role == Qt.EditRole:
            if index.column() == 0:
                return node.data[index.column()]
        return None
    def dropMimeData(self, mimeData, action, row, column, parentIndex):
        # In the test I have made, the row and column args always seem to be -1/-1 except when
        # parentIndex is invalid (which means that the drop destination is the root node).
        def find_path(path):
            # the first path element names the source tree (see MIME_PATHS)
            if path[0] == DESIGN_BOARD_NAME:
                return self.app.board.find_path(path[1:])
            elif path[0] == IGNORE_BOX_NAME:
                return self.app.board.ignore_box.find_path(path[1:])
        if not mimeData.hasFormat(MIME_PATHS):
            return False
        if parentIndex.isValid():
            destNode = parentIndex.internalPointer()
        else:
            destNode = self
        paths = str(mimeData.data(MIME_PATHS), 'utf-8').split('\n')
        sourceItems = set(find_path(Path(path)) for path in paths)
        # discard items whose parent is also being moved, or that are
        # already direct children of the destination
        sourceItems = set(item for item in sourceItems if item.parent not in sourceItems | set([destNode.ref]))
        if not sourceItems:
            return False
        smart_move(sourceItems, destNode.ref, allow_merge=True)
        destNode.invalidate()
        # InsertRow calls have to be made at correct indexes or else the subsequent removeRows call
        # will be made at incorrect indexes. To do so, we just go through every subitem of destNode.ref
        # and if it's in sourceItems, we call insertRow.
        # destNode.subnodes
        for index, node in enumerate(destNode.subnodes):
            if node.ref in sourceItems:
                self.insertRow(index, parentIndex)
        return True
    def flags(self, index):
        if not index.isValid():
            return Qt.ItemIsEnabled | Qt.ItemIsDropEnabled
        flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled | Qt.ItemIsDropEnabled
        if index.column() == 0:
            flags |= Qt.ItemIsEditable
        return flags
    def headerData(self, section, orientation, role):
        if orientation == Qt.Horizontal and role == Qt.DisplayRole and section < len(self.HEADER):
            return self.HEADER[section]
        return None
    def insertRows(self, row, count, parentIndex):
        node = parentIndex.internalPointer() if parentIndex.isValid() else self
        self.beginInsertRows(parentIndex, row, row + count - 1)
        node.invalidate()
        self.endInsertRows()
        return True
    def mimeData(self, indexes):
        # Encode the selection as newline-separated paths rooted at self.name.
        nodes = dedupe(index.internalPointer() for index in indexes)
        paths = [str(self.name + node.ref.path) for node in nodes]
        data = '\n'.join(paths).encode('utf-8')
        mimeData = QMimeData()
        mimeData.setData(MIME_PATHS, QByteArray(data))
        return mimeData
    def mimeTypes(self):
        return [MIME_PATHS]
    def removeRows(self, row, count, parentIndex):
        node = parentIndex.internalPointer() if parentIndex.isValid() else self
        self.beginRemoveRows(parentIndex, row, row + count - 1)
        node.invalidate()
        self.endRemoveRows()
        return True
    def refreshNode(self, node):
        # node=None means "refresh the whole tree"
        if node is None:
            self.invalidate()
            return
        node.invalidate(with_subnodes=True)
        self.emit(SIGNAL('layoutChanged()'))
    def supportedDropActions(self):
        return Qt.MoveAction
| hsoft/musicguru | qt/fs_model.py | Python | bsd-3-clause | 7,759 | 0.006702 |
#!/Users/tiradoe/Projects/Giflocker/bin/python3
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import logging
import sys
from PIL import Image
# No arguments: print usage and exit.
if len(sys.argv) == 1:
    print("PIL File 0.4/2003-09-30 -- identify image files")
    print("Usage: pilfile [option] files...")
    print("Options:")
    print(" -f list supported file formats")
    print(" -i show associated info and tile data")
    print(" -v verify file headers")
    print(" -q quiet, don't warn for unidentified/missing/broken files")
    sys.exit(1)
try:
    opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
    print(v)
    sys.exit(1)
verbose = quiet = verify = 0
logging_level = "WARNING"
for o, a in opt:
    if o == "-f":
        # -f: dump the list of registered formats, then exit.
        Image.init()
        id = sorted(Image.ID)
        print("Supported formats:")
        for i in id:
            print(i, end=' ')
        sys.exit(1)
    elif o == "-i":
        verbose = 1
    elif o == "-q":
        quiet = 1
    elif o == "-v":
        verify = 1
    elif o == "-D":
        logging_level = "DEBUG"
logging.basicConfig(level=logging_level)
def globfix(files):
    """Expand wildcard patterns on Windows; elsewhere return files as-is.

    Windows shells pass wildcards through unexpanded, so we glob manually.
    """
    if sys.platform != "win32":
        return files
    expanded = []
    for name in files:
        if glob.has_magic(name):
            expanded.extend(glob.glob(name))
        else:
            expanded.append(name)
    return expanded
# Identify each file named on the command line; per-file failures are
# reported (unless -q) instead of aborting the whole run.
for file in globfix(args):
    try:
        im = Image.open(file)
        print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
        if verbose:
            print(im.info, im.tile, end=' ')
        print()
        if verify:
            try:
                im.verify()
            # `except Exception` (not a bare except) so KeyboardInterrupt
            # and SystemExit still abort the run.
            except Exception:
                if not quiet:
                    print("failed to verify image", end=' ')
                    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
    except IOError as v:
        if not quiet:
            print(file, "failed:", v)
    except Exception:
        import traceback
        if not quiet:
            print(file, "failed:", "unexpected error")
            traceback.print_exc(file=sys.stdout)
| tiradoe/Giflocker | bin/pilfile.py | Python | lgpl-3.0 | 2,695 | 0.001113 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import mock
import unittest
from tempfile import mkdtemp
from shutil import rmtree
from time import gmtime
from test.unit import FakeLogger
import itertools
import random
import json
from six import BytesIO
from six import StringIO
import xml.dom.minidom
from swift import __version__ as swift_version
from swift.common.swob import (Request, WsgiBytesIO, HTTPNoContent)
from swift.common import constraints
from swift.account.server import AccountController
from swift.common.utils import (normalize_timestamp, replication, public,
mkdirs, storage_directory)
from swift.common.request_helpers import get_sys_meta_prefix
from test.unit import patch_policies, debug_logger
from swift.common.storage_policy import StoragePolicy, POLICIES
@patch_policies
class TestAccountController(unittest.TestCase):
"""Test swift.account.server.AccountController"""
    def setUp(self):
        """Set up for testing swift.account.server.AccountController"""
        self.testdir_base = mkdtemp()
        self.testdir = os.path.join(self.testdir_base, 'account_server')
        # mount_check off: the scratch dir does not need to be a real mount
        self.controller = AccountController(
            {'devices': self.testdir, 'mount_check': 'false'})
def tearDown(self):
"""Tear down for testing swift.account.server.AccountController"""
try:
rmtree(self.testdir_base)
except OSError as err:
if err.errno != errno.ENOENT:
raise
    def test_OPTIONS(self):
        # OPTIONS must advertise exactly the seven supported verbs and the
        # server type/version header.
        server_handler = AccountController(
            {'devices': self.testdir, 'mount_check': 'false'})
        req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
        req.content_length = 0
        resp = server_handler.OPTIONS(req)
        self.assertEqual(200, resp.status_int)
        for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split():
            self.assertTrue(
                verb in resp.headers['Allow'].split(', '))
        self.assertEqual(len(resp.headers['Allow'].split(', ')), 7)
        self.assertEqual(resp.headers['Server'],
                         (server_handler.server_type + '/' + swift_version))
    def test_DELETE_not_found(self):
        # DELETE of a never-created account: 404, no X-Account-Status header.
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
                                                  'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue('X-Account-Status' not in resp.headers)
    def test_DELETE_empty(self):
        # PUT then DELETE an empty account: 204 with status 'Deleted'.
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
                                                  'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
    def test_DELETE_not_empty(self):
        # Account holding one container can still be deleted.
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
                                                  'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        # We now allow deleting non-empty accounts
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
    def test_DELETE_now_empty(self):
        # Create a container, then "delete" it (delete ts > put ts), then
        # the now-empty account deletes cleanly.
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a/c1',
            environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '2',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
                                                  'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
    def test_DELETE_invalid_partition(self):
        # '.' is not a valid partition component: 400.
        req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'DELETE',
                                                  'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 400)
    def test_DELETE_timestamp_not_float(self):
        # Non-numeric X-Timestamp is rejected with 400.
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': 'not-float'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 400)
    def test_DELETE_insufficient_storage(self):
        # With mount_check enabled (default), a missing device gives 507.
        self.controller = AccountController({'devices': self.testdir})
        req = Request.blank(
            '/sda-null/p/a', environ={'REQUEST_METHOD': 'DELETE',
                                      'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 507)
    def test_REPLICATE_insufficient_storage(self):
        # Force the mount check to fail: REPLICATE must answer 507.
        conf = {'devices': self.testdir, 'mount_check': 'true'}
        self.account_controller = AccountController(conf)
        def fake_check_mount(*args, **kwargs):
            return False
        with mock.patch("swift.common.constraints.check_mount",
                        fake_check_mount):
            req = Request.blank('/sda1/p/suff',
                                environ={'REQUEST_METHOD': 'REPLICATE'},
                                headers={})
            resp = req.get_response(self.account_controller)
            self.assertEqual(resp.status_int, 507)
def test_REPLICATE_works(self):
mkdirs(os.path.join(self.testdir, 'sda1', 'account', 'p', 'a', 'a'))
db_file = os.path.join(self.testdir, 'sda1',
storage_directory('account', 'p', 'a'),
'a' + '.db')
open(db_file, 'w')
def fake_rsync_then_merge(self, drive, db_file, args):
return HTTPNoContent()
with mock.patch("swift.common.db_replicator.ReplicatorRpc."
"rsync_then_merge", fake_rsync_then_merge):
req = Request.blank('/sda1/p/a/',
environ={'REQUEST_METHOD': 'REPLICATE'},
headers={})
json_string = '["rsync_then_merge", "a.db"]'
inbuf = WsgiBytesIO(json_string)
req.environ['wsgi.input'] = inbuf
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
# check valuerror
wsgi_input_valuerror = '["sync" : sync, "-1"]'
inbuf1 = WsgiBytesIO(wsgi_input_valuerror)
req.environ['wsgi.input'] = inbuf1
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
    def test_HEAD_not_found(self):
        """HEAD of a missing account is 404; deleted-not-reaped adds status.

        First HEAD has no X-Account-Status header; after PUT + container PUT +
        account DELETE, HEAD is still 404 but with X-Account-Status: Deleted.
        """
        # Test the case in which account does not exist (can be recreated)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue('X-Account-Status' not in resp.headers)
        # Test the case in which account was deleted but not yet reaped
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
                                                  'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
    def test_HEAD_empty_account(self):
        """HEAD of a freshly-PUT account reports zero counts and bytes."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['x-account-container-count'], '0')
        self.assertEqual(resp.headers['x-account-object-count'], '0')
        self.assertEqual(resp.headers['x-account-bytes-used'], '0')
    def test_HEAD_with_containers(self):
        """HEAD aggregates container/object/byte counts across containers.

        Two empty containers give counts 2/0/0; after re-PUTting them with
        object counts 1+3 and bytes 2+4, HEAD reports 2/4/6.
        """
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['x-account-container-count'], '2')
        self.assertEqual(resp.headers['x-account-object-count'], '0')
        self.assertEqual(resp.headers['x-account-bytes-used'], '0')
        # Update both containers with nonzero object counts and bytes.
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '1',
                                     'X-Bytes-Used': '2',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '3',
                                     'X-Bytes-Used': '4',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD',
                                                  'HTTP_X_TIMESTAMP': '5'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['x-account-container-count'], '2')
        self.assertEqual(resp.headers['x-account-object-count'], '4')
        self.assertEqual(resp.headers['x-account-bytes-used'], '6')
def test_HEAD_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
    def test_HEAD_invalid_content_type(self):
        """HEAD with an unsatisfiable Accept header returns 406."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'},
                            headers={'Accept': 'application/plain'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 406)
    def test_HEAD_insufficient_storage(self):
        """HEAD against a nonexistent device ('sda-null') returns 507."""
        self.controller = AccountController({'devices': self.testdir})
        req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'HEAD',
                                                      'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 507)
    def test_HEAD_invalid_format(self):
        """HEAD with a format parameter that is invalid UTF-8 returns 400."""
        format = '%D1%BD%8A9'  # invalid UTF-8; should be %E1%BD%8A9 (E -> D)
        req = Request.blank('/sda1/p/a?format=' + format,
                            environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 400)
    def test_PUT_not_found(self):
        """Container PUT for a nonexistent account returns 404, no status."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-PUT-Timestamp': normalize_timestamp(1),
                     'X-DELETE-Timestamp': normalize_timestamp(0),
                     'X-Object-Count': '1',
                     'X-Bytes-Used': '1',
                     'X-Timestamp': normalize_timestamp(0)})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue('X-Account-Status' not in resp.headers)
def test_PUT(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
    def test_PUT_simulated_create_race(self):
        """PUT still succeeds (202) when a create race is simulated.

        InterceptedAcBr breaks the broker's db_file path in __init__ while
        state is 'race' and restores it in initialize(), so the DB appears
        missing at construction but exists when initialized — mimicking a
        concurrent create. Both the initial PUT (201) and the raced PUT (202)
        must succeed.
        """
        state = ['initial']
        from swift.account.backend import AccountBroker as OrigAcBr

        class InterceptedAcBr(OrigAcBr):

            def __init__(self, *args, **kwargs):
                super(InterceptedAcBr, self).__init__(*args, **kwargs)
                if state[0] == 'initial':
                    # Do nothing initially
                    pass
                elif state[0] == 'race':
                    # Save the original db_file attribute value
                    self._saved_db_file = self.db_file
                    self.db_file += '.doesnotexist'

            def initialize(self, *args, **kwargs):
                if state[0] == 'initial':
                    # Do nothing initially
                    pass
                elif state[0] == 'race':
                    # Restore the original db_file attribute to get the race
                    # behavior
                    self.db_file = self._saved_db_file
                return super(InterceptedAcBr, self).initialize(*args, **kwargs)

        with mock.patch("swift.account.server.AccountBroker", InterceptedAcBr):
            req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                      'HTTP_X_TIMESTAMP': '0'})
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 201)
            state[0] = "race"
            req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                      'HTTP_X_TIMESTAMP': '1'})
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 202)
    def test_PUT_after_DELETE(self):
        """PUT of a recently deleted account is refused with 403.

        The response body is 'Recently deleted' and X-Account-Status is
        'Deleted'.
        """
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': normalize_timestamp(1)})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': normalize_timestamp(1)})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': normalize_timestamp(2)})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 403)
        self.assertEqual(resp.body, 'Recently deleted')
        self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
    def test_PUT_non_utf8_metadata(self):
        """Non-UTF-8 bytes are rejected (400) only in persisted metadata.

        X-Account-Meta-* and X-Account-Sysmeta-* headers with invalid UTF-8
        yield 400; an unrelated header with the same bytes is ignored (202).
        """
        # Set metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     'X-Account-Meta-Test': b'\xff'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 400)
        # Set sysmeta header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     'X-Account-Sysmeta-Access-Control': b'\xff'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 400)
        # Send other
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     'X-Will-Not-Be-Saved': b'\xff'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 202)
    def test_PUT_GET_metadata(self):
        """User metadata lifecycle via PUT, observed with GET.

        Covers: set on create, additive set via POST, newer-timestamp update,
        older-timestamp update ignored, and removal by setting empty value.
        """
        # Set metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     'X-Account-Meta-Test': 'Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get('x-account-meta-test'), 'Value')
        # Set another metadata header, ensuring old one doesn't disappear
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     'X-Account-Meta-Test2': 'Value2'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get('x-account-meta-test'), 'Value')
        self.assertEqual(resp.headers.get('x-account-meta-test2'), 'Value2')
        # Update metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(3),
                     'X-Account-Meta-Test': 'New Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 202)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
        # Send old update to metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(2),
                     'X-Account-Meta-Test': 'Old Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 202)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(4),
                     'X-Account-Meta-Test': ''})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 202)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertTrue('x-account-meta-test' not in resp.headers)
    def test_PUT_GET_sys_metadata(self):
        """System metadata lifecycle via PUT, observed with GET.

        Same sequence as the user-metadata test, but uses the account
        sysmeta prefix from get_sys_meta_prefix(): set, additive POST,
        newer-timestamp update, stale update ignored, removal via empty value.
        """
        prefix = get_sys_meta_prefix('account')
        hdr = '%stest' % prefix
        hdr2 = '%stest2' % prefix
        # Set metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     hdr.title(): 'Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get(hdr), 'Value')
        # Set another metadata header, ensuring old one doesn't disappear
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     hdr2.title(): 'Value2'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get(hdr), 'Value')
        self.assertEqual(resp.headers.get(hdr2), 'Value2')
        # Update metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(3),
                     hdr.title(): 'New Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 202)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get(hdr), 'New Value')
        # Send old update to metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(2),
                     hdr.title(): 'Old Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 202)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get(hdr), 'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(4),
                     hdr.title(): ''})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 202)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertTrue(hdr not in resp.headers)
def test_PUT_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
    def test_PUT_insufficient_storage(self):
        """PUT against a nonexistent device ('sda-null') returns 507."""
        self.controller = AccountController({'devices': self.testdir})
        req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                      'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 507)
    def test_POST_HEAD_metadata(self):
        """User metadata lifecycle via POST, observed with HEAD.

        Covers: set, newer-timestamp update, stale update ignored, and
        removal by setting the header to an empty value.
        """
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(1)})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        # Set metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     'X-Account-Meta-Test': 'Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get('x-account-meta-test'), 'Value')
        # Update metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(3),
                     'X-Account-Meta-Test': 'New Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
        # Send old update to metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(2),
                     'X-Account-Meta-Test': 'Old Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(4),
                     'X-Account-Meta-Test': ''})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertTrue('x-account-meta-test' not in resp.headers)
    def test_POST_HEAD_sys_metadata(self):
        """System metadata lifecycle via POST, observed with HEAD.

        Same sequence as the user-metadata POST/HEAD test but using the
        account sysmeta prefix from get_sys_meta_prefix().
        """
        prefix = get_sys_meta_prefix('account')
        hdr = '%stest' % prefix
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(1)})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        # Set metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(1),
                     hdr.title(): 'Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get(hdr), 'Value')
        # Update metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(3),
                     hdr.title(): 'New Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get(hdr), 'New Value')
        # Send old update to metadata header
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(2),
                     hdr.title(): 'Old Value'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers.get(hdr), 'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank(
            '/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': normalize_timestamp(4),
                     hdr.title(): ''})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertTrue(hdr not in resp.headers)
def test_POST_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
    def test_POST_timestamp_not_float(self):
        """POST with a non-numeric X-Timestamp header returns 400."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST',
                                                  'HTTP_X_TIMESTAMP': '0'},
                            headers={'X-Timestamp': 'not-float'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 400)
    def test_POST_insufficient_storage(self):
        """POST against a nonexistent device ('sda-null') returns 507."""
        self.controller = AccountController({'devices': self.testdir})
        req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'POST',
                                                      'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 507)
    def test_POST_after_DELETE_not_found(self):
        """POST to a deleted account returns 404 with status 'Deleted'."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
                                                  'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST',
                                                  'HTTP_X_TIMESTAMP': '2'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
    def test_GET_not_found_plain(self):
        """GET of a missing account is 404; deleted-not-reaped adds status.

        First GET has no X-Account-Status header; after PUT + container PUT +
        account DELETE, GET is still 404 but with X-Account-Status: Deleted.
        """
        # Test the case in which account does not exist (can be recreated)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue('X-Account-Status' not in resp.headers)
        # Test the case in which account was deleted but not yet reaped
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
                                                  'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_GET_not_found_json(self):
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_GET_not_found_xml(self):
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
    def test_GET_empty_account_plain(self):
        """GET of an empty account returns 204 with text/plain content."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['Content-Type'],
                         'text/plain; charset=utf-8')
    def test_GET_empty_account_json(self):
        """GET of an empty account with format=json returns 200 JSON body."""
        req = Request.blank('/sda1/p/a?format=json',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['Content-Type'],
                         'application/json; charset=utf-8')
    def test_GET_empty_account_xml(self):
        """GET of an empty account with format=xml returns 200 XML body."""
        req = Request.blank('/sda1/p/a?format=xml',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['Content-Type'],
                         'application/xml; charset=utf-8')
    def test_GET_over_limit(self):
        """GET with limit above ACCOUNT_LISTING_LIMIT returns 412."""
        req = Request.blank(
            '/sda1/p/a?limit=%d' % (constraints.ACCOUNT_LISTING_LIMIT + 1),
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 412)
    def test_GET_with_containers_plain(self):
        """Plain-text GET lists container names, one per line.

        Also checks content type/charset and that an unrecognized ?format=
        value falls back to the plain-text listing.
        """
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body.strip().split('\n'), ['c1', 'c2'])
        # Re-PUT the containers with nonzero stats; the name listing is
        # unchanged.
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '1',
                                     'X-Bytes-Used': '2',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '3',
                                     'X-Bytes-Used': '4',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body.strip().split('\n'), ['c1', 'c2'])
        self.assertEqual(resp.content_type, 'text/plain')
        self.assertEqual(resp.charset, 'utf-8')
        # test unknown format uses default plain
        req = Request.blank('/sda1/p/a?format=somethinglese',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body.strip().split('\n'), ['c1', 'c2'])
        self.assertEqual(resp.content_type, 'text/plain')
        self.assertEqual(resp.charset, 'utf-8')
    def test_GET_with_containers_json(self):
        """JSON GET lists containers with per-container count and bytes.

        Empty containers report count/bytes of 0; after re-PUT with stats
        1/2 and 3/4, the JSON listing reflects those values.
        """
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(json.loads(resp.body),
                         [{'count': 0, 'bytes': 0, 'name': 'c1'},
                          {'count': 0, 'bytes': 0, 'name': 'c2'}])
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '1',
                                     'X-Bytes-Used': '2',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '3',
                                     'X-Bytes-Used': '4',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(json.loads(resp.body),
                         [{'count': 1, 'bytes': 2, 'name': 'c1'},
                          {'count': 3, 'bytes': 4, 'name': 'c2'}])
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(resp.charset, 'utf-8')
    def test_GET_with_containers_xml(self):
        """XML GET lists containers with name/count/bytes child nodes.

        Walks the parsed DOM: the root is <account>, each child is a
        <container> whose children are 'bytes', 'count', 'name'. Verified
        first with zero stats, then after re-PUT with stats 1/2 and 3/4.
        """
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.content_type, 'application/xml')
        self.assertEqual(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEqual(dom.firstChild.nodeName, 'account')
        # '#text' nodes are whitespace between elements; skip them.
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEqual(len(listing), 2)
        self.assertEqual(listing[0].nodeName, 'container')
        container = [n for n in listing[0].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c1')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '0')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '0')
        self.assertEqual(listing[-1].nodeName, 'container')
        container = \
            [n for n in listing[-1].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c2')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '0')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '0')
        # Re-PUT with nonzero stats and re-verify the XML listing.
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '1',
                                     'X-Bytes-Used': '2',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '3',
                                     'X-Bytes-Used': '4',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEqual(dom.firstChild.nodeName, 'account')
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEqual(len(listing), 2)
        self.assertEqual(listing[0].nodeName, 'container')
        container = [n for n in listing[0].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c1')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '1')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '2')
        self.assertEqual(listing[-1].nodeName, 'container')
        container = [
            n for n in listing[-1].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c2')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '3')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '4')
        self.assertEqual(resp.charset, 'utf-8')
    def test_GET_xml_escapes_account_name(self):
        """Quote characters in the account name survive XML round-trip.

        The account is named "' (URL-encoded %22%27); the parsed XML's
        root 'name' attribute must decode back to exactly that.
        """
        req = Request.blank(
            '/sda1/p/%22%27',  # "'
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/%22%27?format=xml',
            environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEqual(dom.firstChild.attributes['name'].value, '"\'')
    def test_GET_xml_escapes_container_name(self):
        """Special characters in a container name survive XML round-trip.

        A container named "<word (URL-encoded %22%3Cword) must come back
        intact when the XML listing is parsed.
        """
        req = Request.blank(
            '/sda1/p/a',
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a/%22%3Cword',  # "<word
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                     'HTTP_X_PUT_TIMESTAMP': '1', 'HTTP_X_OBJECT_COUNT': '0',
                     'HTTP_X_DELETE_TIMESTAMP': '0', 'HTTP_X_BYTES_USED': '1'})
        req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a?format=xml',
            environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEqual(
            dom.firstChild.firstChild.nextSibling.firstChild.firstChild.data,
            '"<word')
def test_GET_xml_escapes_container_name_as_subdir(self):
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/%22%3Cword-test', # "<word-test
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_PUT_TIMESTAMP': '1', 'HTTP_X_OBJECT_COUNT': '0',
'HTTP_X_DELETE_TIMESTAMP': '0', 'HTTP_X_BYTES_USED': '1'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a?format=xml&delimiter=-',
environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(
dom.firstChild.firstChild.nextSibling.attributes['name'].value,
'"<word-')
def test_GET_limit_marker_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
for c in range(5):
req = Request.blank(
'/sda1/p/a/c%d' % c,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': str(c + 1),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '3',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?limit=3',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'), ['c0', 'c1', 'c2'])
req = Request.blank('/sda1/p/a?limit=3&marker=c2',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'), ['c3', 'c4'])
def test_GET_limit_marker_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
for c in range(5):
req = Request.blank(
'/sda1/p/a/c%d' % c,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': str(c + 1),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '3',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?limit=3&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body),
[{'count': 2, 'bytes': 3, 'name': 'c0'},
{'count': 2, 'bytes': 3, 'name': 'c1'},
{'count': 2, 'bytes': 3, 'name': 'c2'}])
req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(json.loads(resp.body),
[{'count': 2, 'bytes': 3, 'name': 'c3'},
{'count': 2, 'bytes': 3, 'name': 'c4'}])
def test_GET_limit_marker_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
for c in range(5):
req = Request.blank(
'/sda1/p/a/c%d' % c,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': str(c + 1),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '3',
'X-Timestamp': normalize_timestamp(c)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?limit=3&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEqual(len(listing), 3)
self.assertEqual(listing[0].nodeName, 'container')
container = [n for n in listing[0].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c0')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '3')
self.assertEqual(listing[-1].nodeName, 'container')
container = [
n for n in listing[-1].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c2')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '3')
req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEqual(len(listing), 2)
self.assertEqual(listing[0].nodeName, 'container')
container = [n for n in listing[0].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c3')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '3')
self.assertEqual(listing[-1].nodeName, 'container')
container = [
n for n in listing[-1].childNodes if n.nodeName != '#text']
self.assertEqual(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEqual(node.firstChild.nodeValue, 'c4')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEqual(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEqual(node.firstChild.nodeValue, '3')
def test_GET_accept_wildcard(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = '*/*'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, 'c1\n')
def test_GET_accept_application_wildcard(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/*'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(json.loads(resp.body)), 1)
def test_GET_accept_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/json'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(json.loads(resp.body)), 1)
def test_GET_accept_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/xml'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEqual(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEqual(len(listing), 1)
def test_GET_accept_conflicting(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?format=plain',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/json'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, 'c1\n')
def test_GET_accept_not_valid(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/xml*'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 406)
def test_GET_delimiter_too_long(self):
req = Request.blank('/sda1/p/a?delimiter=xx',
environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412)
def test_GET_prefix_delimiter_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for first in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s' % first,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
for second in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s.%s' % (first, second),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?delimiter=.',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'), ['sub.'])
req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(
resp.body.strip().split('\n'),
['sub.0', 'sub.0.', 'sub.1', 'sub.1.', 'sub.2', 'sub.2.'])
req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body.strip().split('\n'),
['sub.1.0', 'sub.1.1', 'sub.1.2'])
def test_GET_prefix_delimiter_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for first in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s' % first,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
for second in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s.%s' % (first, second),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a?delimiter=.&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual([n.get('name', 's:' + n.get('subdir', 'error'))
for n in json.loads(resp.body)], ['s:sub.'])
req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(
[n.get('name', 's:' + n.get('subdir', 'error'))
for n in json.loads(resp.body)],
['sub.0', 's:sub.0.', 'sub.1', 's:sub.1.', 'sub.2', 's:sub.2.'])
req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(
[n.get('name', 's:' + n.get('subdir', 'error'))
for n in json.loads(resp.body)],
['sub.1.0', 'sub.1.1', 'sub.1.2'])
def test_GET_prefix_delimiter_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for first in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s' % first,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
for second in range(3):
req = Request.blank(
'/sda1/p/a/sub.%s.%s' % (first, second),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a?delimiter=.&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
listing = []
for node1 in dom.firstChild.childNodes:
if node1.nodeName == 'subdir':
listing.append('s:' + node1.attributes['name'].value)
elif node1.nodeName == 'container':
for node2 in node1.childNodes:
if node2.nodeName == 'name':
listing.append(node2.firstChild.nodeValue)
self.assertEqual(listing, ['s:sub.'])
req = Request.blank(
'/sda1/p/a?prefix=sub.&delimiter=.&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
listing = []
for node1 in dom.firstChild.childNodes:
if node1.nodeName == 'subdir':
listing.append('s:' + node1.attributes['name'].value)
elif node1.nodeName == 'container':
for node2 in node1.childNodes:
if node2.nodeName == 'name':
listing.append(node2.firstChild.nodeValue)
self.assertEqual(
listing,
['sub.0', 's:sub.0.', 'sub.1', 's:sub.1.', 'sub.2', 's:sub.2.'])
req = Request.blank(
'/sda1/p/a?prefix=sub.1.&delimiter=.&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
listing = []
for node1 in dom.firstChild.childNodes:
if node1.nodeName == 'subdir':
listing.append('s:' + node1.attributes['name'].value)
elif node1.nodeName == 'container':
for node2 in node1.childNodes:
if node2.nodeName == 'name':
listing.append(node2.firstChild.nodeValue)
self.assertEqual(listing, ['sub.1.0', 'sub.1.1', 'sub.1.2'])
def test_GET_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_through_call(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '404 ')
def test_through_call_invalid_path(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/bob',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '400 ')
def test_through_call_invalid_path_utf8(self):
inbuf = BytesIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '\x00',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '412 ')
def test_invalid_method_doesnt_exist(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist',
'PATH_INFO': '/sda1/p/a'},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_invalid_method_is_not_public(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': '__init__',
'PATH_INFO': '/sda1/p/a'},
start_response)
self.assertEqual(errbuf.getvalue(), '')
self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_params_format(self):
Request.blank('/sda1/p/a',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
for format in ('xml', 'json'):
req = Request.blank('/sda1/p/a?format=%s' % format,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
def test_params_utf8(self):
# Bad UTF8 sequence, all parameters should cause 400 error
for param in ('delimiter', 'limit', 'marker', 'prefix', 'end_marker',
'format'):
req = Request.blank('/sda1/p/a?%s=\xce' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400,
"%d on param %s" % (resp.status_int, param))
# Good UTF8 sequence for delimiter, too long (1 byte delimiters only)
req = Request.blank('/sda1/p/a?delimiter=\xce\xa9',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412,
"%d on param delimiter" % (resp.status_int))
Request.blank('/sda1/p/a',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
# Good UTF8 sequence, ignored for limit, doesn't affect other queries
for param in ('limit', 'marker', 'prefix', 'end_marker', 'format'):
req = Request.blank('/sda1/p/a?%s=\xce\xa9' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204,
"%d on param %s" % (resp.status_int, param))
def test_PUT_auto_create(self):
headers = {'x-put-timestamp': normalize_timestamp(1),
'x-delete-timestamp': normalize_timestamp(0),
'x-object-count': '0',
'x-bytes-used': '0'}
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
req = Request.blank('/sda1/p/.a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/.c',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_content_type_on_HEAD(self):
Request.blank('/sda1/p/a',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
env = {'REQUEST_METHOD': 'HEAD'}
req = Request.blank('/sda1/p/a?format=xml', environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/xml')
req = Request.blank('/sda1/p/a?format=json', environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a', environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'text/plain')
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a', headers={'Accept': 'application/json'}, environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a', headers={'Accept': 'application/xml'}, environ=env)
resp = req.get_response(self.controller)
self.assertEqual(resp.content_type, 'application/xml')
self.assertEqual(resp.charset, 'utf-8')
def test_serv_reserv(self):
# Test replication_server flag was set from configuration file.
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.assertEqual(AccountController(conf).replication_server, None)
for val in [True, '1', 'True', 'true']:
conf['replication_server'] = val
self.assertTrue(AccountController(conf).replication_server)
for val in [False, 0, '0', 'False', 'false', 'test_string']:
conf['replication_server'] = val
self.assertFalse(AccountController(conf).replication_server)
def test_list_allowed_methods(self):
# Test list of allowed_methods
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
repl_methods = ['REPLICATE']
for method_name in obj_methods:
method = getattr(self.controller, method_name)
self.assertFalse(hasattr(method, 'replication'))
for method_name in repl_methods:
method = getattr(self.controller, method_name)
self.assertEqual(method.replication, True)
    def test_correct_allowed_method(self):
        # Test correct work for allowed method using
        # swift.account.server.AccountController.__call__
        inbuf = BytesIO()
        errbuf = StringIO()
        outbuf = StringIO()
        # Controller explicitly configured as a non-replication server.
        self.controller = AccountController(
            {'devices': self.testdir,
             'mount_check': 'false',
             'replication_server': 'false'})
        def start_response(*args):
            """Sends args to outbuf"""
            outbuf.writelines(args)
        method = 'PUT'
        env = {'REQUEST_METHOD': method,
               'SCRIPT_NAME': '',
               'PATH_INFO': '/sda1/p/a/c',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '8080',
               'SERVER_PROTOCOL': 'HTTP/1.0',
               'CONTENT_LENGTH': '0',
               'wsgi.version': (1, 0),
               'wsgi.url_scheme': 'http',
               'wsgi.input': inbuf,
               'wsgi.errors': errbuf,
               'wsgi.multithread': False,
               'wsgi.multiprocess': False,
               'wsgi.run_once': False}
        method_res = mock.MagicMock()
        # Replace PUT with a public mock handler whose replication flag
        # is False: a non-replication server must still dispatch to it.
        mock_method = public(lambda x: mock.MagicMock(return_value=method_res))
        with mock.patch.object(self.controller, method,
                               new=mock_method):
            mock_method.replication = False
            response = self.controller(env, start_response)
            # The mocked handler's return value must be passed through.
            self.assertEqual(response, method_res)
    def test_not_allowed_method(self):
        # Test correct work for NOT allowed method using
        # swift.account.server.AccountController.__call__
        inbuf = BytesIO()
        errbuf = StringIO()
        outbuf = StringIO()
        # Controller explicitly configured as a non-replication server.
        self.controller = AccountController(
            {'devices': self.testdir, 'mount_check': 'false',
             'replication_server': 'false'})
        def start_response(*args):
            """Sends args to outbuf"""
            outbuf.writelines(args)
        method = 'PUT'
        env = {'REQUEST_METHOD': method,
               'SCRIPT_NAME': '',
               'PATH_INFO': '/sda1/p/a/c',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '8080',
               'SERVER_PROTOCOL': 'HTTP/1.0',
               'CONTENT_LENGTH': '0',
               'wsgi.version': (1, 0),
               'wsgi.url_scheme': 'http',
               'wsgi.input': inbuf,
               'wsgi.errors': errbuf,
               'wsgi.multithread': False,
               'wsgi.multiprocess': False,
               'wsgi.run_once': False}
        # Expected 405 body returned for a disallowed method.
        answer = ['<html><h1>Method Not Allowed</h1><p>The method is not '
                  'allowed for this resource.</p></html>']
        # Replace PUT with a replication-marked handler: a server with
        # replication_server=false must refuse to dispatch to it.
        mock_method = replication(public(lambda x: mock.MagicMock()))
        with mock.patch.object(self.controller, method,
                               new=mock_method):
            mock_method.replication = True
            response = self.controller.__call__(env, start_response)
            self.assertEqual(response, answer)
    def test_call_incorrect_replication_method(self):
        # A replication-only server must reject all normal client verbs
        # with 405 Method Not Allowed.
        inbuf = BytesIO()
        errbuf = StringIO()
        outbuf = StringIO()
        self.controller = AccountController(
            {'devices': self.testdir, 'mount_check': 'false',
             'replication_server': 'true'})
        def start_response(*args):
            """Sends args to outbuf"""
            outbuf.writelines(args)
        obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST', 'OPTIONS']
        for method in obj_methods:
            env = {'REQUEST_METHOD': method,
                   'SCRIPT_NAME': '',
                   'PATH_INFO': '/sda1/p/a/c',
                   'SERVER_NAME': '127.0.0.1',
                   'SERVER_PORT': '8080',
                   'SERVER_PROTOCOL': 'HTTP/1.0',
                   'CONTENT_LENGTH': '0',
                   'wsgi.version': (1, 0),
                   'wsgi.url_scheme': 'http',
                   'wsgi.input': inbuf,
                   'wsgi.errors': errbuf,
                   'wsgi.multithread': False,
                   'wsgi.multiprocess': False,
                   'wsgi.run_once': False}
            self.controller(env, start_response)
            self.assertEqual(errbuf.getvalue(), '')
            # Every client verb must produce a 405 status line.
            self.assertEqual(outbuf.getvalue()[:4], '405 ')
    def test__call__raise_timeout(self):
        # An exception escaping a handler is logged at error level and
        # the traceback is returned as the response body.
        inbuf = WsgiBytesIO()
        errbuf = StringIO()
        outbuf = StringIO()
        self.logger = debug_logger('test')
        self.account_controller = AccountController(
            {'devices': self.testdir, 'mount_check': 'false',
             'replication_server': 'false', 'log_requests': 'false'},
            logger=self.logger)
        def start_response(*args):
            # Sends args to outbuf
            outbuf.writelines(args)
        method = 'PUT'
        env = {'REQUEST_METHOD': method,
               'SCRIPT_NAME': '',
               'PATH_INFO': '/sda1/p/a/c',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '8080',
               'SERVER_PROTOCOL': 'HTTP/1.0',
               'CONTENT_LENGTH': '0',
               'wsgi.version': (1, 0),
               'wsgi.url_scheme': 'http',
               'wsgi.input': inbuf,
               'wsgi.errors': errbuf,
               'wsgi.multithread': False,
               'wsgi.multiprocess': False,
               'wsgi.run_once': False}
        # Public handler that always blows up when dispatched to.
        @public
        def mock_put_method(*args, **kwargs):
            raise Exception()
        with mock.patch.object(self.account_controller, method,
                               new=mock_put_method):
            response = self.account_controller.__call__(env, start_response)
            # The body starts with the formatted traceback.
            self.assertTrue(response[0].startswith(
                'Traceback (most recent call last):'))
            # Exactly one error line mentioning the method and path.
            self.assertEqual(self.logger.get_lines_for_level('error'), [
                'ERROR __call__ error with %(method)s %(path)s : ' % {
                    'method': 'PUT', 'path': '/sda1/p/a/c'},
            ])
            # log_requests is false, so nothing at info level.
            self.assertEqual(self.logger.get_lines_for_level('info'), [])
def test_GET_log_requests_true(self):
self.controller.logger = FakeLogger()
self.controller.log_requests = True
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue(self.controller.logger.log_dict['info'])
def test_GET_log_requests_false(self):
self.controller.logger = FakeLogger()
self.controller.log_requests = False
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertFalse(self.controller.logger.log_dict['info'])
    def test_log_line_format(self):
        # The access log line follows the common-log-style format.
        req = Request.blank(
            '/sda1/p/a',
            environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
        self.controller.logger = FakeLogger()
        # Pin time.gmtime, time.time and os.getpid so the logged
        # timestamp, the 2.0000s elapsed time and the pid 1234 are
        # deterministic.  time.time's side_effect ordering matters:
        # 10000.0/10002.0 bracket the request, 10001.0 feeds the log.
        with mock.patch(
                'time.gmtime', mock.MagicMock(side_effect=[gmtime(10001.0)])):
            with mock.patch(
                    'time.time',
                    mock.MagicMock(side_effect=[10000.0, 10001.0, 10002.0])):
                with mock.patch(
                        'os.getpid', mock.MagicMock(return_value=1234)):
                    req.get_response(self.controller)
        self.assertEqual(
            self.controller.logger.log_dict['info'],
            [(('1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a" 404 '
               '- "-" "-" "-" 2.0000 "-" 1234 -',), {})])
    def test_policy_stats_with_legacy(self):
        # A container PUT that carries no storage policy index is rolled
        # up under the default policy (POLICIES[0]) in GET/HEAD headers.
        ts = itertools.count()
        # create the account
        req = Request.blank('/sda1/p/a', method='PUT', headers={
            'X-Timestamp': normalize_timestamp(next(ts))})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)  # sanity
        # add a container
        req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
            'X-Put-Timestamp': normalize_timestamp(next(ts)),
            'X-Delete-Timestamp': '0',
            'X-Object-Count': '2',
            'X-Bytes-Used': '4',
        })
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        # read back rollup
        for method in ('GET', 'HEAD'):
            req = Request.blank('/sda1/p/a', method=method)
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int // 100, 2)
            # Account totals match the container stats ...
            self.assertEqual(resp.headers['X-Account-Object-Count'], '2')
            self.assertEqual(resp.headers['X-Account-Bytes-Used'], '4')
            # ... and so do the default policy's per-policy headers.
            self.assertEqual(
                resp.headers['X-Account-Storage-Policy-%s-Object-Count' %
                             POLICIES[0].name], '2')
            self.assertEqual(
                resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' %
                             POLICIES[0].name], '4')
            self.assertEqual(
                resp.headers['X-Account-Storage-Policy-%s-Container-Count' %
                             POLICIES[0].name], '1')
    def test_policy_stats_non_default(self):
        # A container created with an explicit non-default policy index
        # is rolled up under that policy's headers.
        ts = itertools.count()
        # create the account
        req = Request.blank('/sda1/p/a', method='PUT', headers={
            'X-Timestamp': normalize_timestamp(next(ts))})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)  # sanity
        # add a container
        non_default_policies = [p for p in POLICIES if not p.is_default]
        policy = random.choice(non_default_policies)
        req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
            'X-Put-Timestamp': normalize_timestamp(next(ts)),
            'X-Delete-Timestamp': '0',
            'X-Object-Count': '2',
            'X-Bytes-Used': '4',
            'X-Backend-Storage-Policy-Index': policy.idx,
        })
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        # read back rollup
        for method in ('GET', 'HEAD'):
            req = Request.blank('/sda1/p/a', method=method)
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int // 100, 2)
            self.assertEqual(resp.headers['X-Account-Object-Count'], '2')
            self.assertEqual(resp.headers['X-Account-Bytes-Used'], '4')
            # Per-policy headers are named after the chosen policy.
            self.assertEqual(
                resp.headers['X-Account-Storage-Policy-%s-Object-Count' %
                             policy.name], '2')
            self.assertEqual(
                resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' %
                             policy.name], '4')
            self.assertEqual(
                resp.headers['X-Account-Storage-Policy-%s-Container-Count' %
                             policy.name], '1')
def test_empty_policy_stats(self):
ts = itertools.count()
# create the account
req = Request.blank('/sda1/p/a', method='PUT', headers={
'X-Timestamp': normalize_timestamp(next(ts))})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
for key in resp.headers:
self.assertTrue('storage-policy' not in key.lower())
    def test_empty_except_for_used_policies(self):
        """Storage-policy headers appear only for policies actually in use."""
        ts = itertools.count()
        # create the account
        req = Request.blank('/sda1/p/a', method='PUT', headers={
            'X-Timestamp': normalize_timestamp(next(ts))})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)  # sanity
        # starts empty
        for method in ('GET', 'HEAD'):
            req = Request.blank('/sda1/p/a', method=method)
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int // 100, 2)
            for key in resp.headers:
                self.assertTrue('storage-policy' not in key.lower())
        # add a container
        policy = random.choice(POLICIES)
        req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
            'X-Put-Timestamp': normalize_timestamp(next(ts)),
            'X-Delete-Timestamp': '0',
            'X-Object-Count': '2',
            'X-Bytes-Used': '4',
            'X-Backend-Storage-Policy-Index': policy.idx,
        })
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        # only policy of the created container should be in headers
        for method in ('GET', 'HEAD'):
            req = Request.blank('/sda1/p/a', method=method)
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int // 100, 2)
            for key in resp.headers:
                if 'storage-policy' in key.lower():
                    self.assertTrue(policy.name.lower() in key.lower())
    def test_multiple_policies_in_use(self):
        """Rollup headers report per-policy stats and sum to account totals."""
        ts = itertools.count()
        # create the account
        req = Request.blank('/sda1/p/a', method='PUT', headers={
            'X-Timestamp': normalize_timestamp(next(ts))})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)  # sanity
        # add some containers, one per policy, with distinct counts per policy
        for policy in POLICIES:
            count = policy.idx * 100  # good as any integer
            container_path = '/sda1/p/a/c_%s' % policy.name
            req = Request.blank(
                container_path, method='PUT', headers={
                    'X-Put-Timestamp': normalize_timestamp(next(ts)),
                    'X-Delete-Timestamp': '0',
                    'X-Object-Count': count,
                    'X-Bytes-Used': count,
                    'X-Backend-Storage-Policy-Index': policy.idx,
                })
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 201)
        req = Request.blank('/sda1/p/a', method='HEAD')
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int // 100, 2)
        # check container counts in roll up headers
        total_object_count = 0
        total_bytes_used = 0
        for key in resp.headers:
            if 'storage-policy' not in key.lower():
                continue
            # match each per-policy header back to its policy by name
            for policy in POLICIES:
                if policy.name.lower() not in key.lower():
                    continue
                if key.lower().endswith('object-count'):
                    object_count = int(resp.headers[key])
                    self.assertEqual(policy.idx * 100, object_count)
                    total_object_count += object_count
                if key.lower().endswith('bytes-used'):
                    bytes_used = int(resp.headers[key])
                    self.assertEqual(policy.idx * 100, bytes_used)
                    total_bytes_used += bytes_used
        # per-policy stats must add up to the expected account-wide totals
        expected_total_count = sum([p.idx * 100 for p in POLICIES])
        self.assertEqual(expected_total_count, total_object_count)
        self.assertEqual(expected_total_count, total_bytes_used)
@patch_policies([StoragePolicy(0, 'zero', False),
                 StoragePolicy(1, 'one', True),
                 StoragePolicy(2, 'two', False),
                 StoragePolicy(3, 'three', False)])
class TestNonLegacyDefaultStoragePolicy(TestAccountController):
    """Re-runs the full TestAccountController suite with a patched set of
    policies whose default is *not* the legacy index 0 ('one' at index 1
    is the default here).
    """
    pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| larsbutler/swift | test/unit/account/test_server.py | Python | apache-2.0 | 99,400 | 0.00001 |
"""Tests for the system_wrappers module."""
from logging import INFO
from unittest import TestCase
from unittest.mock import Mock, call, patch
from codestyle import system_wrappers
from codestyle.system_wrappers import (
ExitCodes,
check_output,
interrupt_program_flow,
)
class Test(TestCase):
    """Tests for the module-level functions of system_wrappers."""

    @patch('codestyle.system_wrappers.sys', new_callable=Mock)
    @patch.object(system_wrappers, '_logger', new_callable=Mock)
    def test_interrupt_program_flow(
        self, mocked_logger: Mock, mocked_sys: Mock
    ):
        """interrupt_program_flow logs the message once and exits with SUCCESS."""
        mock_log = Mock()
        mocked_logger.log = mock_log
        mock_exit = Mock()
        mocked_sys.exit = mock_exit
        interrupt_program_flow(log_message='Проверка вызова функции.')
        # The message must be logged exactly once, at INFO level.
        self.assertEqual(True, mock_log.called)
        self.assertEqual(1, mock_log.call_count)
        args, kwargs = mock_log.call_args
        self.assertTupleEqual((INFO, 'Проверка вызова функции.'), args)
        self.assertDictEqual({}, kwargs)
        # sys.exit must be invoked exactly once with the SUCCESS exit code.
        self.assertEqual(True, mock_exit.called)
        self.assertEqual(1, mock_exit.call_count)
        args, kwargs = mock_exit.call_args
        self.assertTupleEqual((ExitCodes.SUCCESS,), args)
        self.assertDictEqual({}, kwargs)

    @patch('codestyle.system_wrappers.check_process_output', new_callable=Mock)
    @patch.object(system_wrappers, '_logger', new_callable=Mock)
    def test_check_output(
        self, mocked_logger: Mock, mocked_process_output_checker: Mock
    ):
        """check_output runs the command, then decodes and rstrips its output."""
        mock_debug = Mock()
        mocked_logger.debug = mock_debug
        mock_rstrip = Mock()
        mock_decode = Mock(return_value=Mock(rstrip=mock_rstrip))
        mocked_process_output_checker.return_value = Mock(decode=mock_decode)
        check_output(('application', 'run'))
        # A single debug record announces the availability check.
        self.assertEqual(True, mock_debug.called)
        self.assertEqual(1, mock_debug.call_count)
        args, kwargs = mock_debug.call_args
        self.assertTupleEqual(
            ('Проверка наличия application в системе...',), args
        )
        self.assertDictEqual({}, kwargs)
        # The subprocess helper is invoked once with the command and a timeout.
        self.assertEqual(True, mocked_process_output_checker.called)
        self.assertEqual(1, mocked_process_output_checker.call_count)
        args, kwargs = mocked_process_output_checker.call_args
        self.assertTupleEqual((('application', 'run'),), args)
        self.assertDictEqual({'timeout': 10}, kwargs)
        # Raw bytes are decoded exactly once...
        self.assertEqual(True, mock_decode.called)
        self.assertEqual(1, mock_decode.call_count)
        args, kwargs = mock_decode.call_args
        self.assertTupleEqual((), args)
        self.assertDictEqual({}, kwargs)
        # ...and the decoded string is right-stripped exactly once.
        self.assertEqual(True, mock_rstrip.called)
        self.assertEqual(1, mock_rstrip.call_count)
        args, kwargs = mock_rstrip.call_args
        self.assertTupleEqual((), args)
        self.assertDictEqual({}, kwargs)

    @patch(
        'codestyle.system_wrappers.interrupt_program_flow', new_callable=Mock
    )
    @patch('codestyle.system_wrappers.check_process_output', new_callable=Mock)
    @patch.object(system_wrappers, '_logger', new_callable=Mock)
    def test_check_output_with_error(
        self,
        mocked_logger: Mock,
        mocked_process_output_checker: Mock,
        mocked_interrupt_program_flow: Mock,
    ):
        """check_output warns and aborts when the executable is missing."""
        mock_debug = Mock()
        mock_warning = Mock()
        mocked_logger.debug = mock_debug
        mocked_logger.warning = mock_warning
        mocked_process_output_checker.side_effect = FileNotFoundError(
            'Исполняемый файл application не найден.'
        )
        check_output(('application', 'run'))
        # Two debug records (availability check + exception text), one warning.
        self.assertEqual(True, mock_debug.called)
        self.assertEqual(2, mock_debug.call_count)
        self.assertEqual(1, mock_warning.call_count)
        self.assertIn(
            call('Проверка наличия application в системе...'),
            mock_debug.mock_calls,
        )
        self.assertIn(
            call('Инструмент application не найден.'), mock_warning.mock_calls
        )
        self.assertIn(
            call('Исполняемый файл application не найден.'),
            mock_debug.mock_calls,
        )
        # Program flow is interrupted once with the UNSUCCESSFUL exit code.
        self.assertEqual(True, mocked_interrupt_program_flow.called)
        self.assertEqual(1, mocked_interrupt_program_flow.call_count)
        args, kwargs = mocked_interrupt_program_flow.call_args
        self.assertTupleEqual((ExitCodes.UNSUCCESSFUL,), args)
        self.assertDictEqual({}, kwargs)
| webpp-studio/codestyle | tests/test_system_wrappers.py | Python | gpl-3.0 | 4,814 | 0 |
from pathlib import Path
import logging
logger = logging.getLogger('ipyvolume')
HERE = Path(__file__).parent
_figures = []
_watching = set()
def _update_shaders(path=None, file_changed=None):
    """Re-read every known GLSL shader from *path* and push them to all figures.

    ``file_changed`` is accepted to match the watchdog callback signature, but
    the full shader set is re-read regardless (see TODO below).
    """
    shader_names = [
        'volr-fragment', 'volr-vertex',
        'mesh-vertex', 'mesh-fragment',
        'scatter-vertex', 'scatter-fragment',
        'shadow-vertex', 'shadow-fragment',
    ]
    # TODO: only read the ones we change
    for fig in _figures:
        sources = {}
        for shader_name in shader_names:
            glsl_file = path / (shader_name + ".glsl")
            with glsl_file.open() as handle:
                sources[shader_name] = handle.read()
        fig._shaders = sources
def watch(figure, path=None):
    """Hot-reload shaders into *figure* whenever a GLSL file under *path* changes.

    Registers *figure* for future updates, starts a watchdog observer on
    *path* (at most once per path), and performs an immediate initial load.
    """
    _figures.append(figure)
    from watchdog.observers import Observer
    from watchdog.events import FileSystemEventHandler
    if path is None:
        # this assumes an editable install (pip install -e .)
        path = HERE / '../js/glsl/'

    # Pushes fresh shader sources to all registered figures on any file
    # modification under the watched tree.
    class ShaderEventHandler(FileSystemEventHandler):
        def on_modified(self, event):
            super(ShaderEventHandler, self).on_modified(event)
            if not event.is_directory:
                logger.info(f'updating: {event.src_path}')
                _update_shaders(path, event.src_path)

    # NOTE(review): an Observer is constructed even when the path is already
    # being watched; only new paths are scheduled and started.
    observer = Observer()
    if path not in _watching:
        logger.info(f'watching {path}')
        observer.schedule(ShaderEventHandler(), path, recursive=True)
        observer.start()
        _watching.add(path)
    # Initial load so the figure has shaders before any file change occurs.
    _update_shaders(path)
| maartenbreddels/ipyvolume | ipyvolume/hotreload.py | Python | mit | 1,466 | 0.000682 |
# project.py
import signac
def classify(job):
    """Yield status labels describing how far *job* has progressed."""
    labels = ['init']
    if 'V' in job.document:
        labels.append('volume-computed')
    for label in labels:
        yield label

def next_operation(job):
    """Return the name of the next operation for *job*, or None when done."""
    finished = 'volume-computed' in set(classify(job))
    if finished:
        return None
    return 'compute_volume'
if __name__ == '__main__':
    # Print the project followed by one status line per job:
    # job id, pressure 'p' from the statepoint, and classification labels.
    project = signac.get_project()
    print(project)
    for job in project.find_jobs():
        labels = ','.join(classify(job))
        p = '{:04.1f}'.format(job.statepoint()['p'])
        print(job, p, labels)
| csadorf/signac | examples/ideal_gas_project/project.py | Python | bsd-3-clause | 472 | 0.006356 |
# Copyright 2017 The 'Scalable Private Learning with PATE' Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Plots three graphs illustrating cost of privacy per answered query.
A script in support of the paper "Scalable Private Learning with PATE" by
Nicolas Papernot, Shuang Song, Ilya Mironov, Ananth Raghunathan, Kunal Talwar,
Ulfar Erlingsson (https://arxiv.org/abs/1802.08908).
The input is a file containing a numpy array of votes, one query per row, one
class per column. Ex:
43, 1821, ..., 3
31, 16, ..., 0
...
0, 86, ..., 438
The output is written to a specified directory and consists of three pdf files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import pickle
import sys
sys.path.append('..') # Main modules reside in the parent directory.
from absl import app
from absl import flags
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import core as pate
plt.style.use('ggplot')
FLAGS = flags.FLAGS
flags.DEFINE_boolean('cache', False,
'Read results of privacy analysis from cache.')
flags.DEFINE_string('counts_file', None, 'Counts file.')
flags.DEFINE_string('figures_dir', '', 'Path where figures are written to.')
flags.mark_flag_as_required('counts_file')
def run_analysis(votes, mechanism, noise_scale, params):
  """Computes data-dependent privacy.

  Args:
    votes: A matrix of votes, where each row contains votes in one instance.
    mechanism: A name of the mechanism ('lnmax', 'gnmax', or 'gnmax_conf')
    noise_scale: A mechanism privacy parameter.
    params: Other privacy parameters.

  Returns:
    Four lists: cumulative privacy cost epsilon, how privacy budget is split,
    how many queries were answered, optimal order.
  """

  def compute_partition(order_opt, eps):
    # Splits the total eps at the optimal order into its components:
    # (selection step, answer step,) and the delta term; normalized by eps.
    order_opt_idx = np.searchsorted(orders, order_opt)
    if mechanism == 'gnmax_conf':
      p = (rdp_select_cum[order_opt_idx],
           rdp_cum[order_opt_idx] - rdp_select_cum[order_opt_idx],
           -math.log(delta) / (order_opt - 1))
    else:
      p = (rdp_cum[order_opt_idx], -math.log(delta) / (order_opt - 1))
    return [x / eps for x in p]  # Ensures that sum(x) == 1

  # Short list of orders.
  # orders = np.round(np.concatenate((np.arange(2, 50 + 1, 1),
  # np.logspace(np.log10(50), np.log10(1000), num=20))))

  # Long list of orders.
  orders = np.concatenate((np.arange(2, 100 + 1, .5),
                           np.logspace(np.log10(100), np.log10(500), num=100)))
  delta = 1e-8

  n = votes.shape[0]
  # Per-query outputs.
  eps_total = np.zeros(n)
  partition = [None] * n
  order_opt = np.full(n, np.nan, dtype=float)
  answered = np.zeros(n, dtype=float)

  # Running RDP accumulators over all orders: first moment, second moment
  # (for the std estimate printed below), and the selection-step share.
  rdp_cum = np.zeros(len(orders))
  rdp_sqrd_cum = np.zeros(len(orders))
  rdp_select_cum = np.zeros(len(orders))
  answered_sum = 0

  for i in range(n):
    v = votes[i,]
    if mechanism == 'lnmax':
      logq_lnmax = pate.compute_logq_laplace(v, noise_scale)
      rdp_query = pate.rdp_pure_eps(logq_lnmax, 2. / noise_scale, orders)
      rdp_sqrd = rdp_query ** 2
      pr_answered = 1
    elif mechanism == 'gnmax':
      logq_gmax = pate.compute_logq_gaussian(v, noise_scale)
      rdp_query = pate.rdp_gaussian(logq_gmax, noise_scale, orders)
      rdp_sqrd = rdp_query ** 2
      pr_answered = 1
    elif mechanism == 'gnmax_conf':
      # Two-step mechanism: step 1 decides whether to answer (threshold t,
      # noise sigma1); step 2 answers with Gaussian noise `noise_scale`.
      logq_step1 = pate.compute_logpr_answered(params['t'], params['sigma1'], v)
      logq_step2 = pate.compute_logq_gaussian(v, noise_scale)
      q_step1 = np.exp(logq_step1)
      logq_step1_min = min(logq_step1, math.log1p(-q_step1))
      rdp_gnmax_step1 = pate.rdp_gaussian(logq_step1_min,
                                          2 ** .5 * params['sigma1'], orders)
      rdp_gnmax_step2 = pate.rdp_gaussian(logq_step2, noise_scale, orders)
      rdp_query = rdp_gnmax_step1 + q_step1 * rdp_gnmax_step2
      # The expression below evaluates
      # E[(cost_of_step_1 + Bernoulli(pr_of_step_2) * cost_of_step_2)^2]
      rdp_sqrd = (
          rdp_gnmax_step1 ** 2 + 2 * rdp_gnmax_step1 * q_step1 * rdp_gnmax_step2
          + q_step1 * rdp_gnmax_step2 ** 2)
      rdp_select_cum += rdp_gnmax_step1
      pr_answered = q_step1
    else:
      raise ValueError(
          'Mechanism must be one of ["lnmax", "gnmax", "gnmax_conf"]')

    rdp_cum += rdp_query
    rdp_sqrd_cum += rdp_sqrd
    answered_sum += pr_answered
    answered[i] = answered_sum
    # Convert cumulative RDP to (eps, delta)-DP, optimizing over orders.
    eps_total[i], order_opt[i] = pate.compute_eps_from_delta(
        orders, rdp_cum, delta)
    partition[i] = compute_partition(order_opt[i], eps_total[i])

    # Periodic progress report with a rough std estimate of eps.
    if i > 0 and (i + 1) % 1000 == 0:
      rdp_var = rdp_sqrd_cum / i - (
          rdp_cum / i) ** 2  # Ignore Bessel's correction.
      order_opt_idx = np.searchsorted(orders, order_opt[i])
      eps_std = ((i + 1) * rdp_var[order_opt_idx]) ** .5  # Std of the sum.
      print(
          'queries = {}, E[answered] = {:.2f}, E[eps] = {:.3f} (std = {:.5f}) '
          'at order = {:.2f} (contribution from delta = {:.3f})'.format(
              i + 1, answered_sum, eps_total[i], eps_std, order_opt[i],
              -math.log(delta) / (order_opt[i] - 1)))
      sys.stdout.flush()

  return eps_total, partition, answered, order_opt
def print_plot_small(figures_dir, eps_lap, eps_gnmax, answered_gnmax):
  """Plots a graph of LNMax vs GNMax.

  Args:
    figures_dir: A name of the directory where to save the plot.
    eps_lap: The cumulative privacy costs of the Laplace mechanism.
    eps_gnmax: The cumulative privacy costs of the Gaussian mechanism
    answered_gnmax: The cumulative count of queries answered.
  """
  xlim = 6000
  x_axis = range(0, int(xlim), 10)
  y_lap = np.zeros(len(x_axis), dtype=float)
  y_gnmax = np.full(len(x_axis), np.nan, dtype=float)
  # Resample both curves onto a shared "queries answered" x-axis. LNMax
  # answers every query, so eps_lap is indexed directly; GNMax may answer
  # fewer, so map x to the first query at which `answered_gnmax` reaches x.
  for i in range(len(x_axis)):
    x = x_axis[i]
    y_lap[i] = eps_lap[x]
    idx = np.searchsorted(answered_gnmax, x)
    if idx < len(eps_gnmax):
      y_gnmax[i] = eps_gnmax[idx]
  fig, ax = plt.subplots()
  fig.set_figheight(4.5)
  fig.set_figwidth(4.7)
  ax.plot(
      x_axis, y_lap, color='r', ls='--', label='LNMax', alpha=.5, linewidth=5)
  ax.plot(
      x_axis,
      y_gnmax,
      color='g',
      ls='-',
      label='Confident-GNMax',
      alpha=.5,
      linewidth=5)
  plt.xticks(np.arange(0, 7000, 1000))
  plt.xlim([0, 6000])
  plt.ylim([0, 6.])
  plt.xlabel('Number of queries answered', fontsize=16)
  plt.ylabel(r'Privacy cost $\varepsilon$ at $\delta=10^{-8}$', fontsize=16)
  plt.legend(loc=2, fontsize=13)  # loc=2 -- upper left
  ax.tick_params(labelsize=14)
  fout_name = os.path.join(figures_dir, 'lnmax_vs_gnmax.pdf')
  print('Saving the graph to ' + fout_name)
  fig.savefig(fout_name, bbox_inches='tight')
  plt.show()
def print_plot_large(figures_dir, eps_lap, eps_gnmax1, answered_gnmax1,
                     eps_gnmax2, partition_gnmax2, answered_gnmax2):
  """Plots a graph of LNMax vs GNMax with two parameters.

  Args:
    figures_dir: A name of the directory where to save the plot.
    eps_lap: The cumulative privacy costs of the Laplace mechanism.
    eps_gnmax1: The cumulative privacy costs of the Gaussian mechanism (set 1).
    answered_gnmax1: The cumulative count of queries answered (set 1).
    eps_gnmax2: The cumulative privacy costs of the Gaussian mechanism (set 2).
    partition_gnmax2: Allocation of eps for set 2.
    answered_gnmax2: The cumulative count of queries answered (set 2).
  """
  xlim = 6000
  x_axis = range(0, int(xlim), 10)
  lenx = len(x_axis)
  y_lap = np.zeros(lenx)
  y_gnmax1 = np.full(lenx, np.nan, dtype=float)
  y_gnmax2 = np.full(lenx, np.nan, dtype=float)
  y1_gnmax2 = np.full(lenx, np.nan, dtype=float)
  # Resample all curves onto a shared "queries answered" x-axis.
  for i in range(lenx):
    x = x_axis[i]
    y_lap[i] = eps_lap[x]
    idx1 = np.searchsorted(answered_gnmax1, x)
    if idx1 < len(eps_gnmax1):
      y_gnmax1[i] = eps_gnmax1[idx1]
    idx2 = np.searchsorted(answered_gnmax2, x)
    if idx2 < len(eps_gnmax2):
      y_gnmax2[i] = eps_gnmax2[idx2]
      # Portion of set-2's eps spent on step 1 (relative to steps 1+2),
      # used for the hatched shading below.
      fraction_step1, fraction_step2, _ = partition_gnmax2[idx2]
      y1_gnmax2[i] = eps_gnmax2[idx2] * fraction_step1 / (
          fraction_step1 + fraction_step2)
  fig, ax = plt.subplots()
  fig.set_figheight(4.5)
  fig.set_figwidth(4.7)
  ax.plot(
      x_axis,
      y_lap,
      color='r',
      ls='dashed',
      label='LNMax',
      alpha=.5,
      linewidth=5)
  ax.plot(
      x_axis,
      y_gnmax1,
      color='g',
      ls='-',
      label='Confident-GNMax (moderate)',
      alpha=.5,
      linewidth=5)
  ax.plot(
      x_axis,
      y_gnmax2,
      color='b',
      ls='-',
      label='Confident-GNMax (aggressive)',
      alpha=.5,
      linewidth=5)
  # Shade the step-1 portion (hatched) and step-2 portion of the
  # aggressive curve's privacy cost.
  ax.fill_between(
      x_axis, [0] * lenx,
      y1_gnmax2.tolist(),
      facecolor='b',
      alpha=.3,
      hatch='\\')
  ax.plot(
      x_axis,
      y1_gnmax2,
      color='b',
      ls='-',
      label='_nolegend_',
      alpha=.5,
      linewidth=1)
  ax.fill_between(
      x_axis, y1_gnmax2.tolist(), y_gnmax2.tolist(), facecolor='b', alpha=.3)
  plt.xticks(np.arange(0, 7000, 1000))
  plt.xlim([0, xlim])
  plt.ylim([0, 1.])
  plt.xlabel('Number of queries answered', fontsize=16)
  plt.ylabel(r'Privacy cost $\varepsilon$ at $\delta=10^{-8}$', fontsize=16)
  plt.legend(loc=2, fontsize=13)  # loc=2 -- upper left
  ax.tick_params(labelsize=14)
  fout_name = os.path.join(figures_dir, 'lnmax_vs_2xgnmax_large.pdf')
  print('Saving the graph to ' + fout_name)
  fig.savefig(fout_name, bbox_inches='tight')
  plt.show()
def run_all_analyses(votes, lambda_laplace, gnmax_parameters, sigma2):
  """Sequentially runs all analyses.

  Args:
    votes: A matrix of votes, where each row contains votes in one instance.
    lambda_laplace: The scale of the Laplace noise (lambda).
    gnmax_parameters: A list of parameters for GNMax.
    sigma2: Shared parameter for the GNMax mechanisms.

  Returns:
    Five lists whose length is the number of queries.
  """
  print('=== Laplace Mechanism ===')
  eps_lap, _, _, _ = run_analysis(votes, 'lnmax', lambda_laplace, None)
  print()

  # Does not go anywhere, for now
  # print('=== Gaussian Mechanism (simple) ===')
  # eps, _, _, _ = run_analysis(votes[:n,], 'gnmax', sigma1, None)

  # Collect per-parameter-set results by appending directly, rather than
  # indexing into pre-allocated parallel lists.
  eps_gnmax = []
  partition_gmax = []
  answered = []
  order_opt = []
  for p in gnmax_parameters:
    print('=== Gaussian Mechanism (confident) {}: ==='.format(p))
    eps, partition, num_answered, orders = run_analysis(
        votes, 'gnmax_conf', sigma2, p)
    eps_gnmax.append(eps)
    partition_gmax.append(partition)
    answered.append(num_answered)
    order_opt.append(orders)
    print()

  return eps_lap, eps_gnmax, partition_gmax, answered, order_opt
def main(argv):
  """Runs all privacy analyses and renders the comparison plots."""
  del argv  # Unused.
  lambda_laplace = 50.  # corresponds to eps = 1. / lambda_laplace

  # Parameters of the GNMax mechanisms: selection threshold t and
  # selection noise sigma1.
  gnmax_parameters = ({
      't': 1000,
      'sigma1': 500
  }, {
      't': 3500,
      'sigma1': 1500
  }, {
      't': 5000,
      'sigma1': 1500
  })
  sigma2 = 100  # GNMax parameters differ only in Step 1 (selection).
  ftemp_name = '/tmp/precomputed.pkl'

  figures_dir = os.path.expanduser(FLAGS.figures_dir)

  if FLAGS.cache and os.path.isfile(ftemp_name):
    # Reuse previously computed analysis results.
    print('Reading from cache ' + ftemp_name)
    with open(ftemp_name, 'rb') as f:
      (eps_lap, eps_gnmax, partition_gmax, answered_gnmax,
       orders_opt_gnmax) = pickle.load(f)
  else:
    fin_name = os.path.expanduser(FLAGS.counts_file)
    print('Reading raw votes from ' + fin_name)
    sys.stdout.flush()
    votes = np.load(fin_name)
    (eps_lap, eps_gnmax, partition_gmax,
     answered_gnmax, orders_opt_gnmax) = run_all_analyses(
         votes, lambda_laplace, gnmax_parameters, sigma2)
    # Cache results so reruns with --cache skip the expensive analysis.
    print('Writing to cache ' + ftemp_name)
    with open(ftemp_name, 'wb') as f:
      pickle.dump((eps_lap, eps_gnmax, partition_gmax, answered_gnmax,
                   orders_opt_gnmax), f)

  print_plot_small(figures_dir, eps_lap, eps_gnmax[0], answered_gnmax[0])
  print_plot_large(figures_dir, eps_lap, eps_gnmax[1], answered_gnmax[1],
                   eps_gnmax[2], partition_gmax[2], answered_gnmax[2])
  plt.close('all')
if __name__ == '__main__':
  # Parse absl flags and dispatch to main().
  app.run(main)
| tensorflow/privacy | research/pate_2018/ICLR2018/rdp_cumulative.py | Python | apache-2.0 | 12,995 | 0.011081 |
import copy
from django import forms
from django.db import models
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.db.models.fields.subclassing import Creator
from djangae.forms.fields import ListFormField
from django.utils.text import capfirst
class _FakeModel(object):
"""
An object of this class can pass itself off as a model instance
when used as an arguments to Field.pre_save method (item_fields
of iterable fields are not actually fields of any model).
"""
def __init__(self, field, value):
setattr(self, field.attname, value)
class IterableField(models.Field):
    """Base field storing a homogeneous collection of non-relational values.

    Each element is converted/validated by ``item_field_type`` (a regular
    Django field instance); subclasses choose the concrete Python container
    via ``_iterable_type``. An empty iterable is stored as None in the
    datastore (see get_db_prep_value / to_python).
    """
    # SubfieldBase makes Django call to_python() on attribute assignment.
    __metaclass__ = models.SubfieldBase

    @property
    def _iterable_type(self): raise NotImplementedError()

    def db_type(self, connection):
        # Backend-level column type used by the djangae datastore backend.
        return 'list'

    def get_prep_lookup(self, lookup_type, value):
        """Prepares a lookup value; only exact/in/isnull lookups are supported."""
        if hasattr(value, 'prepare'):
            return value.prepare()
        if hasattr(value, '_prepare'):
            return value._prepare()
        if value is None:
            raise ValueError("You can't query an iterable field with None")
        # isnull passes its boolean through untouched.
        if lookup_type == 'isnull' and value in (True, False):
            return value
        if lookup_type != 'exact' and lookup_type != 'in':
            raise ValueError("You can only query using exact and in lookups on iterable fields")
        # Convert each element (or the single element) via the item field.
        if isinstance(value, (list, set)):
            return [ self.item_field_type.to_python(x) for x in value ]
        return self.item_field_type.to_python(value)

    def get_prep_value(self, value):
        if value is None:
            # None is reserved internally to represent the empty iterable.
            raise ValueError("You can't set a {} to None (did you mean {}?)".format(
                self.__class__.__name__, str(self._iterable_type())
            ))
        if isinstance(value, basestring):
            # Catch accidentally assigning a string to a ListField
            raise ValueError("Tried to assign a string to a {}".format(self.__class__.__name__))
        return super(IterableField, self).get_prep_value(value)

    def __init__(self, item_field_type, *args, **kwargs):
        # This seems bonkers, we shout at people for specifying null=True, but then do it ourselves. But this is because
        # *we* abuse None values for our own purposes (to represent an empty iterable) if someone else tries to then
        # all hell breaks loose
        if kwargs.get("null", False):
            raise RuntimeError("IterableFields cannot be set as nullable (as the datastore doesn't differentiate None vs []")
        kwargs["null"] = True
        default = kwargs.get("default", [])
        self._original_item_field_type = copy.deepcopy(item_field_type) # For deconstruction purposes
        # Wrap a non-callable default so each instance gets a fresh container.
        if default is not None and not callable(default):
            kwargs["default"] = lambda: self._iterable_type(default)
        # Normalize the item field: accept an instance that was already bound
        # (has attname), a class, or any callable producing a field.
        if hasattr(item_field_type, 'attname'):
            item_field_type = item_field_type.__class__
        if callable(item_field_type):
            item_field_type = item_field_type()
        if isinstance(item_field_type, models.ForeignKey):
            raise ImproperlyConfigured("Lists of ForeignKeys aren't supported, use RelatedSetField instead")
        self.item_field_type = item_field_type
        # We'll be pretending that item_field is a field of a model
        # with just one "value" field.
        assert not hasattr(self.item_field_type, 'attname')
        self.item_field_type.set_attributes_from_name('value')
        super(IterableField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        """Deconstructs for migrations, restoring the original item field."""
        name, path, args, kwargs = super(IterableField, self).deconstruct()
        args = (self._original_item_field_type,)
        # null=True was forced on in __init__, it was never user-specified.
        del kwargs["null"]
        return name, path, args, kwargs

    def contribute_to_class(self, cls, name):
        self.item_field_type.model = cls
        self.item_field_type.name = name
        super(IterableField, self).contribute_to_class(cls, name)
        # If items' field uses SubfieldBase we also need to.
        item_metaclass = getattr(self.item_field_type, '__metaclass__', None)
        if item_metaclass and issubclass(item_metaclass, models.SubfieldBase):
            setattr(cls, self.name, Creator(self))

    def _map(self, function, iterable, *args, **kwargs):
        # Applies `function` to each element, returning the concrete container.
        return self._iterable_type(function(element, *args, **kwargs) for element in iterable)

    def to_python(self, value):
        """Converts a stored/raw value into the field's container of items."""
        if value is None:
            return self._iterable_type([])
        # Because a set cannot be defined in JSON, we must allow a list to be passed as the value
        # of a SetField, as otherwise SetField data can't be loaded from fixtures
        if not hasattr(value, "__iter__"): # Allows list/set, not string
            raise ValueError("Tried to assign a {} to a {}".format(value.__class__.__name__, self.__class__.__name__))
        return self._map(self.item_field_type.to_python, value)

    def pre_save(self, model_instance, add):
        """
        Gets our value from the model_instance and passes its items
        through item_field's pre_save (using a fake model instance).
        """
        value = getattr(model_instance, self.attname)
        if value is None:
            return None
        return self._map(lambda item: self.item_field_type.pre_save(_FakeModel(self.item_field_type, item), add), value)

    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
        if value is None:
            return None
        # If the value is an empty iterable, store None
        if value == self._iterable_type([]):
            return None
        return self._map(self.item_field_type.get_db_prep_save, value,
                         connection=connection)

    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        """
        Passes the value through get_db_prep_lookup of item_field.
        """
        return self.item_field_type.get_db_prep_lookup(
            lookup_type, value, connection=connection, prepared=prepared)

    def validate(self, value_list, model_instance):
        """ We want to override the default validate method from django.db.fields.Field, because it
        is only designed to deal with a single choice from the user.
        """
        if not self.editable:
            # Skip validation for non-editable fields
            return
        # Validate choices
        if self.choices:
            valid_values = []
            for choice in self.choices:
                if isinstance(choice[0], (list, tuple)):
                    # this is an optgroup, so look inside it for the options
                    for optgroup_choice in choice[0]:
                        valid_values.append(optgroup_choice[0])
                else:
                    valid_values.append(choice[0])
            for value in value_list:
                if value not in valid_values:
                    # TODO: if there is more than 1 invalid value then this should show all of the invalid values
                    raise ValidationError(self.error_messages['invalid_choice'] % value)
        # Validate null-ness
        if value_list is None and not self.null:
            raise ValidationError(self.error_messages['null'])
        if not self.blank and not value_list:
            raise ValidationError(self.error_messages['blank'])
        # apply the default items validation rules
        for value in value_list:
            self.item_field_type.clean(value, model_instance)

    def formfield(self, **kwargs):
        """ If this field has choices, then we can use a multiple choice field.
        NB: The choices must be set on *this* field, e.g. this_field = ListField(CharField(), choices=x)
        as opposed to: this_field = ListField(CharField(choices=x))
        """
        # Largely lifted straight from Field.formfield() in django.models.__init__.py
        defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
        if self.has_default(): # No idea what this does
            if callable(self.default):
                defaults['initial'] = self.default
                defaults['show_hidden_initial'] = True
            else:
                defaults['initial'] = self.get_default()
        if self.choices:
            form_field_class = forms.MultipleChoiceField
            defaults['choices'] = self.get_choices(include_blank=False) # no empty value on a multi-select
        else:
            form_field_class = ListFormField
        defaults.update(**kwargs)
        return form_field_class(**defaults)
class ListField(IterableField):
    """IterableField backed by a Python list, with optional stable ordering.

    ``ordering`` is a key function used to sort the list on every save.
    """
    def __init__(self, *args, **kwargs):
        self.ordering = kwargs.pop('ordering', None)
        if self.ordering is not None and not callable(self.ordering):
            raise TypeError("'ordering' has to be a callable or None, "
                            "not of type %r." % type(self.ordering))
        super(ListField, self).__init__(*args, **kwargs)

    def pre_save(self, model_instance, add):
        # Sort in place just before saving so the stored order is deterministic.
        value = super(ListField, self).pre_save(model_instance, add)
        if value and self.ordering:
            value.sort(key=self.ordering)
        return value

    @property
    def _iterable_type(self):
        return list

    def deconstruct(self):
        # Include 'ordering' so migrations can reconstruct the field.
        name, path, args, kwargs = super(ListField, self).deconstruct()
        kwargs['ordering'] = self.ordering
        return name, path, args, kwargs
class SetField(IterableField):
    """IterableField backed by a Python set (serialized as a list)."""
    @property
    def _iterable_type(self):
        return set

    def db_type(self, connection):
        return 'set'

    def get_db_prep_save(self, *args, **kwargs):
        # Sets aren't serializable by the backend, so convert to a list.
        ret = super(SetField, self).get_db_prep_save(*args, **kwargs)
        if ret:
            ret = list(ret)
        return ret

    def get_db_prep_lookup(self, *args, **kwargs):
        # As above: lookup values must be lists, not sets.
        ret = super(SetField, self).get_db_prep_lookup(*args, **kwargs)
        if ret:
            ret = list(ret)
        return ret

    def value_to_string(self, obj):
        """
        Custom method for serialization, as JSON doesn't support
        serializing sets.
        """
        return str(list(self._get_val_from_obj(obj)))
| nealedj/djangae | djangae/fields/iterable.py | Python | bsd-3-clause | 10,309 | 0.003104 |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for all request handlers.
Provides functionality useful to all request handlers, including extraction and
validation of request parameters.
"""
import os
import urllib2
# pylint: disable-msg=C6204
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
class Error(urllib2.HTTPError):
  """Base class for all exceptions defined in this module.

  Subclassing HTTPError lets these errors carry an HTTP status code
  (subclasses below use 400) that the framework can surface to clients.
  """
  pass
class MissingRequiredParameterError(Error):
  """Raised when the request is missing a required parameter."""

  def __init__(self, parameter_name):
    """Builds a 400 error naming the missing parameter."""
    message = 'Request missing required parameter: %s' % parameter_name
    Error.__init__(self, url='', code=400, msg=message, hdrs='', fp=None)
class InvalidIntValueError(Error):
  """Raised when a request parameter is expected to be an int, but it isn't."""

  def __init__(self, parameter_name, parameter_value):
    """Builds a 400 error naming the parameter and its non-int value."""
    message = ('The specified value for parameter "%s" is not '
               'a valid int: %s' % (parameter_name, parameter_value))
    Error.__init__(self, url='', code=400, msg=message, hdrs='', fp=None)
class InvalidParameterValueError(Error):
  """Raised when a request parameter has an invalid value."""

  def __init__(self, parameter_name, parameter_value):
    """Builds a 400 error naming the parameter and its invalid value."""
    message = ('The specified value for parameter "%s" is not '
               'valid: %s' % (parameter_name, parameter_value))
    Error.__init__(self, url='', code=400, msg=message, hdrs='', fp=None)
class BaseHandler(webapp.RequestHandler):
  """Common base class for the application's request handlers.

  Bundles the request-parameter extraction and validation helpers shared by
  the concrete handlers:

    value = self.GetOptionalParameter('name')             # None when absent
    value = self.GetOptionalParameter('name', 'default')
    value = self.GetRequiredParameter('name')             # raises when absent
    value = self.GetOptionalIntParameter('count', 10)     # int-validated

  Validation failures raise the exception classes defined in this module,
  all of which inherit from Error and map to HTTP 400 responses.
  """

  def GetRequiredParameter(self, parameter_name):
    """Returns the str value of a request parameter that must be present.

    Args:
      parameter_name: Name of the parameter to read from the request.

    Returns:
      The parameter's value as a str.

    Raises:
      MissingRequiredParameterError: The parameter is absent (or empty).
    """
    value = self.GetOptionalParameter(parameter_name)
    if not value:
      raise MissingRequiredParameterError(parameter_name)
    return value

  def GetOptionalParameter(self, parameter_name, default_value=None):
    """Returns a request parameter's str value, or default_value if absent.

    Args:
      parameter_name: Name of the parameter to read from the request.
      default_value: Value returned when the parameter is not present.
    """
    return self.request.get(parameter_name, default_value)

  def GetOptionalIntParameter(self, parameter_name, default_value):
    """Returns a request parameter converted to int, or default_value.

    Args:
      parameter_name: Name of the parameter to read from the request.
      default_value: Value used when the parameter is not present.

    Raises:
      InvalidIntValueError: The supplied value is not a valid integer.
    """
    raw_value = self.GetOptionalParameter(parameter_name)
    if not raw_value:
      # A non-int default_value is a bug in the calling code; the resulting
      # ValueError is deliberately allowed to propagate, which yields a 500
      # response to the client plus sufficient error logging.
      return int(default_value)
    try:
      return int(raw_value)
    except ValueError:
      raise InvalidIntValueError(parameter_name, raw_value)

  def RenderTemplate(self, name, template_args):
    """Renders the named django template with the given arguments.

    Assumes handlers and templates live in sibling folders:
      - root
        - handlers
        - templates

    Args:
      name: Str file name of the template.
      template_args: Dict of arguments passed through to the template.
    """
    template_path = os.path.join(
        os.path.dirname(__file__), '..', 'templates', name)
    self.response.out.write(template.render(template_path, template_args))
# Register filter
# Module-level side effect: makes the custom template filters defined in
# filters/filters.py available to the django templates rendered by handlers.
template.register_template_library(
    'filters.filters')
| wonderful4228/qualitybots | src/appengine/handlers/base.py | Python | apache-2.0 | 5,928 | 0.003036 |
# Generated by Django 2.2.11 on 2020-03-12 11:17
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``flattr_disabled`` flag from the Document model."""
    dependencies = [
        ("documents", "0003_auto_20200122_1624"),
    ]
    operations = [
        migrations.RemoveField(
            model_name="document",
            name="flattr_disabled",
        ),
    ]
| studentenportal/web | apps/documents/migrations/0004_remove_document_flattr_disabled.py | Python | agpl-3.0 | 342 | 0 |
from flask import Response
from flask.views import View
from urllib2 import urlopen
from gpv import utils
class ProcurementType(View):
    """Flask view that proxies procurement-type JSON from the GPV API.

    Routes may supply either a municipality (``komuna``) together with a
    ``year``, or a ``company_slug`` on its own; the matching API endpoint's
    payload is passed through unchanged.
    """

    def dispatch_request(self, komuna=None, year=None, company_slug=None):
        """Fetches procurement-type data and wraps it in a JSON response.

        Args:
            komuna: Optional municipality slug (used together with ``year``).
            year: Optional int year (used together with ``komuna``).
            company_slug: Optional company slug (used on its own).

        Returns:
            flask.Response with the API payload as its body, or an empty
            list when no route parameters were supplied.
        """
        api_base_url = utils.get_api_url()
        url = "%s/procurement-type" % api_base_url
        result = []
        # Compare against None by identity: values such as 0 or '' must not
        # be mistaken for "parameter absent".
        if komuna is not None and year is not None:
            result = self._fetch("%s/%s/%d" % (url, komuna, year))
        elif company_slug is not None:
            result = self._fetch("%s/%s" % (url, company_slug))
        # Build response object.
        resp = Response(
            response=result,
            mimetype='application/json')
        # Return response.
        return resp

    @staticmethod
    def _fetch(url):
        """Reads the body of *url*, always closing the connection.

        urllib2's handle is not a context manager, so close it explicitly in
        a ``finally`` to avoid leaking sockets on read errors.
        """
        conn = urlopen(url)
        try:
            return conn.read()
        finally:
            conn.close()
| opendatakosovo/municipality-procurement-visualizer | gpv/views/json/procurementtype.py | Python | gpl-2.0 | 764 | 0.005236 |
# -*- coding: utf-8 -*-
#
# DiracDocs documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 25 17:34:37 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import sys
import subprocess
# Make modules that sit next to this conf.py importable; fakeEnvironment /
# fakeEnv stub out DIRAC dependencies when docs are built without a full
# DIRAC installation (e.g. on ReadTheDocs).
sys.path.insert(0, ".")
try:
  import fakeEnvironment
except ImportError:
  pass
try:
  import fakeEnv
except ImportError:
  pass
# Documented release: taken from $DIRACVERSION, falling back to
# "integration"; on ReadTheDocs it is derived from the checkout directory
# name, stripping a leading "rel-" prefix.
diracRelease = os.environ.get( 'DIRACVERSION', 'integration' )
if os.environ.get('READTHEDOCS') == 'True':
  diracRelease = os.path.basename( os.path.abspath( "../../" ) )
  if diracRelease.startswith("rel-"):
    diracRelease = diracRelease[4:]
print 'conf.py: %s as DIRACVERSION' % diracRelease
#...............................................................................
# configuration
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# NOTE(review): this whole section runs only on ReadTheDocs; it symlinks the
# checkout so "import DIRAC" works, then generates the code documentation by
# invoking Tools/MakeDoc.py as a subprocess.
if os.environ.get('READTHEDOCS') == 'True':
  sys.path.append(os.path.abspath('.'))
  diracPath = os.path.abspath( os.path.join( os.getcwd(), "../..") )
  print "DiracPath",diracPath
  buildfolder ="_build"
  # Best-effort mkdir: the build folder may already exist.
  try:
    os.mkdir( os.path.abspath( "../"+buildfolder) )
  except:
    pass
  ##We need to have the DIRAC module somewhere, or we cannot import it, as readtheDocs clones the repo into something based on the branchname
  if not os.path.exists( "../../DIRAC" ):
    diracLink = os.path.abspath( os.path.join( os.getcwd() , "../" , buildfolder, "DIRAC" ) )
    print "DiracLink",diracLink
    if not os.path.exists( diracLink ):
      RES = subprocess.check_output( ["ln","-s", diracPath, diracLink ] )
    diracPath = os.path.abspath( os.path.join( diracLink, ".." ) )
  sys.path.insert(0, diracPath)
  # Propagate the full sys.path to PYTHONPATH so the child build process can
  # import the same modules.
  for path in sys.path:
    os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '')+":"+path
  ## this is not working at the moment because the DIRAC folder is not found by the buildScriptsDOC script
  # print "Pythonpath",os.environ['PYTHONPATH']
  # buildCommand = os.path.join( os.getcwd() , "../Tools/buildScriptsDOC.py" )
  # scriptdir = os.path.abspath(os.path.join( os.getcwd() , "../", buildfolder, "scripts" ))
  # try:
  #  os.mkdir( scriptdir )
  # except:
  #  pass
  # print "command", buildCommand
  # code = subprocess.Popen( ["python", buildCommand, scriptdir ], env = os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  # stdout , err = code.communicate()
  # print "script",stdout
  # print "script",err
  os.environ["DIRAC"] = diracPath
  print "DIRAC ENVIRON", os.environ["DIRAC"]
  ##singlehtml build needs too much memory, so we need to create less code documentation
  buildtype = "limited" if any("singlehtml" in arg for arg in sys.argv ) else "full"
  print "Chosing build type:", buildtype
  buildCommand =os.path.join( os.getcwd() , "../Tools/MakeDoc.py" )
  code = subprocess.Popen( ["python",buildCommand, buildtype], env = os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  stdout , err = code.communicate()
  print "code",stdout
  print "code",err
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
              'sphinx.ext.intersphinx',
              'sphinx.ext.napoleon',
              'sphinx.ext.graphviz',
              ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DIRAC'
copyright = u'%s, DIRAC Project' % datetime.datetime.utcnow().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
# (diracRelease is computed at the top of this file from the environment.)
release = diracRelease
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%H:%M %d/%m/%Y %Z'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
#ADRI: Ignore old stuff that is not included in the compilation
exclude_trees = [ 'AdministratorGuide/Configuration/ConfigurationReference' ]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
html_style = 'dirac.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {
#  'sidebarbgcolor':'#D5E2F2'
#}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "DIRAC Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/DIRAC-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d/%m/%Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'DiracDocsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'DiracDocs.tex', u'DIRAC Documentation',
   u'DIRAC Project.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
## link with the python standard library docs
intersphinx_mapping = {
    'python': ('https://docs.python.org/2.7', None),
}
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| arrabito/DIRAC | docs/source/conf.py | Python | gpl-3.0 | 9,724 | 0.015014 |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Module contains Indexed mixin class"""
import itertools
from collections import namedtuple
from sqlalchemy import inspect, orm
from ggrc import db
from ggrc import fulltext
# Pairs an indexed model with a reindexing rule, as used in
# Indexed.AUTO_REINDEX_RULES below.
# NOTE(review): per the usage hint ('ReindexRule("ModelName", lambda x: ...)'),
# ``rule`` appears to be a callable applied to an instance -- confirm against
# the code that consumes AUTO_REINDEX_RULES.
ReindexRule = namedtuple("ReindexRule", ["model", "rule"])
# pylint: disable=too-few-public-methods
class Indexed(object):
  """Mixin that keeps the full-text index in sync with model instances."""

  AUTO_REINDEX_RULES = [
      # Usage: ReindexRule("ModelName", lambda x: x.value)
  ]
  PROPERTY_TEMPLATE = u"{}"

  def delete_record(self):
    """Remove this instance's row from the full-text index."""
    fulltext.get_indexer().delete_record(
        self.id,
        self.__class__.__name__,
        False
    )

  def create_record(self):
    """Insert a fresh full-text record for this instance."""
    indexer = fulltext.get_indexer()
    indexer.create_record(indexer.fts_record_for(self), False)

  def update_indexer(self):
    """Recreate this instance's index entry, if its model is indexed at all."""
    if self.__class__.__name__ not in fulltext.get_indexed_model_names():
      return
    self.delete_record()
    self.create_record()

  def get_reindex_pair(self):
    """Return the (model name, id) pair identifying this instance."""
    return (self.__class__.__name__, self.id)

  @classmethod
  def get_insert_query_for(cls, ids):
    """Build the bulk index INSERT for *ids*; None when there is nothing to add."""
    if not ids:
      return
    indexer = fulltext.get_indexer()
    columns = inspect(indexer.record_type).c
    instances = cls.indexed_query().filter(cls.id.in_(ids))
    value_rows = []
    for instance in instances:
      record = indexer.fts_record_for(instance)
      for row in indexer.records_generator(record):
        value_rows.append(
            {column.name: getattr(row, attr)
             for attr, column in columns.items()})
    if value_rows:
      return indexer.record_type.__table__.insert().values(value_rows)

  @classmethod
  def get_delete_query_for(cls, ids):
    """Build the bulk index DELETE for *ids*; None when *ids* is empty."""
    if not ids:
      return
    record_type = fulltext.get_indexer().record_type
    return record_type.__table__.delete().where(
        record_type.type == cls.__name__
    ).where(
        record_type.key.in_(ids)
    )

  @classmethod
  def bulk_record_update_for(cls, ids):
    """Replace index rows for *ids*: drop stale rows, then insert fresh ones."""
    for query in (cls.get_delete_query_for(ids),
                  cls.get_insert_query_for(ids)):
      if query is not None:
        db.session.execute(query)

  @classmethod
  def indexed_query(cls):
    """Query loading only the ids needed to rebuild index records."""
    return cls.query.options(
        orm.Load(cls).load_only("id"),
    )
| AleksNeStu/ggrc-core | src/ggrc/fulltext/mixin.py | Python | apache-2.0 | 2,559 | 0.007816 |
import glob
import os
import shutil
import sys
import unittest
import warnings
from test.test_support import run_unittest, TESTFN
def fsdecode(s):
    """Decode byte-string *s* to unicode with the filesystem encoding (Py2)."""
    return unicode(s, sys.getfilesystemencoding())
class GlobTests(unittest.TestCase):
    """Checks glob.glob/iglob (str and unicode forms) over a scratch tree."""
    def norm(self, *parts):
        # Normalized absolute path of *parts* inside the scratch directory.
        return os.path.normpath(os.path.join(self.tempdir, *parts))
    def mktemp(self, *parts):
        # Create an empty file at *parts*, making parent dirs as needed.
        filename = self.norm(*parts)
        base, file = os.path.split(filename)
        if not os.path.exists(base):
            os.makedirs(base)
        f = open(filename, 'w')
        f.close()
    def setUp(self):
        # Scratch layout: a/D, aab/F, .aa/G, .bb/H, aaa/zzzF, ZZZ,
        # a/bcd/EF, a/bcd/efg/ha, plus three symlinks where supported
        # (sym1/sym2 dangling, sym3 -> a/bcd).
        self.tempdir = TESTFN + "_dir"
        self.mktemp('a', 'D')
        self.mktemp('aab', 'F')
        self.mktemp('.aa', 'G')
        self.mktemp('.bb', 'H')
        self.mktemp('aaa', 'zzzF')
        self.mktemp('ZZZ')
        self.mktemp('a', 'bcd', 'EF')
        self.mktemp('a', 'bcd', 'efg', 'ha')
        if hasattr(os, 'symlink'):
            os.symlink(self.norm('broken'), self.norm('sym1'))
            os.symlink('broken', self.norm('sym2'))
            os.symlink(os.path.join('a', 'bcd'), self.norm('sym3'))
    def tearDown(self):
        try:
            shutil.rmtree(self.tempdir)
        except OSError:
            warnings.warn("Failed to remove " + self.tempdir)
    def glob(self, *parts):
        # Glob the pattern under tempdir and cross-check that iglob and the
        # unicode variants all agree before returning the (str) result list.
        if len(parts) == 1:
            pattern = parts[0]
        else:
            pattern = os.path.join(*parts)
        p = os.path.join(self.tempdir, pattern)
        res = glob.glob(p)
        self.assertEqual(list(glob.iglob(p)), res)
        ures = [fsdecode(x) for x in res]
        self.assertEqual(glob.glob(fsdecode(p)), ures)
        self.assertEqual(list(glob.iglob(fsdecode(p))), ures)
        return res
    def assertSequencesEqual_noorder(self, l1, l2):
        # Order-insensitive equality that still catches duplicate mismatches
        # (set comparison alone would collapse duplicates).
        l1 = list(l1)
        l2 = list(l2)
        self.assertEqual(set(l1), set(l2))
        self.assertEqual(sorted(l1), sorted(l2))
    def test_glob_literal(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('a'), [self.norm('a')])
        eq(self.glob('a', 'D'), [self.norm('a', 'D')])
        eq(self.glob('aab'), [self.norm('aab')])
        eq(self.glob('zymurgy'), [])
        res = glob.glob('*')
        # For a clean checkout, the next two assertions would never
        # have failed, even with the change with Jython in
        # https://hg.python.org/jython/rev/ea036792f304
        #
        # But for developers playing with things, we should not have
        # it fail either
        self.assertLessEqual({type(r) for r in res}, {str, unicode})
        res = glob.glob(os.path.join(os.curdir, '*'))
        self.assertLessEqual({type(r) for r in res}, {str, unicode})
        # test return types are unicode, but only if os.listdir
        # returns unicode filenames
        tmp = os.listdir(fsdecode(os.curdir))
        if {type(x) for x in tmp} == {unicode}:
            res = glob.glob(u'*')
            self.assertEqual({type(r) for r in res}, {unicode})
            res = glob.glob(os.path.join(fsdecode(os.curdir), u'*'))
            self.assertEqual({type(r) for r in res}, {unicode})
    def test_glob_one_directory(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa']))
        eq(self.glob('*a'), map(self.norm, ['a', 'aaa']))
        eq(self.glob('.*'), map(self.norm, ['.aa', '.bb']))
        eq(self.glob('?aa'), map(self.norm, ['aaa']))
        eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab']))
        eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab']))
        eq(self.glob('*q'), [])
    def test_glob_nested_directory(self):
        eq = self.assertSequencesEqual_noorder
        if os.path.normcase("abCD") == "abCD":
            # case-sensitive filesystem
            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')])
        else:
            # case insensitive filesystem
            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'),
                                             self.norm('a', 'bcd', 'efg')])
        eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')])
    def test_glob_directory_names(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('*', 'D'), [self.norm('a', 'D')])
        eq(self.glob('*', '*a'), [])
        eq(self.glob('a', '*', '*', '*a'),
           [self.norm('a', 'bcd', 'efg', 'ha')])
        eq(self.glob('?a?', '*F'), [self.norm('aaa', 'zzzF'),
                                    self.norm('aab', 'F')])
    def test_glob_directory_with_trailing_slash(self):
        # Patterns ending with a slash shouldn't match non-dirs
        res = glob.glob(self.norm('Z*Z') + os.sep)
        self.assertEqual(res, [])
        res = glob.glob(self.norm('ZZZ') + os.sep)
        self.assertEqual(res, [])
        # When there is a wildcard pattern which ends with os.sep, glob()
        # doesn't blow up.
        res = glob.glob(self.norm('aa*') + os.sep)
        self.assertEqual(len(res), 2)
        # either of these results is reasonable
        self.assertIn(set(res), [
            {self.norm('aaa'), self.norm('aab')},
            {self.norm('aaa') + os.sep, self.norm('aab') + os.sep},
        ])
    def test_glob_unicode_directory_with_trailing_slash(self):
        # Same as test_glob_directory_with_trailing_slash, but with an
        # unicode argument.
        res = glob.glob(fsdecode(self.norm('Z*Z') + os.sep))
        self.assertEqual(res, [])
        res = glob.glob(fsdecode(self.norm('ZZZ') + os.sep))
        self.assertEqual(res, [])
        res = glob.glob(fsdecode(self.norm('aa*') + os.sep))
        self.assertEqual(len(res), 2)
        # either of these results is reasonable
        self.assertIn(set(res), [
            {fsdecode(self.norm('aaa')), fsdecode(self.norm('aab'))},
            {fsdecode(self.norm('aaa') + os.sep),
             fsdecode(self.norm('aab') + os.sep)},
        ])
    @unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
    def test_glob_symlinks(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('sym3'), [self.norm('sym3')])
        eq(self.glob('sym3', '*'), [self.norm('sym3', 'EF'),
                                    self.norm('sym3', 'efg')])
        self.assertIn(self.glob('sym3' + os.sep),
                      [[self.norm('sym3')], [self.norm('sym3') + os.sep]])
        eq(self.glob('*', '*F'),
           [self.norm('aaa', 'zzzF'), self.norm('aab', 'F'),
            self.norm('sym3', 'EF')])
    @unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
    def test_glob_broken_symlinks(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('sym*'), [self.norm('sym1'), self.norm('sym2'),
                               self.norm('sym3')])
        eq(self.glob('sym1'), [self.norm('sym1')])
        eq(self.glob('sym2'), [self.norm('sym2')])
    @unittest.skipUnless(sys.platform == "win32", "Win32 specific test")
    def test_glob_magic_in_drive(self):
        eq = self.assertSequencesEqual_noorder
        eq(glob.glob('*:'), [])
        eq(glob.glob(u'*:'), [])
        eq(glob.glob('?:'), [])
        eq(glob.glob(u'?:'), [])
def test_main():
    """Run the GlobTests suite through test_support's runner."""
    run_unittest(GlobTests)
if __name__ == "__main__":
    test_main()
| alvin319/CarnotKE | jyhton/Lib/test/test_glob.py | Python | apache-2.0 | 7,373 | 0 |
# -*- coding: utf-8 -*-
def before_related(adminform):
    """Truncate *adminform* to the fieldsets before the ``related_go_here`` marker.

    Stashes the fieldsets preceding/following the marker on
    ``fieldsets_before`` / ``fieldsets_after`` and replaces
    ``adminform.fieldsets`` with the leading part, so the template can render
    related inlines between the two halves (see after_related).

    Always returns *adminform*; when the marker is absent, the form keeps all
    fieldsets, with ``fieldsets_before`` set to the full list and
    ``fieldsets_after`` empty.
    """
    marker = ('related_go_here', {'fields': []})
    adminform.fieldsets_before = adminform.fieldsets
    adminform.fieldsets_after = []
    try:
        marker_index = adminform.fieldsets.index(marker)
    except (ValueError, AttributeError, TypeError):
        # Previously a bare ``except:`` that swallowed every error; narrowed
        # to the failures .index() can realistically raise (marker missing,
        # or fieldsets not a sequence) so genuine bugs surface again.
        return adminform
    adminform.fieldsets_before = adminform.fieldsets[:marker_index]
    adminform.fieldsets_after = adminform.fieldsets[marker_index + 1:]
    adminform.fieldsets = adminform.fieldsets_before
    return adminform
def after_related(adminform):
    """Advance *adminform* past the ``related_go_here`` marker.

    Swaps ``adminform.fieldsets`` for the tail stashed by before_related()
    and, if another marker occurs in that tail, splits again so repeated
    before/after passes keep working. Always returns *adminform*.
    """
    marker = ('related_go_here', {'fields': []})
    try:
        adminform.fieldsets = adminform.fieldsets_after
    except AttributeError:
        # before_related() never ran on this form; nothing to advance past.
        # (Previously hidden inside a bare ``except:``.)
        return adminform
    try:
        marker_index = adminform.fieldsets.index(marker)
    except (ValueError, AttributeError, TypeError):
        # No further marker: the remaining tail is already in place.
        return adminform
    adminform.fieldsets_before = adminform.fieldsets[:marker_index]
    adminform.fieldsets_after = adminform.fieldsets[marker_index + 1:]
    adminform.fieldsets = adminform.fieldsets_after
    return adminform
| Depado/starmato-admin | starmato/admin/templatetags/_fieldset_related.py | Python | mit | 976 | 0.008197 |
#!/usr/bin/python
"""
title : testtermopi.py
description : This program runs the termopi.py
: Displays the status of the resources (cpu load and memory usage) consumed by a Raspberry Pi
computer and the resources consumed by one or more containers instantiated in the Pi.
source :
author : Carlos Molina-Jimenez (Carlos.Molina@cl.cam.ac.uk)
date : 27 Mar 2017
institution : Computer Laboratory, University of Cambridge
version : 1.0
usage :
notes :
compile and run : % python termopi.py
: It imports pidict.py, dockerctl.py and picheck.py which are found in
: ./modules.
: You need to include "./modules" in the PYTHONPATH environment variable to
: indicate python where to find the pidict.py, dockerctl.py and picheck.py.
: For example, in a bash shell, you need to include the following lines
: in your .bash_profile file located in you home directory (you can see it with
: (# ls -la).
:
: PYTHONPATH="./modules"
: export PYTHONPATH
python_version : Python 2.7.12
====================================================
"""
from modules.tools.termopi import termopi # class with dictionary data structure
# Threshold of cpu exhaustion
# NOTE(review): only cpuUsageThreshold is referenced (by the commented-out
# check at the bottom); cpuLoadThreshold is currently unused.
cpuUsageThreshold= 50
cpuLoadThreshold= 3
# Print the Pi's resource consumption and dump the current status to a JSON
# file via the termopi helper class.
termo= termopi()
termo.prt_pi_resources()
termo.create_jsonfile_with_pi_status()
#termo.check_pi_resource_status(cpuUsageThreshold)
| AdL1398/PiCasso | source/modules/tester/testtermopi.py | Python | mit | 1,603 | 0.009981 |
from setuptools import setup
# Runtime dependencies.
reqs = [
    'myhdl>=0.9.0',
    'click',
    'wrapt'
]
# Dependencies needed only to run the test suite.
test_reqs = ['pytest', 'hypothesis']

requires = {
    'setup_requires': ['setuptools_scm'],
    'install_requires': reqs,
    'tests_require': test_reqs,
    'extras_require': {
        'testing': test_reqs,
    }
}

# Read the long description up front so the file handle is closed promptly
# (the original open('README.md').read() leaked the handle).
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='uhdl',
    use_scm_version=True,
    description='Python Hardware Description for Humans.',
    long_description=long_description,
    url='https://github.com/jck/uhdl',
    author='Keerthan Jaic',
    author_email='jckeerthan@gmail.com',
    license="BSD",
    packages=['uhdl'],
    entry_points={
        'console_scripts': [
            'uhdl = uhdl.cli:cli'
        ]
    },
    zip_safe=False,
    classifiers=[
        # BUG FIX: a missing trailing comma previously concatenated the next
        # two strings into a single, invalid trove classifier.
        'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)',
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='myhdl uhdl',
    **requires
)
| jck/uhdl | setup.py | Python | bsd-3-clause | 1,201 | 0 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import seresnext_net
from seresnext_test_base import TestResnetBase, DeviceType
from functools import partial
class TestResnetGPU(TestResnetBase):
    """Convergence parity check for SE-ResNeXt run on CUDA."""
    def test_seresnext_with_learning_rate_decay(self):
        # NOTE(zcd): This test compares the result of using parallel_executor
        # against a plain executor. The drop_out and batch_norm ops produce
        # differing results under the two executors, so those two ops are
        # removed from the model.
        check_func = partial(
            self.check_network_convergence,
            optimizer=seresnext_net.optimizer,
            use_parallel_executor=False)
        self._compare_result_with_origin_model(
            check_func, use_device=DeviceType.CUDA, compare_seperately=False)
if __name__ == '__main__':
    unittest.main()
| PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_base_gpu.py | Python | apache-2.0 | 1,457 | 0 |
"""SCons.Variables.PathVariable
This file defines an option type for SCons implementing path settings.
To be used whenever a a user-specified path override should be allowed.
Arguments to PathVariable are:
option-name = name of this option on the command line (e.g. "prefix")
option-help = help string for option
option-dflt = default value for this option
validator = [optional] validator for option value. Predefined
validators are:
PathAccept -- accepts any path setting; no validation
PathIsDir -- path must be an existing directory
PathIsDirCreate -- path must be a dir; will create
PathIsFile -- path must be a file
PathExists -- path must exist (any type) [default]
The validator is a function that is called and which
should return True or False to indicate if the path
is valid. The arguments to the validator function
are: (key, val, env). The key is the name of the
option, the val is the path specified for the option,
and the env is the env to which the Otions have been
added.
Usage example:
Examples:
prefix=/usr/local
opts = Variables()
opts = Variables()
opts.Add(PathVariable('qtdir',
'where the root of Qt is installed',
qtdir, PathIsDir))
opts.Add(PathVariable('qt_includes',
'where the Qt includes are installed',
'$qtdir/includes', PathIsDirCreate))
opts.Add(PathVariable('qt_libraries',
'where the Qt library is installed',
'$qtdir/lib'))
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PathVariable.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
__all__ = ['PathVariable',]
import os
import os.path
import SCons.Errors
class _PathVariableClass(object):
    """Factory for path-valued SCons Variables, with reusable validators."""

    def PathAccept(self, key, val, env):
        """Accepts any path, no checking done."""
        pass

    def PathIsDir(self, key, val, env):
        """Validator: *val* must name an existing directory."""
        if os.path.isdir(val):
            return
        if os.path.isfile(val):
            fmt = 'Directory path for option %s is a file: %s'
        else:
            fmt = 'Directory path for option %s does not exist: %s'
        raise SCons.Errors.UserError(fmt % (key, val))

    def PathIsDirCreate(self, key, val, env):
        """Validator: *val* must be a directory; missing ones are created."""
        if os.path.isfile(val):
            raise SCons.Errors.UserError(
                'Path for option %s is a file, not a directory: %s' % (key, val))
        if not os.path.isdir(val):
            os.makedirs(val)

    def PathIsFile(self, key, val, env):
        """Validator: *val* must name an existing file."""
        if os.path.isfile(val):
            return
        if os.path.isdir(val):
            fmt = 'File path for option %s is a directory: %s'
        else:
            fmt = 'File path for option %s does not exist: %s'
        raise SCons.Errors.UserError(fmt % (key, val))

    def PathExists(self, key, val, env):
        """Validator: *val* must exist on disk (any file type)."""
        if not os.path.exists(val):
            raise SCons.Errors.UserError(
                'Path for option %s does not exist: %s' % (key, val))

    def __call__(self, key, help, default, validator=None):
        """Return the option tuple for a path variable, usable with opts.Add().

        *default* is the path used when the user supplies no override, and
        *validator* is one of the Path* methods above (PathExists when not
        given). The returned tuple is
        (key, decorated help, default, validator, converter=None).
        """
        if validator is None:
            validator = self.PathExists
        if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key):
            label = key[0]
        else:
            label = key
        return (key, '%s ( /path/to/%s )' % (help, label), default,
                validator, None)
# Module-level singleton: call PathVariable(key, help, default[, validator])
# to build the argument tuple handed to Variables.Add().
PathVariable = _PathVariableClass()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| stefanklug/mapnik | scons/scons-local-2.3.6/SCons/Variables/PathVariable.py | Python | lgpl-2.1 | 5,646 | 0.000886 |
# List of modules to import when celery starts.
# CELERY_IMPORTS = ('libcloud_sandbox.tasks.code_execute', )
# Result store settings.
# NOTE(review): CELERY_RESULT_DBURI is the legacy (pre-Celery 3) setting name
# for the database result backend URI -- confirm against the deployed version.
CELERY_RESULT_BACKEND = 'database'
CELERY_RESULT_DBURI = 'sqlite:///mydatabase.db'
# Broker settings.
# A SQLAlchemy/SQLite broker is used instead of RabbitMQ; BROKER_HOST carries
# the database URI, so the port/vhost/user/password values below are the
# AMQP-style defaults and are effectively unused with this transport.
BROKER_TRANSPORT = 'sqlalchemy'
BROKER_HOST = 'sqlite:///tasks.db'
BROKER_PORT = 5672
BROKER_VHOST = '/'
BROKER_USER = 'guest'
BROKER_PASSWORD = 'guest'
## Worker settings
# Single worker process; hard per-task time limit of 20 seconds.
CELERYD_CONCURRENCY = 1
CELERYD_TASK_TIME_LIMIT = 20
# CELERYD_LOG_FILE = 'celeryd.log'
CELERYD_LOG_LEVEL = 'INFO'
#!/adsc/DDEA_PROTO/bin/python
from df_data_analysis_ddea import ddea_analysis
from datetime import datetime
import traceback
import sys
if __name__ == '__main__':
    # CLI entry point: ddea_cli.py <start YY-MM-DD> <end YY-MM-DD>
    try:
        if 3 <= len(sys.argv):
            start_time = sys.argv[1]
            end_time = sys.argv[2]
            stime = datetime.strptime(start_time, "%y-%m-%d")
            etime = datetime.strptime(end_time, "%y-%m-%d")
            ddea_analysis('', stime, etime)
        else:
            # BUG FIX: raising a plain string is a TypeError since Python 2.6;
            # raise a proper exception instead.
            raise ValueError("Invalid Arguments")
    except Exception:
        # BUG FIX: traceback.print_exc() prints the traceback itself and
        # returns None, so the old `print traceback.print_exc()` emitted a
        # stray "None" line.
        traceback.print_exc()
        print("Example: %s 14-01-01 14-02-02" % sys.argv[0])
        raise SystemExit
| TinyOS-Camp/DDEA-DEV | Archive/[14_10_11] Dr_Jung_Update/ddea_cli.py | Python | gpl-2.0 | 688 | 0.002907 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Custom `Jinja2` extensions."""
from jinja2 import nodes
from jinja2.ext import Extension
from flask import g
class LangExtension(Extension):
    """Ease transition from legacy templates using ``<lang>...</lang>``."""

    tags = set(['lang'])

    def parse(self, parser):
        """Turn a ``{% lang %}...{% endlang %}`` block into a CallBlock node."""
        line_no = parser.stream.next().lineno
        inner = parser.parse_statements(['name:endlang'], drop_needle=True)
        call = self.call_method('_lang')
        return nodes.CallBlock(call, [], [], inner).set_lineno(line_no)

    @staticmethod
    def _lang(caller):
        """Return current language string using `filter_languages`."""
        # Imported lazily here (presumably to avoid an import cycle -- confirm).
        from invenio.modules.formatter.engine import filter_languages
        return filter_languages('<lang>' + caller() + '</lang>', g.ln)
| MSusik/invenio | invenio/ext/template/extensions.py | Python | gpl-2.0 | 1,614 | 0.008055 |
#!/usr/bin/env python
from .HTMLElement import HTMLElement
class HTMLFieldSetElement(HTMLElement):
    """DOM wrapper for the HTML ``<fieldset>`` element."""
    def __init__(self, doc, tag):
        HTMLElement.__init__(self, doc, tag)
    @property
    def form(self):
        # W3C: should return the owning <form> element; not implemented yet.
        pass
| tweemeterjop/thug | thug/DOM/W3C/HTML/HTMLFieldSetElement.py | Python | gpl-2.0 | 229 | 0 |
#!/usr/bin/env python
#
# Copyright 2012 Communications Engineering Lab (CEL) / KIT (Karlsruhe Institute of Technology)
# Author: Felix Wunsch
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
#
from gnuradio import gr, gr_unittest
import drm
#import drm_swig
class qa_add_tailbits_vbvb (gr_unittest.TestCase):
    """QA flowgraph for drm.add_tailbits_vbvb."""
    def setUp (self):
        # Repeating 4-bit source -> head (3 vectors) -> block under test -> sink.
        self.tb = gr.top_block ()
        self.src = gr.vector_source_b((1,1,0,1), True, 4)
        self.head = gr.head(4,3)
        self.add_tailbits = drm.add_tailbits_vbvb(4,2)
        self.snk = gr.vector_sink_b(6)
        self.tb.connect(self.src, self.head, self.add_tailbits, self.snk)
    def tearDown (self):
        self.tb = None
    def test_001_t (self):
        # set up fg
        self.tb.run ()
        # check data
        # Each 4-bit input vector gains two zero tail bits -> 6-bit vectors.
        self.assertTupleEqual(self.snk.data(), (1,1,0,1,0,0,1,1,0,1,0,0,1,1,0,1,0,0))
if __name__ == '__main__':
    gr_unittest.main ()
| fewu/gnuradio_drm | gr-drm/python/qa_drm_add_tailbits_vbvb.py | Python | gpl-3.0 | 1,583 | 0.025268 |
from cmdtest import Program, assert_hook
# Wrapper around the system `echo` binary provided by cmdtest.
echo = Program('echo')
@echo.test
def echo_string_should_output_string():
    """echo should print its argument followed by a newline."""
    assert echo('foo').out == 'foo\n'
if __name__ == '__main__':
    echo.run()
| chromy/cmdtest | examples/echo.py | Python | mit | 198 | 0.010101 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Database models for collections."""
# General imports.
import re
from operator import itemgetter
from flask import g, url_for
from intbitset import intbitset
from invenio.base.globals import cfg
from invenio.base.i18n import _, gettext_set_language
from invenio.ext.sqlalchemy import db
from invenio.ext.sqlalchemy.utils import attribute_multi_dict_collection
from invenio.modules.formatter.registry import output_formats
from invenio.modules.search.models import Field, Fieldvalue
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm.collections import attribute_mapped_collection
from werkzeug.utils import cached_property
# Create your models here.
# Collection-class factory mapping CollectionExternalcollection rows into a
# multi-dict keyed by their `type` attribute (used by
# Collection.externalcollections).
external_collection_mapper = attribute_multi_dict_collection(
    creator=lambda k, v: CollectionExternalcollection(type=k,
                                                      externalcollection=v),
    key_attr=lambda obj: obj.type,
    val_attr=lambda obj: obj.externalcollection)
class Collection(db.Model):
    """Represent a Collection record."""

    def __repr__(self):
        """Return class representation."""
        # BUG FIX: was '{0.query}', which formatted the Flask-SQLAlchemy
        # query object instead of the dbquery column the label announces.
        return 'Collection <id: {0.id}, name: {0.name}, dbquery: {0.dbquery}, ' \
            'nbrecs: {0.nbrecs}>'.format(self)

    def __unicode__(self):
        suffix = ' ({0})'.format(_('default')) if self.id == 1 else ''
        return u"{0.id}. {0.name}{1}".format(self, suffix)

    def __str__(self):
        return unicode(self).encode('utf-8')

    __tablename__ = 'collection'
    id = db.Column(db.MediumInteger(9, unsigned=True),
                   primary_key=True)
    name = db.Column(db.String(255), unique=True, index=True,
                     nullable=False)
    dbquery = db.Column(db.Text(20), nullable=True,
                        index=True)

    @property
    def nbrecs(self):
        """Number of records in the collection."""
        from .cache import get_collection_nbrecs
        return get_collection_nbrecs(self.name)

    @property
    def reclist(self):
        """Return hit set with record identifiers."""
        from .cache import get_collection_reclist
        return get_collection_reclist(self.name)

    @property
    def is_hosted(self):
        """Return True if collection is hosted elsewhere."""
        return self.dbquery.startswith('hostedcollection:') if self.dbquery \
            else False

    _names = db.relationship(lambda: Collectionname,
                             backref='collection',
                             collection_class=attribute_mapped_collection(
                                 'ln_type'),
                             cascade="all, delete, delete-orphan")
    # Mapping (ln, type) -> translated name.
    names = association_proxy(
        '_names', 'value',
        creator=lambda k, v: Collectionname(ln_type=k, value=v)
    )
    _boxes = db.relationship(lambda: Collectionboxname,
                             backref='collection',
                             collection_class=attribute_mapped_collection(
                                 'ln_type'),
                             cascade="all, delete, delete-orphan")
    # Mapping (ln, type) -> translated box label.
    boxes = association_proxy(
        '_boxes', 'value',
        creator=lambda k, v: Collectionboxname(ln_type=k, value=v)
    )

    _formatoptions = association_proxy('formats', 'format')

    # @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
    def formatoptions(self):
        """Return list of format options."""
        if len(self._formatoptions):
            return [dict(f) for f in self._formatoptions]
        else:
            # Fall back to the default brief HTML format.
            return [{'code': u'hb',
                     'name': _("HTML %(format)s", format=_("brief")),
                     'content_type': u'text/html'}]
    formatoptions = property(formatoptions)

    _examples_example = association_proxy('_examples', 'example')

    @property
    # @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
    def examples(self):
        """Return list of example queries."""
        return list(self._examples_example)

    @property
    def name_ln(self):
        """Return the collection name translated to the current language."""
        from invenio.legacy.search_engine import get_coll_i18nname
        return get_coll_i18nname(self.name,
                                 getattr(g, 'ln', cfg['CFG_SITE_LANG']))
    # Another possible implementation with cache memoize
    # @cache.memoize
    # try:
    #     return db.object_session(self).query(Collectionname).\
    #         with_parent(self).filter(db.and_(Collectionname.ln==g.ln,
    #             Collectionname.type=='ln')).first().value
    # except Exception:
    #     return self.name

    @property
    # @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
    def portalboxes_ln(self):
        """Return portalboxes for the current language, ordered by score."""
        return db.object_session(self).query(CollectionPortalbox).\
            with_parent(self).\
            options(db.joinedload_all(CollectionPortalbox.portalbox)).\
            filter(CollectionPortalbox.ln == g.ln).\
            order_by(db.desc(CollectionPortalbox.score)).all()

    @property
    def most_specific_dad(self):
        """Return the parent collection with the fewest records, if any."""
        results = sorted(
            db.object_session(self).query(Collection).join(
                Collection.sons
            ).filter(CollectionCollection.id_son == self.id).all(),
            key=lambda c: c.nbrecs)
        return results[0] if len(results) else None

    @property
    # @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
    def is_restricted(self):
        """Return ``True`` if the collection is restricted."""
        from invenio.legacy.search_engine import collection_restricted_p
        return collection_restricted_p(self.name)

    @property
    def type(self):
        """Return relation type: 'r' (regular) or 'v' (virtual)."""
        # Raw string: "\d" would be an invalid escape in a plain literal.
        p = re.compile(r"\d+:.*")
        if self.dbquery is not None and \
                p.match(self.dbquery.lower()):
            return 'r'
        else:
            return 'v'

    _collection_children = db.relationship(
        lambda: CollectionCollection,
        collection_class=ordering_list('score'),
        primaryjoin=lambda: Collection.id == CollectionCollection.id_dad,
        foreign_keys=lambda: CollectionCollection.id_dad,
        order_by=lambda: db.asc(CollectionCollection.score)
    )
    _collection_children_r = db.relationship(
        lambda: CollectionCollection,
        collection_class=ordering_list('score'),
        primaryjoin=lambda: db.and_(
            Collection.id == CollectionCollection.id_dad,
            CollectionCollection.type == 'r'),
        foreign_keys=lambda: CollectionCollection.id_dad,
        order_by=lambda: db.asc(CollectionCollection.score)
    )
    _collection_children_v = db.relationship(
        lambda: CollectionCollection,
        collection_class=ordering_list('score'),
        primaryjoin=lambda: db.and_(
            Collection.id == CollectionCollection.id_dad,
            CollectionCollection.type == 'v'),
        foreign_keys=lambda: CollectionCollection.id_dad,
        order_by=lambda: db.asc(CollectionCollection.score)
    )
    collection_parents = db.relationship(
        lambda: CollectionCollection,
        collection_class=ordering_list('score'),
        primaryjoin=lambda: Collection.id == CollectionCollection.id_son,
        foreign_keys=lambda: CollectionCollection.id_son,
        order_by=lambda: db.asc(CollectionCollection.score)
    )
    collection_children = association_proxy('_collection_children', 'son')
    collection_children_r = association_proxy(
        '_collection_children_r', 'son',
        creator=lambda son: CollectionCollection(id_son=son.id, type='r')
    )
    collection_children_v = association_proxy(
        '_collection_children_v', 'son',
        creator=lambda son: CollectionCollection(id_son=son.id, type='v')
    )
    _externalcollections = db.relationship(
        lambda: CollectionExternalcollection,
        cascade="all, delete, delete-orphan"
    )

    def _externalcollections_type(type_):
        # Class-definition-time helper; builds one proxy per external type.
        return association_proxy(
            '_externalcollections_' + str(type_),
            'externalcollection',
            creator=lambda ext: CollectionExternalcollection(
                externalcollection=ext, type=type_))
    externalcollections_0 = _externalcollections_type(0)
    externalcollections_1 = _externalcollections_type(1)
    externalcollections_2 = _externalcollections_type(2)
    externalcollections = db.relationship(
        lambda: CollectionExternalcollection,
        collection_class=external_collection_mapper,
        cascade="all, delete, delete-orphan"
    )

    # Search options
    def _make_field_fieldvalue(type_):
        # Class-definition-time helper; builds one relationship per type code.
        return db.relationship(
            lambda: CollectionFieldFieldvalue,
            primaryjoin=lambda: db.and_(
                Collection.id == CollectionFieldFieldvalue.id_collection,
                CollectionFieldFieldvalue.type == type_),
            order_by=lambda: CollectionFieldFieldvalue.score)
    _search_within = _make_field_fieldvalue('sew')
    _search_options = _make_field_fieldvalue('seo')

    @property
    # @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
    def search_within(self):
        """
        Collect search within options.
        """
        default = [('', _('any field'))]
        found = [(o.field.code, o.field.name_ln) for o in self._search_within]
        if not found:
            found = [(f.name.replace(' ', ''), f.name_ln)
                     for f in Field.query.filter(Field.name.in_(
                         cfg['CFG_WEBSEARCH_SEARCH_WITHIN'])).all()]
        return default + sorted(found, key=itemgetter(1))

    @property
    # @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
    def search_options(self):
        """Return search options."""
        return self._search_options

    @cached_property
    def ancestors(self):
        """Get list of parent collection ids."""
        output = set([self])
        for c in self.dads:
            output |= c.dad.ancestors
        return output

    @cached_property
    def ancestors_ids(self):
        """Get list of parent collection ids."""
        output = intbitset([self.id])
        for c in self.dads:
            ancestors = c.dad.ancestors_ids
            if self.id in ancestors:
                # BUG FIX: a bare `raise` with no active exception is itself
                # an error; raise an explicit cycle-detection exception.
                raise RuntimeError(
                    'Circular dependency in collection tree at id %r' %
                    self.id)
            output |= ancestors
        return output

    @cached_property
    def descendants_ids(self):
        """Get list of child collection ids."""
        output = intbitset([self.id])
        for c in self.sons:
            descendants = c.son.descendants_ids
            if self.id in descendants:
                # BUG FIX: see ancestors_ids.
                raise RuntimeError(
                    'Circular dependency in collection tree at id %r' %
                    self.id)
            output |= descendants
        return output

    # Gets the list of localized names as an array
    collection_names = db.relationship(
        lambda: Collectionname,
        primaryjoin=lambda: Collection.id == Collectionname.id_collection,
        foreign_keys=lambda: Collectionname.id_collection
    )

    def translation(self, lang):
        """Get the translation according to the language code."""
        try:
            return db.object_session(self).query(Collectionname).\
                with_parent(self).filter(db.and_(
                    Collectionname.ln == lang,
                    Collectionname.type == 'ln'
                )).first().value
        except Exception:
            return ""

    @property
    def sort_methods(self):
        """Get sort methods for collection.

        If not sort methods are defined for a collection the root collections
        sort methods are retuned. If not methods are defined for the root
        collection, all possible sort methods are returned.

        Note: Noth sorting methods and ranking methods are now defined via
        the sorter.
        """
        from invenio.modules.sorter.models import BsrMETHOD, \
            Collection_bsrMETHOD

        for coll_id in (self.id, 1):
            methods = Collection_bsrMETHOD.query.filter_by(
                id_collection=coll_id
            ).order_by(
                Collection_bsrMETHOD.score
            ).options(
                db.joinedload(Collection_bsrMETHOD.bsrMETHOD)
            ).all()

            if len(methods) > 0:
                return map(lambda obj: obj.bsrMETHOD, methods)

        return BsrMETHOD.query.order_by(BsrMETHOD.name).all()

    def get_collectionbox_name(self, ln=None, box_type="r"):
        """Return collection-specific labelling subtrees.

        - 'Focus on': regular collection
        - 'Narrow by': virtual collection
        - 'Latest addition': boxes

        If translation for given language does not exist, use label
        for CFG_SITE_LANG. If no custom label is defined for
        CFG_SITE_LANG, return default label for the box.

        :param ln: the language of the label
        :param box_type: can be 'r' (=Narrow by), 'v' (=Focus on),
            'l' (=Latest additions)
        """
        if ln is None:
            ln = g.ln
        collectionboxnamequery = db.object_session(self).query(
            Collectionboxname).with_parent(self)
        try:
            collectionboxname = collectionboxnamequery.filter(db.and_(
                Collectionboxname.ln == ln,
                Collectionboxname.type == box_type,
            )).one()
        except Exception:
            try:
                # BUG FIX: fall back to the site default language as the
                # docstring promises; previously this block repeated the
                # exact same query as above and could never succeed.
                collectionboxname = collectionboxnamequery.filter(db.and_(
                    Collectionboxname.ln == cfg['CFG_SITE_LANG'],
                    Collectionboxname.type == box_type,
                )).one()
            except Exception:
                collectionboxname = None
        if collectionboxname is None:
            # load the right message language
            _ = gettext_set_language(ln)
            return _(Collectionboxname.TYPES.get(box_type, ''))
        else:
            return collectionboxname.value

    portal_boxes_ln = db.relationship(
        lambda: CollectionPortalbox,
        collection_class=ordering_list('score'),
        primaryjoin=lambda:
        Collection.id == CollectionPortalbox.id_collection,
        foreign_keys=lambda: CollectionPortalbox.id_collection,
        order_by=lambda: db.asc(CollectionPortalbox.score))

    def breadcrumbs(self, builder=None, ln=None):
        """Return breadcrumbs for collection."""
        ln = cfg.get('CFG_SITE_LANG') if ln is None else ln
        breadcrumbs = []
        # Get breadcrumbs for most specific dad if it exists.
        if self.most_specific_dad is not None:
            breadcrumbs = self.most_specific_dad.breadcrumbs(builder=builder,
                                                             ln=ln)
        if builder is not None:
            crumb = builder(self)
        else:
            crumb = dict(
                text=self.name_ln,
                url=url_for('collections.collection', name=self.name))
        breadcrumbs.append(crumb)
        return breadcrumbs
class Collectionname(db.Model):
    """Represent a Collectionname record."""
    __tablename__ = 'collectionname'
    id_collection = db.Column(db.MediumInteger(9, unsigned=True),
                              db.ForeignKey(Collection.id),
                              nullable=False, primary_key=True)
    # Language code of the translation; part of the composite primary key.
    ln = db.Column(db.Char(5), nullable=False, primary_key=True,
                   server_default='')
    # Name type discriminator -- 'ln' is used for the display name (see
    # Collection.translation); 'sn' is the default -- TODO confirm semantics.
    type = db.Column(db.Char(3), nullable=False, primary_key=True,
                     server_default='sn')
    value = db.Column(db.String(255), nullable=False)
    @db.hybrid_property
    def ln_type(self):
        # Composite (language, type) key used by the mapped-collection proxies.
        return (self.ln, self.type)
    @ln_type.setter
    def set_ln_type(self, value):
        (self.ln, self.type) = value
class Collectionboxname(db.Model):
    """Represent a Collectionboxname record."""
    __tablename__ = 'collectionboxname'
    # Default English labels per box type code; see
    # Collection.get_collectionbox_name for the lookup/fallback logic.
    TYPES = {
        'v': 'Focus on:',
        'r': 'Narrow by collection:',
        'l': 'Latest additions:',
    }
    id_collection = db.Column(db.MediumInteger(9, unsigned=True),
                              db.ForeignKey(Collection.id),
                              nullable=False, primary_key=True)
    # Language code of the label; part of the composite primary key.
    ln = db.Column(db.Char(5), nullable=False, primary_key=True,
                   server_default='')
    # Box type code; one of the TYPES keys above.
    type = db.Column(db.Char(3), nullable=False, primary_key=True,
                     server_default='r')
    value = db.Column(db.String(255), nullable=False)
    @db.hybrid_property
    def ln_type(self):
        # Composite (language, type) key used by the mapped-collection proxies.
        return (self.ln, self.type)
    @ln_type.setter
    def set_ln_type(self, value):
        (self.ln, self.type) = value
class Collectiondetailedrecordpagetabs(db.Model):
    """Represent a Collectiondetailedrecordpagetabs record."""
    __tablename__ = 'collectiondetailedrecordpagetabs'
    id_collection = db.Column(db.MediumInteger(9, unsigned=True),
                              db.ForeignKey(Collection.id),
                              nullable=False, primary_key=True)
    # Serialized list of tab identifiers shown on the detailed record page;
    # the delimiter/format is not visible in this module -- TODO confirm.
    tabs = db.Column(db.String(255), nullable=False,
                     server_default='')
    collection = db.relationship(Collection,
                                 backref='collectiondetailedrecordpagetabs')
class CollectionCollection(db.Model):
    """Represent a CollectionCollection record."""
    __tablename__ = 'collection_collection'
    # Association table linking parent (dad) and child (son) collections.
    id_dad = db.Column(db.MediumInteger(9, unsigned=True),
                       db.ForeignKey(Collection.id), primary_key=True)
    id_son = db.Column(db.MediumInteger(9, unsigned=True),
                       db.ForeignKey(Collection.id), primary_key=True)
    # 'r' = regular, 'v' = virtual child (see Collection.collection_children_*).
    type = db.Column(db.Char(1), nullable=False,
                     server_default='r')
    # Ordering weight among siblings; lower scores come first.
    score = db.Column(db.TinyInteger(4, unsigned=True), nullable=False,
                      server_default='0')
    son = db.relationship(Collection, primaryjoin=id_son == Collection.id,
                          backref='dads',
                          # FIX
                          # collection_class=db.attribute_mapped_collection('score'),
                          order_by=db.asc(score))
    dad = db.relationship(Collection, primaryjoin=id_dad == Collection.id,
                          backref='sons', order_by=db.asc(score))
class Example(db.Model):
    """Represent a Example record."""
    __tablename__ = 'example'
    id = db.Column(db.MediumInteger(9, unsigned=True), primary_key=True,
                   autoincrement=True)
    # Free-form category of the example query.
    type = db.Column(db.Text, nullable=False)
    # The example query text itself.
    body = db.Column(db.Text, nullable=False)
class CollectionExample(db.Model):
    """Represent a CollectionExample record."""
    __tablename__ = 'collection_example'
    # Association table between collections and example queries.
    id_collection = db.Column(db.MediumInteger(9, unsigned=True),
                              db.ForeignKey(Collection.id), primary_key=True)
    id_example = db.Column(db.MediumInteger(9, unsigned=True),
                           db.ForeignKey(Example.id), primary_key=True)
    # Ordering weight; lower scores come first.
    score = db.Column(db.TinyInteger(4, unsigned=True), nullable=False,
                      server_default='0')
    collection = db.relationship(Collection, backref='_examples',
                                 order_by=score)
    example = db.relationship(Example, backref='collections', order_by=score)
class Portalbox(db.Model):
    """Represent a Portalbox record."""
    __tablename__ = 'portalbox'
    id = db.Column(db.MediumInteger(9, unsigned=True), autoincrement=True,
                   primary_key=True)
    # HTML title of the box.
    title = db.Column(db.Text, nullable=False)
    # HTML body of the box.
    body = db.Column(db.Text, nullable=False)
def get_pbx_pos():
    """Return a mapping of portalbox position codes to display labels."""
    return {
        "rt": "Right Top",
        "lt": "Left Top",
        "te": "Title Epilog",
        "tp": "Title Prolog",
        "ne": "Narrow by coll epilog",
        "np": "Narrow by coll prolog",
    }
class CollectionPortalbox(db.Model):
    """Represent a CollectionPortalbox record."""
    __tablename__ = 'collection_portalbox'
    # Association table placing a portalbox on a collection page.
    id_collection = db.Column(db.MediumInteger(9, unsigned=True),
                              db.ForeignKey(Collection.id), primary_key=True)
    id_portalbox = db.Column(db.MediumInteger(9, unsigned=True),
                             db.ForeignKey(Portalbox.id), primary_key=True)
    # Language the placement applies to.
    ln = db.Column(db.Char(5), primary_key=True, server_default='',
                   nullable=False)
    # Page position code; see get_pbx_pos() for the known values.
    position = db.Column(db.Char(3), nullable=False,
                         server_default='top')
    # Ordering weight among boxes at the same position.
    score = db.Column(db.TinyInteger(4, unsigned=True),
                      nullable=False,
                      server_default='0')
    collection = db.relationship(Collection, backref='portalboxes',
                                 order_by=score)
    portalbox = db.relationship(Portalbox, backref='collections',
                                order_by=score)
class Externalcollection(db.Model):
    """Represent a Externalcollection record."""
    __tablename__ = 'externalcollection'
    id = db.Column(db.MediumInteger(9, unsigned=True),
                   primary_key=True)
    name = db.Column(db.String(255), unique=True, nullable=False,
                     server_default='')
    @property
    def engine(self):
        """Return the registered search engine for this collection.

        Returns None implicitly when no engine is registered under `name`.
        """
        from invenio.legacy.websearch_external_collections.searcher import (
            external_collections_dictionary
        )
        if self.name in external_collections_dictionary:
            return external_collections_dictionary[self.name]
class CollectionExternalcollection(db.Model):
    """Represent a CollectionExternalcollection record."""
    __tablename__ = 'collection_externalcollection'
    id_collection = db.Column(db.MediumInteger(9,
                                               unsigned=True),
                              db.ForeignKey(Collection.id), primary_key=True,
                              server_default='0')
    id_externalcollection = db.Column(db.MediumInteger(9,
                                                       unsigned=True),
                                      db.ForeignKey(Externalcollection.id),
                                      primary_key=True,
                                      server_default='0')
    # Integer type code 0/1/2; drives the three typed relationships below --
    # exact semantics of each value are defined elsewhere (TODO confirm).
    type = db.Column(db.TinyInteger(4, unsigned=True),
                     server_default='0',
                     nullable=False)
    def _collection_type(type_):
        # Class-definition-time helper (not an instance method): builds one
        # relationship per type code with a matching backref name.
        return db.relationship(
            Collection,
            primaryjoin=lambda: db.and_(
                CollectionExternalcollection.id_collection == Collection.id,
                CollectionExternalcollection.type == type_),
            backref='_externalcollections_{0}'.format(str(type_))
        )
    collection_0 = _collection_type(0)
    collection_1 = _collection_type(1)
    collection_2 = _collection_type(2)
    externalcollection = db.relationship(Externalcollection)
class CollectionFormat(db.Model):
    """Represent a CollectionFormat record."""
    __tablename__ = 'collection_format'
    id_collection = db.Column(db.MediumInteger(9, unsigned=True),
                              db.ForeignKey(Collection.id), primary_key=True)
    # Output format code, e.g. 'hb'; mapped to the 'format' column.
    format_code = db.Column('format', db.String(10), primary_key=True)
    # Ordering weight; note formats are sorted by DESCENDING score here.
    score = db.Column(db.TinyInteger(4, unsigned=True),
                      nullable=False, server_default='0')
    collection = db.relationship(
        Collection, backref=db.backref(
            'formats', order_by=db.desc(score)
        ), order_by=db.desc(score))
    @property
    def format(self):
        """Return output format definition."""
        # Looks the code up in the formatter registry; raises KeyError for
        # unknown codes.
        return output_formats[self.format_code]
class CollectionFieldFieldvalue(db.Model):
    """Represent a CollectionFieldFieldvalue record."""
    __tablename__ = 'collection_field_fieldvalue'
    id = db.Column(db.MediumInteger(9, unsigned=True), autoincrement=True,
                   primary_key=True, nullable=False)
    id_collection = db.Column(db.MediumInteger(9, unsigned=True),
                              db.ForeignKey(Collection.id),
                              nullable=False)
    id_field = db.Column(db.MediumInteger(9, unsigned=True),
                         db.ForeignKey(Field.id),
                         nullable=False)
    # Nullable FK, exposed through the hybrid property below so that falsy
    # assignments are normalized to NULL.
    _id_fieldvalue = db.Column(db.MediumInteger(9, unsigned=True),
                               db.ForeignKey(Fieldvalue.id),
                               nullable=True, default=None,
                               name="id_fieldvalue")
    # Type code; 'sew' = search-within, 'seo' = search option (see
    # Collection._make_field_fieldvalue), 'src' is the column default.
    type = db.Column(db.Char(3), nullable=False,
                     server_default='src')
    score = db.Column(db.TinyInteger(4, unsigned=True), nullable=False,
                      server_default='0')
    score_fieldvalue = db.Column(db.TinyInteger(4, unsigned=True),
                                 nullable=False, server_default='0')
    collection = db.relationship(Collection, backref='field_fieldvalues',
                                 order_by=score)
    field = db.relationship(Field, backref='collection_fieldvalues',
                            lazy='joined')
    fieldvalue = db.relationship(Fieldvalue, backref='collection_fields',
                                 lazy='joined')
    @db.hybrid_property
    def id_fieldvalue(self):
        """Get id_fieldvalue."""
        return self._id_fieldvalue
    @id_fieldvalue.setter
    def id_fieldvalue(self, value):
        """Set id_fieldvalue."""
        # Coerce falsy values (0, '', False) to None so the FK stays NULL.
        self._id_fieldvalue = value or None
class FacetCollection(db.Model):
    """Facet configuration for collection."""
    __tablename__ = 'facet_collection'
    id = db.Column(db.Integer, primary_key=True)
    id_collection = db.Column(db.Integer, db.ForeignKey(Collection.id))
    # Display position of the facet within the collection.
    order = db.Column(db.Integer)
    # Registered facet identifier.
    facet_name = db.Column(db.String(80))
    collection = db.relationship(Collection, backref='facets')
    def __repr__(self):
        """Return class representation."""
        return ('FacetCollection <id: {0.id}, id_collection: '
                '{0.id_collection}, order: {0.order}, '
                'facet_name: {0.facet_name}>'.format(self))
    @classmethod
    def is_place_taken(cls, id_collection, order):
        """Check if there is already a facet on the given position.

        .. note:: This works well as a pre-check, however saving can still fail
            if somebody else creates the same record in other session
            (phantom reads).
        """
        return bool(cls.query.filter(
            cls.id_collection == id_collection,
            cls.order == order).count())
    @classmethod
    def is_duplicated(cls, id_collection, facet_name):
        """Check if the given facet is already assigned to this collection.

        .. note:: This works well as a pre-check, however saving can still fail
            if somebody else creates the same record in other session
            (phantom reads).
        """
        return bool(cls.query.filter(
            cls.id_collection == id_collection,
            cls.facet_name == facet_name).count())
__all__ = (
'Collection',
'Collectionname',
'Collectiondetailedrecordpagetabs',
'CollectionCollection',
'Example',
'CollectionExample',
'Portalbox',
'CollectionPortalbox',
'Externalcollection',
'CollectionExternalcollection',
'CollectionFormat',
'CollectionFieldFieldvalue',
'FacetCollection',
)
| chokribr/invenio | invenio/modules/collections/models.py | Python | gpl-2.0 | 28,007 | 0.000036 |
"""
Django settings for sample_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hj6+-%d0cv@&x%bbb1_t%^+#lkuk2+-5@uci#zrt&xdw2ki&y*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; must list hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'easy',
    'test_app',
)
MIDDLEWARE = (
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Local in-memory cache; LOCATION only distinguishes multiple local caches.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake',
    }
}
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# SQLite file in the project root; fine for tests, not for production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# BUG FIX: os.path.join discards all preceding components when a later one is
# absolute, so os.path.join(BASE_DIR, '/static') silently resolved to
# '/static' at the filesystem root.  Use a relative component instead.
STATIC_PATH = os.path.join(BASE_DIR, 'static')
# Template engine configuration: project-level 'templates' dir plus each
# installed app's templates (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
| ebertti/django-admin-easy | test_project/settings.py | Python | mit | 2,792 | 0 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from collections import namedtuple
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.udf import UserDefinedFunction
from pyspark.sql.types import IntegerType, StringType, StructType
# Lightweight result records returned by the Catalog list* methods below.
Database = namedtuple("Database", "name description locationUri")
Table = namedtuple("Table", "name database description tableType isTemporary")
Column = namedtuple("Column", "name description dataType nullable isPartition isBucket")
Function = namedtuple("Function", "name description className isTemporary")
class Catalog(object):
"""User-facing catalog API, accessible through `SparkSession.catalog`.
This is a thin wrapper around its Scala implementation org.apache.spark.sql.catalog.Catalog.
"""
    def __init__(self, sparkSession):
        """Create a new Catalog that wraps the underlying JVM object."""
        self._sparkSession = sparkSession
        self._jsparkSession = sparkSession._jsparkSession
        # Py4J handle to org.apache.spark.sql.catalog.Catalog.
        self._jcatalog = sparkSession._jsparkSession.catalog()
    @ignore_unicode_prefix
    @since(2.0)
    def currentDatabase(self):
        """Returns the current default database in this session."""
        # Pure delegation to the JVM-side catalog.
        return self._jcatalog.currentDatabase()
    @ignore_unicode_prefix
    @since(2.0)
    def setCurrentDatabase(self, dbName):
        """Sets the current default database in this session."""
        # Pure delegation to the JVM-side catalog.
        return self._jcatalog.setCurrentDatabase(dbName)
@ignore_unicode_prefix
@since(2.0)
def listDatabases(self):
"""Returns a list of databases available across all sessions."""
iter = self._jcatalog.listDatabases().toLocalIterator()
databases = []
while iter.hasNext():
jdb = iter.next()
databases.append(Database(
name=jdb.name(),
description=jdb.description(),
locationUri=jdb.locationUri()))
return databases
@ignore_unicode_prefix
@since(2.0)
def listTables(self, dbName=None):
"""Returns a list of tables/views in the specified database.
If no database is specified, the current database is used.
This includes all temporary views.
"""
if dbName is None:
dbName = self.currentDatabase()
iter = self._jcatalog.listTables(dbName).toLocalIterator()
tables = []
while iter.hasNext():
jtable = iter.next()
tables.append(Table(
name=jtable.name(),
database=jtable.database(),
description=jtable.description(),
tableType=jtable.tableType(),
isTemporary=jtable.isTemporary()))
return tables
@ignore_unicode_prefix
@since(2.0)
def listFunctions(self, dbName=None):
"""Returns a list of functions registered in the specified database.
If no database is specified, the current database is used.
This includes all temporary functions.
"""
if dbName is None:
dbName = self.currentDatabase()
iter = self._jcatalog.listFunctions(dbName).toLocalIterator()
functions = []
while iter.hasNext():
jfunction = iter.next()
functions.append(Function(
name=jfunction.name(),
description=jfunction.description(),
className=jfunction.className(),
isTemporary=jfunction.isTemporary()))
return functions
@ignore_unicode_prefix
@since(2.0)
def listColumns(self, tableName, dbName=None):
"""Returns a list of columns for the given table/view in the specified database.
If no database is specified, the current database is used.
Note: the order of arguments here is different from that of its JVM counterpart
because Python does not support method overloading.
"""
if dbName is None:
dbName = self.currentDatabase()
iter = self._jcatalog.listColumns(dbName, tableName).toLocalIterator()
columns = []
while iter.hasNext():
jcolumn = iter.next()
columns.append(Column(
name=jcolumn.name(),
description=jcolumn.description(),
dataType=jcolumn.dataType(),
nullable=jcolumn.nullable(),
isPartition=jcolumn.isPartition(),
isBucket=jcolumn.isBucket()))
return columns
@since(2.0)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
warnings.warn(
"createExternalTable is deprecated since Spark 2.2, please use createTable instead.",
DeprecationWarning)
return self.createTable(tableName, path, source, schema, **options)
@since(2.2)
def createTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used. When ``path`` is specified, an external table is
created from the data at the given path. Otherwise a managed table is created.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created table.
:return: :class:`DataFrame`
"""
if path is not None:
options["path"] = path
if source is None:
source = self._sparkSession.conf.get(
"spark.sql.sources.default", "org.apache.spark.sql.parquet")
if schema is None:
df = self._jcatalog.createTable(tableName, source, options)
else:
if not isinstance(schema, StructType):
raise TypeError("schema should be StructType")
scala_datatype = self._jsparkSession.parseDataType(schema.json())
df = self._jcatalog.createTable(tableName, source, scala_datatype, options)
return DataFrame(df, self._sparkSession._wrapped)
@since(2.0)
def dropTempView(self, viewName):
"""Drops the local temporary view with the given view name in the catalog.
If the view has been cached before, then it will also be uncached.
Returns true if this view is dropped successfully, false otherwise.
Note that, the return type of this method was None in Spark 2.0, but changed to Boolean
in Spark 2.1.
>>> spark.createDataFrame([(1, 1)]).createTempView("my_table")
>>> spark.table("my_table").collect()
[Row(_1=1, _2=1)]
>>> spark.catalog.dropTempView("my_table")
>>> spark.table("my_table") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: ...
"""
self._jcatalog.dropTempView(viewName)
@since(2.1)
def dropGlobalTempView(self, viewName):
"""Drops the global temporary view with the given view name in the catalog.
If the view has been cached before, then it will also be uncached.
Returns true if this view is dropped successfully, false otherwise.
>>> spark.createDataFrame([(1, 1)]).createGlobalTempView("my_table")
>>> spark.table("global_temp.my_table").collect()
[Row(_1=1, _2=1)]
>>> spark.catalog.dropGlobalTempView("my_table")
>>> spark.table("global_temp.my_table") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: ...
"""
self._jcatalog.dropGlobalTempView(viewName)
@since(2.0)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
DeprecationWarning)
return self._sparkSession.udf.register(name, f, returnType)
@since(2.0)
def isCached(self, tableName):
"""Returns true if the table is currently cached in-memory."""
return self._jcatalog.isCached(tableName)
@since(2.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._jcatalog.cacheTable(tableName)
@since(2.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._jcatalog.uncacheTable(tableName)
@since(2.0)
def clearCache(self):
"""Removes all cached tables from the in-memory cache."""
self._jcatalog.clearCache()
@since(2.0)
def refreshTable(self, tableName):
"""Invalidates and refreshes all the cached data and metadata of the given table."""
self._jcatalog.refreshTable(tableName)
@since('2.1.1')
def recoverPartitions(self, tableName):
"""Recovers all the partitions of the given table and update the catalog.
Only works with a partitioned table, and not a view.
"""
self._jcatalog.recoverPartitions(tableName)
@since('2.2.0')
def refreshByPath(self, path):
"""Invalidates and refreshes all the cached data (and the associated metadata) for any
DataFrame that contains the given data source path.
"""
self._jcatalog.refreshByPath(path)
def _reset(self):
"""(Internal use only) Drop all existing databases (except "default"), tables,
partitions and functions, and set the current database to "default".
This is mainly used for tests.
"""
self._jsparkSession.sessionState().catalog().reset()
def _test():
    """Run this module's doctests against a local SparkSession."""
    import os
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.sql.catalog
    # Doctests assume paths relative to the Spark distribution root.
    os.chdir(os.environ["SPARK_HOME"])
    test_globals = pyspark.sql.catalog.__dict__.copy()
    session = (SparkSession.builder
               .master("local[4]")
               .appName("sql.catalog tests")
               .getOrCreate())
    test_globals['sc'] = session.sparkContext
    test_globals['spark'] = session
    failure_count, test_count = doctest.testmod(
        pyspark.sql.catalog,
        globs=test_globals,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    session.stop()
    if failure_count:
        sys.exit(-1)

if __name__ == "__main__":
    _test()
| brad-kaiser/spark | python/pyspark/sql/catalog.py | Python | apache-2.0 | 11,982 | 0.00192 |
# Copyright 2017 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import test_report_py3o_fusion_server
| OCA/reporting-engine | report_py3o_fusion_server/tests/__init__.py | Python | agpl-3.0 | 158 | 0 |
#
# Copyright 2013 Nicolas Lamirault <nicolas.lamirault@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from cliff import command
logger = logging.getLogger(__name__)
class FreeboxCommand(command.Command):
    """Default Freebox command.

    Common base class for all Freebox CLI commands; subclasses override
    take_action.
    """
    pass
class FreeboxApiVersion(FreeboxCommand):
    """Retrieve the Freebox OS api version."""

    def take_action(self, parsed_args):
        """Fetch the API version from the client and log it."""
        logger.info("[FreeboxOS] API_Version")
        api_version = self.app.freebox_client.version()
        # Lazy %-style args: the message is only formatted when the record
        # is actually emitted (logging best practice).
        logger.info('[FreeboxOS] %s\n', api_version['api_version'])
class FreeboxLogin(FreeboxCommand):
    """Login to the Freebox OS."""

    def take_action(self, parsed_args):
        """Authenticate against the Freebox and log the client state."""
        logger.info("[FreeboxOS] Login")
        self.app.freebox_client.login()
        # Lazy %-style args instead of eager string interpolation.
        logger.info('[FreeboxOS] Login response: %s', self.app.freebox_client)
class FreeboxAuthorize(FreeboxCommand):
    """Request authorization for this application."""
    def take_action(self, parsed_args):
        # Delegate to the API client; parsed_args is unused here.
        logger.info("[FreeboxOS] Authorization request")
        self.app.freebox_client.ask_authorization()
class FreeboxCheckAuthorization(FreeboxCommand):
    """Request informations about authorization for this application."""
    def take_action(self, parsed_args):
        # Delegate to the API client; parsed_args is unused here.
        logger.info("[FreeboxOS] Check Authorization ")
        self.app.freebox_client.check_authorization()
class FreeboxOpenSession(FreeboxCommand):
    """Open a new session to the FreeboxOS."""
    def take_action(self, parsed_args):
        # Delegate to the API client; parsed_args is unused here.
        logger.info("[FreeboxOS] Open sesion")
        self.app.freebox_client.open_session()
class FreeboxCloseSession(FreeboxCommand):
    """Close the current session to the FreeboxOS."""
    def take_action(self, parsed_args):
        # Delegate to the API client; parsed_args is unused here.
        logger.info("[FreeboxOS] Close sesion")
        self.app.freebox_client.close_session()
class FreeboxWifiStatus(FreeboxCommand):
    """Retrieve the WIFI status."""

    def take_action(self, parsed_args):
        """Fetch the wifi status from the client and log it."""
        logger.info("[FreeboxOS] Wifi status")
        wifi_status = self.app.freebox_client.get_wifi_status()
        # Lazy %-style args instead of eager string interpolation.
        logger.info("[FreeboxOS] Wifi status:\n %s", wifi_status)
class FreeboxWifiConfiguration(FreeboxCommand):
    """Retrieve the current WIFI configuration."""

    def take_action(self, parsed_args):
        """Fetch the wifi configuration from the client and log it."""
        logger.info("[FreeboxOS] Wifi configuration")
        wifi_config = self.app.freebox_client.get_wifi_config()
        # Lazy %-style args instead of eager string interpolation.
        logger.info("[FreeboxOS] Wifi configuration:\n %s", wifi_config)
class FreeboxWifiStations(FreeboxCommand):
    """Retrieve a list of wifi stations."""

    def take_action(self, parsed_args):
        """Fetch the list of wifi stations from the client and log it."""
        logger.info("[FreeboxOS] Wifi stations")
        wifi_stations = self.app.freebox_client.get_wifi_stations()
        # Typo fix: "FreefoxOS" -> "[FreeboxOS]", consistent with every
        # other log message in this module; also lazy %-style args.
        logger.info("[FreeboxOS] Wifi stations:\n %s", wifi_stations)
| nlamirault/python-freeboxclient | freeboxclient/client.py | Python | apache-2.0 | 3,447 | 0.00029 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import Group
from django.db import migrations
def initial_data(apps, schema_editor):
    """Forward data migration: create the "Staff" auth group."""
    # Use the historical model (per Django's RunPython documentation) rather
    # than a direct import, so this migration keeps working if the model
    # changes later. Group.objects.create() already saves the row, so the
    # original follow-up save() was redundant.
    Group = apps.get_model("auth", "Group")
    Group.objects.create(name="Staff")
def delete_staff_group(apps, schema_editor):
    """Reverse data migration: delete the "Staff" auth group."""
    # Use the historical model (per Django's RunPython documentation)
    # rather than the directly imported Group.
    Group = apps.get_model("auth", "Group")
    Group.objects.get(name="Staff").delete()
class Migration(migrations.Migration):
    # Data-only migration: forward creates the "Staff" group, reverse deletes it.

    dependencies = [
        ('labshare', '0012_auto_20161026_1453'),
    ]

    operations = [
        migrations.RunPython(initial_data, delete_staff_group),
    ]
| Bartzi/LabShare | labshare/migrations/0013_initial_groups.py | Python | gpl-2.0 | 566 | 0.001767 |
"""
This page is in the table of contents.
Raft is a plugin to create a raft, elevate the nozzle and set the temperature. A raft is a flat base structure on top of which your object is being build and has a few different purposes. It fills irregularities like scratches and pits in your printbed and gives you a nice base parallel to the printheads movement. It also glues your object to the bed so to prevent warping in bigger object. The rafts base layer performs these tricks while the sparser interface layer(s) help you removing the object from the raft after printing. It is based on the Nophead's reusable raft, which has a base layer running one way, and a couple of perpendicular layers above. Each set of layers can be set to a different temperature. There is the option of having the extruder orbit the raft for a while, so the heater barrel has time to reach a different temperature, without ooze accumulating around the nozzle.
The raft manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Raft
The important values for the raft settings are the temperatures of the raft, the first layer and the next layers. These will be different for each material. The default settings for ABS, HDPE, PCL & PLA are extrapolated from Nophead's experiments.
You don't necessarily need a raft and especially small object will print fine on a flat bed without one, sometimes its even better when you need a water tight base to print directly on the bed. If you want to only set the temperature or only create support material or only elevate the nozzle without creating a raft, set the Base Layers and Interface Layers to zero.
<gallery perRow="1">
Image:Raft.jpg|Raft
</gallery>
Example of a raft on the left with the interface layers partially removed exposing the base layer. Notice that the first line of the base is rarely printed well because of the startup time of the extruder. On the right you see an object with its raft still attached.
The Raft panel has some extra settings, it probably made sense to have them there but they have not that much to do with the actual Raft. First are the Support material settings. Since close to all RepRap style printers have no second extruder for support material Skeinforge offers the option to print support structures with the same material set at a different speed and temperature. The idea is that the support sticks less to the actual object when it is extruded around the minimum possible working temperature. This results in a temperature change EVERY layer so build time will increase seriously.
Allan Ecker aka The Masked Retriever's has written two quicktips for raft which follow below.
"Skeinforge Quicktip: The Raft, Part 1" at:
http://blog.thingiverse.com/2009/07/14/skeinforge-quicktip-the-raft-part-1/
"Skeinforge Quicktip: The Raft, Part II" at:
http://blog.thingiverse.com/2009/08/04/skeinforge-quicktip-the-raft-part-ii/
Nophead has written about rafts on his blog:
http://hydraraptor.blogspot.com/2009/07/thoughts-on-rafts.html
More pictures of rafting in action are available from the Metalab blog at:
http://reprap.soup.io/?search=rafting
==Operation==
Default: On
When it is on, the functions described below will work; when it is off, nothing will be done, so no temperatures will be set and the nozzle will not be lifted.
==Settings==
===Add Raft, Elevate Nozzle, Orbit===
Default: On
When selected, the script will also create a raft, elevate the nozzle, orbit and set the altitude of the bottom of the raft. It also turns on support generation.
===Base===
Base layer is the part of the raft that touches the bed.
====Base Feed Rate Multiplier====
Default is one.
Defines the base feed rate multiplier. The greater the 'Base Feed Rate Multiplier', the thinner the base, the lower the 'Base Feed Rate Multiplier', the thicker the base.
====Base Flow Rate Multiplier====
Default is one.
Defines the base flow rate multiplier. The greater the 'Base Flow Rate Multiplier', the thicker the base, the lower the 'Base Flow Rate Multiplier', the thinner the base.
====Base Infill Density====
Default is 0.5.
Defines the infill density ratio of the base of the raft.
====Base Layer Height over Layer Thickness====
Default is two.
Defines the ratio of the height & width of the base layer compared to the height and width of the object infill. The feed rate will be slower for raft layers which have thicker extrusions than the object infill.
====Base Layers====
Default is one.
Defines the number of base layers.
====Base Nozzle Lift over Base Layer Thickness====
Default is 0.4.
Defines the amount the nozzle is above the center of the base extrusion divided by the base layer thickness.
===Initial Circling===
Default is off.
When selected, the extruder will initially circle around until it reaches operating temperature.
===Infill Overhang over Extrusion Width===
Default is 0.05.
Defines the ratio of the infill overhang over the extrusion width of the raft.
===Interface===
====Interface Feed Rate Multiplier====
Default is one.
Defines the interface feed rate multiplier. The greater the 'Interface Feed Rate Multiplier', the thinner the interface, the lower the 'Interface Feed Rate Multiplier', the thicker the interface.
====Interface Flow Rate Multiplier====
Default is one.
Defines the interface flow rate multiplier. The greater the 'Interface Flow Rate Multiplier', the thicker the interface, the lower the 'Interface Flow Rate Multiplier', the thinner the interface.
====Interface Infill Density====
Default is 0.5.
Defines the infill density ratio of the interface of the raft.
====Interface Layer Thickness over Extrusion Height====
Default is one.
Defines the ratio of the height & width of the interface layer compared to the height and width of the object infill. The feed rate will be slower for raft layers which have thicker extrusions than the object infill.
====Interface Layers====
Default is two.
Defines the number of interface layers to print.
====Interface Nozzle Lift over Interface Layer Thickness====
Default is 0.45.
Defines the amount the nozzle is above the center of the interface extrusion divided by the interface layer thickness.
===Name of Alteration Files===
If support material is generated, raft looks for alteration files in the alterations folder in the .skeinforge folder in the home directory. Raft does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder.
====Name of Support End File====
Default is support_end.gcode.
If support material is generated and if there is a file with the name of the "Name of Support End File" setting, it will be added to the end of the support gcode.
====Name of Support Start File====
If support material is generated and if there is a file with the name of the "Name of Support Start File" setting, it will be added to the start of the support gcode.
===Operating Nozzle Lift over Layer Thickness===
Default is 0.5.
Defines the amount the nozzle is above the center of the operating extrusion divided by the layer height.
===Raft Size===
The raft fills a rectangle whose base size is the rectangle around the bottom layer of the object expanded on each side by the 'Raft Margin' plus the 'Raft Additional Margin over Length (%)' percentage times the length of the side.
====Raft Additional Margin over Length====
Default is 1 percent.
====Raft Margin====
Default is three millimeters.
===Support===
Good articles on support material are at:
http://davedurant.wordpress.com/2010/07/31/skeinforge-support-part-1/
http://davedurant.wordpress.com/2010/07/31/skeinforge-support-part-2/
====Support Cross Hatch====
Default is off.
When selected, the support material will cross hatched. Cross hatching the support makes it stronger and harder to remove, which is why the default is off.
====Support Flow Rate over Operating Flow Rate====
Default: 0.9.
Defines the ratio of the flow rate when the support is extruded over the operating flow rate. With a number less than one, the support flow rate will be smaller so the support will be thinner and easier to remove.
====Support Gap over Perimeter Extrusion Width====
Default: 0.5.
Defines the gap between the support material and the object over the edge extrusion width.
====Support Material Choice====
Default is 'None' because the raft takes time to generate.
=====Empty Layers Only=====
When selected, support material will be only on the empty layers. This is useful when making identical objects in a stack.
=====Everywhere=====
When selected, support material will be added wherever there are overhangs, even inside the object. Because support material inside objects is hard or impossible to remove, this option should only be chosen if the object has a cavity that needs support and there is some way to extract the support material.
=====Exterior Only=====
When selected, support material will be added only the exterior of the object. This is the best option for most objects which require support material.
=====None=====
When selected, raft will not add support material.
====Support Minimum Angle====
Default is sixty degrees.
Defines the minimum angle that a surface overhangs before support material is added. If the angle is lower than this value the support will be generated. This angle is defined from the vertical, so zero is a vertical wall, ten is a wall with a bit of overhang, thirty is the typical safe angle for filament extrusion, sixty is a really high angle for extrusion and ninety is an unsupported horizontal ceiling.
==Examples==
The following examples raft the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and raft.py.
> python raft.py
This brings up the raft dialog.
> python raft.py Screw Holder Bottom.stl
The raft tool is parsing the file:
Screw Holder Bottom.stl
..
The raft tool has created the file:
Screw Holder Bottom_raft.gcode
"""
from __future__ import absolute_import
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
#maybe later wide support
#raft outline temperature http://hydraraptor.blogspot.com/2008/09/screw-top-pot.html
def getCraftedText(fileName, text='', repository=None):
    'Raft the file or text.'
    gcodeText = archive.getTextIfEmpty(fileName, text)
    return getCraftedTextFromText(gcodeText, repository)
def getCraftedTextFromText(gcodeText, repository=None):
    'Raft a gcode linear move text.'
    # Skip work if raft has already been applied or the text is empty.
    if gcodec.isProcedureDoneOrFileIsEmpty(gcodeText, 'raft'):
        return gcodeText
    # Idiom fix: compare to None with 'is', not '=='.
    if repository is None:
        repository = settings.getReadRepository(RaftRepository())
    if not repository.activateRaft.value:
        return gcodeText
    return RaftSkein().getCraftedGcode(gcodeText, repository)
def getCrossHatchPointLine(crossHatchPointLineTable, y):
    'Get the cross hatch point line for y, creating an empty one if absent.'
    # dict.setdefault replaces the Python-2-only has_key() check and does a
    # single lookup instead of up to three.
    return crossHatchPointLineTable.setdefault(y, {})
def getEndpointsFromYIntersections(x, yIntersections):
    'Get endpoints from the y intersections.'
    # The intersections come in (lower, upper) pairs; each non-degenerate
    # pair produces a linked pair of endpoints at abscissa x.
    endpoints = []
    for pairIndex in xrange(0, len(yIntersections), 2):
        lowerY = yIntersections[pairIndex]
        upperY = yIntersections[pairIndex + 1]
        if lowerY == upperY:
            continue
        lowerComplex = complex(x, lowerY)
        upperComplex = complex(x, upperY)
        # Construction order matters: each endpoint is linked to the other.
        endpointBegin = euclidean.Endpoint()
        endpointEnd = euclidean.Endpoint().getFromOtherPoint(endpointBegin, upperComplex)
        endpointBegin.getFromOtherPoint(endpointEnd, lowerComplex)
        endpoints.append(endpointBegin)
        endpoints.append(endpointEnd)
    return endpoints
def getExtendedLineSegment(extensionDistance, lineSegment, loopXIntersections):
    'Get extended line segment.'
    beginPoint = lineSegment[0].point
    endPoint = lineSegment[1].point
    direction = endPoint - beginPoint
    directionLength = abs(direction)
    # Guard against degenerate zero-length segments.
    if directionLength <= 0.0:
        print('This should never happen in getExtendedLineSegment in raft, the segment should have a length greater than zero.')
        print(lineSegment)
        return None
    extensionVector = direction * extensionDistance / directionLength
    # Push both ends outward along the segment direction.
    lineSegment[0].point = beginPoint - extensionVector
    lineSegment[1].point = endPoint + extensionVector
    # Clamp the extended ends back to any loop intersection they crossed.
    for loopXIntersection in loopXIntersections:
        setExtendedPoint(lineSegment[0], beginPoint, loopXIntersection)
        setExtendedPoint(lineSegment[1], endPoint, loopXIntersection)
    return lineSegment
def getLoopsBySegmentsDictionary(segmentsDictionary, width):
    'Get loops from a horizontal segments dictionary.'
    # Gather the points of the derived vertical endpoints plus the points of
    # the horizontal segment endpoints, then let triangle_mesh orient them.
    verticalEndpoints = getVerticalEndpoints(segmentsDictionary, width, 0.1 * width, width)
    horizontalEndpoints = euclidean.getEndpointsFromSegmentTable(segmentsDictionary)
    points = [endpoint.point for endpoint in verticalEndpoints]
    points += [endpoint.point for endpoint in horizontalEndpoints]
    return triangle_mesh.getDescendingAreaOrientedLoops(points, points, width + width)
def getNewRepository():
    'Get new repository.'
    # Returns a fresh settings repository; conventional skeinforge accessor.
    return RaftRepository()
def getVerticalEndpoints(horizontalSegmentsTable, horizontalStep, verticalOverhang, verticalStep):
    'Get vertical endpoints.'
    # Idiom: sorted(...) replaces the Python-2 keys()-then-sort() pattern
    # throughout; the iteration order is identical.
    #
    # Pass 1: for each vertical column index, record which rows have a
    # horizontal segment crossing that column.
    verticalTableTable = {}
    for rowKey in sorted(horizontalSegmentsTable.keys()):
        for segment in horizontalSegmentsTable[rowKey]:
            begin = int(round(segment[0].point.real / verticalStep))
            end = int(round(segment[1].point.real / verticalStep))
            for columnIndex in xrange(begin, end + 1):
                if columnIndex not in verticalTableTable:
                    verticalTableTable[columnIndex] = {}
                verticalTableTable[columnIndex][rowKey] = None
    # Pass 2: for each column, the rows whose neighbour above/below is
    # missing mark the start/end of a vertical run (extended by the
    # overhang); turn those runs into endpoints with x and y swapped so the
    # segments become vertical.
    verticalEndpoints = []
    for columnIndex in sorted(verticalTableTable.keys()):
        rowKeys = sorted(verticalTableTable[columnIndex].keys())
        xIntersections = []
        for rowKey in rowKeys:
            y = rowKey * horizontalStep
            if rowKey - 1 not in rowKeys:
                xIntersections.append(y - verticalOverhang)
            if rowKey + 1 not in rowKeys:
                xIntersections.append(y + verticalOverhang)
        for segment in euclidean.getSegmentsFromXIntersections(xIntersections, columnIndex * verticalStep):
            for endpoint in segment:
                endpoint.point = complex(endpoint.point.imag, endpoint.point.real)
                verticalEndpoints.append(endpoint)
    return verticalEndpoints
def setExtendedPoint(lineSegmentEnd, pointOriginal, x):
    'Set the point in the extended line segment.'
    # If x lies strictly between the two abscissas, clamp the end to x.
    endReal = lineSegmentEnd.point.real
    originalReal = pointOriginal.real
    if min(endReal, originalReal) < x < max(endReal, originalReal):
        lineSegmentEnd.point = complex(x, pointOriginal.imag)
def writeOutput(fileName, shouldAnalyze=True):
    'Raft a gcode linear move file.'
    # Delegate to the craft chain; the 'raft' noun selects this plugin.
    skeinforge_craft.writeChainTextWithNounMessage(fileName, 'raft', shouldAnalyze)
class RaftRepository(object):
'A class to handle the raft settings.'
def __init__(self):
'Set the default settings, execute title & settings fileName.'
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.raft.html', self)
self.fileNameInput = settings.FileNameInput().getFromFileName(
fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Raft', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute(
'http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Raft')
self.activateRaft = settings.BooleanSetting().getFromValue('Activate Raft', self, True)
self.addRaftElevateNozzleOrbitSetAltitude = settings.BooleanSetting().getFromValue(
'Add Raft, Elevate Nozzle, Orbit:', self, True)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Base -', self)
self.baseFeedRateMultiplier = settings.FloatSpin().getFromValue(0.7, 'Base Feed Rate Multiplier (ratio):', self, 1.1, 1.0)
self.baseFlowRateMultiplier = settings.FloatSpin().getFromValue(0.7, 'Base Flow Rate Multiplier (ratio):', self, 1.1, 1.0)
self.baseInfillDensity = settings.FloatSpin().getFromValue(0.3, 'Base Infill Density (ratio):', self, 0.9, 0.5)
self.baseLayerThicknessOverLayerThickness = settings.FloatSpin().getFromValue(
1.0, 'Base Layer Thickness over Layer Thickness:', self, 3.0, 2.0)
self.baseLayers = settings.IntSpin().getFromValue(0, 'Base Layers (integer):', self, 3, 0)
self.baseNozzleLiftOverBaseLayerThickness = settings.FloatSpin().getFromValue(
0.2, 'Base Nozzle Lift over Base Layer Thickness (ratio):', self, 0.8, 0.4)
settings.LabelSeparator().getFromRepository(self)
self.initialCircling = settings.BooleanSetting().getFromValue('Initial Circling:', self, False)
self.infillOverhangOverExtrusionWidth = settings.FloatSpin().getFromValue(
0.0, 'Infill Overhang over Extrusion Width (ratio):', self, 0.5, 0.05)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Interface -', self)
self.interfaceFeedRateMultiplier = settings.FloatSpin().getFromValue(
0.7, 'Interface Feed Rate Multiplier (ratio):', self, 1.1, 1.0)
self.interfaceFlowRateMultiplier = settings.FloatSpin().getFromValue(
0.7, 'Interface Flow Rate Multiplier (ratio):', self, 1.1, 1.0)
self.interfaceInfillDensity = settings.FloatSpin().getFromValue(
0.3, 'Interface Infill Density (ratio):', self, 0.9, 0.5)
self.interfaceLayerThicknessOverLayerThickness = settings.FloatSpin().getFromValue(
1.0, 'Interface Layer Thickness over Layer Thickness:', self, 3.0, 1.0)
self.interfaceLayers = settings.IntSpin().getFromValue(
0, 'Interface Layers (integer):', self, 3, 0)
self.interfaceNozzleLiftOverInterfaceLayerThickness = settings.FloatSpin().getFromValue(
0.25, 'Interface Nozzle Lift over Interface Layer Thickness (ratio):', self, 0.85, 0.45)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Name of Alteration Files -', self)
self.nameOfSupportEndFile = settings.StringSetting().getFromValue('Name of Support End File:', self, 'support_end.gcode')
self.nameOfSupportStartFile = settings.StringSetting().getFromValue(
'Name of Support Start File:', self, 'support_start.gcode')
settings.LabelSeparator().getFromRepository(self)
self.operatingNozzleLiftOverLayerThickness = settings.FloatSpin().getFromValue(
0.3, 'Operating Nozzle Lift over Layer Thickness (ratio):', self, 0.7, 0.5)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Raft Size -', self)
self.raftAdditionalMarginOverLengthPercent = settings.FloatSpin().getFromValue(
0.5, 'Raft Additional Margin over Length (%):', self, 1.5, 1.0)
self.raftMargin = settings.FloatSpin().getFromValue(
1.0, 'Raft Margin (mm):', self, 5.0, 3.0)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Support -', self)
self.supportCrossHatch = settings.BooleanSetting().getFromValue('Support Cross Hatch', self, False)
self.supportFlowRateOverOperatingFlowRate = settings.FloatSpin().getFromValue(
0.7, 'Support Flow Rate over Operating Flow Rate (ratio):', self, 1.1, 1.0)
self.supportGapOverPerimeterExtrusionWidth = settings.FloatSpin().getFromValue(
0.5, 'Support Gap over Perimeter Extrusion Width (ratio):', self, 1.5, 1.0)
self.supportMaterialChoice = settings.MenuButtonDisplay().getFromName('Support Material Choice: ', self)
self.supportChoiceNone = settings.MenuRadio().getFromMenuButtonDisplay(self.supportMaterialChoice, 'None', self, True)
self.supportChoiceEmptyLayersOnly = settings.MenuRadio().getFromMenuButtonDisplay(self.supportMaterialChoice, 'Empty Layers Only', self, False)
self.supportChoiceEverywhere = settings.MenuRadio().getFromMenuButtonDisplay(self.supportMaterialChoice, 'Everywhere', self, False)
self.supportChoiceExteriorOnly = settings.MenuRadio().getFromMenuButtonDisplay(self.supportMaterialChoice, 'Exterior Only', self, False)
self.supportMinimumAngle = settings.FloatSpin().getFromValue(40.0, 'Support Minimum Angle (degrees):', self, 80.0, 60.0)
self.executeTitle = 'Raft'
self.supportMargin = settings.FloatSpin().getFromValue(
1.0, 'Support Margin (mm):', self, 5.0, 3.0)
self.supportOffsetX = settings.FloatSpin().getFromValue(0.0, 'Support Offset X (mm):', self, 100.0, 0.0)
self.supportOffsetY = settings.FloatSpin().getFromValue(0.0, 'Support Offset Y (mm):', self, 100.0, 0.0)
	def execute(self):
		'Raft button has been clicked; write rafted gcode for each chosen file.'
		# Expands the file name input (possibly a directory) into individual unmodified gcode files.
		fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
		for fileName in fileNames:
			writeOutput(fileName)
class RaftSkein(object):
	'A class to raft a skein of extrusions.'
	def __init__(self):
		# Output / parser state flags.
		self.addLineLayerStart = True
		self.baseTemperature = None
		self.beginLoop = None
		self.boundaryLayers = []
		self.coolingRate = None
		self.distanceFeedRate = gcodec.DistanceFeedRate()
		# Defaults below are placeholders; parseInitialization overwrites them
		# from the gcode header tags (edgeWidth, layerHeight, rates, temperatures).
		self.edgeWidth = 0.6
		self.extrusionStart = True
		self.extrusionTop = 0.0
		self.feedRateMinute = 961.0
		self.heatingRate = None
		self.insetTable = {}
		self.interfaceTemperature = None
		self.isEdgePath = False
		self.isNestedRing = True
		self.isStartupEarly = False
		self.layerIndex = - 1
		self.layerStarted = False
		self.layerHeight = 0.4
		self.lineIndex = 0
		self.lines = None
		self.objectFirstLayerInfillTemperature = None
		self.objectFirstLayerPerimeterTemperature = None
		self.objectNextLayersTemperature = None
		self.oldFlowRate = None
		self.oldLocation = None
		self.oldTemperatureOutputString = None
		self.operatingFeedRateMinute = None
		self.operatingFlowRate = None
		self.operatingLayerEndLine = '(<operatingLayerEnd> </operatingLayerEnd>)'
		# operatingJump is the z offset added to the object once the raft is below it.
		self.operatingJump = None
		self.orbitalFeedRatePerSecond = 2.01
		self.sharpestProduct = 0.94
		# Support state, filled in by setBoundaryLayers and friends.
		self.supportFlowRate = None
		self.supportLayers = []
		self.supportLayersTemperature = None
		self.supportedLayersTemperature = None
		self.travelFeedRateMinute = None
def addBaseLayer(self):
'Add a base layer.'
baseLayerThickness = self.layerHeight * self.baseLayerThicknessOverLayerThickness
zCenter = self.extrusionTop + 0.5 * baseLayerThickness
z = zCenter + baseLayerThickness * self.repository.baseNozzleLiftOverBaseLayerThickness.value
if len(self.baseEndpoints) < 1:
print('This should never happen, the base layer has a size of zero.')
return
self.addLayerFromEndpoints(
self.baseEndpoints,
self.repository.baseFeedRateMultiplier.value,
self.repository.baseFlowRateMultiplier.value,
baseLayerThickness,
self.baseLayerThicknessOverLayerThickness,
self.baseStep,
z)
def addBaseSegments(self, baseExtrusionWidth):
'Add the base segments.'
baseOverhang = self.repository.infillOverhangOverExtrusionWidth.value * baseExtrusionWidth
self.baseEndpoints = getVerticalEndpoints(self.interfaceSegmentsTable, self.interfaceStep, baseOverhang, self.baseStep)
	def addEmptyLayerSupport( self, boundaryLayerIndex ):
		'Add support material to a layer if it is empty.'
		supportLayer = SupportLayer([])
		self.supportLayers.append(supportLayer)
		if len( self.boundaryLayers[ boundaryLayerIndex ].loops ) > 0:
			# Layer has its own loops, so it needs no bridging support.
			return
		# Support the empty layer with the intersection of the nearest
		# non-empty layers above and below it.
		aboveXIntersectionsTable = {}
		euclidean.addXIntersectionsFromLoopsForTable( self.getInsetLoopsAbove(boundaryLayerIndex), aboveXIntersectionsTable, self.interfaceStep )
		belowXIntersectionsTable = {}
		euclidean.addXIntersectionsFromLoopsForTable( self.getInsetLoopsBelow(boundaryLayerIndex), belowXIntersectionsTable, self.interfaceStep )
		supportLayer.xIntersectionsTable = euclidean.getIntersectionOfXIntersectionsTables( [ aboveXIntersectionsTable, belowXIntersectionsTable ] )
def addFlowRate(self, flowRate):
'Add a flow rate value if different.'
if flowRate != None:
self.distanceFeedRate.addLine('M108 S' + euclidean.getFourSignificantFigures(flowRate))
def addInterfaceLayer(self):
'Add an interface layer.'
interfaceLayerThickness = self.layerHeight * self.interfaceLayerThicknessOverLayerThickness
zCenter = self.extrusionTop + 0.5 * interfaceLayerThickness
z = zCenter + interfaceLayerThickness * self.repository.interfaceNozzleLiftOverInterfaceLayerThickness.value
if len(self.interfaceEndpoints) < 1:
print('This should never happen, the interface layer has a size of zero.')
return
self.addLayerFromEndpoints(
self.interfaceEndpoints,
self.repository.interfaceFeedRateMultiplier.value,
self.repository.interfaceFlowRateMultiplier.value,
interfaceLayerThickness,
self.interfaceLayerThicknessOverLayerThickness,
self.interfaceStep,
z)
	def addInterfaceTables(self, interfaceExtrusionWidth):
		'Build the interface segment table and endpoint list from the intersection table.'
		overhang = self.repository.infillOverhangOverExtrusionWidth.value * interfaceExtrusionWidth
		self.interfaceEndpoints = []
		self.interfaceIntersectionsTableKeys = self.interfaceIntersectionsTable.keys()
		self.interfaceSegmentsTable = {}
		for yKey in self.interfaceIntersectionsTableKeys:
			self.interfaceIntersectionsTable[yKey].sort()
			y = yKey * self.interfaceStep
			lineSegments = euclidean.getSegmentsFromXIntersections(self.interfaceIntersectionsTable[yKey], y)
			xIntersectionIndexList = []
			for lineSegmentIndex in xrange(len(lineSegments)):
				lineSegment = lineSegments[lineSegmentIndex]
				endpointBegin = lineSegment[0]
				endpointEnd = lineSegment[1]
				# Snap segment ends outward onto the base grid and extend them by the overhang.
				endpointBegin.point = complex(self.baseStep * math.floor(endpointBegin.point.real / self.baseStep) - overhang, y)
				endpointEnd.point = complex(self.baseStep * math.ceil(endpointEnd.point.real / self.baseStep) + overhang, y)
				if endpointEnd.point.real > endpointBegin.point.real:
					euclidean.addXIntersectionIndexesFromSegment(lineSegmentIndex, lineSegment, xIntersectionIndexList)
			# Merge segments that now overlap after being extended.
			xIntersections = euclidean.getJoinOfXIntersectionIndexes(xIntersectionIndexList)
			joinedSegments = euclidean.getSegmentsFromXIntersections(xIntersections, y)
			if len(joinedSegments) > 0:
				self.interfaceSegmentsTable[yKey] = joinedSegments
				for joinedSegment in joinedSegments:
					self.interfaceEndpoints += joinedSegment
	def addLayerFromEndpoints(
		self,
		endpoints,
		feedRateMultiplier,
		flowRateMultiplier,
		layerLayerThickness,
		layerThicknessRatio,
		step,
		z):
		'Add a layer from endpoints and raise the extrusion top by its thickness.'
		# Feed rate is divided by the squared thickness ratio: a thicker layer
		# deposits proportionally more material per unit of travel.
		layerThicknessRatioSquared = layerThicknessRatio * layerThicknessRatio
		feedRateMinute = self.feedRateMinute * feedRateMultiplier / layerThicknessRatioSquared
		if len(endpoints) < 1:
			return
		aroundPixelTable = {}
		# 0.34321 * step: pixel width used for path collision lookups -- magic
		# constant inherited from skeinforge; presumably just under step/e. TODO confirm.
		aroundWidth = 0.34321 * step
		paths = euclidean.getPathsFromEndpoints(endpoints, 1.5 * step, aroundPixelTable, self.sharpestProduct, aroundWidth)
		self.addLayerLine(z)
		if self.operatingFlowRate != None:
			self.addFlowRate(flowRateMultiplier * self.operatingFlowRate)
		for path in paths:
			simplifiedPath = euclidean.getSimplifiedPath(path, step)
			self.distanceFeedRate.addGcodeFromFeedRateThreadZ(feedRateMinute, simplifiedPath, self.travelFeedRateMinute, z)
		self.extrusionTop += layerLayerThickness
		# Restore the flow rate that was active before this layer.
		self.addFlowRate(self.oldFlowRate)
	def addLayerLine(self, z):
		'Add the layer gcode line and close the last layer gcode block.'
		if self.layerStarted:
			self.distanceFeedRate.addLine('(</layer>)')
		self.distanceFeedRate.addLine('(<layer> %s )' % self.distanceFeedRate.getRounded(z)) # Indicate that a new layer is starting.
		if self.beginLoop != None:
			# One-time warm-up orbit before the first raft layer.
			zBegin = self.extrusionTop + self.layerHeight
			intercircle.addOrbitsIfLarge(self.distanceFeedRate, self.beginLoop, self.orbitalFeedRatePerSecond, self.temperatureChangeTimeBeforeRaft, zBegin)
			self.beginLoop = None
		self.layerStarted = True
def addOperatingOrbits(self, boundaryLoops, pointComplex, temperatureChangeTime, z):
'Add the orbits before the operating layers.'
if len(boundaryLoops) < 1:
return
insetBoundaryLoops = intercircle.getInsetLoopsFromLoops(boundaryLoops, self.edgeWidth)
if len(insetBoundaryLoops) < 1:
insetBoundaryLoops = boundaryLoops
largestLoop = euclidean.getLargestLoop(insetBoundaryLoops)
if pointComplex != None:
largestLoop = euclidean.getLoopStartingClosest(self.edgeWidth, pointComplex, largestLoop)
intercircle.addOrbitsIfLarge(self.distanceFeedRate, largestLoop, self.orbitalFeedRatePerSecond, temperatureChangeTime, z)
	def addRaft(self):
		'Add the raft: base and interface layers, perimeters, and the operating jump.'
		self.baseLayerThicknessOverLayerThickness = self.repository.baseLayerThicknessOverLayerThickness.value
		baseExtrusionWidth = self.edgeWidth * self.baseLayerThicknessOverLayerThickness
		self.baseStep = baseExtrusionWidth / self.repository.baseInfillDensity.value
		self.interfaceLayerThicknessOverLayerThickness = self.repository.interfaceLayerThicknessOverLayerThickness.value
		interfaceExtrusionWidth = self.edgeWidth * self.interfaceLayerThicknessOverLayerThickness
		self.interfaceStep = interfaceExtrusionWidth / self.repository.interfaceInfillDensity.value
		self.setCornersZ()
		self.cornerMinimumComplex = self.cornerMinimum.dropAxis()
		originalExtent = self.cornerMaximumComplex - self.cornerMinimumComplex
		# Raft margin plus a percentage of the longest side of the object footprint.
		self.raftOutsetRadius = self.repository.raftMargin.value + self.repository.raftAdditionalMarginOverLengthPercent.value * 0.01 * max(originalExtent.real, originalExtent.imag)
		self.supportOutsetRadius = self.repository.supportMargin.value
		self.setBoundaryLayers()
		if len(self.boundaryLayers) < 1:
			print('this should never happen, there are no boundary layers in addRaft')
			return
		# Negative inset outsets the first layer loops by the raft radius.
		outsetSeparateLoops = intercircle.getInsetSeparateLoopsFromLoops(self.boundaryLayers[0].loops, -self.raftOutsetRadius, 0.8)
		self.interfaceIntersectionsTable = {}
		euclidean.addXIntersectionsFromLoopsForTable(outsetSeparateLoops, self.interfaceIntersectionsTable, self.interfaceStep)
		if len(self.supportLayers) > 0:
			# The raft must also cover any support under the first layer.
			supportIntersectionsTable = self.supportLayers[0].xIntersectionsTable
			euclidean.joinXIntersectionsTables(supportIntersectionsTable, self.interfaceIntersectionsTable)
		self.addInterfaceTables(interfaceExtrusionWidth)
		self.addRaftPerimeters()
		self.baseIntersectionsTable = {}
		complexRadius = complex(self.raftOutsetRadius, self.raftOutsetRadius)
		self.complexHigh = complexRadius + self.cornerMaximumComplex
		self.complexLow = self.cornerMinimumComplex - complexRadius
		self.beginLoop = euclidean.getSquareLoopWiddershins(self.cornerMinimumComplex, self.cornerMaximumComplex)
		if not intercircle.orbitsAreLarge(self.beginLoop, self.temperatureChangeTimeBeforeRaft):
			self.beginLoop = None
		if self.repository.baseLayers.value > 0:
			self.addTemperatureLineIfDifferent(self.baseTemperature)
			self.addBaseSegments(baseExtrusionWidth)
		for baseLayerIndex in xrange(self.repository.baseLayers.value):
			self.addBaseLayer()
		if self.repository.interfaceLayers.value > 0:
			self.addTemperatureLineIfDifferent(self.interfaceTemperature)
			self.interfaceIntersectionsTableKeys.sort()
		for interfaceLayerIndex in xrange(self.repository.interfaceLayers.value):
			self.addInterfaceLayer()
		# Everything above the raft is lifted by the operating jump.
		self.operatingJump = self.extrusionTop + self.layerHeight * self.repository.operatingNozzleLiftOverLayerThickness.value
		for boundaryLayer in self.boundaryLayers:
			if self.operatingJump != None:
				boundaryLayer.z += self.operatingJump
		if self.repository.baseLayers.value > 0 or self.repository.interfaceLayers.value > 0:
			boundaryZ = self.boundaryLayers[0].z
			if self.layerStarted:
				self.distanceFeedRate.addLine('(</layer>)')
				self.layerStarted = False
			self.distanceFeedRate.addLine('(<raftLayerEnd> </raftLayerEnd>)')
			self.addLayerLine(boundaryZ)
			# Warm up to the first object layer temperature while orbiting the raft outline.
			temperatureChangeTimeBeforeFirstLayer = self.getTemperatureChangeTime(self.objectFirstLayerPerimeterTemperature)
			self.addTemperatureLineIfDifferent(self.objectFirstLayerPerimeterTemperature)
			largestOutsetLoop = intercircle.getLargestInsetLoopFromLoop(euclidean.getLargestLoop(outsetSeparateLoops), -self.raftOutsetRadius)
			intercircle.addOrbitsIfLarge(self.distanceFeedRate, largestOutsetLoop, self.orbitalFeedRatePerSecond, temperatureChangeTimeBeforeFirstLayer, boundaryZ)
		self.addLineLayerStart = False
	def addRaftedLine( self, splitLine ):
		'Add elevated gcode line with operating feed rate.'
		self.oldLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
		self.feedRateMinute = gcodec.getFeedRateMinute(self.feedRateMinute, splitLine)
		z = self.oldLocation.z
		if self.operatingJump != None:
			# Lift the object move above the raft.
			z += self.operatingJump
		temperature = self.objectNextLayersTemperature
		if self.layerIndex == 0:
			# First object layer gets its own perimeter / infill temperatures.
			if self.isEdgePath:
				temperature = self.objectFirstLayerPerimeterTemperature
			else:
				temperature = self.objectFirstLayerInfillTemperature
		self.addTemperatureLineIfDifferent(temperature)
		self.distanceFeedRate.addGcodeMovementZWithFeedRate(self.feedRateMinute, self.oldLocation.dropAxis(), z)
	def addRaftPerimeters(self):
		'Add raft edges if there is a raft.'
		interfaceOutset = self.halfEdgeWidth * self.interfaceLayerThicknessOverLayerThickness
		for supportLayer in self.supportLayers:
			supportSegmentTable = supportLayer.supportSegmentTable
			if len(supportSegmentTable) > 0:
				outset = interfaceOutset
				self.addRaftPerimetersByLoops(getLoopsBySegmentsDictionary(supportSegmentTable, self.interfaceStep), outset)
		if self.repository.baseLayers.value < 1 and self.repository.interfaceLayers.value < 1:
			# No raft requested, so no raft perimeter.
			return
		# Doubled overhang allowance; the perimeter must clear the widest raft layer.
		overhangMultiplier = 1.0 + self.repository.infillOverhangOverExtrusionWidth.value + self.repository.infillOverhangOverExtrusionWidth.value
		outset = self.halfEdgeWidth
		if self.repository.interfaceLayers.value > 0:
			outset = max(interfaceOutset * overhangMultiplier, outset)
		if self.repository.baseLayers.value > 0:
			outset = max(self.halfEdgeWidth * self.baseLayerThicknessOverLayerThickness * overhangMultiplier, outset)
		self.addRaftPerimetersByLoops(getLoopsBySegmentsDictionary(self.interfaceSegmentsTable, self.interfaceStep), outset)
def addRaftPerimetersByLoops(self, loops, outset):
'Add raft edges to the gcode for loops.'
loops = intercircle.getInsetSeparateLoopsFromLoops(loops, -outset)
for loop in loops:
self.distanceFeedRate.addLine('(<raftPerimeter>)')
for point in loop:
roundedX = self.distanceFeedRate.getRounded(point.real)
roundedY = self.distanceFeedRate.getRounded(point.imag)
self.distanceFeedRate.addTagBracketedLine('raftPoint', 'X%s Y%s' % (roundedX, roundedY))
self.distanceFeedRate.addLine('(</raftPerimeter>)')
def addSegmentTablesToSupportLayers(self):
'Add segment tables to the support layers.'
for supportLayer in self.supportLayers:
supportLayer.supportSegmentTable = {}
xIntersectionsTable = supportLayer.xIntersectionsTable
for xIntersectionsTableKey in xIntersectionsTable:
y = xIntersectionsTableKey * self.interfaceStep
supportLayer.supportSegmentTable[ xIntersectionsTableKey ] = euclidean.getSegmentsFromXIntersections( xIntersectionsTable[ xIntersectionsTableKey ], y )
	def addSupportLayerTemperature(self, endpoints, z):
		'Add support layer and temperature before the object layer.'
		self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.supportStartLines)
		self.distanceFeedRate.addLine('(<supportLayer>)')
		#self.addTemperatureOrbits(endpoints, self.supportedLayersTemperature, z)
		aroundPixelTable = {}
		# 0.34321 * step: pixel width for path collision lookups -- magic constant
		# used throughout this file; presumably just under step/e. TODO confirm.
		aroundWidth = 0.34321 * self.interfaceStep
		boundaryLoops = self.boundaryLayers[self.layerIndex].loops
		# Mark a gap zone around the object so support paths keep clear of it.
		halfSupportOutset = 0.5 * self.supportOutset
		aroundBoundaryLoops = intercircle.getAroundsFromLoops(boundaryLoops, halfSupportOutset)
		for aroundBoundaryLoop in aroundBoundaryLoops:
			euclidean.addLoopToPixelTable(aroundBoundaryLoop, aroundPixelTable, aroundWidth)
		paths = euclidean.getPathsFromEndpoints(endpoints, 1.5 * self.interfaceStep, aroundPixelTable, self.sharpestProduct, aroundWidth)
		feedRateMinuteMultiplied = self.operatingFeedRateMinute
		supportFlowRateMultiplied = self.supportFlowRate
		if self.layerIndex == 0:
			# First layer support uses the first layer infill multipliers instead.
			feedRateMinuteMultiplied *= self.objectFirstLayerFeedRateInfillMultiplier
			if supportFlowRateMultiplied != None:
				supportFlowRateMultiplied = self.operatingFlowRate * self.objectFirstLayerFlowRateInfillMultiplier
		self.addFlowRate(supportFlowRateMultiplied)
		for path in paths:
			# Shift support by the configured X/Y offsets.
			path = map(lambda p: p + complex(self.supportOffsetX, self.supportOffsetY), path)
			self.distanceFeedRate.addGcodeFromFeedRateThreadZ(feedRateMinuteMultiplied, path, self.travelFeedRateMinute, z)
		self.addFlowRate(self.oldFlowRate)
		#self.addTemperatureOrbits(endpoints, self.supportLayersTemperature, z)
		self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.supportEndLines)
		self.distanceFeedRate.addLine('(</supportLayer>)')
	def addSupportSegmentTable( self, layerIndex ):
		'Add support segments from the boundary layers.'
		aboveLayer = self.boundaryLayers[ layerIndex + 1 ]
		aboveLoops = aboveLayer.loops
		supportLayer = self.supportLayers[layerIndex]
		if len( aboveLoops ) < 1:
			# Nothing above this layer, so nothing needs support.
			return
		boundaryLayer = self.boundaryLayers[layerIndex]
		rise = aboveLayer.z - boundaryLayer.z
		# Outset the current layer by the self-supportable overhang distance;
		# whatever of the layer above falls outside that is unsupported.
		outsetSupportLoops = intercircle.getInsetSeparateLoopsFromLoops(boundaryLayer.loops, -self.minimumSupportRatio * rise)
		numberOfSubSteps = 4
		# Work on a finer grid, then round back down to the interface grid.
		subStepSize = self.interfaceStep / float( numberOfSubSteps )
		aboveIntersectionsTable = {}
		euclidean.addXIntersectionsFromLoopsForTable( aboveLoops, aboveIntersectionsTable, subStepSize )
		outsetIntersectionsTable = {}
		euclidean.addXIntersectionsFromLoopsForTable( outsetSupportLoops, outsetIntersectionsTable, subStepSize )
		euclidean.subtractXIntersectionsTable( aboveIntersectionsTable, outsetIntersectionsTable )
		for aboveIntersectionsTableKey in aboveIntersectionsTable.keys():
			supportIntersectionsTableKey = int( round( float( aboveIntersectionsTableKey ) / numberOfSubSteps ) )
			xIntersectionIndexList = []
			if supportIntersectionsTableKey in supportLayer.xIntersectionsTable:
				euclidean.addXIntersectionIndexesFromXIntersections( 0, xIntersectionIndexList, supportLayer.xIntersectionsTable[ supportIntersectionsTableKey ] )
			euclidean.addXIntersectionIndexesFromXIntersections( 1, xIntersectionIndexList, aboveIntersectionsTable[ aboveIntersectionsTableKey ] )
			supportLayer.xIntersectionsTable[ supportIntersectionsTableKey ] = euclidean.getJoinOfXIntersectionIndexes( xIntersectionIndexList )
def addTemperatureLineIfDifferent(self, temperature):
'Add a line of temperature if different.'
if temperature == None:
return
temperatureOutputString = euclidean.getRoundedToThreePlaces(temperature)
if temperatureOutputString == self.oldTemperatureOutputString:
return
if temperatureOutputString != None:
self.distanceFeedRate.addLine('M104 S' + temperatureOutputString) # Set temperature.
self.oldTemperatureOutputString = temperatureOutputString
	def addTemperatureOrbits( self, endpoints, temperature, z ):
		'Add the temperature and orbits around the support layer.'
		# NOTE(review): callers of this method are currently commented out in
		# addSupportLayerTemperature; it is kept for completeness.
		if self.layerIndex < 0:
			return
		boundaryLoops = self.boundaryLayers[self.layerIndex].loops
		temperatureTimeChange = self.getTemperatureChangeTime( temperature )
		self.addTemperatureLineIfDifferent( temperature )
		if len( boundaryLoops ) < 1:
			# No boundary: orbit the bounding square of the support endpoints instead.
			layerCornerHigh = complex(-987654321.0, -987654321.0)
			layerCornerLow = complex(987654321.0, 987654321.0)
			for endpoint in endpoints:
				layerCornerHigh = euclidean.getMaximum( layerCornerHigh, endpoint.point )
				layerCornerLow = euclidean.getMinimum( layerCornerLow, endpoint.point )
			squareLoop = euclidean.getSquareLoopWiddershins( layerCornerLow, layerCornerHigh )
			intercircle.addOrbitsIfLarge( self.distanceFeedRate, squareLoop, self.orbitalFeedRatePerSecond, temperatureTimeChange, z )
			return
		edgeInset = 0.4 * self.edgeWidth
		insetBoundaryLoops = intercircle.getInsetLoopsFromLoops(boundaryLoops, edgeInset)
		if len( insetBoundaryLoops ) < 1:
			insetBoundaryLoops = boundaryLoops
		largestLoop = euclidean.getLargestLoop( insetBoundaryLoops )
		intercircle.addOrbitsIfLarge( self.distanceFeedRate, largestLoop, self.orbitalFeedRatePerSecond, temperatureTimeChange, z )
def addToFillXIntersectionIndexTables( self, supportLayer ):
'Add fill segments from the boundary layers.'
supportLoops = supportLayer.supportLoops
supportLayer.fillXIntersectionsTable = {}
if len(supportLoops) < 1:
return
euclidean.addXIntersectionsFromLoopsForTable( supportLoops, supportLayer.fillXIntersectionsTable, self.interfaceStep )
	def extendXIntersections( self, loops, radius, xIntersectionsTable ):
		'Extend the support segments.'
		# Snapshot the keys: entries may be deleted while iterating.
		xIntersectionsTableKeys = xIntersectionsTable.keys()
		for xIntersectionsTableKey in xIntersectionsTableKeys:
			lineSegments = euclidean.getSegmentsFromXIntersections( xIntersectionsTable[ xIntersectionsTableKey ], xIntersectionsTableKey )
			xIntersectionIndexList = []
			loopXIntersections = []
			euclidean.addXIntersectionsFromLoops( loops, loopXIntersections, xIntersectionsTableKey )
			for lineSegmentIndex in xrange( len( lineSegments ) ):
				lineSegment = lineSegments[ lineSegmentIndex ]
				# Extend each segment by the radius, clipped against the loop intersections.
				extendedLineSegment = getExtendedLineSegment( radius, lineSegment, loopXIntersections )
				if extendedLineSegment != None:
					euclidean.addXIntersectionIndexesFromSegment( lineSegmentIndex, extendedLineSegment, xIntersectionIndexList )
			xIntersections = euclidean.getJoinOfXIntersectionIndexes( xIntersectionIndexList )
			if len( xIntersections ) > 0:
				xIntersectionsTable[ xIntersectionsTableKey ] = xIntersections
			else:
				# All segments vanished for this row; drop the entry entirely.
				del xIntersectionsTable[ xIntersectionsTableKey ]
	def getCraftedGcode(self, gcodeText, repository):
		'Parse gcode text and store the raft gcode.'
		self.repository = repository
		# A surface steeper than this angle (from vertical) needs support.
		self.minimumSupportRatio = math.tan( math.radians( repository.supportMinimumAngle.value ) )
		self.supportEndLines = settings.getAlterationFileLines(repository.nameOfSupportEndFile.value)
		self.supportStartLines = settings.getAlterationFileLines(repository.nameOfSupportStartFile.value)
		self.supportOffsetX = repository.supportOffsetX.value
		self.supportOffsetY = repository.supportOffsetY.value
		self.lines = archive.getTextLines(gcodeText)
		self.parseInitialization()
		self.temperatureChangeTimeBeforeRaft = 0.0
		if self.repository.initialCircling.value:
			# Budget orbit time for heating to the hottest of the startup temperatures.
			maxBaseInterfaceTemperature = max(self.baseTemperature, self.interfaceTemperature)
			firstMaxTemperature = max(maxBaseInterfaceTemperature, self.objectFirstLayerPerimeterTemperature)
			self.temperatureChangeTimeBeforeRaft = self.getTemperatureChangeTime(firstMaxTemperature)
		if repository.addRaftElevateNozzleOrbitSetAltitude.value:
			self.addRaft()
		self.addTemperatureLineIfDifferent( self.objectFirstLayerPerimeterTemperature )
		for line in self.lines[self.lineIndex :]:
			self.parseLine(line)
		# Collapse consecutive duplicate M108 flow rate commands.
		return gcodec.getGcodeWithoutDuplication('M108', self.distanceFeedRate.output.getvalue())
def getElevatedBoundaryLine( self, splitLine ):
'Get elevated boundary gcode line.'
location = gcodec.getLocationFromSplitLine(None, splitLine)
if self.operatingJump != None:
location.z += self.operatingJump
return self.distanceFeedRate.getBoundaryLine( location )
def getInsetLoops( self, boundaryLayerIndex ):
'Inset the support loops if they are not already inset.'
if boundaryLayerIndex not in self.insetTable:
self.insetTable[ boundaryLayerIndex ] = intercircle.getInsetSeparateLoopsFromLoops(self.boundaryLayers[ boundaryLayerIndex ].loops, self.quarterEdgeWidth)
return self.insetTable[ boundaryLayerIndex ]
def getInsetLoopsAbove( self, boundaryLayerIndex ):
'Get the inset loops above the boundary layer index.'
for aboveLayerIndex in xrange( boundaryLayerIndex + 1, len(self.boundaryLayers) ):
if len( self.boundaryLayers[ aboveLayerIndex ].loops ) > 0:
return self.getInsetLoops( aboveLayerIndex )
return []
def getInsetLoopsBelow( self, boundaryLayerIndex ):
'Get the inset loops below the boundary layer index.'
for belowLayerIndex in xrange( boundaryLayerIndex - 1, - 1, - 1 ):
if len( self.boundaryLayers[ belowLayerIndex ].loops ) > 0:
return self.getInsetLoops( belowLayerIndex )
return []
def getStepsUntilEnd( self, begin, end, stepSize ):
'Get steps from the beginning until the end.'
step = begin
steps = []
while step < end:
steps.append( step )
step += stepSize
return steps
def getSupportEndpoints(self):
'Get the support layer segments.'
if len(self.supportLayers) <= self.layerIndex:
return []
supportSegmentTable = self.supportLayers[self.layerIndex].supportSegmentTable
if self.layerIndex % 2 == 1 and self.repository.supportCrossHatch.value:
return getVerticalEndpoints(supportSegmentTable, self.interfaceStep, 0.1 * self.edgeWidth, self.interfaceStep)
return euclidean.getEndpointsFromSegmentTable(supportSegmentTable)
def getTemperatureChangeTime( self, temperature ):
'Get the temperature change time.'
if temperature == None:
return 0.0
oldTemperature = 25.0 # typical chamber temperature
if self.oldTemperatureOutputString != None:
oldTemperature = float( self.oldTemperatureOutputString )
if temperature == oldTemperature:
return 0.0
if temperature > oldTemperature:
return ( temperature - oldTemperature ) / self.heatingRate
return ( oldTemperature - temperature ) / abs( self.coolingRate )
	def parseInitialization(self):
		'Parse gcode initialization and store the parameters.'
		# Scans header lines until the first '(<layer>' tag; every header line is
		# echoed to the output as it is read.
		for self.lineIndex in xrange(len(self.lines)):
			line = self.lines[self.lineIndex]
			splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
			firstWord = gcodec.getFirstWord(splitLine)
			self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
			if firstWord == '(<baseTemperature>':
				self.baseTemperature = float(splitLine[1])
			elif firstWord == '(<coolingRate>':
				self.coolingRate = float(splitLine[1])
			elif firstWord == '(<edgeWidth>':
				self.edgeWidth = float(splitLine[1])
				self.halfEdgeWidth = 0.5 * self.edgeWidth
				self.quarterEdgeWidth = 0.25 * self.edgeWidth
				# Gap kept between support material and the object perimeter.
				self.supportOutset = self.edgeWidth + self.edgeWidth * self.repository.supportGapOverPerimeterExtrusionWidth.value
			elif firstWord == '(</extruderInitialization>)':
				self.distanceFeedRate.addTagBracketedProcedure('raft')
			elif firstWord == '(<heatingRate>':
				self.heatingRate = float(splitLine[1])
			elif firstWord == '(<interfaceTemperature>':
				self.interfaceTemperature = float(splitLine[1])
			elif firstWord == '(<layer>':
				# First layer tag ends the header; leave lineIndex pointing at it.
				return
			elif firstWord == '(<layerHeight>':
				self.layerHeight = float(splitLine[1])
			elif firstWord == 'M108':
				# Strip the leading 'S' from the flow rate argument.
				self.oldFlowRate = float(splitLine[1][1 :])
			elif firstWord == '(<objectFirstLayerFeedRateInfillMultiplier>':
				self.objectFirstLayerFeedRateInfillMultiplier = float(splitLine[1])
			elif firstWord == '(<objectFirstLayerFlowRateInfillMultiplier>':
				self.objectFirstLayerFlowRateInfillMultiplier = float(splitLine[1])
			elif firstWord == '(<objectFirstLayerInfillTemperature>':
				self.objectFirstLayerInfillTemperature = float(splitLine[1])
			elif firstWord == '(<objectFirstLayerPerimeterTemperature>':
				self.objectFirstLayerPerimeterTemperature = float(splitLine[1])
			elif firstWord == '(<objectNextLayersTemperature>':
				self.objectNextLayersTemperature = float(splitLine[1])
			elif firstWord == '(<orbitalFeedRatePerSecond>':
				self.orbitalFeedRatePerSecond = float(splitLine[1])
			elif firstWord == '(<operatingFeedRatePerSecond>':
				self.operatingFeedRateMinute = 60.0 * float(splitLine[1])
				self.feedRateMinute = self.operatingFeedRateMinute
			elif firstWord == '(<operatingFlowRate>':
				self.operatingFlowRate = float(splitLine[1])
				self.oldFlowRate = self.operatingFlowRate
				self.supportFlowRate = self.operatingFlowRate * self.repository.supportFlowRateOverOperatingFlowRate.value
			elif firstWord == '(<sharpestProduct>':
				self.sharpestProduct = float(splitLine[1])
			elif firstWord == '(<supportLayersTemperature>':
				self.supportLayersTemperature = float(splitLine[1])
			elif firstWord == '(<supportedLayersTemperature>':
				self.supportedLayersTemperature = float(splitLine[1])
			elif firstWord == '(<travelFeedRatePerSecond>':
				self.travelFeedRateMinute = 60.0 * float(splitLine[1])
			self.distanceFeedRate.addLine(line)
	def parseLine(self, line):
		'Parse a gcode line and add it to the raft skein.'
		splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
		if len(splitLine) < 1:
			return
		firstWord = splitLine[0]
		if firstWord == 'G1':
			if self.extrusionStart:
				# Movement lines are re-emitted elevated by the operating jump.
				self.addRaftedLine(splitLine)
				return
		elif firstWord == 'M101':
			if self.isStartupEarly:
				# Swallow the early extruder-on command once.
				self.isStartupEarly = False
				return
		elif firstWord == 'M108':
			self.oldFlowRate = float(splitLine[1][1 :])
		elif firstWord == '(<boundaryPoint>':
			line = self.getElevatedBoundaryLine(splitLine)
		elif firstWord == '(</crafting>)':
			self.extrusionStart = False
			self.distanceFeedRate.addLine( self.operatingLayerEndLine )
		elif firstWord == '(<layer>':
			self.layerIndex += 1
			settings.printProgress(self.layerIndex, 'raft')
			boundaryLayer = None
			layerZ = self.extrusionTop + float(splitLine[1])
			if len(self.boundaryLayers) > 0:
				boundaryLayer = self.boundaryLayers[self.layerIndex]
				layerZ = boundaryLayer.z
			if self.operatingJump != None:
				# Rewrite the layer tag with the elevated z.
				line = '(<layer> %s )' % self.distanceFeedRate.getRounded( layerZ )
			if self.layerStarted and self.addLineLayerStart:
				self.distanceFeedRate.addLine('(</layer>)')
				self.layerStarted = False
			if self.layerIndex > len(self.supportLayers) + 1:
				# Past the supported region: emit the operating layer end marker once.
				self.distanceFeedRate.addLine( self.operatingLayerEndLine )
				self.operatingLayerEndLine = ''
			if self.addLineLayerStart:
				self.distanceFeedRate.addLine(line)
			self.addLineLayerStart = True
			line = ''
			endpoints = self.getSupportEndpoints()
			if self.layerIndex == 1:
				if len(endpoints) < 1:
					temperatureChangeTimeBeforeNextLayers = self.getTemperatureChangeTime( self.objectNextLayersTemperature )
					self.addTemperatureLineIfDifferent( self.objectNextLayersTemperature )
					#if self.repository.addRaftElevateNozzleOrbitSetAltitude.value and boundaryLayer != None and len( boundaryLayer.loops ) > 0:
					#	self.addOperatingOrbits( boundaryLayer.loops, euclidean.getXYComplexFromVector3( self.oldLocation ), temperatureChangeTimeBeforeNextLayers, layerZ )
			if len(endpoints) > 0:
				self.addSupportLayerTemperature( endpoints, layerZ )
		elif firstWord == '(<edge>' or firstWord == '(<edgePath>)':
			self.isEdgePath = True
		elif firstWord == '(</edge>)' or firstWord == '(</edgePath>)':
			self.isEdgePath = False
		self.distanceFeedRate.addLine(line)
	def setBoundaryLayers(self):
		'Set the boundary layers.'
		if self.repository.supportChoiceNone.value:
			return
		if len(self.boundaryLayers) < 2:
			return
		if self.repository.supportChoiceEmptyLayersOnly.value:
			# Only bridge completely empty layers; no general overhang support.
			supportLayer = SupportLayer([])
			self.supportLayers.append(supportLayer)
			for boundaryLayerIndex in xrange(1, len(self.boundaryLayers) -1):
				self.addEmptyLayerSupport(boundaryLayerIndex)
			self.truncateSupportSegmentTables()
			self.addSegmentTablesToSupportLayers()
			return
		for boundaryLayer in self.boundaryLayers:
			# thresholdRadius of 0.8 is needed to avoid the ripple inset bug http://hydraraptor.blogspot.com/2010/12/crackers.html
			supportLoops = intercircle.getInsetSeparateLoopsFromLoops(boundaryLayer.loops, -self.supportOutset, 0.8)
			supportLayer = SupportLayer(supportLoops)
			self.supportLayers.append(supportLayer)
		for supportLayerIndex in xrange(len(self.supportLayers) - 1):
			self.addSupportSegmentTable(supportLayerIndex)
		self.truncateSupportSegmentTables()
		for supportLayerIndex in xrange(len(self.supportLayers) - 1):
			boundaryLoops = self.boundaryLayers[supportLayerIndex].loops
			self.extendXIntersections( boundaryLoops, self.supportOutset, self.supportLayers[supportLayerIndex].xIntersectionsTable)
		for supportLayer in self.supportLayers:
			self.addToFillXIntersectionIndexTables(supportLayer)
		if self.repository.supportChoiceExteriorOnly.value:
			# Remove support that would land inside the object itself.
			for supportLayerIndex in xrange(1, len(self.supportLayers)):
				self.subtractJoinedFill(supportLayerIndex)
		for supportLayer in self.supportLayers:
			euclidean.subtractXIntersectionsTable(supportLayer.xIntersectionsTable, supportLayer.fillXIntersectionsTable)
		# Propagate support downward so every column reaches the build surface.
		for supportLayerIndex in xrange(len(self.supportLayers) - 2, -1, -1):
			xIntersectionsTable = self.supportLayers[supportLayerIndex].xIntersectionsTable
			aboveXIntersectionsTable = self.supportLayers[supportLayerIndex + 1].xIntersectionsTable
			euclidean.joinXIntersectionsTables(aboveXIntersectionsTable, xIntersectionsTable)
		for supportLayerIndex in xrange(len(self.supportLayers)):
			supportLayer = self.supportLayers[supportLayerIndex]
			self.extendXIntersections(supportLayer.supportLoops, self.supportOutsetRadius, supportLayer.xIntersectionsTable)
		for supportLayer in self.supportLayers:
			euclidean.subtractXIntersectionsTable(supportLayer.xIntersectionsTable, supportLayer.fillXIntersectionsTable)
		self.addSegmentTablesToSupportLayers()
def setCornersZ(self):
'Set maximum and minimum corners and z.'
boundaryLoop = None
boundaryLayer = None
layerIndex = - 1
self.cornerMaximumComplex = complex(-912345678.0, -912345678.0)
self.cornerMinimum = Vector3(912345678.0, 912345678.0, 912345678.0)
self.firstLayerLoops = []
for line in self.lines[self.lineIndex :]:
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == '(</boundaryPerimeter>)':
boundaryLoop = None
elif firstWord == '(<boundaryPoint>':
location = gcodec.getLocationFromSplitLine(None, splitLine)
if boundaryLoop == None:
boundaryLoop = []
boundaryLayer.loops.append(boundaryLoop)
boundaryLoop.append(location.dropAxis())
self.cornerMaximumComplex = euclidean.getMaximum(self.cornerMaximumComplex, location.dropAxis())
self.cornerMinimum.minimize(location)
elif firstWord == '(<layer>':
z = float(splitLine[1])
boundaryLayer = euclidean.LoopLayer(z)
self.boundaryLayers.append(boundaryLayer)
elif firstWord == '(<layer>':
layerIndex += 1
if self.repository.supportChoiceNone.value:
if layerIndex > 1:
return
def subtractJoinedFill( self, supportLayerIndex ):
'Join the fill then subtract it from the support layer table.'
supportLayer = self.supportLayers[supportLayerIndex]
fillXIntersectionsTable = supportLayer.fillXIntersectionsTable
belowFillXIntersectionsTable = self.supportLayers[ supportLayerIndex - 1 ].fillXIntersectionsTable
euclidean.joinXIntersectionsTables( belowFillXIntersectionsTable, supportLayer.fillXIntersectionsTable )
euclidean.subtractXIntersectionsTable( supportLayer.xIntersectionsTable, supportLayer.fillXIntersectionsTable )
def truncateSupportSegmentTables(self):
'Truncate the support segments after the last support segment which contains elements.'
for supportLayerIndex in xrange( len(self.supportLayers) - 1, - 1, - 1 ):
if len( self.supportLayers[supportLayerIndex].xIntersectionsTable ) > 0:
self.supportLayers = self.supportLayers[ : supportLayerIndex + 1 ]
return
self.supportLayers = []
class SupportLayer(object):
	'Support loops with segment tables.'
	def __init__( self, supportLoops ):
		# Loops outlining the support region for one layer.
		self.supportLoops = supportLoops
		# Per-layer tables, populated later by the skein processing steps.
		self.supportSegmentTable = {}
		self.xIntersectionsTable = {}

	def __repr__(self):
		'Get the string representation of this loop layer.'
		return str(self.supportLoops)
def main():
	'Display the raft dialog.'
	arguments = sys.argv[1 :]
	if arguments:
		# Command-line invocation: craft the named file(s) directly.
		writeOutput(' '.join(arguments))
	else:
		# No arguments: open the interactive settings dialog.
		settings.startMainLoopFromConstructor(getNewRepository())
# Script entry point: run the raft craft plugin directly.
if __name__ == '__main__':
	main()
| tinkerinestudio/Tinkerine-Suite | TinkerineSuite/Cura/cura_sf/skeinforge_application/skeinforge_plugins/craft_plugins/raft.py | Python | agpl-3.0 | 56,776 | 0.02251 |
import time
import pytest
import logging
from cassandra import Unauthorized
from ccmlib.common import is_win
from ccmlib.node import Node
from dtest_setup_overrides import DTestSetupOverrides
from dtest import Tester
from tools.assertions import assert_all, assert_invalid
from tools.misc import ImmutableMapping
# Shorthand for the version-gating pytest marker and the per-module logger.
since = pytest.mark.since
logger = logging.getLogger(__name__)
@pytest.mark.upgrade_test
@since('2.2')
class TestAuthUpgrade(Tester):
    """
    Upgrade tests for internal authentication/authorization.

    Exercises the 2.1 -> 2.2 / 3.0 schema and API changes around authn/authz
    (CASSANDRA-7653) and the legacy-table replacement path (CASSANDRA-12813).
    """

    @pytest.fixture(scope='function', autouse=True)
    def fixture_dtest_setup_overrides(self, dtest_config):
        """Force password authentication and CassandraAuthorizer for every test."""
        dtest_setup_overrides = DTestSetupOverrides()
        dtest_setup_overrides.cluster_options = ImmutableMapping({'authenticator': 'PasswordAuthenticator',
                                                                  'authorizer': 'CassandraAuthorizer'})
        return dtest_setup_overrides

    @pytest.fixture(autouse=True)
    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
        """Whitelist log errors that are expected during a non-rolling upgrade."""
        fixture_dtest_setup.ignore_log_patterns = (
            # This one occurs if we do a non-rolling upgrade, the node
            # it's trying to send the migration to hasn't started yet,
            # and when it does, it gets replayed and everything is fine.
            r'Can\'t send migration request: node.*is down',
        )

    def test_upgrade_to_22(self):
        """Upgrade 2.1 -> 2.2 with internal auth enabled."""
        self.do_upgrade_with_internal_auth("github:apache/cassandra-2.2")

    @since('3.0')
    @pytest.mark.no_offheap_memtables
    def test_upgrade_to_30(self):
        """Upgrade 2.1 -> 3.0 with internal auth enabled."""
        self.do_upgrade_with_internal_auth("github:apache/cassandra-3.0")

    @since('2.2', max_version='3.X')
    def test_upgrade_legacy_table(self):
        """
        Upgrade with bringing up the legacy tables after the newer nodes (without legacy tables)
        were started.

        @jira_ticket CASSANDRA-12813
        """
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="2.1.16")
        cluster.populate(3).start()

        node1, node2, node3 = cluster.nodelist()

        # Wait for default user to get created on one of the nodes
        time.sleep(15)

        # Upgrade to current version
        for node in [node1, node2, node3]:
            node.drain()
            node.watch_log_for("DRAINED")
            node.stop(gently=True)
            self.set_node_to_current_version(node)

        cluster.start()

        # Make sure the system_auth table will get replicated to the node that we're going to replace
        session = self.patient_cql_connection(node1, user='cassandra', password='cassandra')
        session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 };")
        cluster.repair()

        cluster.stop()

        # Replace the node
        cluster.seeds.remove(node1)
        cluster.remove(node1)

        replacement_address = node1.address()
        replacement_node = Node('replacement', cluster=self.cluster, auto_bootstrap=True,
                                thrift_interface=(replacement_address, 9160),
                                storage_interface=(replacement_address, 7000),
                                jmx_port='7400', remote_debug_port='0', initial_token=None,
                                binary_interface=(replacement_address, 9042))
        self.set_node_to_current_version(replacement_node)

        cluster.add(replacement_node, True)
        replacement_node.start(wait_for_binary_proto=True)

        node2.start(wait_for_binary_proto=True)
        node3.start(wait_for_binary_proto=True)

        # The replacement must initialize the legacy auth tables it streamed.
        replacement_node.watch_log_for('Initializing system_auth.credentials')
        replacement_node.watch_log_for('Initializing system_auth.permissions')
        replacement_node.watch_log_for('Initializing system_auth.users')

        cluster.repair()
        replacement_node.watch_log_for('Repair command')

        # Should succeed. Will throw an NPE on pre-12813 code.
        self.patient_cql_connection(replacement_node, user='cassandra', password='cassandra')

    def do_upgrade_with_internal_auth(self, target_version):
        """
        Tests upgrade between 2.1->2.2 & 2.1->3.0 as the schema and apis around authn/authz changed

        @jira_ticket CASSANDRA-7653
        """
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="github:apache/cassandra-2.1")
        cluster.populate(3).start()

        node1, node2, node3 = cluster.nodelist()

        # wait for default superuser creation
        # The log message
        # node.watch_log_for('Created default superuser')
        # will only appear on one of the three nodes, and we don't know
        # which ahead of time. Grepping all three in parallel is unpleasant.
        # See auth_test and auth_roles test for instances of this as well.
        # Should be fixed by C*-6177
        time.sleep(15)

        session = self.patient_cql_connection(node1, user='cassandra', password='cassandra')
        session.execute("CREATE USER klaus WITH PASSWORD '12345' SUPERUSER")
        session.execute("CREATE USER michael WITH PASSWORD '54321' NOSUPERUSER")
        session.execute("CREATE KEYSPACE ks WITH replication = {'class':'SimpleStrategy', 'replication_factor':1}")
        session.execute("CREATE TABLE ks.cf1 (id int primary key, val int)")
        session.execute("CREATE TABLE ks.cf2 (id int primary key, val int)")
        session.execute("GRANT MODIFY ON ks.cf1 TO michael")
        session.execute("GRANT SELECT ON ks.cf2 TO michael")

        self.check_permissions(node1, False)
        session.cluster.shutdown()

        # upgrade node1 to 2.2
        self.upgrade_to_version(target_version, node1)

        # run the permissions checking queries on the upgraded node
        # this will be using the legacy tables as the conversion didn't complete
        # but the output format should be updated on the upgraded node
        self.check_permissions(node1, True)

        # and check on those still on the old version
        self.check_permissions(node2, False)
        self.check_permissions(node3, False)

        # now upgrade the remaining nodes
        self.upgrade_to_version(target_version, node2)
        self.upgrade_to_version(target_version, node3)

        self.check_permissions(node2, True)
        self.check_permissions(node3, True)

        # we should now be able to drop the old auth tables
        session = self.patient_cql_connection(node1, user='cassandra', password='cassandra')
        session.execute('DROP TABLE system_auth.users', timeout=60)
        session.execute('DROP TABLE system_auth.credentials', timeout=60)
        session.execute('DROP TABLE system_auth.permissions', timeout=60)

        # and we should still be able to authenticate and check authorization
        self.check_permissions(node1, True)

        logger.debug('Test completed successfully')

    def check_permissions(self, node, upgraded):
        """Verify LIST PERMISSIONS output and enforcement on a single node."""
        # use an exclusive connection to ensure we only talk to the specified node
        klaus = self.patient_exclusive_cql_connection(node, user='klaus', password='12345', timeout=20)

        # klaus is a superuser, so should be able to list all permissions
        # the output of LIST PERMISSIONS changes slightly with #7653 adding
        # a new role column to results, so we need to tailor our check
        # based on whether the node has been upgraded or not
        if not upgraded:
            assert_all(klaus,
                       'LIST ALL PERMISSIONS',
                       [['michael', '<table ks.cf1>', 'MODIFY'],
                        ['michael', '<table ks.cf2>', 'SELECT']],
                       timeout=60)
        else:
            assert_all(klaus,
                       'LIST ALL PERMISSIONS',
                       [['michael', 'michael', '<table ks.cf1>', 'MODIFY'],
                        ['michael', 'michael', '<table ks.cf2>', 'SELECT']],
                       timeout=60)

        klaus.cluster.shutdown()

        # michael may modify cf1 and read cf2, but not read cf1
        michael = self.patient_exclusive_cql_connection(node, user='michael', password='54321')
        michael.execute('INSERT INTO ks.cf1 (id, val) VALUES (0,0)')
        michael.execute('SELECT * FROM ks.cf2')
        assert_invalid(michael,
                       'SELECT * FROM ks.cf1',
                       'User michael has no SELECT permission on <table ks.cf1> or any of its parents',
                       Unauthorized)
        michael.cluster.shutdown()

    def upgrade_to_version(self, tag, node):
        """Drain, stop, re-version and restart one node, then upgrade its sstables."""
        format_args = {'node': node.name, 'tag': tag}
        logger.debug('Upgrading node {node} to {tag}'.format(**format_args))

        # drain and shutdown
        node.drain()
        node.watch_log_for("DRAINED")
        node.stop(wait_other_notice=False)
        logger.debug('{node} stopped'.format(**format_args))

        # Ignore errors before upgrade on Windows
        if is_win():
            node.mark_log_for_errors()

        # Update Cassandra Directory
        logger.debug('Updating version to tag {tag}'.format(**format_args))
        node.set_install_dir(version=tag, verbose=True)
        logger.debug('Set new cassandra dir for {node}: {tag}'.format(**format_args))

        # Restart node on new version
        logger.debug('Starting {node} on new version ({tag})'.format(**format_args))

        # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
        node.set_log_level("INFO")
        node.start(wait_other_notice=True)

        # wait for the conversion of legacy data to either complete or fail
        # (because not enough upgraded nodes are available yet)
        logger.debug('Waiting for conversion of legacy data to complete or fail')
        node.watch_log_for('conversion of legacy permissions')

        logger.debug('Running upgradesstables')
        node.nodetool('upgradesstables -a')
        logger.debug('Upgrade of {node} complete'.format(**format_args))
| beobal/cassandra-dtest | upgrade_internal_auth_test.py | Python | apache-2.0 | 9,980 | 0.002405 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mediawiki-utilities documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 10 17:31:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
import mw
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',   # pull API documentation from docstrings
    'sphinx.ext.doctest',   # run doctest snippets embedded in the docs
    'sphinx.ext.todo',      # render .. todo:: directives
    'sphinx.ext.coverage',  # report undocumented objects
    'sphinx.ext.mathjax',   # render math via MathJax in HTML output
    'sphinx.ext.viewcode',  # link documented objects to highlighted source
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'mediawiki-utilities'
copyright = '2014, Aaron Halfaker'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Both values are read from the package itself so the published docs can
# never drift from the installed code's version.
# The short X.Y version.
version = mw.__version__
# The full version, including alpha/beta/rc tags.
release = mw.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']  # never scan the build output directory for sources
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'  # colour scheme for highlighted code samples
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'  # classic built-in Sphinx theme
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']  # project assets; same-named files override builtins
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Base name (project identifier) used for the HTML Help builder's output files.
htmlhelp_basename = 'mediawiki-utilitiesdoc'
# -- Options for LaTeX output ---------------------------------------------
# Options forwarded to the LaTeX builder; all defaults are accepted for now.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# A single PDF manual is produced, rooted at index.rst.
latex_documents = [
  ('index', 'mediawiki-utilities.tex', 'mediawiki-utilities Documentation',
   'Aaron Halfaker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# A single section-1 man page generated from the master document.
man_pages = [
    ('index', 'mediawiki-utilities', 'mediawiki-utilities Documentation',
     ['Aaron Halfaker'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE(review): the one-line description below is still the sphinx-quickstart
# placeholder text and could be replaced with a real summary.
texinfo_documents = [
  ('index', 'mediawiki-utilities', 'mediawiki-utilities Documentation',
   'Aaron Halfaker', 'mediawiki-utilities', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mediawiki-utilities/python-mediawiki-utilities | doc/conf.py | Python | mit | 8,467 | 0.006023 |
# Lint as: python3
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for types.py."""
import unittest
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from frozendict import frozendict
from tools.ctexplain.types import Configuration
class TypesTest(unittest.TestCase):
  """Tests for Configuration hashing and equality semantics.

  Note: ``fragments`` is a tuple of fragment names. The original code passed
  ``('F1')``, which is just the string ``'F1'`` (no trailing comma), not a
  tuple; all call sites now use proper singleton tuples like ``('F1',)``.
  """

  def testConfigurationIsHashable(self):
    """A Configuration must be usable as a dict key."""
    options = frozendict({'o1': frozendict({'k1': 'v1'})})
    c = Configuration(fragments=('F1',), options=options)
    some_dict = {}
    some_dict[c] = 4

  def testConfigurationHashAccuracy(self):
    """Equal configurations collide; changing any piece changes the hash."""
    d = {}

    options1 = frozendict({'o1': frozendict({'k1': 'v1'})})
    d[Configuration(fragments=('F1',), options=options1)] = 4
    self.assertEqual(len(d), 1)

    # Structurally identical configuration: must map to the same key.
    options2 = frozendict({'o1': frozendict({'k1': 'v1'})})
    d[Configuration(fragments=('F1',), options=options2)] = 4
    self.assertEqual(len(d), 1)

    # Different fragments: new key.
    options3 = frozendict({'o1': frozendict({'k1': 'v1'})})
    d[Configuration(fragments=('F2',), options=options3)] = 4
    self.assertEqual(len(d), 2)

    # Different option name: new key.
    options4 = frozendict({'o2': frozendict({'k1': 'v1'})})
    d[Configuration(fragments=('F2',), options=options4)] = 4
    self.assertEqual(len(d), 3)

    # Different nested option key: new key.
    options5 = frozendict({'o2': frozendict({'k2': 'v1'})})
    d[Configuration(fragments=('F2',), options=options5)] = 4
    self.assertEqual(len(d), 4)

    # Different nested option value: new key.
    options6 = frozendict({'o2': frozendict({'k2': 'v2'})})
    d[Configuration(fragments=('F2',), options=options6)] = 4
    self.assertEqual(len(d), 5)

  def testConfigurationEquality(self):
    """Configurations are equal iff both fragments and options match."""
    c1 = Configuration(fragments=('F1',), options={'o1': {'k1': 'v1'}})
    c2 = Configuration(fragments=('F1',), options={'o1': {'k1': 'v1'}})
    c3 = Configuration(fragments=('F2',), options={'o1': {'k1': 'v1'}})
    c4 = Configuration(fragments=('F1',), options={'o2': {'k2': 'v2'}})
    self.assertEqual(c1, c2)
    self.assertNotEqual(c1, c3)
    self.assertNotEqual(c1, c4)
    self.assertNotEqual(c3, c4)
# Allow running this test file directly, outside the Bazel test runner.
if __name__ == '__main__':
  unittest.main()
| twitter-forks/bazel | tools/ctexplain/types_test.py | Python | apache-2.0 | 2,530 | 0.001581 |
# -*- coding: utf-8 -*-
import sae.const
DEBUG = False

# Site identity strings rendered in templates and feeds (title, sub-title,
# keywords and description); edit the literal values for your own blog.
SITE_TITLE = u"博客标题"
SITE_SUB_TITLE = u"博客副标题"
SITE_KEYWORDS = u"博客关键字"
SITE_DECRIPTION = u"博客描述"
AUTHOR_NAME = u"博客作者" # shown in the RSS feed
#CONACT_MAIL = "xxx@gmail.com" # not used yet

THEMES = ['octopress','admin']

# Blogroll links rendered in the sidebar.
LINK_BROLL = [
{'text': u"爱简单吧", 'url': "http://www.ijd8.com", 'title': u"ijd8官方博客"},
{'text': u"YouBBS", 'url': "http://youbbs.sinaapp.com", 'title': u"ijd8支持论坛"},
]

MAJOR_DOMAIN = 'www.yourdomain.com' # primary domain

## MySQL connection info, read from the SAE runtime constants.
MYSQL_DB = sae.const.MYSQL_DB
MYSQL_USER = sae.const.MYSQL_USER
MYSQL_PASS = sae.const.MYSQL_PASS
MYSQL_HOST = "%s:%s" % (sae.const.MYSQL_HOST_S, sae.const.MYSQL_PORT)
MYSQL_HOST_M = "%s:%s" % (sae.const.MYSQL_HOST, sae.const.MYSQL_PORT)

JQUERY = "http://lib.sinaapp.com/js/jquery/1.9.1/jquery-1.9.1.min.js"

# NOTE(review): hard-coded secret committed to source; every deployment
# should replace this with its own value.
COOKIE_SECRET = "11orTzKXQAsaYdkL5gEtGeJJFuYh7EQnp2XdTP1o/Vo="
LANGUAGE = 'zh-CN'

EACH_PAGE_POST_NUM = 10 # posts shown per page
RECENT_POST_NUM = 10 # recent posts shown in the sidebar
RELATED_NUM = 10 # number of related posts shown
SIDER_TAG_NUM = 100 # tags shown in the sidebar
SIDER_CAT_NUM = 100 # categories shown in the sidebar
SHORTEN_CONTENT_WORDS = 150 # characters kept when truncating a post in list views
DESCRIPTION_CUT_WORDS = 100 # characters shown in the meta description
FEED_NUM = 10 # number of posts in the feed output

####### Attachment storage below: choose either SAE Storage or Qiniu (free quota) -- pick only ONE.
## 1) SAE Storage must first be enabled in the SAE control panel.
BUCKET = "" # domain name, e.g. "upload"; leave empty if unused or when using Qiniu
## 2) Qiniu: registration grants 10G storage and 10G monthly traffic, sign up at http://t.cn/z8h5lsg
QN_AK = "" # Qiniu ACCESS_KEY
QN_SK = "" # Qiniu SECRET_KEY
QN_BUCKET = "" # bucket name, e.g. "upload"
| ego008/ijd8 | sae/setting.py | Python | mit | 1,814 | 0.024441 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# boto3/botocore are optional at import time; record availability so main()
# can fail with a clear message instead of an unhandled ImportError.
try:
    import boto3
    from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
# Ansible module metadata: community-supported module in preview status.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: lambda_alias
short_description: Creates, updates or deletes AWS Lambda function aliases.
description:
- This module allows the management of AWS Lambda functions aliases via the Ansible
framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
itself and M(lambda_event) to manage event source mappings.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
options:
function_name:
description:
- The name of the function alias.
required: true
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
name:
description:
- Name of the function alias.
required: true
aliases: ['alias_name']
description:
description:
- A short, user-defined function alias description.
required: false
version:
description:
- Version associated with the Lambda function alias.
A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
required: false
aliases: ['function_version']
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Simple example to create a lambda function and publish a version
- hosts: localhost
gather_facts: no
vars:
state: present
project_folder: /path/to/deployment/package
deployment_package: lambda.zip
account: 123456789012
production_version: 5
tasks:
- name: AWS Lambda Function
lambda:
state: "{{ state | default('present') }}"
name: myLambdaFunction
publish: True
description: lambda function description
code_s3_bucket: package-bucket
code_s3_key: "lambda/{{ deployment_package }}"
local_path: "{{ project_folder }}/{{ deployment_package }}"
runtime: python2.7
timeout: 5
handler: lambda.handler
memory_size: 128
role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
- name: show results
debug:
var: lambda_facts
# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
- name: "alias 'Dev' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Dev
description: Development is $LATEST version
# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
- name: "alias 'QA' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: QA
version: "{{ lambda_facts.Version }}"
description: "QA is version {{ lambda_facts.Version }}"
when: lambda_facts.Version != "$LATEST"
# The Prod alias will have a fixed version based on a variable
- name: "alias 'Prod' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Prod
version: "{{ production_version }}"
description: "Production is version {{ production_version }}"
'''
RETURN = '''
---
alias_arn:
description: Full ARN of the function, including the alias
returned: success
type: string
sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
description:
description: A short description of the alias
returned: success
type: string
sample: The development stage for my hot new app
function_version:
description: The qualifier that the alias refers to
returned: success
type: string
sample: $LATEST
name:
description: The name of the alias assigned
returned: success
type: string
sample: dev
'''
class AWSConnection:
    """
    Create the connection object and client objects as required.
    """

    def __init__(self, ansible_obj, resources, boto3=True):
        # Build one boto3 client per requested resource. 'lambda' is the
        # default and 'iam' is always added (used to look up the account id).
        try:
            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)

            self.resource_client = dict()
            if not resources:
                resources = ['lambda']

            resources.append('iam')

            for resource in resources:
                aws_connect_kwargs.update(dict(region=self.region,
                                               endpoint=self.endpoint,
                                               conn_type='client',
                                               resource=resource
                                               ))
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)

            # if region is not provided, then get default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # Best-effort lookup of the account id from the caller's IAM user ARN;
        # falls back to an empty string when the identity cannot be read.
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        # Return the cached client for *resource* (defaults to 'lambda').
        return self.resource_client[resource]
def pc(key):
    """
    Convert a snake_case key into its PascalCase equivalent.

    For example, 'this_function_name' becomes 'ThisFunctionName'.

    :param key: snake_case identifier
    :return: PascalCase identifier
    """
    return ''.join(map(str.capitalize, key.split('_')))
def set_api_params(module, module_params):
    """
    Build a boto3-style parameter dict from the Ansible module's parameters.

    Only parameters with a truthy value are included; their names are
    converted from snake_case to the PascalCase form the API expects.

    :param module: Ansible module reference
    :param module_params: iterable of module parameter names to copy
    :return: dict of API parameters
    """
    return dict((pc(param), module.params.get(param))
                for param in module_params
                if module.params.get(param))
def validate_params(module, aws):
    """
    Performs basic parameter validation and normalization.

    Validates the function name and normalizes 'function_version' in place:
    0 (the default) becomes '$LATEST', anything else becomes its string form.

    :param module: Ansible module reference
    :param aws: AWS client connection (unused here; kept for interface parity)
    :return: None
    """
    function_name = module.params['function_name']

    # validate function name: alphanumerics, underscore, hyphen and colon only.
    # The pattern is a raw string: in a plain string '\w' and '\-' are invalid
    # escape sequences (a DeprecationWarning on modern Python).
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
        )
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
    if module.params['function_version'] == 0:
        module.params['function_version'] = '$LATEST'
    else:
        module.params['function_version'] = str(module.params['function_version'])

    return
def get_lambda_alias(module, aws):
    """
    Returns the lambda function alias if it exists.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return: alias facts dict, or None when the alias does not exist
    """
    client = aws.client('lambda')

    # set API parameters
    api_params = set_api_params(module, ('function_name', 'name'))

    # check if alias exists and get facts
    try:
        results = client.get_alias(**api_params)
    except (ClientError, ParamValidationError, MissingParametersError) as e:
        # NOTE(review): only ClientError carries a .response attribute; if a
        # ParamValidationError/MissingParametersError is raised here, the
        # attribute access below would itself fail -- confirm and narrow.
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            # a missing alias is an expected condition, not an error
            results = None
        else:
            module.fail_json(msg='Error retrieving function alias: {0}'.format(e))

    return results
def lambda_alias(module, aws):
    """
    Adds, updates or deletes lambda function aliases.

    Idempotent: compares the desired state against the existing alias facts
    and only calls the API when something actually changes. Honors check mode.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return dict: 'changed' flag plus the API response (or existing facts)
    """
    client = aws.client('lambda')
    results = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']

    facts = get_lambda_alias(module, aws)
    if facts:
        current_state = 'present'

    if state == 'present':
        if current_state == 'present':

            # check if alias has changed -- only version and description can change
            alias_params = ('function_version', 'description')
            for param in alias_params:
                if module.params.get(param) != facts.get(pc(param)):
                    changed = True
                    break

            if changed:
                api_params = set_api_params(module, ('function_name', 'name'))
                api_params.update(set_api_params(module, alias_params))

                if not module.check_mode:
                    try:
                        results = client.update_alias(**api_params)
                    except (ClientError, ParamValidationError, MissingParametersError) as e:
                        module.fail_json(msg='Error updating function alias: {0}'.format(e))

        else:
            # create new function alias
            api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))

            try:
                if not module.check_mode:
                    results = client.create_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating function alias: {0}'.format(e))

    else:  # state = 'absent'
        if current_state == 'present':
            # delete the function
            api_params = set_api_params(module, ('function_name', 'name'))

            try:
                if not module.check_mode:
                    results = client.delete_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error deleting function alias: {0}'.format(e))

    # Fall back to the pre-existing facts when no API call produced results.
    return dict(changed=changed, **dict(results or facts))
def main():
    """
    Main entry point.

    Builds the Ansible argument spec, validates input and applies the
    requested alias state, exiting with snake_cased facts.

    :return dict: ansible facts
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            function_name=dict(required=True, default=None),
            name=dict(required=True, default=None, aliases=['alias_name']),
            function_version=dict(type='int', required=False, default=0, aliases=['version']),
            description=dict(required=False, default=None),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[]
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)

    results = lambda_alias(module, aws)

    # boto returns PascalCase keys; Ansible facts are snake_case.
    module.exit_json(**camel_dict_to_snake_dict(results))
# ansible import module(s) kept at ~eof as recommended
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| camradal/ansible | lib/ansible/modules/cloud/amazon/lambda_alias.py | Python | gpl-3.0 | 12,318 | 0.002598 |
import socket
def is_valid_ipv4(ip_str):
    """
    Return True if *ip_str* is a syntactically valid IPv4 address.

    Prefers ``socket.inet_pton`` for strict parsing; on platforms that
    lack it, falls back to ``socket.inet_aton`` plus a dot-count check
    (``inet_aton`` also accepts abbreviated forms such as '127.1').
    """
    try:
        socket.inet_pton(socket.AF_INET, ip_str)
        return True
    except socket.error:
        # Strict parser rejected it: definitely invalid.
        return False
    except AttributeError:
        # inet_pton unavailable on this platform; use the legacy API.
        pass
    try:
        socket.inet_aton(ip_str)
    except (AttributeError, socket.error):
        return False
    # inet_aton accepts shortened addresses; require a full dotted quad.
    return ip_str.count('.') == 3
def is_valid_ipv6(ip_str):
    """
    Return True if *ip_str* parses as an IPv6 address.
    """
    try:
        socket.inet_pton(socket.AF_INET6, ip_str)
    except socket.error:
        return False
    else:
        return True
def is_valid_ip(ip_str):
    """
    Return True if *ip_str* is a valid IPv4 or IPv6 address.
    """
    # any() short-circuits exactly like the original `or` chain.
    return any(check(ip_str) for check in (is_valid_ipv4, is_valid_ipv6))
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/ipware/utils.py | Python | agpl-3.0 | 794 | 0 |
from bluebottle.bb_projects.serializers import ProjectPreviewSerializer
from bluebottle.quotes.serializers import QuoteSerializer
from bluebottle.slides.serializers import SlideSerializer
from apps.campaigns.serializers import CampaignSerializer
from bluebottle.bb_fundraisers.serializers import BaseFundRaiserSerializer
from apps.statistics.serializers import StatisticSerializer
from rest_framework import serializers
class HomePageSerializer(serializers.Serializer):
    """
    Aggregate serializer for the home-page payload.

    Each field maps an attribute of the serialized object (named by
    ``source``) to a dedicated nested serializer.
    """
    quotes = QuoteSerializer(source='quotes')
    slides = SlideSerializer(source='slides')
    # the object's 'stats' attribute is exposed under the 'impact' key
    impact = StatisticSerializer(source='stats')
    projects = ProjectPreviewSerializer(source='projects')
    campaign = CampaignSerializer(source='campaign')
    fundraisers = BaseFundRaiserSerializer(source='fundraisers')
| onepercentclub/onepercentclub-site | apps/homepage/serializers.py | Python | bsd-3-clause | 794 | 0.001259 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import taskflow.engines
from taskflow.patterns import unordered_flow as uf
from taskflow import task
from taskflow import test
from taskflow.tests import utils
class UnorderedFlowTest(test.TestCase):
    """Exercise taskflow's unordered-flow pattern end to end."""
    def _make_engine(self, flow):
        # Load the flow with an empty shared 'context' dict pre-seeded
        # into the engine's storage.
        return taskflow.engines.load(flow, store={'context': {}})
    def test_result_access(self):
        """Values a task provides are retrievable from engine storage."""
        class DoApply(task.Task):
            default_provides = ('a', 'b')
            def execute(self):
                return [1, 2]
        wf = uf.Flow("the-test-action")
        wf.add(DoApply())
        e = self._make_engine(wf)
        e.run()
        data = e.storage.fetch_all()
        self.assertIn('a', data)
        self.assertIn('b', data)
        self.assertEquals(2, data['b'])
        self.assertEquals(1, data['a'])
    def test_reverting_flow(self):
        """A task that blows up makes engine.run() raise."""
        wf = uf.Flow("the-test-action")
        wf.add(utils.make_reverting_task('1'))
        wf.add(utils.make_reverting_task('2', blowup=True))
        e = self._make_engine(wf)
        self.assertRaises(Exception, e.run)
    def test_functor_flow(self):
        """Both tasks run and mutate the shared context dict."""
        class DoApply1(task.Task):
            default_provides = ('a', 'b', 'c')
            def execute(self, context):
                context['1'] = True
                return ['a', 'b', 'c']
        class DoApply2(task.Task):
            def execute(self, context):
                context['2'] = True
        wf = uf.Flow("the-test-action")
        wf.add(DoApply1())
        wf.add(DoApply2())
        e = self._make_engine(wf)
        e.run()
        # both tasks wrote one key each into the shared context
        self.assertEquals(2, len(e.storage.fetch('context')))
| ntt-sic/taskflow | taskflow/tests/unit/test_unordered_flow.py | Python | apache-2.0 | 2,294 | 0 |
from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from ..core.app import SatchlessApp
from . import models
class OrderApp(SatchlessApp):
    """
    Satchless sub-application exposing order listing and detail views.

    Template name lists are interpolated with the concrete order model
    name, so subclasses overriding ``order_model`` get model-specific
    template lookups for free.
    """
    app_name = 'order'
    namespace = 'order'
    order_model = models.Order
    order_details_templates = [
        'satchless/order/view.html',
        'satchless/order/%(order_model)s/view.html'
    ]
    order_list_templates = [
        'satchless/order/my_orders.html',
        'satchless/order/%(order_model)s/my_orders.html'
    ]
    @method_decorator(login_required)
    def index(self, request):
        """List all orders belonging to the authenticated user."""
        orders = self.order_model.objects.filter(user=request.user)
        context = self.get_context_data(request, orders=orders)
        format_data = {
            'order_model': self.order_model._meta.model_name
        }
        templates = [p % format_data for p in self.order_list_templates]
        return TemplateResponse(request, templates, context)
    def details(self, request, order_token):
        """Render a single order identified by its token."""
        order = self.get_order(request, order_token=order_token)
        context = self.get_context_data(request, order=order)
        format_data = {
            'order_model': order._meta.model_name
        }
        templates = [p % format_data for p in self.order_details_templates]
        return TemplateResponse(request, templates, context)
    def get_order(self, request, order_token):
        """
        Fetch the order for *order_token*, scoped to the current user.

        Anonymous visitors can only see anonymous (user=None) orders;
        raises Http404 when no matching order exists.
        """
        if request.user.is_authenticated():
            orders = self.order_model.objects.filter(user=request.user)
        else:
            orders = self.order_model.objects.filter(user=None)
        order = get_object_or_404(orders, token=order_token)
        return order
    def get_urls(self, prefix=None):
        """Return the URL patterns for the order app."""
        prefix = prefix or self.app_name
        return patterns('',
            url(r'^$', self.index, name='index'),
            url(r'^(?P<order_token>[0-9a-zA-Z]+)/$', self.details,
                name='details'),
        )
order_app = OrderApp()
| fusionbox/satchless | satchless/order/app.py | Python | bsd-3-clause | 2,122 | 0.001414 |
# Authors: Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from __future__ import print_function
import errno
import socket
import getpass
import gssapi
import ldif
import os
import re
import fileinput
import sys
import tempfile
import shutil
import traceback
import textwrap
from contextlib import contextmanager
from dns import resolver, rdatatype
from dns.exception import DNSException
import ldap
import ldapurl
import six
from six.moves.configparser import SafeConfigParser, NoOptionError
import ipaplatform
from ipapython import ipautil, sysrestore, admintool, version
from ipapython.admintool import ScriptError
from ipapython.ipa_log_manager import root_logger
from ipalib.util import validate_hostname
from ipapython import config
from ipalib import api, errors, x509
from ipapython.dn import DN
from ipaserver.install import certs, service, sysupgrade
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
if six.PY3:
unicode = str
# Used to determine install status
IPA_MODULES = [
'httpd', 'kadmin', 'dirsrv', 'pki-tomcatd', 'install', 'krb5kdc', 'ntpd',
'named', 'ipa_memcached']
class BadHostError(Exception):
    """Base error for an invalid or unusable host name."""
    pass
class HostLookupError(BadHostError):
    """Lookup (hosts file or DNS) of the host name failed."""
    pass
class HostForwardLookupError(HostLookupError):
    """Forward (name -> address) resolution of the host name failed."""
    pass
class HostReverseLookupError(HostLookupError):
    """Reverse (address -> name) resolution of the host name failed."""
    pass
class HostnameLocalhost(HostLookupError):
    """The host name resolves to a loopback address."""
    pass
class UpgradeVersionError(Exception):
    """Base error for data/platform version mismatches during upgrade."""
    pass
class UpgradePlatformError(UpgradeVersionError):
    """The stored data comes from a different platform."""
    pass
class UpgradeDataOlderVersionError(UpgradeVersionError):
    """The stored data is older than the running code expects."""
    pass
class UpgradeDataNewerVersionError(UpgradeVersionError):
    """The stored data is newer than the running code supports."""
    pass
class UpgradeMissingVersionError(UpgradeVersionError):
    """The stored data carries no version information."""
    pass
class ReplicaConfig:
    """
    In-memory container for settings read from a replica install file.

    Attributes start out empty/default and are populated later (see
    read_replica_info()).
    """
    def __init__(self, top_dir=None):
        self.realm_name = ""          # Kerberos realm name
        self.domain_name = ""         # DNS domain name
        self.master_host_name = ""    # FQDN of the master this replica was prepared on
        self.dirman_password = ""     # Directory Manager password
        self.host_name = ""           # FQDN this replica file was prepared for
        self.dir = ""                 # expanded realm_info directory
        self.subject_base = None      # certificate subject base (DN property below)
        self.setup_ca = False         # whether to configure a CA on this replica
        self.version = 0              # version stamp of the prepared file (0 = none)
        self.top_dir = top_dir        # temporary top-level extraction directory
    # expose subject_base as a DN-typed property backed by _subject_base
    subject_base = ipautil.dn_attribute_property('_subject_base')
def get_fqdn():
    """
    Best-effort lookup of this machine's fully qualified domain name.

    Falls back to the bare host name, and finally to an empty string,
    if the socket lookups raise.
    """
    try:
        return socket.getfqdn()
    except Exception:
        pass
    try:
        return socket.gethostname()
    except Exception:
        return ""
def verify_fqdn(host_name, no_host_dns=False, local_hostname=True):
    """
    Run fqdn checks for given host:
      - test hostname format (lower-case, fully qualified, valid syntax)
      - test that the name is not an IP address or localhost
      - test forward and reverse hostname DNS lookup

    Raises `BadHostError` or derived Exceptions if there is an error

    :param host_name: The host name to verify.
    :param no_host_dns: If true, skip DNS resolution tests of the host name.
    :param local_hostname: If true, run additional checks for local hostnames
    """
    # --- static, lookup-free sanity checks first ---
    if len(host_name.split(".")) < 2 or host_name == "localhost.localdomain":
        raise BadHostError("Invalid hostname '%s', must be fully-qualified." % host_name)
    if host_name != host_name.lower():
        raise BadHostError("Invalid hostname '%s', must be lower-case." % host_name)
    if ipautil.valid_ip(host_name):
        raise BadHostError("IP address not allowed as a hostname")
    try:
        # make sure that the host name meets the requirements in ipalib
        validate_hostname(host_name)
    except ValueError as e:
        raise BadHostError("Invalid hostname '%s', %s" % (host_name, unicode(e)))
    if local_hostname:
        try:
            root_logger.debug('Check if %s is a primary hostname for localhost', host_name)
            ex_name = socket.gethostbyaddr(host_name)
            root_logger.debug('Primary hostname for localhost: %s', ex_name[0])
            if host_name != ex_name[0]:
                raise HostLookupError("The host name %s does not match the primary host name %s. "\
                        "Please check /etc/hosts or DNS name resolution" % (host_name, ex_name[0]))
        except socket.gaierror:
            # name not resolvable locally -- tolerated here; forward
            # lookup below will catch real resolution problems
            pass
        except socket.error as e:
            root_logger.debug(
                'socket.gethostbyaddr() error: %d: %s',
                e.errno, e.strerror)  # pylint: disable=no-member
    if no_host_dns:
        print("Warning: skipping DNS resolution of host", host_name)
        return
    # --- forward resolution ---
    try:
        root_logger.debug('Search DNS for %s', host_name)
        hostaddr = socket.getaddrinfo(host_name, None)
    except Exception as e:
        root_logger.debug('Search failed: %s', e)
        raise HostForwardLookupError("Unable to resolve host name, check /etc/hosts or DNS name resolution")
    if len(hostaddr) == 0:
        raise HostForwardLookupError("Unable to resolve host name, check /etc/hosts or DNS name resolution")
    # Verify this is NOT a CNAME
    try:
        root_logger.debug('Check if %s is not a CNAME', host_name)
        resolver.query(host_name, rdatatype.CNAME)
        # query succeeding means a CNAME record exists -- that's an error
        raise HostReverseLookupError("The IPA Server Hostname cannot be a CNAME, only A and AAAA names are allowed.")
    except DNSException:
        pass
    # --- reverse resolution of every forward address ---
    # list of verified addresses to prevent multiple searches for the same address
    verified = set()
    for a in hostaddr:
        address = a[4][0]
        if address in verified:
            continue
        if address == '127.0.0.1' or address == '::1':
            raise HostForwardLookupError("The IPA Server hostname must not resolve to localhost (%s). A routable IP address must be used. Check /etc/hosts to see if %s is an alias for %s" % (address, host_name, address))
        try:
            root_logger.debug('Check reverse address of %s', address)
            revname = socket.gethostbyaddr(address)[0]
        except Exception as e:
            # reverse failures are logged but not fatal
            root_logger.debug('Check failed: %s', e)
            root_logger.error(
                "Unable to resolve the IP address %s to a host name, "
                "check /etc/hosts and DNS name resolution", address)
        else:
            root_logger.debug('Found reverse name: %s', revname)
            if revname != host_name:
                root_logger.error(
                    "The host name %s does not match the value %s obtained "
                    "by reverse lookup on IP address %s", host_name, revname,
                    address)
        verified.add(address)
def record_in_hosts(ip, host_name=None, conf_file=paths.HOSTS):
    """
    Search record in /etc/hosts - static table lookup for hostnames

    In case of match, returns a tuple of ip address and a list of
    hostname aliases
    When no record is matched, None is returned

    :param ip: IP address
    :param host_name: Optional hostname to search
    :param conf_file: Optional path to the lookup table
    """
    # Read via a context manager so the descriptor is always closed
    # (the previous open(...).readlines() leaked the file object).
    with open(conf_file, 'r') as f:
        hosts = f.readlines()
    for line in hosts:
        line = line.rstrip('\n')
        # drop trailing comments, then split into whitespace fields
        fields = line.partition('#')[0].split()
        if len(fields) == 0:
            continue
        try:
            hosts_ip = fields[0]
            names = fields[1:]
            if hosts_ip != ip:
                continue
            if host_name is not None:
                if host_name in names:
                    return (hosts_ip, names)
                else:
                    # the IP matched but the requested name is absent
                    return None
            return (hosts_ip, names)
        except IndexError:
            print("Warning: Erroneous line '%s' in %s" % (line, conf_file))
            continue
    return None
def add_record_to_hosts(ip, host_name, conf_file=paths.HOSTS):
    """
    Append an "<ip>\t<fqdn> <shortname>" record to the hosts file.

    :param ip: IP address as a string
    :param host_name: fully qualified host name
    :param conf_file: path of the hosts file to update
    """
    # Open in append mode instead of 'r+' plus a manual seek to EOF,
    # and use a context manager so the descriptor is closed even when
    # the write fails.
    with open(conf_file, 'a') as hosts_fd:
        hosts_fd.write(ip + '\t' + host_name + ' ' + host_name.split('.')[0] + '\n')
def read_ip_addresses():
    """
    Interactively prompt for IP addresses until the user enters a blank
    line.

    Each entry is validated (and required to match a local interface via
    match_local=True); invalid entries are reported and re-prompted.

    :returns: list of ipautil.CheckedIPAddress objects (possibly empty)
    """
    ips = []
    print("Enter the IP address to use, or press Enter to finish.")
    while True:
        ip = ipautil.user_input("Please provide the IP address to be used for this host name", allow_empty = True)
        if not ip:
            break
        try:
            ip_parsed = ipautil.CheckedIPAddress(ip, match_local=True)
        except Exception as e:
            print("Error: Invalid IP Address %s: %s" % (ip, e))
            continue
        ips.append(ip_parsed)
    return ips
def read_dns_forwarders():
    """
    Interactively collect DNS forwarder addresses.

    Offers the nameservers from /etc/resolv.conf first, then prompts for
    additional addresses until a blank line is entered.

    :returns: list of forwarder IP addresses as strings (possibly empty)
    """
    addrs = []
    if ipautil.user_input("Do you want to configure DNS forwarders?", True):
        print("Following DNS servers are configured in /etc/resolv.conf: %s" %
                ", ".join(resolver.get_default_resolver().nameservers))
        if ipautil.user_input("Do you want to configure these servers as DNS "
                "forwarders?", True):
            # get_default_resolver() above initialized default_resolver,
            # so this attribute access is safe; copy to avoid aliasing
            addrs = resolver.default_resolver.nameservers[:]
            print("All DNS servers from /etc/resolv.conf were added. You can "
                  "enter additional addresses now:")
        while True:
            ip = ipautil.user_input("Enter an IP address for a DNS forwarder, "
                                    "or press Enter to skip", allow_empty=True)
            if not ip:
                break
            try:
                # parse_netmask=False: forwarders are plain addresses
                ip_parsed = ipautil.CheckedIPAddress(ip, parse_netmask=False)
            except Exception as e:
                print("Error: Invalid IP Address %s: %s" % (ip, e))
                print("DNS forwarder %s not added." % ip)
                continue
            print("DNS forwarder %s added. You may add another." % ip)
            addrs.append(str(ip_parsed))
    if not addrs:
        print("No DNS forwarders configured")
    return addrs
def get_password(prompt):
    """
    Prompt for a password.

    When stdin is a terminal, uses getpass for echo-free entry;
    otherwise reads one line from stdin (scripted installs).
    Raises EOFError when non-interactive input is exhausted.
    """
    if not os.isatty(sys.stdin.fileno()):
        sys.stdout.write(prompt)
        sys.stdout.flush()
        line = sys.stdin.readline()
        if not line:
            raise EOFError()
        return line.rstrip()
    return getpass.getpass(prompt)
def _read_password_default_validator(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
def read_password(user, confirm=True, validate=True, retry=True, validator=_read_password_default_validator):
    """
    Interactively read a password, optionally validating and confirming.

    :param user: label used in the prompt ("<user> password: ")
    :param confirm: if True, ask for the password twice and require a match
    :param validate: if True, run *validator* on each entry
    :param retry: if False, accept the first entry even when it fails
        validation/confirmation
    :param validator: callable raising ValueError for unacceptable passwords
    :returns: the password string, or None on EOF
    """
    correct = False
    pwd = None
    try:
        while not correct:
            if not retry:
                # single-shot mode: whatever happens, do not loop again
                correct = True
            pwd = get_password(user + " password: ")
            if not pwd:
                continue
            if validate:
                try:
                    validator(pwd)
                except ValueError as e:
                    print(str(e))
                    pwd = None
                    continue
            if not confirm:
                correct = True
                continue
            pwd_confirm = get_password("Password (confirm): ")
            if pwd != pwd_confirm:
                print("Password mismatch!")
                print("")
                pwd = None
            else:
                correct = True
    except EOFError:
        # input stream ended before a valid password was entered
        return None
    finally:
        print("")
    return pwd
def update_file(filename, orig, subst):
    """
    Replace every occurrence of the literal text *orig* (regex-escaped)
    with *subst* in *filename*, editing the file in place.

    Ownership is restored after the edit because fileinput's in-place
    mode rewrites the file.

    :returns: 0 on success, 1 when the file does not exist
    """
    if os.path.exists(filename):
        st = os.stat(filename)
        pattern = "%s" % re.escape(orig)
        p = re.compile(pattern)
        # inplace=1 redirects stdout into the file being edited
        for line in fileinput.input(filename, inplace=1):
            if not p.search(line):
                sys.stdout.write(line)
            else:
                sys.stdout.write(p.sub(subst, line))
        fileinput.close()
        os.chown(filename, st.st_uid, st.st_gid) # reset perms
        return 0
    else:
        print("File %s doesn't exist." % filename)
        return 1
def set_directive(filename, directive, value, quotes=True, separator=' '):
    """Set a name/value pair directive in a configuration file.

    A value of None means to drop the directive.

    This has only been tested with nss.conf

    :param filename: path of the configuration file (must exist)
    :param directive: directive name; matched by prefix at line start
    :param value: new value, or None to remove the directive
    :param quotes: if True, wrap the value in double quotes
    :param separator: text between the directive name and the value
    """
    def _render():
        # Render one "directive<separator>value" line.
        if quotes:
            return '%s%s"%s"\n' % (directive, separator, value)
        return '%s%s%s\n' % (directive, separator, value)

    valueset = False
    st = os.stat(filename)
    newfile = []
    # Context managers close the descriptors even if an error occurs
    # mid-read/mid-write (the previous code closed them manually).
    with open(filename) as fd:
        for line in fd:
            if line.lstrip().startswith(directive):
                # Replace (or drop, when value is None) matching lines.
                valueset = True
                if value is not None:
                    newfile.append(_render())
            else:
                newfile.append(line)
    if not valueset and value is not None:
        # Directive was absent: append it at the end.
        newfile.append(_render())
    with open(filename, "w") as fd:
        fd.write("".join(newfile))
    os.chown(filename, st.st_uid, st.st_gid)  # reset perms
def get_directive(filename, directive, separator=' '):
    """
    A rather inefficient way to get a configuration directive.

    Returns the value of the first line whose (left-stripped) text
    starts with *directive*, with surrounding double quotes and spaces
    removed; None when no such line exists.
    """
    with open(filename, "r") as fd:
        for line in fd:
            if not line.lstrip().startswith(directive):
                continue
            value = line.strip().split(separator, 1)[1]
            return value.strip('"').strip(' ')
    return None
def kadmin(command):
    """Run a single kadmin.local query with IPA setup overrides enabled."""
    ipautil.run(["kadmin.local", "-q", command,
                 "-x", "ipa-setup-override-restrictions"])
def kadmin_addprinc(principal):
    """Create *principal* with a random key via kadmin.local."""
    kadmin("addprinc -randkey " + principal)
def kadmin_modprinc(principal, options):
    """Modify *principal* with the given kadmin modprinc *options* string."""
    kadmin("modprinc " + options + " " + principal)
def create_keytab(path, principal):
    """
    Export keys for *principal* into a fresh keytab file at *path*.

    Any pre-existing keytab at *path* is removed first so stale keys do
    not accumulate.
    """
    try:
        if ipautil.file_exists(path):
            os.remove(path)
    except os.error:
        root_logger.critical("Failed to remove %s." % path)
    kadmin("ktadd -k " + path + " " + principal)
def resolve_host(host_name):
    """
    Resolve *host_name* to the list of its IP addresses.

    Returns an empty list when resolution fails.  Raises
    HostnameLocalhost when any resolved address is a loopback address.
    """
    try:
        addrinfos = socket.getaddrinfo(host_name, None,
                                       socket.AF_UNSPEC, socket.SOCK_STREAM)
    except socket.error:
        return []
    ip_list = []
    for addrinfo in addrinfos:
        addr = addrinfo[4][0]
        if addr in ("127.0.0.1", "::1"):
            raise HostnameLocalhost("The hostname resolves to the localhost address")
        ip_list.append(addr)
    return ip_list
def get_host_name(no_host_dns):
    """
    Get the current FQDN from the socket and verify that it is valid.

    no_host_dns is a boolean that determines whether we enforce that the
    hostname is resolvable.

    Will raise a BadHostError (or subclass) on error, returns the
    hostname on success.
    """
    hostname = get_fqdn()
    verify_fqdn(hostname, no_host_dns)
    return hostname
def get_server_ip_address(host_name, unattended, setup_dns, ip_addresses):
    """
    Determine the usable server IP addresses for *host_name*.

    Resolves the host name, reconciles the result with any addresses
    given on the command line (prompting interactively when allowed),
    and cross-checks /etc/hosts for conflicting records.  Exits the
    process with status 1 on any unrecoverable mismatch.

    :param host_name: server FQDN
    :param unattended: if True, never prompt the user
    :param setup_dns: if True, command-line addresses override resolution
    :param ip_addresses: addresses supplied on the command line
    :returns: list of validated IP addresses
    """
    # Check we have a public IP that is associated with the hostname
    try:
        hostaddr = resolve_host(host_name)
    except HostnameLocalhost:
        print("The hostname resolves to the localhost address (127.0.0.1/::1)", file=sys.stderr)
        print("Please change your /etc/hosts file so that the hostname", file=sys.stderr)
        print("resolves to the ip address of your network interface.", file=sys.stderr)
        print("The KDC service does not listen on localhost", file=sys.stderr)
        print("", file=sys.stderr)
        print("Please fix your /etc/hosts file and restart the setup program", file=sys.stderr)
        sys.exit(1)
    ips = []
    if len(hostaddr):
        for ha in hostaddr:
            try:
                ips.append(ipautil.CheckedIPAddress(ha, match_local=True))
            except ValueError as e:
                root_logger.warning("Invalid IP address %s for %s: %s", ha, host_name, unicode(e))
    if not ips and not ip_addresses:
        if not unattended:
            # nothing resolved and nothing given: ask the user
            ip_addresses = read_ip_addresses()
    if ip_addresses:
        if setup_dns:
            ips = ip_addresses
        else:
            # all specified addresses was resolved for this host
            if set(ip_addresses) <= set(ips):
                ips = ip_addresses
            else:
                print("Error: the hostname resolves to IP address(es) that are different", file=sys.stderr)
                print("from those provided on the command line.  Please fix your DNS", file=sys.stderr)
                print("or /etc/hosts file and restart the installation.", file=sys.stderr)
                print("Provided but not resolved address(es): %s" % \
                        ", ".join(str(ip) for ip in (set(ip_addresses) - set(ips))), file=sys.stderr)
                sys.exit(1)
    if not ips:
        print("No usable IP address provided nor resolved.", file=sys.stderr)
        sys.exit(1)
    for ip_address in ips:
        # check /etc/hosts sanity
        hosts_record = record_in_hosts(str(ip_address))
        if hosts_record is not None:
            primary_host = hosts_record[1][0]
            if primary_host != host_name:
                print("Error: there is already a record in /etc/hosts for IP address %s:" \
                        % ip_address, file=sys.stderr)
                print(hosts_record[0], " ".join(hosts_record[1]), file=sys.stderr)
                print("Chosen hostname %s does not match configured canonical hostname %s" \
                        % (host_name, primary_host), file=sys.stderr)
                print("Please fix your /etc/hosts file and restart the installation.", file=sys.stderr)
                sys.exit(1)
    return ips
def update_hosts_file(ip_addresses, host_name, fstore):
    """
    Update hosts with specified addresses

    The original /etc/hosts is backed up into *fstore* (once) before
    any record is appended; addresses already present are skipped.

    :param ip_addresses: list of IP addresses
    :param host_name: FQDN to associate with each address
    :param fstore: sysrestore FileStore used for the backup
    :return:
    """
    if not fstore.has_file(paths.HOSTS):
        fstore.backup_file(paths.HOSTS)
    for ip_address in ip_addresses:
        if record_in_hosts(str(ip_address)):
            continue
        print("Adding [{address!s} {name}] to your /etc/hosts file".format(
            address=ip_address, name=host_name))
        add_record_to_hosts(str(ip_address), host_name)
def expand_replica_info(filename, password):
    """
    Decrypt and expand a replica installation file into a temporary
    location.  The caller is responsible to remove this directory.

    :param filename: path of the encrypted replica file
    :param password: password used to decrypt it
    :returns: (top_dir, realm_info_dir) tuple of paths
    """
    top_dir = tempfile.mkdtemp("ipa")
    tar_path = "%s/files.tar" % top_dir
    ipautil.decrypt_file(filename, tar_path, password, top_dir)
    ipautil.run(["tar", "xf", tar_path, "-C", top_dir])
    # the intermediate tarball is no longer needed once extracted
    os.remove(tar_path)
    return top_dir, "%s/realm_info" % top_dir
def read_replica_info(dir_path, rconfig):
    """
    Read the contents of a replica installation file.

    Parses the [realm] section of <dir_path>/realm_info and fills in
    the corresponding attributes of *rconfig*.

    :param dir_path: directory containing the realm_info file
    :param rconfig: a ReplicaConfig object to populate
    """
    filename = dir_path + "/realm_info"
    config = SafeConfigParser()
    # Use a context manager so the descriptor is closed (the previous
    # code opened the file and never closed it).
    with open(filename) as fd:
        config.readfp(fd)
    rconfig.realm_name = config.get("realm", "realm_name")
    rconfig.master_host_name = config.get("realm", "master_host_name")
    rconfig.domain_name = config.get("realm", "domain_name")
    rconfig.host_name = config.get("realm", "destination_host")
    rconfig.subject_base = config.get("realm", "subject_base")
    try:
        rconfig.version = int(config.get("realm", "version"))
    except NoOptionError:
        # older prepared files carry no version; keep the default (0)
        pass
def read_replica_info_dogtag_port(config_dir):
    """
    Return the dogtag DS port recorded in the replica info directory.

    Falls back to 7389 when the port file is missing or unparseable.
    """
    default_port = 7389
    portfile = config_dir + "/dogtag_directory_port.txt"
    if not ipautil.file_exists(portfile):
        return default_port
    with open(portfile) as fd:
        try:
            return int(fd.read())
        except (ValueError, IOError) as e:
            root_logger.debug('Cannot parse dogtag DS port: %s', e)
            root_logger.debug('Default to %d', default_port)
            return default_port
def create_replica_config(dirman_password, filename, options):
    """
    Expand a replica file and build the ReplicaConfig for installation.

    Decrypts/unpacks *filename*, validates the embedded version against
    the running release, and reconciles the prepared host name with the
    local one (prompting the user on mismatch).  Exits the process on
    unrecoverable errors.

    :param dirman_password: Directory Manager password for decryption
    :param filename: path of the prepared replica file
    :param options: parsed install options (no_host_dns is consulted)
    :returns: populated ReplicaConfig
    """
    top_dir = None
    try:
        top_dir, dir = expand_replica_info(filename, dirman_password)
    except Exception as e:
        # a wrong password is the most common cause of failure here
        root_logger.error("Failed to decrypt or open the replica file.")
        print("ERROR: Failed to decrypt or open the replica file.")
        print("Verify you entered the correct Directory Manager password.")
        sys.exit(1)
    config = ReplicaConfig(top_dir)
    read_replica_info(dir, config)
    root_logger.debug(
        'Installing replica file with version %d (0 means no version in prepared file).',
        config.version)
    if config.version and config.version > version.NUM_VERSION:
        root_logger.error(
            'A replica file from a newer release (%d) cannot be installed on an older version (%d)',
            config.version, version.NUM_VERSION)
        sys.exit(1)
    config.dirman_password = dirman_password
    try:
        host = get_host_name(options.no_host_dns)
    except BadHostError as e:
        root_logger.error(str(e))
        sys.exit(1)
    if config.host_name != host:
        try:
            print("This replica was created for '%s' but this machine is named '%s'" % (config.host_name, host))
            if not ipautil.user_input("This may cause problems. Continue?", False):
                root_logger.debug(
                    "Replica was created for %s but machine is named %s "
                    "User chose to exit",
                    config.host_name, host)
                sys.exit(0)
            # user accepted the mismatch; continue with the local name
            config.host_name = host
            print("")
        except KeyboardInterrupt:
            root_logger.debug("Keyboard Interrupt")
            sys.exit(0)
    config.dir = dir
    config.ca_ds_port = read_replica_info_dogtag_port(config.dir)
    return config
def check_server_configuration():
    """
    Check if IPA server is configured on the system.

    The check looks for system restore (uninstall) files written during
    installation, so it can only be run with root privileges.  Raises
    RuntimeError when IPA is not configured -- convenient for install
    tools that require a configured server.
    """
    if not sysrestore.FileStore(paths.SYSRESTORE).has_files():
        raise RuntimeError("IPA is not configured on this system.")
def remove_file(filename):
    """
    Remove a file (or dangling symlink), logging failures instead of
    raising.
    """
    if not os.path.lexists(filename):
        return
    try:
        os.unlink(filename)
    except Exception as e:
        root_logger.error('Error removing %s: %s' % (filename, str(e)))
def rmtree(path):
    """
    Remove a directory structure, logging failures instead of raising.
    """
    if not os.path.exists(path):
        return
    try:
        shutil.rmtree(path)
    except Exception as e:
        root_logger.error('Error removing %s: %s' % (path, str(e)))
def is_ipa_configured():
    """
    Using the state and index install files determine if IPA is already
    configured.

    :returns: True when any known module has recorded state or any file
        is tracked by the restore filestore
    """
    installed = False
    sstore = sysrestore.StateFile(paths.SYSRESTORE)
    fstore = sysrestore.FileStore(paths.SYSRESTORE)
    for module in IPA_MODULES:
        if sstore.has_state(module):
            root_logger.debug('%s is configured' % module)
            installed = True
        else:
            root_logger.debug('%s is not configured' % module)
    if fstore.has_files():
        root_logger.debug('filestore has files')
        installed = True
    else:
        root_logger.debug('filestore is tracking no files')
    return installed
def run_script(main_function, operation_name, log_file_name=None,
               fail_message=None):
    """Run the given function as a command-line utility

    This function:

    - Runs the given function
    - Formats any errors
    - Exits with the appropriate code

    :param main_function: Function to call
    :param log_file_name: Name of the log file (displayed on unexpected
        errors)
    :param operation_name: Name of the script
    :param fail_message: Optional message displayed on failure
    """
    root_logger.info('Starting script: %s', operation_name)
    try:
        try:
            return_value = main_function()
        except BaseException as e:
            if (
                isinstance(e, SystemExit) and
                (e.code is None or e.code == 0)  # pylint: disable=no-member
            ):
                # Not an error after all
                root_logger.info('The %s command was successful',
                                 operation_name)
            else:
                # Log at the DEBUG level, which is not output to the console
                # (unless in debug/verbose mode), but is written to a logfile
                # if one is open.
                tb = sys.exc_info()[2]
                root_logger.debug('\n'.join(traceback.format_tb(tb)))
                root_logger.debug('The %s command failed, exception: %s: %s',
                                  operation_name, type(e).__name__, e)
                if fail_message and not isinstance(e, SystemExit):
                    print(fail_message)
            # re-raise so the outer handler can translate the error
            raise
        else:
            if return_value:
                root_logger.info('The %s command failed, return value %s',
                                 operation_name, return_value)
            else:
                root_logger.info('The %s command was successful',
                                 operation_name)
            sys.exit(return_value)
    except BaseException as error:
        # translate every outcome into a message and an exit code
        message, exitcode = handle_error(error, log_file_name)
        if message:
            print(message, file=sys.stderr)
        sys.exit(exitcode)
def handle_error(error, log_file_name=None):
    """Handle specific errors. Returns a message and return code.

    The isinstance checks are ordered from most to least specific;
    anything unmatched falls through to the generic "Unexpected error"
    message at the bottom.
    """
    if isinstance(error, SystemExit):
        if isinstance(error.code, int):
            return None, error.code
        elif error.code is None:
            return None, 0
        else:
            return str(error), 1
    if isinstance(error, RuntimeError):
        return str(error), 1
    if isinstance(error, KeyboardInterrupt):
        return "Cancelled.", 1
    if isinstance(error, admintool.ScriptError):
        return error.msg, error.rval
    if isinstance(error, socket.error):
        return error, 1
    if isinstance(error, errors.ACIError):
        return error.message, 1
    if isinstance(error, ldap.INVALID_CREDENTIALS):
        return "Invalid password", 1
    if isinstance(error, ldap.INSUFFICIENT_ACCESS):
        return "Insufficient access", 1
    if isinstance(error, ldap.LOCAL_ERROR):
        return error.args[0].get('info', ''), 1
    if isinstance(error, ldap.SERVER_DOWN):
        return error.args[0]['desc'], 1
    if isinstance(error, ldap.LDAPError):
        # generic python-ldap error: show type, description and info
        message = 'LDAP error: %s\n%s\n%s' % (
            type(error).__name__,
            error.args[0]['desc'].strip(),
            error.args[0].get('info', '').strip()
        )
        return message, 1
    if isinstance(error, config.IPAConfigError):
        message = "An IPA server to update cannot be found. Has one been configured yet?"
        message += "\nThe error was: %s" % error
        return message, 1
    if isinstance(error, errors.LDAPError):
        return "An error occurred while performing operations: %s" % error, 1
    if isinstance(error, HostnameLocalhost):
        message = textwrap.dedent("""
            The hostname resolves to the localhost address (127.0.0.1/::1)
            Please change your /etc/hosts file so that the hostname
            resolves to the ip address of your network interface.

            Please fix your /etc/hosts file and restart the setup program
            """).strip()
        return message, 1
    if log_file_name:
        message = "Unexpected error - see %s for details:" % log_file_name
    else:
        message = "Unexpected error"
    message += '\n%s: %s' % (type(error).__name__, error)
    return message, 1
def load_pkcs12(cert_files, key_password, key_nickname, ca_cert_files,
                host_name):
    """
    Load and verify server certificate and private key from multiple files

    The files are accepted in PEM and DER certificate, PKCS#7 certificate
    chain, PKCS#8 and raw private key and PKCS#12 formats.

    :param cert_files: Names of server certificate and private key files to
        import
    :param key_password: Password to decrypt private keys
    :param key_nickname: Nickname of the private key to import from PKCS#12
        files
    :param ca_cert_files: Names of CA certificate files to import
    :param host_name: Host name of the server
    :returns: Temporary PKCS#12 file with the server certificate, private key
        and CA certificate chain, password to unlock the PKCS#12 file and
        the CA certificate of the CA that issued the server certificate
    """
    with certs.NSSDatabase() as nssdb:
        # scratch NSS DB protected by a throwaway password
        db_password = ipautil.ipa_generate_password()
        db_pwdfile = ipautil.write_tmp_file(db_password)
        nssdb.create_db(db_pwdfile.name)
        try:
            nssdb.import_files(cert_files, db_pwdfile.name,
                               True, key_password, key_nickname)
        except RuntimeError as e:
            raise ScriptError(str(e))
        if ca_cert_files:
            try:
                nssdb.import_files(ca_cert_files, db_pwdfile.name)
            except RuntimeError as e:
                raise ScriptError(str(e))
        for nickname, trust_flags in nssdb.list_certs():
            if 'u' in trust_flags:
                # 'u' marks the cert that has a private key: that is the
                # server cert; remember its nickname
                key_nickname = nickname
                continue
            nssdb.trust_root_cert(nickname)
        # Check we have the whole cert chain & the CA is in it
        trust_chain = list(reversed(nssdb.get_trust_chain(key_nickname)))
        ca_cert = None
        for nickname in trust_chain[1:]:
            cert = nssdb.get_cert(nickname)
            if ca_cert is None:
                # first cert above the server cert is the issuing CA
                ca_cert = cert
            nss_cert = x509.load_certificate(cert, x509.DER)
            subject = DN(str(nss_cert.subject))
            issuer = DN(str(nss_cert.issuer))
            del nss_cert
            if subject == issuer:
                # reached the self-signed root: chain is complete
                break
        else:
            raise ScriptError(
                "The full certificate chain is not present in %s" %
                (", ".join(cert_files)))
        for nickname in trust_chain[1:]:
            try:
                nssdb.verify_ca_cert_validity(nickname)
            except ValueError as e:
                raise ScriptError(
                    "CA certificate %s in %s is not valid: %s" %
                    (subject, ", ".join(cert_files), e))
        # Check server validity
        try:
            nssdb.verify_server_cert_validity(key_nickname, host_name)
        except ValueError as e:
            raise ScriptError(
                "The server certificate in %s is not valid: %s" %
                (", ".join(cert_files), e))
        # re-export everything as a single PKCS#12 bundle
        out_file = tempfile.NamedTemporaryFile()
        out_password = ipautil.ipa_generate_password()
        out_pwdfile = ipautil.write_tmp_file(out_password)
        args = [
            paths.PK12UTIL,
            '-o', out_file.name,
            '-n', key_nickname,
            '-d', nssdb.secdir,
            '-k', db_pwdfile.name,
            '-w', out_pwdfile.name,
        ]
        ipautil.run(args)
    return out_file, out_password, ca_cert
@contextmanager
def stopped_service(service, instance_name=""):
    """
    Ensure that the specified service is stopped while the commands within
    this context are executed.

    Service is started at the end of the execution.  If the service was
    not running to begin with, it is left untouched.

    :param service: platform service name
    :param instance_name: optional instance (for templated services)
    """
    if instance_name:
        log_instance_name = "@{instance}".format(instance=instance_name)
    else:
        log_instance_name = ""
    root_logger.debug('Ensuring that service %s%s is not running while '
                      'the next set of commands is being executed.', service,
                      log_instance_name)
    service_obj = services.service(service)
    # Figure out if the service is running, if not, yield
    if not service_obj.is_running(instance_name):
        root_logger.debug('Service %s%s is not running, continue.', service,
                          log_instance_name)
        yield
    else:
        # Stop the service, do the required stuff and start it again
        root_logger.debug('Stopping %s%s.', service, log_instance_name)
        service_obj.stop(instance_name)
        try:
            yield
        finally:
            # restart even when the body raised
            root_logger.debug('Starting %s%s.', service, log_instance_name)
            service_obj.start(instance_name)
def check_entropy():
    """
    Checks if the system has enough entropy, if not, displays warning message

    Best-effort only: problems reading or parsing the kernel's entropy
    counter are logged at debug level and otherwise ignored.
    """
    emsg = 'WARNING: Your system is running out of entropy, ' \
           'you may experience long delays'
    try:
        with open(paths.ENTROPY_AVAIL, 'r') as entropy_file:
            available = int(entropy_file.read())
        if available < 200:
            service.print_msg(emsg)
            root_logger.debug(emsg)
    except IOError as e:
        root_logger.debug(
            "Could not open %s: %s", paths.ENTROPY_AVAIL, e)
    except ValueError as e:
        root_logger.debug("Invalid value in %s %s", paths.ENTROPY_AVAIL, e)
def load_external_cert(files, subject_base):
    """
    Load and verify external CA certificate chain from multiple files.

    The files are accepted in PEM and DER certificate and PKCS#7 certificate
    chain formats.

    :param files: Names of files to import
    :param subject_base: Subject name base for IPA certificates

    :returns: Temporary file with the IPA CA certificate and temporary file
        with the external CA certificate chain
    :raises ScriptError: if the files cannot be imported, the IPA CA
        certificate is missing, the chain is incomplete, or any CA
        certificate in the chain fails validity checking
    """
    with certs.NSSDatabase() as nssdb:
        db_password = ipautil.ipa_generate_password()
        db_pwdfile = ipautil.write_tmp_file(db_password)
        nssdb.create_db(db_pwdfile.name)

        try:
            nssdb.import_files(files, db_pwdfile.name)
        except RuntimeError as e:
            raise ScriptError(str(e))

        ca_subject = DN(('CN', 'Certificate Authority'), subject_base)
        ca_nickname = None
        cache = {}
        for nickname, trust_flags in nssdb.list_certs():
            cert = nssdb.get_cert(nickname, pem=True)

            nss_cert = x509.load_certificate(cert)
            subject = DN(str(nss_cert.subject))
            issuer = DN(str(nss_cert.issuer))
            del nss_cert

            cache[nickname] = (cert, subject, issuer)
            if subject == ca_subject:
                ca_nickname = nickname
            nssdb.trust_root_cert(nickname)

        if ca_nickname is None:
            raise ScriptError(
                "IPA CA certificate not found in %s" % (", ".join(files)))

        # Materialize the chain: it is iterated twice below and
        # reversed() returns a one-shot iterator, which previously left
        # the validity-check loop with nothing to iterate.  This also
        # matches the PKCS#12 export helper above.
        trust_chain = list(reversed(nssdb.get_trust_chain(ca_nickname)))
        ca_cert_chain = []
        # Walk from the IPA CA towards the root; a self-signed cert
        # (subject == issuer) terminates the chain.
        for nickname in trust_chain:
            cert, subject, issuer = cache[nickname]
            ca_cert_chain.append(cert)
            if subject == issuer:
                break
        else:
            raise ScriptError(
                "CA certificate chain in %s is incomplete" %
                (", ".join(files)))

        for nickname in trust_chain:
            try:
                nssdb.verify_ca_cert_validity(nickname)
            except ValueError as e:
                raise ScriptError(
                    "CA certificate %s in %s is not valid: %s" %
                    (subject, ", ".join(files), e))

        # First element is the IPA CA cert, the rest is the external chain.
        cert_file = tempfile.NamedTemporaryFile()
        cert_file.write(ca_cert_chain[0] + '\n')
        cert_file.flush()

        ca_file = tempfile.NamedTemporaryFile()
        ca_file.write('\n'.join(ca_cert_chain[1:]) + '\n')
        ca_file.flush()

    return cert_file, ca_file
def store_version():
    """Store current data version and platform. This is required for check if
    upgrade is required.
    """
    for key, value in (('data_version', version.VENDOR_VERSION),
                       ('platform', ipaplatform.NAME)):
        sysupgrade.set_upgrade_state('ipa', key, value)
def check_version():
    """Verify that the stored upgrade state matches this server.

    :raise UpgradePlatformError: if platform is not the same
    :raise UpgradeDataOlderVersionError: if data needs to be upgraded
    :raise UpgradeDataNewerVersionError: older version of IPA was detected than data
    :raise UpgradeMissingVersionError: if platform or version is missing
    """
    # Guard clauses: missing state is reported before any comparison.
    platform = sysupgrade.get_upgrade_state('ipa', 'platform')
    if platform is None:
        raise UpgradeMissingVersionError("no platform stored")
    if platform != ipaplatform.NAME:
        raise UpgradePlatformError(
            "platform mismatch (expected '%s', current '%s')" % (
                platform, ipaplatform.NAME)
        )

    data_version = sysupgrade.get_upgrade_state('ipa', 'data_version')
    if data_version is None:
        raise UpgradeMissingVersionError("no data_version stored")

    parsed_data_ver = tasks.parse_ipa_version(data_version)
    parsed_ipa_ver = tasks.parse_ipa_version(version.VENDOR_VERSION)
    if parsed_data_ver < parsed_ipa_ver:
        raise UpgradeDataOlderVersionError(
            "data needs to be upgraded (expected version '%s', current "
            "version '%s')" % (version.VENDOR_VERSION, data_version)
        )
    if parsed_data_ver > parsed_ipa_ver:
        raise UpgradeDataNewerVersionError(
            "data are in newer version than IPA (data version '%s', IPA "
            "version '%s')" % (data_version, version.VENDOR_VERSION)
        )
def realm_to_serverid(realm_name):
    """Convert a Kerberos realm name into a 389-DS server instance id.

    Every '.' in the realm is turned into '-', e.g.
    'EXAMPLE.COM' -> 'EXAMPLE-COM'.
    """
    return realm_name.replace(".", "-")
def realm_to_ldapi_uri(realm_name):
    """Return the ldapi:// URI of the directory server for *realm_name*.

    The socket path is derived from the realm's server id and escaped so
    it can be embedded in an LDAP URL.
    """
    socketname = paths.SLAPD_INSTANCE_SOCKET_TEMPLATE % (
        realm_to_serverid(realm_name),)
    return 'ldapi://' + ldapurl.ldapUrlEscape(socketname)
def install_service_keytab(principal, server, path, force_service_add=False):
    """Create a service entry (if missing) and fetch its keytab.

    :param principal: Kerberos principal of the service (e.g. HTTP/fqdn)
    :param server: IPA server to request the keytab from
    :param path: filesystem path the keytab is written to
    :param force_service_add: passed through as ``force`` to service_add

    The RPC backend is always disconnected again, and an already-existing
    service entry is not treated as an error.
    """
    try:
        api.Backend.rpcclient.connect()

        # Create services if none exists (we use the .forward method
        # here so that we can control the client version number and avoid
        # errors. This is a workaround until the API becomes version
        # independent: FIXME
        api.Backend.rpcclient.forward(
            'service_add',
            krbprincipalname=principal,
            force=force_service_add,
            version=u'2.112'  # All the way back to 3.0 servers
        )
    except errors.DuplicateEntry:
        # Service already exists -- fine, we only need the keytab.
        pass
    finally:
        if api.Backend.rpcclient.isconnected():
            api.Backend.rpcclient.disconnect()

    args = [paths.IPA_GETKEYTAB, '-k', path, '-p', principal, '-s', server]
    ipautil.run(args)
def check_creds(options, realm_name):
    """Make sure usable Kerberos credentials are available.

    Tries the default credential cache first; when it is missing or does
    not match ``options.principal``, obtains a fresh TGT into a new
    temporary ccache, prompting for a password if necessary.

    Side effects: may set ``options.created_ccache_file`` and
    ``options.admin_password``, and points the ``KRB5CCNAME`` environment
    variable at the new ccache when one is created.

    :param options: install options object; ``principal``,
        ``admin_password`` and ``unattended`` are read
    :param realm_name: Kerberos realm used to qualify bare principals
    :raises ScriptError: when no password can be obtained or kinit fails
    """
    # Check if ccache is available
    default_cred = None
    try:
        root_logger.debug('KRB5CCNAME set to %s' %
                          os.environ.get('KRB5CCNAME', None))
        # get default creds, will raise if none found
        default_cred = gssapi.creds.Credentials()
        principal = str(default_cred.name)
    except gssapi.raw.misc.GSSError as e:
        root_logger.debug('Failed to find default ccache: %s' % e)
        principal = None

    # Check if the principal matches the requested one (if any)
    if principal is not None and options.principal is not None:
        op = options.principal
        if op.find('@') == -1:
            op = '%s@%s' % (op, realm_name)
        if principal != op:
            root_logger.debug('Specified principal %s does not match '
                              'available credentials (%s)' %
                              (options.principal, principal))
            principal = None

    if principal is None:
        # No usable cached credentials: kinit into a brand new ccache.
        (ccache_fd, ccache_name) = tempfile.mkstemp()
        os.close(ccache_fd)
        options.created_ccache_file = ccache_name

        if options.principal is not None:
            principal = options.principal
        else:
            principal = 'admin'
        stdin = None
        if principal.find('@') == -1:
            principal = '%s@%s' % (principal, realm_name)

        if options.admin_password is not None:
            stdin = options.admin_password
        else:
            if not options.unattended:
                # Interactive mode: prompt on the terminal.
                try:
                    stdin = getpass.getpass("Password for %s: " % principal)
                except EOFError:
                    stdin = None
                if not stdin:
                    root_logger.error(
                        "Password must be provided for %s.", principal)
                    raise ScriptError("Missing password for %s" % principal)
            else:
                # Unattended mode: a password may only come from stdin
                # (piped), never from a terminal prompt.
                if sys.stdin.isatty():
                    root_logger.error("Password must be provided in " +
                                      "non-interactive mode.")
                    root_logger.info("This can be done via " +
                                     "echo password | ipa-client-install " +
                                     "... or with the -w option.")
                    raise ScriptError("Missing password for %s" % principal)
                else:
                    stdin = sys.stdin.readline()

            # set options.admin_password for future use
            options.admin_password = stdin

        try:
            ipautil.kinit_password(principal, stdin, ccache_name)
        except RuntimeError as e:
            root_logger.error("Kerberos authentication failed: %s" % e)
            raise ScriptError("Invalid credentials: %s" % e)

        os.environ['KRB5CCNAME'] = ccache_name
class ModifyLDIF(ldif.LDIFParser):
    """
    Allows to modify LDIF file.

    Modifications are queued with add_value/remove_value/replace_value
    (or parsed from another LDIF via modifications_from_ldif) and applied
    while the input LDIF is parsed; the result is written to the output
    file.  Operations keep the order in which they were specified per DN.

    Warning: only modifications of existing DNs are supported
    """

    def __init__(self, input_file, output_file):
        """
        :param input_file: an LDIF file to read
        :param output_file: an LDIF file to write modified entries to
        """
        ldif.LDIFParser.__init__(self, input_file)
        self.writer = ldif.LDIFWriter(output_file)
        self.dn_updated = set()

        self.modifications = {}  # keep modify operations in original order

    def add_value(self, dn, attr, values):
        """
        Add values to an attribute of an entry.

        :param dn: DN of entry (must exist)
        :param attr: attribute name
        :param values: list of values to be added
        """
        assert isinstance(values, list)
        self.modifications.setdefault(dn, []).append(
            dict(
                op="add",
                attr=attr,
                values=values,
            )
        )

    def remove_value(self, dn, attr, values=None):
        """
        Remove values from an attribute of an entry.

        :param dn: DN of entry
        :param attr: attribute name
        :param values: list of values to be removed; if None, the whole
            attribute will be removed
        """
        assert values is None or isinstance(values, list)
        self.modifications.setdefault(dn, []).append(
            dict(
                op="del",
                attr=attr,
                values=values,
            )
        )

    def replace_value(self, dn, attr, values):
        """
        Replace all values of an attribute with new values.

        :param dn: DN of entry
        :param attr: attribute name
        :param values: new values for the attribute
        """
        assert isinstance(values, list)
        self.remove_value(dn, attr)
        self.add_value(dn, attr, values)

    def modifications_from_ldif(self, ldif_file):
        """
        Parse ldif file. Default operation is add, only changetypes "add"
        and "modify" are supported.

        :param ldif_file: an opened file for read
        :raises: ValueError
        """
        parser = ldif.LDIFRecordList(ldif_file)
        parser.parse()

        last_dn = None
        for dn, entry in parser.all_records:
            if dn is None:
                # ldif parser returns None if records belong to previous DN
                dn = last_dn
            else:
                last_dn = dn

            if "replace" in entry:
                for attr in entry["replace"]:
                    try:
                        self.replace_value(dn, attr, entry[attr])
                    except KeyError:
                        raise ValueError("replace: {dn}, {attr}: values are "
                                         "missing".format(dn=dn, attr=attr))
            elif "delete" in entry:
                for attr in entry["delete"]:
                    self.remove_value(dn, attr, entry.get(attr, None))
            elif "add" in entry:
                for attr in entry["add"]:
                    try:
                        # NOTE(review): "add" is implemented as a replace
                        # of the whole attribute -- confirm this is the
                        # intended semantics.
                        self.replace_value(dn, attr, entry[attr])
                    except KeyError:
                        raise ValueError("add: {dn}, {attr}: values are "
                                         "missing".format(dn=dn, attr=attr))
            else:
                root_logger.error("Ignoring entry: %s : only modifications "
                                  "are allowed (missing \"changetype: "
                                  "modify\")", dn)

    def handle(self, dn, entry):
        """Apply queued modifications to *entry* and write it out."""
        if dn in self.modifications:
            self.dn_updated.add(dn)
        for mod in self.modifications.get(dn, []):
            attr_name = mod["attr"]
            values = mod["values"]

            if mod["op"] == "del":
                # delete whole attribute, or only the listed values
                attribute = entry.setdefault(attr_name, [])
                if values is None:
                    attribute = []
                else:
                    attribute = [v for v in attribute if v not in values]
                # Write the filtered values back: previously partial
                # deletions were computed into a local but never stored
                # in the entry, so they were silently lost.
                entry[attr_name] = attribute
                if not attribute:  # empty
                    del entry[attr_name]
            elif mod["op"] == "add":
                # add only values that are not present yet
                attribute = entry.setdefault(attr_name, [])
                attribute.extend([v for v in values if v not in attribute])
            else:
                assert False, "Unknown operation: %r" % mod["op"]

        self.writer.unparse(dn, entry)

    def parse(self):
        ldif.LDIFParser.parse(self)

        # check if there are any remaining modifications
        remaining_changes = set(self.modifications.keys()) - self.dn_updated
        for dn in remaining_changes:
            root_logger.error(
                "DN: %s does not exists or haven't been updated", dn)
def remove_keytab(keytab_path):
    """
    Remove Kerberos keytab and issue a warning if the procedure fails

    :param keytab_path: path to the keytab file
    """
    root_logger.debug("Removing service keytab: {}".format(keytab_path))
    try:
        os.remove(keytab_path)
    except OSError as e:
        # A keytab that is already gone is fine; anything else is reported.
        if e.errno == errno.ENOENT:
            return
        root_logger.warning("Failed to remove Kerberos keytab '{}': "
                            "{}".format(keytab_path, e))
        root_logger.warning("You may have to remove it manually")
def remove_ccache(ccache_path=None, run_as=None):
    """
    remove Kerberos credential cache, essentially a wrapper around kdestroy.

    :param ccache_path: path to the ccache file
    :param run_as: run kdestroy as this user
    """
    root_logger.debug("Removing service credentials cache")
    cmd = [paths.KDESTROY]
    if ccache_path is not None:
        root_logger.debug("Ccache path: '{}'".format(ccache_path))
        cmd += ['-c', ccache_path]

    try:
        # env={} keeps the caller's KRB5* environment out of kdestroy.
        ipautil.run(cmd, runas=run_as, env={})
    except ipautil.CalledProcessError as err:
        root_logger.warning(
            "Failed to clear Kerberos credentials cache: {}".format(err))
| tbabej/freeipa | ipaserver/install/installutils.py | Python | gpl-3.0 | 48,113 | 0.00158 |
'''
Test cases for pyclbr.py
Nick Mathewson
'''
from test.test_support import run_unittest, import_module
import sys
from types import ClassType, FunctionType, MethodType, BuiltinFunctionType
import pyclbr
from unittest import TestCase
StaticMethodType = type(staticmethod(lambda: None))
ClassMethodType = type(classmethod(lambda c: None))
# Silence Py3k warning
import_module('commands', deprecated=True)
# This next line triggers an error on old versions of pyclbr.
from commands import getstatus
# Here we test the python class browser code.
#
# The main function in this suite, 'testModule', compares the output
# of pyclbr with the introspected members of a module. Because pyclbr
# is imperfect (as designed), testModule is called with a set of
# members to ignore.
class PyclbrTest(TestCase):
    """Compare pyclbr's module description against live introspection.

    Each assert* helper takes an ``ignore`` collection of names that are
    excluded from the comparison, because pyclbr is imperfect by design.
    """

    def assertListEq(self, l1, l2, ignore):
        ''' succeed iff {l1} - {ignore} == {l2} - {ignore} '''
        missing = (set(l1) ^ set(l2)) - set(ignore)
        if missing:
            print >>sys.stderr, "l1=%r\nl2=%r\nignore=%r" % (l1, l2, ignore)
            self.fail("%r missing" % missing.pop())

    def assertHasattr(self, obj, attr, ignore):
        ''' succeed iff hasattr(obj,attr) or attr in ignore. '''
        if attr in ignore: return
        if not hasattr(obj, attr): print "???", attr
        self.failUnless(hasattr(obj, attr),
                        'expected hasattr(%r, %r)' % (obj, attr))

    def assertHaskey(self, obj, key, ignore):
        ''' succeed iff key in obj or key in ignore. '''
        if key in ignore: return
        if key not in obj:
            print >>sys.stderr, "***", key
        self.assertTrue(key in obj)

    def assertEqualsOrIgnored(self, a, b, ignore):
        ''' succeed iff a == b or a in ignore or b in ignore '''
        if a not in ignore and b not in ignore:
            self.assertEqual(a, b)

    def checkModule(self, moduleName, module=None, ignore=()):
        ''' succeed iff pyclbr.readmodule_ex(modulename) corresponds
        to the actual module object, module.  Any identifiers in
        ignore are ignored.  If no module is provided, the appropriate
        module is loaded with __import__.'''
        if module is None:
            # Import it.
            # ('<silly>' is to work around an API silliness in __import__)
            module = __import__(moduleName, globals(), {}, ['<silly>'])

        dict = pyclbr.readmodule_ex(moduleName)

        def ismethod(oclass, obj, name):
            # Decide whether pyclbr should have reported ``name`` as a
            # method of ``oclass``: plain, static and class methods are
            # accepted, and name-mangled private methods are unmangled
            # before comparing.
            classdict = oclass.__dict__
            if isinstance(obj, FunctionType):
                if not isinstance(classdict[name], StaticMethodType):
                    return False
            else:
                if not isinstance(obj, MethodType):
                    return False
                if obj.im_self is not None:
                    if (not isinstance(classdict[name], ClassMethodType) or
                        obj.im_self is not oclass):
                        return False
                else:
                    if not isinstance(classdict[name], FunctionType):
                        return False

            objname = obj.__name__
            if objname.startswith("__") and not objname.endswith("__"):
                objname = "_%s%s" % (obj.im_class.__name__, objname)
            return objname == name

        # Make sure the toplevel functions and classes are the same.
        for name, value in dict.items():
            if name in ignore:
                continue
            self.assertHasattr(module, name, ignore)
            py_item = getattr(module, name)
            if isinstance(value, pyclbr.Function):
                self.assert_(isinstance(py_item, (FunctionType, BuiltinFunctionType)))
                if py_item.__module__ != moduleName:
                    continue  # skip functions that came from somewhere else
                self.assertEquals(py_item.__module__, value.module)
            else:
                self.failUnless(isinstance(py_item, (ClassType, type)))
                if py_item.__module__ != moduleName:
                    continue  # skip classes that came from somewhere else
                real_bases = [base.__name__ for base in py_item.__bases__]
                pyclbr_bases = [ getattr(base, 'name', base)
                                 for base in value.super ]
                try:
                    self.assertListEq(real_bases, pyclbr_bases, ignore)
                except:
                    print >>sys.stderr, "class=%s" % py_item
                    raise

                actualMethods = []
                for m in py_item.__dict__.keys():
                    if ismethod(py_item, getattr(py_item, m), m):
                        actualMethods.append(m)
                foundMethods = []
                for m in value.methods.keys():
                    if m[:2] == '__' and m[-2:] != '__':
                        # pyclbr reports private methods unmangled; mangle
                        # them here so they match the class __dict__.
                        foundMethods.append('_'+name+m)
                    else:
                        foundMethods.append(m)

                try:
                    self.assertListEq(foundMethods, actualMethods, ignore)
                    self.assertEquals(py_item.__module__, value.module)

                    self.assertEqualsOrIgnored(py_item.__name__, value.name,
                                               ignore)
                    # can't check file or lineno
                except:
                    print >>sys.stderr, "class=%s" % py_item
                    raise

        # Now check for missing stuff.
        def defined_in(item, module):
            if isinstance(item, ClassType):
                return item.__module__ == module.__name__
            if isinstance(item, FunctionType):
                return item.func_globals is module.__dict__
            return False
        for name in dir(module):
            item = getattr(module, name)
            if isinstance(item, (ClassType, FunctionType)):
                if defined_in(item, module):
                    self.assertHaskey(dict, name, ignore)

    def test_easy(self):
        self.checkModule('pyclbr')
        self.checkModule('doctest')
        # Silence Py3k warning
        rfc822 = import_module('rfc822', deprecated=True)
        self.checkModule('rfc822', rfc822)
        self.checkModule('difflib')

    def test_decorators(self):
        # XXX: See comment in pyclbr_input.py for a test that would fail
        #      if it were not commented out.
        #
        self.checkModule('test.pyclbr_input')

    def test_others(self):
        cm = self.checkModule

        # These were once about the 10 longest modules
        cm('random', ignore=('Random',))  # from _random import Random as CoreGenerator
        cm('cgi', ignore=('log',))  # set with = in module
        cm('urllib', ignore=('_CFNumberToInt32',
                             '_CStringFromCFString',
                             '_CFSetup',
                             'getproxies_registry',
                             'proxy_bypass_registry',
                             'proxy_bypass_macosx_sysconf',
                             'open_https',
                             'getproxies_macosx_sysconf',
                             'getproxies_internetconfig',))  # not on all platforms
        cm('pickle')
        cm('aifc', ignore=('openfp',))  # set with = in module
        cm('Cookie')
        cm('sre_parse', ignore=('dump',))  # from sre_constants import *
        cm('pdb')
        cm('pydoc')

        # Tests for modules inside packages
        cm('email.parser')
        cm('test.test_pyclbr')
def test_main():
    # Entry point used by regrtest and the __main__ guard below.
    run_unittest(PyclbrTest)


if __name__ == "__main__":
    test_main()
| babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_pyclbr.py | Python | mit | 7,874 | 0.002159 |
from givabit.backend.charity import Charity
from givabit.backend.errors import MissingValueException, MultipleValueException
from givabit.test_common import test_data
from givabit.test_common import test_utils
class CharityRepositoryTest(test_utils.TestCase):
    """Tests for the charity repository's listing and lookup behaviour."""

    def setUp(self):
        super(CharityRepositoryTest, self).setUp()
        # Fixture: every known test charity is stored up front.
        self.all_charities = [test_data.c1, test_data.c2, test_data.c3, test_data.c4]
        for charity in self.all_charities:
            self.charity_repo.add_or_update_charity(charity)

    def test_lists_charities(self):
        self.assertSequenceEqual(self.charity_repo.list_charities(), self.all_charities)

    def test_gets_single_charity(self):
        self.assertEqual(self.charity_repo.get_charity('Shelter'), test_data.c1)
        self.assertEqual(self.charity_repo.get_charity('Oxfam'), test_data.c2)

        with self.assertRaises(MissingValueException):
            self.charity_repo.get_charity('Does not exist')

        # The previous bare try/except passed silently when no exception
        # was raised at all; assertRaises makes the expectation explicit.
        with self.assertRaises(MultipleValueException) as cm:
            self.charity_repo.get_charity('BHF')
        self.assertSequenceEqual(cm.exception.values, [test_data.c3, test_data.c4])

    def test_gets_charity_by_id(self):
        self.assertEquals(self.charity_repo.get_charity(id=test_data.c1.key().id()), test_data.c1)

    def test_getting_missing_charity_by_id_throws(self):
        # Find an id that no stored charity uses.
        missing_id = 0
        while missing_id in map(lambda charity: charity.key().id(), self.all_charities):
            missing_id += 1
        with self.assertRaises(MissingValueException):
            self.charity_repo.get_charity(id=missing_id)
| illicitonion/givabit | src/givabit/backend/charity_repository_test.py | Python | apache-2.0 | 1,597 | 0.004383 |
"""CPStats, a package for collecting and reporting on program statistics.
Overview
========
Statistics about program operation are an invaluable monitoring and debugging
tool. Unfortunately, the gathering and reporting of these critical values is
usually ad-hoc. This package aims to add a centralized place for gathering
statistical performance data, a structure for recording that data which
provides for extrapolation of that data into more useful information,
and a method of serving that data to both human investigators and
monitoring software. Let's examine each of those in more detail.
Data Gathering
--------------
Just as Python's `logging` module provides a common importable for gathering
and sending messages, performance statistics would benefit from a similar
common mechanism, and one that does *not* require each package which wishes
to collect stats to import a third-party module. Therefore, we choose to
re-use the `logging` module by adding a `statistics` object to it.
That `logging.statistics` object is a nested dict. It is not a custom class,
because that would:
1. require libraries and applications to import a third-party module in
order to participate
2. inhibit innovation in extrapolation approaches and in reporting tools, and
3. be slow.
There are, however, some specifications regarding the structure of the dict.::
{
+----"SQLAlchemy": {
| "Inserts": 4389745,
| "Inserts per Second":
| lambda s: s["Inserts"] / (time() - s["Start"]),
| C +---"Table Statistics": {
| o | "widgets": {-----------+
N | l | "Rows": 1.3M, | Record
a | l | "Inserts": 400, |
m | e | },---------------------+
e | c | "froobles": {
s | t | "Rows": 7845,
p | i | "Inserts": 0,
a | o | },
c | n +---},
e | "Slow Queries":
| [{"Query": "SELECT * FROM widgets;",
| "Processing Time": 47.840923343,
| },
| ],
+----},
}
The `logging.statistics` dict has four levels. The topmost level is nothing
more than a set of names to introduce modularity, usually along the lines of
package names. If the SQLAlchemy project wanted to participate, for example,
it might populate the item `logging.statistics['SQLAlchemy']`, whose value
would be a second-layer dict we call a "namespace". Namespaces help multiple
packages to avoid collisions over key names, and make reports easier to read,
to boot. The maintainers of SQLAlchemy should feel free to use more than one
namespace if needed (such as 'SQLAlchemy ORM'). Note that there are no case
or other syntax constraints on the namespace names; they should be chosen
to be maximally readable by humans (neither too short nor too long).
Each namespace, then, is a dict of named statistical values, such as
'Requests/sec' or 'Uptime'. You should choose names which will look
good on a report: spaces and capitalization are just fine.
In addition to scalars, values in a namespace MAY be a (third-layer)
dict, or a list, called a "collection". For example, the CherryPy
:class:`StatsTool` keeps track of what each request is doing (or has most
recently done) in a 'Requests' collection, where each key is a thread ID; each
value in the subdict MUST be a fourth dict (whew!) of statistical data about
each thread. We call each subdict in the collection a "record". Similarly,
the :class:`StatsTool` also keeps a list of slow queries, where each record
contains data about each slow query, in order.
Values in a namespace or record may also be functions, which brings us to:
Extrapolation
-------------
The collection of statistical data needs to be fast, as close to unnoticeable
as possible to the host program. That requires us to minimize I/O, for example,
but in Python it also means we need to minimize function calls. So when you
are designing your namespace and record values, try to insert the most basic
scalar values you already have on hand.
When it comes time to report on the gathered data, however, we usually have
much more freedom in what we can calculate. Therefore, whenever reporting
tools (like the provided :class:`StatsPage` CherryPy class) fetch the contents
of `logging.statistics` for reporting, they first call
`extrapolate_statistics` (passing the whole `statistics` dict as the only
argument). This makes a deep copy of the statistics dict so that the
reporting tool can both iterate over it and even change it without harming
the original. But it also expands any functions in the dict by calling them.
For example, you might have a 'Current Time' entry in the namespace with the
value "lambda scope: time.time()". The "scope" parameter is the current
namespace dict (or record, if we're currently expanding one of those
instead), allowing you access to existing static entries. If you're truly
evil, you can even modify more than one entry at a time.
However, don't try to calculate an entry and then use its value in further
extrapolations; the order in which the functions are called is not guaranteed.
This can lead to a certain amount of duplicated work (or a redesign of your
schema), but that's better than complicating the spec.
After the whole thing has been extrapolated, it's time for:
Reporting
---------
The :class:`StatsPage` class grabs the `logging.statistics` dict, extrapolates
it all, and then transforms it to HTML for easy viewing. Each namespace gets
its own header and attribute table, plus an extra table for each collection.
This is NOT part of the statistics specification; other tools can format how
they like.
You can control which columns are output and how they are formatted by updating
StatsPage.formatting, which is a dict that mirrors the keys and nesting of
`logging.statistics`. The difference is that, instead of data values, it has
formatting values. Use None for a given key to indicate to the StatsPage that a
given column should not be output. Use a string with formatting
(such as '%.3f') to interpolate the value(s), or use a callable (such as
lambda v: v.isoformat()) for more advanced formatting. Any entry which is not
mentioned in the formatting dict is output unchanged.
Monitoring
----------
Although the HTML output takes pains to assign unique id's to each <td> with
statistical data, you're probably better off fetching /cpstats/data, which
outputs the whole (extrapolated) `logging.statistics` dict in JSON format.
That is probably easier to parse, and doesn't have any formatting controls,
so you get the "original" data in a consistently-serialized format.
Note: there's no treatment yet for datetime objects. Try time.time() instead
for now if you can. Nagios will probably thank you.
Turning Collection Off
----------------------
It is recommended each namespace have an "Enabled" item which, if False,
stops collection (but not reporting) of statistical data. Applications
SHOULD provide controls to pause and resume collection by setting these
entries to False or True, if present.
Usage
=====
To collect statistics on CherryPy applications::
from cherrypy.lib import cpstats
appconfig['/']['tools.cpstats.on'] = True
To collect statistics on your own code::
import logging
# Initialize the repository
if not hasattr(logging, 'statistics'): logging.statistics = {}
# Initialize my namespace
mystats = logging.statistics.setdefault('My Stuff', {})
# Initialize my namespace's scalars and collections
mystats.update({
'Enabled': True,
'Start Time': time.time(),
'Important Events': 0,
'Events/Second': lambda s: (
(s['Important Events'] / (time.time() - s['Start Time']))),
})
...
for event in events:
...
# Collect stats
if mystats.get('Enabled', False):
mystats['Important Events'] += 1
To report statistics::
root.cpstats = cpstats.StatsPage()
To format statistics reports::
See 'Reporting', above.
"""
import logging
import os
import sys
import threading
import time
import cherrypy
from cherrypy._cpcompat import json
# ------------------------------- Statistics -------------------------------- #

# The shared repository lives on the stdlib logging module so that any
# library can participate without importing a third-party package (see
# the module docstring); create it only if nobody did so before us.
if not hasattr(logging, 'statistics'):
    logging.statistics = {}
def extrapolate_statistics(scope):
    """Return an extrapolated copy of the given scope.

    Nested dicts are copied recursively, lists/tuples become lists of
    extrapolated records, and callables are replaced by the result of
    calling them with the current scope.
    """
    expanded = {}
    for key, value in list(scope.items()):
        if isinstance(value, dict):
            expanded[key] = extrapolate_statistics(value)
        elif isinstance(value, (list, tuple)):
            expanded[key] = [extrapolate_statistics(record)
                             for record in value]
        elif hasattr(value, '__call__'):
            expanded[key] = value(scope)
        else:
            expanded[key] = value
    return expanded
# -------------------- CherryPy Applications Statistics --------------------- #

# Namespace for the StatsTool below.  Scalars are updated as requests are
# served; the lambdas are derived values computed lazily at report time
# by extrapolate_statistics (each receives this namespace dict as ``s``).
appstats = logging.statistics.setdefault('CherryPy Applications', {})
appstats.update({
    'Enabled': True,
    # The ``and ... or`` guards avoid division by zero before the first
    # request has been counted.
    'Bytes Read/Request': lambda s: (
        s['Total Requests'] and
        (s['Total Bytes Read'] / float(s['Total Requests'])) or
        0.0
    ),
    'Bytes Read/Second': lambda s: s['Total Bytes Read'] / s['Uptime'](s),
    'Bytes Written/Request': lambda s: (
        s['Total Requests'] and
        (s['Total Bytes Written'] / float(s['Total Requests'])) or
        0.0
    ),
    'Bytes Written/Second': lambda s: (
        s['Total Bytes Written'] / s['Uptime'](s)
    ),
    'Current Time': lambda s: time.time(),
    'Current Requests': 0,
    'Requests/Second': lambda s: float(s['Total Requests']) / s['Uptime'](s),
    'Server Version': cherrypy.__version__,
    'Start Time': time.time(),
    'Total Bytes Read': 0,
    'Total Bytes Written': 0,
    'Total Requests': 0,
    'Total Time': 0,
    'Uptime': lambda s: time.time() - s['Start Time'],
    # Per-thread records of in-flight (or most recent) requests,
    # keyed by thread id.
    'Requests': {},
})

# Elapsed time of a request record; used while the request is in flight.
proc_time = lambda s: time.time() - s['Start Time']
class ByteCountWrapper(object):
    """Wraps a file-like object, counting the number of bytes read.

    The running total is exposed as the ``bytes_read`` attribute.
    """

    def __init__(self, rfile):
        self.rfile = rfile
        self.bytes_read = 0

    def read(self, size=-1):
        data = self.rfile.read(size)
        self.bytes_read += len(data)
        return data

    def readline(self, size=-1):
        data = self.rfile.readline(size)
        self.bytes_read += len(data)
        return data

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def __next__(self):
        # Delegate via the next() builtin so the wrapped object may
        # implement either iteration protocol.
        data = next(self.rfile)
        self.bytes_read += len(data)
        return data

    # The class previously only defined next(), which broke iteration on
    # Python 3 (the rest of this module explicitly supports 3.3+); keep
    # the Python 2 spelling as an alias.
    next = __next__
# Mean processing time of a URI set record; the ``and ... or`` guards
# against division by zero before the first sample.
average_uriset_time = lambda s: s['Count'] and (s['Sum'] / s['Count']) or 0
def _get_threading_ident():
if sys.version_info >= (3, 3):
return threading.get_ident()
return threading._get_ident()
class StatsTool(cherrypy.Tool):
    """Record various information about the current request.

    record_start runs when the tool is set up for a request; record_stop
    is registered as an ``on_end_request`` hook.  Both update the shared
    ``appstats`` namespace and its per-thread 'Requests' records.
    """

    def __init__(self):
        cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop)

    def _setup(self):
        """Hook this tool into cherrypy.request.

        The standard CherryPy request object will automatically call this
        method when the tool is "turned on" in config.
        """
        # Collection can be paused globally via appstats['Enabled'].
        if appstats.get('Enabled', False):
            cherrypy.Tool._setup(self)
            self.record_start()

    def record_start(self):
        """Record the beginning of a request."""
        request = cherrypy.serving.request
        # Wrap rfile once so request body bytes are counted as they are read.
        if not hasattr(request.rfile, 'bytes_read'):
            request.rfile = ByteCountWrapper(request.rfile)
            request.body.fp = request.rfile

        r = request.remote

        appstats['Current Requests'] += 1
        appstats['Total Requests'] += 1
        appstats['Requests'][_get_threading_ident()] = {
            'Bytes Read': None,
            'Bytes Written': None,
            # Use a lambda so the ip gets updated by tools.proxy later
            'Client': lambda s: '%s:%s' % (r.ip, r.port),
            'End Time': None,
            'Processing Time': proc_time,
            'Request-Line': request.request_line,
            'Response Status': None,
            'Start Time': time.time(),
        }

    def record_stop(
            self, uriset=None, slow_queries=1.0, slow_queries_count=100,
            debug=False, **kwargs):
        """Record the end of a request.

        :param uriset: optional name under which per-URI-set min/max/avg
            timings are aggregated
        :param slow_queries: threshold in seconds above which the request
            record is copied into the 'Slow Queries' list (0/None disables)
        :param slow_queries_count: maximum length of the 'Slow Queries' list
        :param debug: log the finished record via cherrypy.log
        """
        resp = cherrypy.serving.response
        # Find this thread's record created by record_start.
        w = appstats['Requests'][_get_threading_ident()]

        r = cherrypy.request.rfile.bytes_read
        w['Bytes Read'] = r
        appstats['Total Bytes Read'] += r

        if resp.stream:
            # Streamed responses have no known length up front.
            w['Bytes Written'] = 'chunked'
        else:
            cl = int(resp.headers.get('Content-Length', 0))
            w['Bytes Written'] = cl
            appstats['Total Bytes Written'] += cl

        w['Response Status'] = getattr(
            resp, 'output_status', None) or resp.status

        w['End Time'] = time.time()
        p = w['End Time'] - w['Start Time']
        w['Processing Time'] = p
        appstats['Total Time'] += p

        appstats['Current Requests'] -= 1

        if debug:
            cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS')

        if uriset:
            rs = appstats.setdefault('URI Set Tracking', {})
            r = rs.setdefault(uriset, {
                'Min': None, 'Max': None, 'Count': 0, 'Sum': 0,
                'Avg': average_uriset_time})
            if r['Min'] is None or p < r['Min']:
                r['Min'] = p
            if r['Max'] is None or p > r['Max']:
                r['Max'] = p
            r['Count'] += 1
            r['Sum'] += p

        if slow_queries and p > slow_queries:
            # Keep a bounded FIFO of the most recent slow requests.
            sq = appstats.setdefault('Slow Queries', [])
            sq.append(w.copy())
            if len(sq) > slow_queries_count:
                sq.pop(0)
# Register the tool so apps can enable it via 'tools.cpstats.on'.
cherrypy.tools.cpstats = StatsTool()


# ---------------------- CherryPy Statistics Reporting ---------------------- #

# Location of this module on disk.
thisdir = os.path.abspath(os.path.dirname(__file__))

# Sentinel distinguishing "no value given" from a legitimate None.
missing = object()

# Render POSIX timestamps for reports; both interpret the value as UTC.
locale_date = lambda v: time.strftime('%c', time.gmtime(v))
iso_format = lambda v: time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(v))
def pause_resume(ns):
    """Build a formatter rendering Pause/Resume forms for namespace *ns*.

    The returned callable takes the namespace's 'Enabled' flag and emits two
    inline POST forms; the button that would be a no-op in the current state
    is rendered disabled.
    """
    def _pause_resume(enabled):
        # Exactly one of the two buttons is disabled, depending on state.
        if enabled:
            pause_disabled = ''
            resume_disabled = 'disabled="disabled" '
        else:
            pause_disabled = 'disabled="disabled" '
            resume_disabled = ''
        return """
<form action="pause" method="POST" style="display:inline">
<input type="hidden" name="namespace" value="%s" />
<input type="submit" value="Pause" %s/>
</form>
<form action="resume" method="POST" style="display:inline">
<input type="hidden" name="namespace" value="%s" />
<input type="submit" value="Resume" %s/>
</form>
""" % (ns, pause_disabled, ns, resume_disabled)
    return _pause_resume
class StatsPage(object):
    """Web page rendering logging.statistics as HTML (plus JSON endpoint).

    The 'formatting' map drives presentation per namespace/key:
      * None          -> suppress the column entirely
      * '%'-style str -> printf-format the value
      * callable      -> called with the value, returns display string
    """

    formatting = {
        'CherryPy Applications': {
            'Enabled': pause_resume('CherryPy Applications'),
            'Bytes Read/Request': '%.3f',
            'Bytes Read/Second': '%.3f',
            'Bytes Written/Request': '%.3f',
            'Bytes Written/Second': '%.3f',
            'Current Time': iso_format,
            'Requests/Second': '%.3f',
            'Start Time': iso_format,
            'Total Time': '%.3f',
            'Uptime': '%.3f',
            # Nested maps format the collection-valued keys of this namespace.
            'Slow Queries': {
                'End Time': None,
                'Processing Time': '%.3f',
                'Start Time': iso_format,
            },
            'URI Set Tracking': {
                'Avg': '%.3f',
                'Max': '%.3f',
                'Min': '%.3f',
                'Sum': '%.3f',
            },
            'Requests': {
                'Bytes Read': '%s',
                'Bytes Written': '%s',
                'End Time': None,
                'Processing Time': '%.3f',
                'Start Time': None,
            },
        },
        'CherryPy WSGIServer': {
            'Enabled': pause_resume('CherryPy WSGIServer'),
            'Connections/second': '%.3f',
            'Start time': iso_format,
        },
    }

    @cherrypy.expose
    def index(self):
        """Stream the statistics report as a complete HTML document."""
        # Transform the raw data into pretty output for HTML
        yield """
<html>
<head>
<title>Statistics</title>
<style>
th, td {
padding: 0.25em 0.5em;
border: 1px solid #666699;
}
table {
border-collapse: collapse;
}
table.stats1 {
width: 100%;
}
table.stats1 th {
font-weight: bold;
text-align: right;
background-color: #CCD5DD;
}
table.stats2, h2 {
margin-left: 50px;
}
table.stats2 th {
font-weight: bold;
text-align: center;
background-color: #CCD5DD;
}
</style>
</head>
<body>
"""
        for title, scalars, collections in self.get_namespaces():
            yield """
<h1>%s</h1>
<table class='stats1'>
<tbody>
""" % title
            # Scalars are laid out three name/value pairs per table row.
            # NOTE: the template below is filled via vars(), so the local
            # names 'title', 'key' and 'value' must not be renamed.
            for i, (key, value) in enumerate(scalars):
                colnum = i % 3
                if colnum == 0:
                    yield """
<tr>"""
                yield (
                    """
<th>%(key)s</th><td id='%(title)s-%(key)s'>%(value)s</td>""" %
                    vars()
                )
                if colnum == 2:
                    yield """
</tr>"""
            # Pad the final, partially-filled row with empty cells.
            # (NOTE(review): 'colnum' is undefined here if 'scalars' is
            # empty -- presumably every namespace has at least one scalar.)
            if colnum == 0:
                yield """
<th></th><td></td>
<th></th><td></td>
</tr>"""
            elif colnum == 1:
                yield """
<th></th><td></td>
</tr>"""
            yield """
</tbody>
</table>"""
            # Each collection (dict- or list-valued stat) gets its own table.
            for subtitle, headers, subrows in collections:
                yield """
<h2>%s</h2>
<table class='stats2'>
<thead>
<tr>""" % subtitle
                for key in headers:
                    yield """
<th>%s</th>""" % key
                yield """
</tr>
</thead>
<tbody>"""
                for subrow in subrows:
                    yield """
<tr>"""
                    for value in subrow:
                        yield """
<td>%s</td>""" % value
                    yield """
</tr>"""
                yield """
</tbody>
</table>"""
        yield """
</body>
</html>
"""

    def get_namespaces(self):
        """Yield (title, scalars, collections) for each namespace."""
        s = extrapolate_statistics(logging.statistics)
        for title, ns in sorted(s.items()):
            scalars = []
            collections = []
            ns_fmt = self.formatting.get(title, {})
            for k, v in sorted(ns.items()):
                fmt = ns_fmt.get(k, {})
                if isinstance(v, dict):
                    # Dict-valued stat -> keyed table ('ID' is the dict key).
                    headers, subrows = self.get_dict_collection(v, fmt)
                    collections.append((k, ['ID'] + headers, subrows))
                elif isinstance(v, (list, tuple)):
                    headers, subrows = self.get_list_collection(v, fmt)
                    collections.append((k, headers, subrows))
                else:
                    # Scalar: apply formatting rule (note: 'format' shadows
                    # the builtin of the same name, locally only).
                    format = ns_fmt.get(k, missing)
                    if format is None:
                        # Don't output this column.
                        continue
                    if hasattr(format, '__call__'):
                        v = format(v)
                    elif format is not missing:
                        v = format % v
                    scalars.append((k, v))
            yield title, scalars, collections

    def get_dict_collection(self, v, formatting):
        """Return ([headers], [rows]) for the given collection."""
        # E.g., the 'Requests' dict.
        headers = []
        try:
            # python2
            vals = v.itervalues()
        except AttributeError:
            # python3
            vals = v.values()
        # First pass: union of keys across records, minus suppressed ones.
        for record in vals:
            for k3 in record:
                format = formatting.get(k3, missing)
                if format is None:
                    # Don't output this column.
                    continue
                if k3 not in headers:
                    headers.append(k3)
        headers.sort()
        subrows = []
        # Second pass: one row per record, prefixed by the record's key.
        for k2, record in sorted(v.items()):
            subrow = [k2]
            for k3 in headers:
                v3 = record.get(k3, '')
                format = formatting.get(k3, missing)
                if format is None:
                    # Don't output this column.
                    continue
                if hasattr(format, '__call__'):
                    v3 = format(v3)
                elif format is not missing:
                    v3 = format % v3
                subrow.append(v3)
            subrows.append(subrow)
        return headers, subrows

    def get_list_collection(self, v, formatting):
        """Return ([headers], [subrows]) for the given collection."""
        # E.g., the 'Slow Queries' list.
        headers = []
        # Union of keys across all records, excluding suppressed columns.
        for record in v:
            for k3 in record:
                format = formatting.get(k3, missing)
                if format is None:
                    # Don't output this column.
                    continue
                if k3 not in headers:
                    headers.append(k3)
        headers.sort()
        subrows = []
        for record in v:
            subrow = []
            for k3 in headers:
                v3 = record.get(k3, '')
                format = formatting.get(k3, missing)
                if format is None:
                    # Don't output this column.
                    continue
                if hasattr(format, '__call__'):
                    v3 = format(v3)
                elif format is not missing:
                    v3 = format % v3
                subrow.append(v3)
            subrows.append(subrow)
        return headers, subrows

    # The JSON endpoint is only defined when a json module was importable.
    if json is not None:
        @cherrypy.expose
        def data(self):
            """Return the raw (extrapolated) statistics as JSON."""
            s = extrapolate_statistics(logging.statistics)
            cherrypy.response.headers['Content-Type'] = 'application/json'
            return json.dumps(s, sort_keys=True, indent=4)

    @cherrypy.expose
    def pause(self, namespace):
        """Disable stats collection for *namespace*, then redirect back."""
        logging.statistics.get(namespace, {})['Enabled'] = False
        raise cherrypy.HTTPRedirect('./')
    # POST-only: mutating endpoints must not be reachable via GET.
    pause.cp_config = {'tools.allow.on': True,
                       'tools.allow.methods': ['POST']}

    @cherrypy.expose
    def resume(self, namespace):
        """Re-enable stats collection for *namespace*, then redirect back."""
        logging.statistics.get(namespace, {})['Enabled'] = True
        raise cherrypy.HTTPRedirect('./')
    resume.cp_config = {'tools.allow.on': True,
                        'tools.allow.methods': ['POST']}
| VeNoMouS/Sick-Beard | lib/cherrypy/lib/cpstats.py | Python | gpl-3.0 | 22,932 | 0.000218 |
import proxybase
import xmlrpclib
import mimetypes
import os
import data
################################################################################
""" getInst
returns an instance of a wpproxy object
"""
def getInst(url, user, password):
    """Factory: return a WordpressProxy bound to *url* with the given
    credentials."""
    return WordpressProxy(url, user, password)
################################################################################
"""WordpressProxy
The following defines a blogproxy class that inherits methods from the
xmlrpclib. To make this work, the __init__ method of the ancestor
class(xmlrpclib.ServerProxy in this case) must be called explicitly as
part of the initialization. From that point, the various server methods
are "directly" accessible through my blogproxy class
"""
class WordpressProxy(proxybase.BlogProxy):
    """XML-RPC proxy for a Wordpress blog (Python 2).

    Each public method first tries the modern 'wp.*' XML-RPC API and, on an
    xmlrpclib.Fault (method unsupported by older Wordpress), falls back to
    the legacy metaWeblog/mt/blogger API.  ProtocolErrors always abort with
    a proxybase.ProxyError.
    """
    ############################################################################
    """getCategories
    """
    def getCategories(self):
        """Return (and cache) the blog's categories as a list of dicts."""
        def _tryMethods(blogid):
            # Newer API: wp.getTerms on the 'category' taxonomy.
            try:
                response = self.wp.getTerms(blogid,
                                            self._username,
                                            self._password,
                                            'category',
                                            {})
            except xmlrpclib.Fault:
                pass
            except xmlrpclib.ProtocolError, error:
                raise proxybase.ProxyError("wp.getCategories", error)
            else:
                # Normalize term records to the legacy category dict shape.
                return [ { 'categoryName' : cat['name'],
                           'parentId' : cat['parent'],
                           'categoryId' : cat['term_id'],
                           'categoryDescription' : cat['description'],} for cat in response ]
            # fallback to old method
            try:
                return self.metaWeblog.getCategories(blogid,
                                                     self._username,
                                                     self._password)
            except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
                raise proxybase.ProxyError("wp.getCategories", error)
        ########################################################################
        # getCategories starts here...
        if self._categories == None:
            # NOTE(review): self._getBlogID is passed *uncalled* (no parens),
            # so blogid inside _tryMethods is a bound method, not an id --
            # looks like a bug; confirm against a live server.
            self._categories = _tryMethods(self._getBlogID)
        return self._categories
    ############################################################################
    """newCategory
    """
    def newCategory(self, newcat, parent, slug='', desc=''):
        """Create category *newcat* under *parent*; return the new id."""
        blogid = self._getBlogID()
        # start by trying newer Wordpress API call
        term = { 'name' : newcat,
                 'taxonomy' : 'category',
                 'slug' : slug,
                 'description' : desc}
        # it appears that if parent is 0, the call won't work to add the
        # category, but will work if parent is not present.
        if int(parent) != 0:
            term['parent'] = int(parent)
        try:
            return self.wp.newTerm(blogid,
                                   self._username,
                                   self._password,
                                   term)
        except xmlrpclib.Fault:
            pass
        except xmlrpclib.ProtocolError, error:
            raise proxybase.ProxyError("wp.newCategory", error)
        # fallback to old call
        try:
            return self.wp.newCategory(blogid,
                                       self._username,
                                       self._password,
                                       { 'name' : newcat,
                                         'slug' : slug,
                                         'description' : desc,
                                         'parent_id' : parent})
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.newCategory", error)
    ############################################################################
    """getRecentTitles
    """
    def getRecentTitles(self, number):
        """Return id/title/date dicts for the *number* most recent posts."""
        blogid = self._getBlogID()
        # First, try the Wordpress XMLRPC API calls
        try:
            response = self.wp.getPosts(blogid,
                                        self._username,
                                        self._password,
                                        { # filter parameter
                                          'post_type' : 'post', # or 'page', 'attachment'
                                          'post_status' : 'publish', # or 'draft', 'private, 'pending'
                                          'number' : number,
                                          'offset' : 0, # offset by # posts
                                          'orderby' : '', # appears to have no effect
                                          'order' : '', # appears to have no effect
                                        },
                                        ['post_id', 'post_title', 'post_date'])
        except xmlrpclib.Fault:
            pass
        except xmlrpclib.ProtocolError, error:
            raise proxybase.ProxyError("wp.getRecentTitles", error)
        else:
            # Normalize field names to the legacy MT response shape.
            return [{'postid' : postmeta['post_id'],
                     'title' : postmeta['post_title'],
                     'dateCreated' : postmeta['post_date']} for postmeta in response ]
        # The Wordpress XMLRPC API is not available, try the old MT API
        try:
            return self.mt.getRecentPostTitles(blogid,
                                               self._username,
                                               self._password,
                                               number)
        except (xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.getRecentTitles", error)
    ############################################################################
    """publishPost
    """
    def publishPost(self, post):
        """Publish *post* (a data.Post); return the new post id."""
        blogid = self._getBlogID()
        try:
            return self.wp.newPost(blogid,
                                   self._username,
                                   self._password,
                                   post.wpStruct)
        except xmlrpclib.Fault:
            pass
        except xmlrpclib.ProtocolError, error:
            raise proxybase.ProxyError("wp.publishPost", error)
        # Legacy API needs the metaWeblog struct and an explicit publish flag.
        try:
            return self.metaWeblog.newPost(blogid,
                                           self._username,
                                           self._password,
                                           post.metaweblogStruct,
                                           post.publish)
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.publishPost", error)
    ############################################################################
    """editPost
    """
    def editPost(self, postid, post):
        """Update the post *postid* with *post*'s content; return postid."""
        try:
            if self.wp.editPost(self._getBlogID(),
                                self._username,
                                self._password,
                                postid,
                                post.wpStruct):
                return postid
            # error updating post
            raise proxybase.ProxyError("wp.editPost", "post not updated")
        except xmlrpclib.Fault as err:
            # Method unsupported -- fall through to metaWeblog (err unused).
            pass
        except xmlrpclib.ProtocolError, error:
            raise proxybase.ProxyError("wp.editPost", error)
        try:
            self.metaWeblog.editPost(postid,
                                     self._username,
                                     self._password,
                                     post.metaweblogStruct,
                                     post.publish)
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.editPost", error)
        return postid
    ############################################################################
    """getPost
    """
    def getPost(self, postid):
        """Fetch post *postid* and return it wrapped in a data.Post."""
        blogid = self._getBlogID()
        try:
            response = self.wp.getPost(blogid,
                                       self._username,
                                       self._password,
                                       postid,
                                       ['postid',
                                        'post_title',
                                        'post_content',
                                        'post_excerpt',
                                        'terms'])
        except xmlrpclib.Fault:
            pass
        except xmlrpclib.ProtocolError, error:
            raise proxybase.ProxyError("wp.getPost", error)
        else:
            # Second arg tells data.Post which response dialect to parse.
            return data.Post(response, 'wp')
        # fallback to older XMLRPC method
        try:
            response = self.metaWeblog.getPost(postid,
                                               self._username,
                                               self._password)
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.getPost", error)
        else:
            return data.Post(response, 'metaweblog')
    ############################################################################
    """deletePost
    """
    def deletePost(self, postid):
        """Delete post *postid*; return the server's status result."""
        blogid = self._getBlogID()
        # try the newer Wordpress XMLRPC API first...
        try:
            return self.wp.deletePost(blogid,
                                      self._username,
                                      self._password,
                                      postid)
        except xmlrpclib.Fault:
            pass
        except xmlrpclib.ProtocolError, error:
            raise proxybase.ProxyError("wp.deletePost", error)
        # if Wordpress API failed, try older XMLRPC API call
        try:
            return self.blogger.deletePost('',
                                           postid,
                                           self._username,
                                           self._password,
                                           True)
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.deletePost", error)
    ############################################################################
    """upload
    """
    def upload(self, filename):
        """Upload the media file *filename* to the blog.

        Relative paths are anchored at the user's home directory.
        """
        #######################################################################
        """_tryMethods
        Helper function to maintain compatibility with older version of
        Wordpress. Tries the newest methods first and then older ones if
        the newer fail.
        """
        def _tryMethods(blogid, mediaStruct):
            # try newer Wordpress API first...
            try:
                return self.wp.uploadFile(blogid,
                                          self._username,
                                          self._password,
                                          mediaStruct )
            except xmlrpclib.Fault:
                pass
            except xmlrpclib.ProtocolError, error:
                raise proxybase.ProxyError("wp.upload", error)
            # fall back to older XMLRPC API call
            try:
                return self.metaWeblog.newMediaObject(blogid,
                                                      self._username,
                                                      self._password,
                                                      mediaStruct )
            except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
                raise proxybase.ProxyError("wp.upload", error)
        #######################################################################
        # upload starts here...
        # see if user supplied full path name
        if os.path.isfile(filename) != 1:
            # if not, anchor to user's home directory
            if not filename.startswith('/'):
                filename = '/' + filename
            filename = os.path.expanduser('~') + filename
        try:
            f = open(filename, 'rb')
            mediaData = f.read()
            f.close()
        except IOError, error:
            raise proxybase.ProxyError("wp.upload", error)
        mediaStruct = {}
        mediaStruct['type'], encoding = mimetypes.guess_type(filename)
        if mediaStruct['type'] == None:
            # NOTE(review): 'sys' is never imported in this module, so this
            # branch would die with a NameError instead of exiting cleanly.
            print "Can't determine MIME type for %s" % filename
            sys.exit()
        mediaStruct['name'] = os.path.basename(filename)
        mediaStruct['bits'] = xmlrpclib.Binary(mediaData)
        return _tryMethods(self._getBlogID(), mediaStruct)
    ############################################################################
    """getComments
    """
    def getComments(self, postid):
        """Return all approved comments for post *postid*."""
        blogid = self._getBlogID()
        count = self._getCommentCount(postid)
        comment_struct = {}
        comment_struct['post_id'] = postid
        comment_struct['status'] = ''
        comment_struct['offset'] = 0
        # Request exactly as many comments as are approved.
        comment_struct['number'] = count['approved']
        try:
            comments = self.wp.getComments(blogid,
                                           self._username,
                                           self._password,
                                           comment_struct)
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.getComments", error)
        return comments
    ############################################################################
    """newComment
    """
    def newComment(self, postid, comment):
        """Attach *comment* (a struct) to post *postid*; return its id."""
        blogid = self._getBlogID()
        try:
            commentid = self.wp.newComment(blogid,
                                           self._username,
                                           self._password,
                                           postid,
                                           comment)
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.newComment", error)
        return commentid
    ############################################################################
    """deleteComment
    """
    def deleteComment(self, commentid):
        """Delete comment *commentid*; return the server status."""
        blogid = self._getBlogID()
        try:
            status = self.wp.deleteComment(blogid,
                                           self._username,
                                           self._password,
                                           commentid)
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.deleteComment", error)
        return status
    ############################################################################
    """editComment
    """
    def editComment(self, commentid, comment):
        """Replace comment *commentid*'s content with *comment*."""
        blogid = self._getBlogID()
        try:
            status = self.wp.editComment(blogid,
                                         self._username,
                                         self._password,
                                         commentid,
                                         comment)
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.editComment", error)
        return status
    ############################################################################
    """getComment
    """
    def getComment(self, commentid):
        """Fetch a single comment struct by id."""
        blogid = self._getBlogID()
        try:
            status = self.wp.getComment(blogid,
                                        self._username,
                                        self._password,
                                        commentid)
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            # NOTE(review): error label says "editComment" -- probably a
            # copy/paste slip; should read "wp.getComment".
            raise proxybase.ProxyError("wp.editComment", error)
        return status
    ##################### START PRIVATE METHODS ################################
    ############################################################################
    """_getBlogID
    """
    def _getBlogID(self):
        """Resolve self._blogname to its blog id, or raise ProxyError."""
        self._getUsersBlogs()
        for blog in self._blogs:
            if self._blogname == blog['blogName']:
                return blog['blogid']
        raise proxybase.ProxyError("wp._getBlogID",
                                   'bad name: %s' % self._blogname)
    ############################################################################
    """_getUsersBlogs
    """
    def _getUsersBlogs(self):
        # a little trick to avoid repeatedly calling the xmlrpc method
        # it may not be necessary, we'll figure that out later
        if self._blogs == None:
            try:
                self._blogs = self.wp.getUsersBlogs(self._username,
                                                    self._password)
            except (xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
                raise proxybase.ProxyError('wp._getUsersBlogs', error)
    ############################################################################
    """_getCommentCount
    """
    def _getCommentCount(self, postid):
        """Return the comment-count struct (approved/awaiting/spam/total)."""
        blogid = self._getBlogID()
        try:
            count = self.wp.getCommentCount(blogid,
                                            self._username,
                                            self._password,
                                            postid)
        except(xmlrpclib.Fault, xmlrpclib.ProtocolError), error:
            raise proxybase.ProxyError("wp.getCommentCount", error)
        return count
| lama7/blogtool | blogtool/xmlproxy/wp_proxy.py | Python | mit | 17,754 | 0.005633 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# import funkcí z jiného adresáře
import sys
import os.path
from loguru import logger
path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../experiments/"))
# sys.path.append(os.path.join(path_to_script, "../extern/sed3/"))
# sys.path.append(os.path.join(path_to_script, "../src/"))
import unittest
import pytest
import experiments.tiled_liver_statistics as tls
class TextureFeaturesExperimentTest(unittest.TestCase):
    """Slow integration test driving tiled_liver_statistics end to end."""

    # @unittest.skip("comment after implementation")
    @pytest.mark.slow
    def test_run_experiments(self):
        """Run the experiment pipeline on sample data with a small
        feature-function x classifier product list; passes if no exception
        is raised (no output assertions are made).
        """
        # Heavy project/sklearn imports are deferred into the test body so
        # that test collection stays cheap when they are unavailable.
        import lisa.texture_features as tfeat
        from sklearn import svm
        from sklearn.naive_bayes import GaussianNB
        import classification
        self.dcmdir = os.path.join(
            path_to_script, '../sample_data/jatra_06mm_jenjatraplus/')
        yaml_file = os.path.join(
            path_to_script, '../experiments/20130919_liver_statistics.yaml')
        # write_csv(fvall)
        gf = tfeat.GaborFeatures()
        glcmf = tfeat.GlcmFeatures()
        haralick = tfeat.HaralickFeatures()
        # Only the histogram feature is active; the others are kept here,
        # commented out, as ready-made alternatives.
        list_of_feature_fcn = [
            [tls.feat_hist, []],
            # [gf.feats_gabor, []],
            # [glcmf.feats_glcm, []],
            # [haralick.feats_haralick, [True]]
        ]
        list_of_classifiers = [
            # [GaussianNB, []],
            # [svm.SVC, []],
            [classification.GMMClassifier,
             {'n_components': 2, 'covariance_type': 'full'}],
        ]
        featrs_plus_classifs = tls.make_product_list(list_of_feature_fcn,
                                                     list_of_classifiers)
        tile_shape = [50, 50, 50]
        # Same yaml file serves as both input and output spec here.
        tls.experiment(yaml_file, yaml_file,
                       featrs_plus_classifs, tile_shape=tile_shape,
                       use_voxelsize_norm=False,
                       working_voxelsize_mm=[1, 1, 1],
                       visualization=False)
        # slab = {'none':0, 'bone':8,'lungs':9,'heart':10}
        # import pdb; pdb.set_trace()
        # SupportStructureSegmentation
        # sss = support_structure_segmentation.SupportStructureSegmentation(
        # data3d = self.data3d,
        # voxelsize_mm = self.metadata['voxelsize_mm'],
        # modality = 'CT',
        # slab = slab
        #)
        # sss.lungs_segmentation()
        # sss.segmentation[260:270,160:170,1:10] = 2
        # sss.visualization()
        # total number of voxels segmented as bones in spine
        # probebox1 = sss.segmentation [260:270,160:170,1:10]== slab['lungs']
        # self.assertGreater(np.sum(probebox1),20)
        # total number of voexel segmented as none in upper left corner
        # probebox1 = sss.segmentation[10:20,10:20,5:15] == slab['none']
        # self.assertGreater(np.sum(probebox1),900)
        # import pdb; pdb.set_trace()
if __name__ == "__main__":
    # Bugfix: this module imports loguru's `logger` but never the stdlib
    # `logging` package, so `logging.basicConfig` raised NameError when the
    # file was run as a script.  Import it locally here.
    import logging

    logging.basicConfig(stream=sys.stderr)
    logging.getLogger().setLevel(logging.DEBUG)
    unittest.main()
| mjirik/lisa | tests/texture_features_experiments_test.py | Python | bsd-3-clause | 3,114 | 0.003215 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Thomas Herchenroeder (thron7)
#
################################################################################
##
# The main purpose of this module is to provide a low-level JS scanner,
# materialized in the Scanner class. It only recognizes primitive lexems, like
# numbers, operators, and symbol names, but nothing that requires context
# awareness like strings or comments.
##
import sys, os, re, types
from collections import deque
##
# IterObject -- abstract base class for iterators, making them resettable and
# providing an immediate .next() method
#
class IterObject(object):
    """Abstract resettable-iterator base class (Python 2).

    Subclasses implement __iter__ (typically as a generator over
    self.inData); resetIter rebinds self.next so iteration can be restarted
    from the beginning at any time.
    """
    def __init__(self, inData):
        self.inData = inData
        self.resetIter()

    def resetIter(self):
        # Create a fresh iterator and expose its .next directly.
        # (.next is the Python 2 iterator protocol method; in Python 3 this
        # would need next(self._iter) / __next__.)
        self._iter = self.__iter__()
        self.next = self._iter.next

    def __iter__(self):
        # Abstract: subclasses must provide the actual iteration.
        raise RuntimeError("You have to overload the __iter__ method!")
##
# Scanner -- low-level scanner that reads text from a stream and returns simple tokens as tuples
#
# Usage:
# f=open('file.js')
# fs= f.read()
# x=Scanner(text)
# a=[y for y in Scanner(text)]
class Scanner(IterObject):
    """Low-level JS scanner yielding (group_name, text, start, end) tuples.

    Only primitive lexems are recognized (numbers, identifiers, operators,
    whitespace, newlines); strings and comments require the consumer to
    steer the scanner via generator .send() -- see __iter__.
    """
    def __init__(self, stream):
        super(Scanner, self).__init__(stream)
        # Next expected scan position; used by __iter__1's compactness check.
        self.next_start = 0

    # Master tokenizer pattern (class attribute, compiled once).
    patt = re.compile(ur'''
        (?P<float>
            \d*\.\d+(?:[eE][+-]?\d+)?   # float, dotted
            |\d+[eE][+-]?\d+            # undotted, with 'e'
        )
        |(?P<hexnum> 0x[0-9A-Fa-f]+)    # hex number
        |(?P<number> \d+)               # number TODO: there is no such thing in JS!
        |(?P<ident> [$\w]+)             # identifier, name
        |(?P<nl>                        # unicode line separators
            \x0D\x0A
            #|\x20\x28                  # strange: this is ' (' !?
            #|\x20\x29                  # strange: this is ' )' !?
            |\x0A
            |\x0D
        )
        |(?P<white> (?:(?:\s|\ufeff)(?<!\n))+)  # white ( + BOM - \n)
        |(?P<mulop>                     # multi-char operators
            <<=?                        # <<, <<=
            |>=                         # >=
            |<=                         # <=
            |===?                       # ==, ===
            |!==?                       # !=, !==
            |[-+*/%|^&]=                # -=, +=, *=, /=, %=, |=, ^=, &=
            |>>>?=?                     # >>, >>>, >>=, >>>=
            |&&                         # &&
            |[|^]\|                     # ||, ^|
            |\+\+                       # ++
            |--                         # --
            |::                         # ::
            |\.\.                       # ..
            |//                         # // (end-of-line comment)
            |/\*                        # /* (start multi-line comment)
            |\*/                        # */ (end multi-line comment)
        )
        |(?P<op> \W)                    # what remains (operators)
        ''', re.VERBOSE|re.DOTALL|re.MULTILINE|re.UNICODE)  # re.LOCALE?!

    # individual regex to search fast-forward to potential string ends (both comments and quoted)
    stringEnd = {}
    stringEnd['\n'] = re.compile('(?P<commI>.*(?=\n|$))', re.UNICODE)
    stringEnd[r'\*/'] = re.compile(r'(?P<commM>.*?\*/)', re.DOTALL|re.MULTILINE|re.UNICODE)
    stringEnd['"'] = re.compile(r'(?P<dquote>.*?")', re.UNICODE)
    stringEnd["'"] = re.compile(r"(?P<squote>.*?')", re.UNICODE)

    # yields :
    # ( <group_name> , <scan_string> , <start_pos> , <scan_length> )
    def __iter__1(self):
        """Alternate strict tokenizer: asserts the scan has no gaps.

        NOTE(review): unlike __iter__, the 4th tuple element here is the
        *length* (mend - mstart), not the end position.
        """
        miter = self.patt.finditer(self.inData)
        for mo in miter:
            mo_lastgroup = mo.lastgroup
            mstart = mo.start()
            mend = mo.end()
            if mstart != self.next_start:  # assure compactness of scan
                raise AssertionError, "There's a scan gap before: %s (at pos %d)" % (mo.group(), self.next_start)
            self.next_start = mend  # match range is [mo.start(), mo.end()[
            yield (mo_lastgroup, mo.group(mo_lastgroup), mstart, mend - mstart)

    def __iter__(self):
        """Send-steerable tokenizer.

        The consumer may .send() one of the stringEnd keys after receiving
        a token; when set, the next step fast-forwards with that regex
        (skipping over a comment or string body) instead of matching the
        master pattern.  Yields (group_name, text, start, end).
        """
        delimiter = None
        inData = self.inData
        lenData = len(inData)
        cursor = 0
        while cursor < lenData:
            if delimiter:
                mo = self.stringEnd[delimiter].search(inData, pos=cursor)
            else:
                mo = self.patt.match(inData, pos=cursor)
            if mo:
                mo_lastgroup = mo.lastgroup
                mstart = mo.start()
                mend = mo.end()
                cursor = mend  # when using the 'pos' parameter, mo.start/end refer to the *entire* underlying string
                # Whatever the consumer send()s becomes the next delimiter.
                delimiter = (yield (mo_lastgroup, mo.group(mo_lastgroup), mstart, mend))
            else:
                raise SyntaxError("Unable to tokenize text starting with: \"%s\"" % inData[cursor:cursor+200])
##
# Token -- wraps a low-level scanner tuple into a simple object
class Token(object):
    """Named wrapper around the 4-tuples produced by the low-level scanner."""
    __slots__ = 'name', 'value', 'spos', 'len'

    def __init__(self, ttup):
        # Unpack (type, value, stream position, value length) in one go.
        self.name, self.value, self.spos, self.len = ttup

    def __str__(self):
        return "(%s, %r, %d, %d)" % (self.name, self.value, self.spos, self.len)
##
# LQueue -- enhanced queue that allows push-back from one ("Left") side
#
# I'm using this class as a wrapper around (token) iterators, so I can not
# only get the next item from the iterator, but also push it back again.
# This allows peek-ahead processing of tokens, and methods can push tokens
# back into the stream if they find they don't want to use them.
# The implementation is based on a collections.deque double ended queue that
# uses one end (the "right" one) to fill from the iterator, and the other
# (the "left") end as the producer end for .next() iteration and the push-
# back method. Here are the schematics:
#
# -------------------------
# to consumer <--- LQueue <--- from source iterator
# (.next()) -------------------------
#
# from consumer--->
# (.putBack())
#
# The StopIteration exception is propagated (i.e.: uncaught) from the ori-
# ginal iterator. The interesting end of the deque is the left, hence the
# name "LQueue".
class LQueue(object):
    """Iterator wrapper with push-back on the producing ("left") end.

    Items are pulled lazily from the wrapped iterator; a consumer can return
    an item with putBack() (enabling peek-ahead parsing).  StopIteration
    from the underlying iterator is deliberately left to propagate.
    """

    def __init__(self, iterator):
        self.iterator = iterator
        self.queue = deque(())

    def next(self, arg=None):
        """Return the next item, refilling via the iterator's send()."""
        if not self.queue:
            self.queue.append(self.iterator.send(arg))
        return self.queue.popleft()

    ##
    # peek n tokens ahead
    def peek(self, n=1):
        """Return up to *n* upcoming items without consuming them."""
        lookahead = []
        while len(lookahead) < n:
            try:
                lookahead.append(self.next())
            except StopIteration:
                break
        # Restore everything we pulled, last first, so order is preserved.
        for item in reversed(lookahead):
            self.putBack(item)
        return lookahead

    def putBack(self, item):
        """Push *item* back so it is returned by the next .next() call."""
        self.queue.appendleft(item)

    def __iter__(self):
        while True:
            if not self.queue:
                # .next() is the Python-2 iterator protocol method; the
                # wrapped iterator's StopIteration propagates to the caller.
                self.queue.append(self.iterator.next())
            yield self.queue.popleft()
# - Helpers -------------------------------------------------------------------
##
# is_last_escaped -- check whether the last char in a string is escaped, i.e. preceded
# by an odd number of consecutive escape chars ("\")
def is_last_escaped(s):
    """Return True if the last char of *s* is escaped, i.e. preceded by an
    odd number of consecutive backslashes."""
    backslashes = 0
    # Walk backwards starting at the but-last character.
    for ch in reversed(s[:-1]):
        if ch == "\\":
            backslashes += 1
        else:
            break
    return backslashes % 2 == 1  # odd count => the final char is escaped
# - Main ----------------------------------------------------------------------
# syntax: ./Scanner.py <classfile>.js
if __name__ == "__main__":
    # Demo driver (Python 2): tokenize the JS file named on the command
    # line, steering the scanner past comments/strings by send()'ing the
    # appropriate end-delimiter determined from the previous token.
    # NOTE: 'file' shadows the builtin of the same name (Python 2 only).
    file = open(sys.argv[1]).read()
    tokenizer = Scanner(file).__iter__()
    #for tok in tokenizer:
    #    print tok
    c = None
    while True:
        try:
            tok = tokenizer.send(c)
        except StopIteration:
            break
        # Choose what to fast-forward to next, based on this token.
        if tok[1] == '//':
            c = '\n'
        elif tok[1] == '/*':
            c = r'\*/'
        elif tok[1] in ['"', "'"]:
            c = tok[1]
        else:
            c = None
        print tok
| Seldaiendil/meyeOS | devtools/qooxdoo-1.5-sdk/tool/pylib/ecmascript/frontend/Scanner.py | Python | agpl-3.0 | 8,958 | 0.008038 |
"""
A simple example script to get all posts on a user's timeline.
Originally created by Mitchell Stewart.
<https://gist.github.com/mylsb/10294040>
"""
import facebook
import requests
def some_action(post):
    """Process one Graph API post dict.

    Swap in whatever you need here -- e.g. read post['message'] or
    post['picture'].  This demo implementation just prints the post's
    creation timestamp.
    """
    created = post["created_time"]
    print(created)
# You'll need an access token here to do anything. You can get a temporary one
# here: https://developers.facebook.com/tools/explorer/
access_token = ""
# Look at Bill Gates's profile for this example by using his Facebook id.
user = "BillGates"

graph = facebook.GraphAPI(access_token)
# Resolve the username to a profile object (includes the numeric id).
profile = graph.get_object(user)
# Fetch the first page of the user's posts.
posts = graph.get_connections(profile["id"], "posts")

# Wrap this block in a while loop so we can keep paginating requests until
# finished.
while True:
    try:
        # Perform some action on each post in the collection we receive from
        # Facebook.
        [some_action(post=post) for post in posts["data"]]
        # Attempt to make a request to the next page of data, if it exists.
        posts = requests.get(posts["paging"]["next"]).json()
    except KeyError:
        # When there are no more pages (['paging']['next']), break from the
        # loop and end the script.
        break
| mobolic/facebook-sdk | examples/get_posts.py | Python | apache-2.0 | 1,391 | 0 |
"""Tests for the input_boolean component."""
| fbradyirl/home-assistant | tests/components/input_boolean/__init__.py | Python | apache-2.0 | 45 | 0 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
# Most of this was originally added by other creators in the postgresql_user module.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# psycopg2 is optional at import time: keep the module-level name bound even
# when the library is absent, so callers (and tests) can reference/patch it.
psycopg2 = None  # This line is needed for unit tests
try:
    import psycopg2
    HAS_PSYCOPG2 = True
except ImportError:
    HAS_PSYCOPG2 = False
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
from distutils.version import LooseVersion
def postgres_common_argument_spec():
    """Return the argument-spec dictionary for connection options shared by
    most PostgreSQL modules."""
    ssl_mode_choices = ['allow', 'disable', 'prefer', 'require',
                        'verify-ca', 'verify-full']
    return {
        'login_user': {'default': 'postgres'},
        'login_password': {'default': '', 'no_log': True},
        'login_host': {'default': ''},
        'login_unix_socket': {'default': ''},
        'port': {'type': 'int', 'default': 5432, 'aliases': ['login_port']},
        'ssl_mode': {'default': 'prefer', 'choices': ssl_mode_choices},
        'ca_cert': {'aliases': ['ssl_rootcert']},
    }
def ensure_required_libs(module):
    """Fail the Ansible module unless psycopg2 is present and new enough
    for the requested options."""
    if not HAS_PSYCOPG2:
        module.fail_json(msg=missing_required_lib('psycopg2'))

    # ca_cert (sslrootcert) support requires psycopg2 >= 2.4.3.
    wants_cert = module.params.get('ca_cert')
    if wants_cert and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
    """Connect to a PostgreSQL database.

    Return psycopg2 connection object.

    Args:
        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
        conn_params (dict) -- dictionary with connection parameters

    Kwargs:
        autocommit (bool) -- commit automatically (default False)
        fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
    """
    ensure_required_libs(module)
    db_connection = None
    try:
        db_connection = psycopg2.connect(**conn_params)
        if autocommit:
            # set_session(autocommit=...) appeared in psycopg2 2.4.2; fall
            # back to the isolation-level API on older versions.
            if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
                db_connection.set_session(autocommit=True)
            else:
                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        # Switch role, if specified:
        if module.params.get('session_role'):
            # NOTE(review): psycopg2.extras is used without an explicit
            # 'import psycopg2.extras' in this module -- presumably imported
            # elsewhere before this runs; confirm.
            cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
            try:
                cursor.execute('SET ROLE %s' % module.params['session_role'])
            except Exception as e:
                module.fail_json(msg="Could not switch role: %s" % to_native(e))
            finally:
                cursor.close()
    except TypeError as e:
        # Unknown 'sslrootcert' kwarg means the server/driver combo is too
        # old for that option -- report that specifically.
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least '
                                 'version 8.4 to support sslrootcert')
        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
        else:
            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            db_connection = None
    except Exception as e:
        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
        else:
            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            db_connection = None
    return db_connection
def exec_sql(obj, query, ddl=False, add_to_executed=True):
    """Execute SQL.
    Auxiliary function for PostgreSQL user classes.
    Returns a query result if possible or True/False if ddl=True arg was passed.
    It necessary for statements that don't return any result (like DDL queries).
    Arguments:
        obj (obj) -- must be an object of a user class.
            The object must have module (AnsibleModule class object) and
            cursor (psycopg cursor object) attributes
        query (str) -- SQL query to execute
        ddl (bool) -- must return True or False instead of rows (typical for DDL queries)
            (default False)
        add_to_executed (bool) -- append the query to obj.executed_queries attribute
    """
    try:
        obj.cursor.execute(query)
        if add_to_executed:
            obj.executed_queries.append(query)
        # DDL statements produce no rows, so just report success
        if ddl:
            return True
        return obj.cursor.fetchall()
    except Exception as e:
        obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
    return False
def get_conn_params(module, params_dict, warn_db_default=True):
    """Get connection parameters from the passed dictionary.
    Return a dictionary with parameters to connect to PostgreSQL server.
    Args:
        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
        params_dict (dict) -- dictionary with variables
    Kwargs:
        warn_db_default (bool) -- warn that the default DB is used (default True)
    """
    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the return dictionary
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }
    # Might be different in the modules:
    if params_dict.get('db'):
        params_map['db'] = 'database'
    elif params_dict.get('database'):
        params_map['database'] = 'database'
    elif params_dict.get('login_db'):
        params_map['login_db'] = 'database'
    elif warn_db_default:
        module.warn('Database name has not been passed, '
                    'used default database to connect to.')
    # Plain dict.items() works on both Python 2 and 3; no need for six here.
    kw = dict((params_map[k], v) for (k, v) in params_dict.items()
              if k in params_map and v != '' and v is not None)
    # If a login_unix_socket is specified, incorporate it here.
    # BUG FIX: use .get() so a params_dict without the 'login_unix_socket'
    # key no longer raises KeyError.
    is_localhost = kw.get("host") is None or kw["host"] == "localhost"
    if is_localhost and params_dict.get("login_unix_socket", "") != "":
        kw["host"] = params_dict["login_unix_socket"]
    return kw
| aperigault/ansible | lib/ansible/module_utils/postgres.py | Python | gpl-3.0 | 7,961 | 0.004271 |
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
# Copyright (c) 1999-2007 Gary Strangman; All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
stats.py module
(Requires pstat.py module.)
#################################################
####### Written by: Gary Strangman ###########
####### Last modified: Dec 18, 2007 ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
IMPORTANT: There are really *3* sets of functions. The first set has an 'l'
prefix, which can be used with list or tuple arguments. The second set has
an 'a' prefix, which can accept NumPy array arguments. These latter
functions are defined only when NumPy is available on the system. The third
type has NO prefix (i.e., has the name that appears below). Functions of
this set are members of a "Dispatch" class, c/o David Ascher. This class
allows different functions to be called depending on the type of the passed
arguments. Thus, stats.mean is a member of the Dispatch class and
stats.mean(range(20)) will call stats.lmean(range(20)) while
stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)).
This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implemented the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
while "print stats.mean.__doc__" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful
CENTRAL TENDENCY: geometricmean
harmonicmean
mean
median
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
skewtest (for Numpy arrays only)
kurtosistest (for Numpy arrays only)
normaltest (for Numpy arrays only)
ALTERED VERSIONS: tmean (for Numpy arrays only)
tvar (for Numpy arrays only)
tmin (for Numpy arrays only)
tmax (for Numpy arrays only)
tstdev (for Numpy arrays only)
tsem (for Numpy arrays only)
describe
FREQUENCY STATS: itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
samplevar
samplestdev
signaltonoise (for Numpy arrays only)
var
stdev
sterr
sem
z
zs
zmap (for Numpy arrays only)
TRIMMING FCNS: threshold (for Numpy arrays only)
trimboth
trim1
round (round all vals to 'n' decimals; Numpy only)
CORRELATION FCNS: covariance (for Numpy arrays only)
correlation (for Numpy arrays only)
paired
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxont
kruskalwallish
friedmanchisquare
PROBABILITY CALCS: chisqprob
erfcc
zprob
ksprob
fprob
betacf
gammln
betai
ANOVA FUNCTIONS: F_oneway
F_value
SUPPORT FUNCTIONS: writecc
incr
sign (for Numpy arrays only)
sum
cumsum
ss
summult
sumdiffsquared
square_of_sums
shellsort
rankdata
outputpairedstats
findwithin
"""
from __future__ import absolute_import
from __future__ import print_function
## CHANGE LOG:
## ===========
## 07-11.26 ... conversion for numpy started
## 07-05-16 ... added Lin's Concordance Correlation Coefficient (alincc) and acov
## 05-08-21 ... added "Dice's coefficient"
## 04-10-26 ... added ap2t(), an ugly fcn for converting p-vals to T-vals
## 04-04-03 ... added amasslinregress() function to do regression on N-D arrays
## 03-01-03 ... CHANGED VERSION TO 0.6
## fixed atsem() to properly handle limits=None case
## improved histogram and median functions (estbinwidth) and
## fixed atvar() function (wrong answers for neg numbers?!?)
## 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows
## 02-05-10 ... fixed lchisqprob indentation (failed when df=even)
## 00-12-28 ... removed aanova() to separate module, fixed licensing to
## match Python License, fixed doc string & imports
## 00-04-13 ... pulled all "global" statements, except from aanova()
## added/fixed lots of documentation, removed io.py dependency
## changed to version 0.5
## 99-11-13 ... added asign() function
## 99-11-01 ... changed version to 0.4 ... enough incremental changes now
## 99-10-25 ... added acovariance and acorrelation functions
## 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors
## added aglm function (crude, but will be improved)
## 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, avar, etc. to
## all handle lists of 'dimension's and keepdims
## REMOVED ar0, ar2, ar3, ar4 and replaced them with around
## reinserted fixes for abetai to avoid math overflows
## 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to
## handle multi-dimensional arrays (whew!)
## 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990)
## added anormaltest per same reference
## re-wrote azprob to calc arrays of probs all at once
## 99-08-22 ... edited attest_ind printing section so arrays could be rounded
## 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on
## short/byte arrays (mean of #s btw 100-300 = -150??)
## 99-08-09 ... fixed asum so that the None case works for Byte arrays
## 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays
## 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap)
## 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0])
## 04/11/99 ... added asignaltonoise, athreshold functions, changed all
## max/min in array section to N.maximum/N.minimum,
## fixed square_of_sums to prevent integer overflow
## 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums
## 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions
## 02/28/99 ... Fixed aobrientransform to return an array rather than a list
## 01/15/99 ... Essentially ceased updating list-versions of functions (!!!)
## 01/13/99 ... CHANGED TO VERSION 0.3
## fixed bug in a/lmannwhitneyu p-value calculation
## 12/31/98 ... fixed variable-name bug in ldescribe
## 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix)
## 12/16/98 ... changed amedianscore to return float (not array) for 1 score
## 12/14/98 ... added atmin and atmax functions
## removed umath from import line (not needed)
## l/ageometricmean modified to reduce chance of overflows (take
## nth root first, then multiply)
## 12/07/98 ... added __version__variable (now 0.2)
## removed all 'stats.' from anova() fcn
## 12/06/98 ... changed those functions (except shellsort) that altered
## arguments in-place ... cumsum, ranksort, ...
## updated (and fixed some) doc-strings
## 12/01/98 ... added anova() function (requires NumPy)
## incorporated Dispatch class
## 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean
## added 'asum' function (added functionality to N.add.reduce)
## fixed both moment and amoment (two errors)
## changed name of skewness and askewness to skew and askew
## fixed (a)histogram (which sometimes counted points <lowerlimit)
from . import pstat # required 3rd party module
import math, string, copy # required python modules
from types import *
from six.moves import map
from six.moves import range
from six.moves import input
__version__ = 0.6  # historical version marker (kept as a float, as originally released)
############# DISPATCH CODE ##############
class Dispatch:
    """
    The Dispatch class, care of David Ascher, allows different functions to
    be called depending on the argument types.  This way, there can be one
    function name regardless of the argument type.  To access function doc
    in stats.py module, prefix the function with an 'l' or 'a' for list or
    array arguments, respectively.  That is, print stats.lmean.__doc__ or
    print stats.amean.__doc__ or whatever.
    """

    def __init__(self, *tuples):
        # Map each registered type to its handler; duplicates are an error.
        self._dispatch = {}
        for handler, accepted_types in tuples:
            for typ in accepted_types:
                if typ in self._dispatch:
                    raise ValueError("can't have two dispatches on "+str(typ))
                self._dispatch[typ] = handler
        self._types = list(self._dispatch.keys())

    def __call__(self, arg1, *args, **kw):
        # Route on the concrete type of the first argument.
        if type(arg1) not in self._dispatch:
            raise TypeError("don't know how to dispatch %s arguments" % type(arg1))
        handler = self._dispatch[type(arg1)]
        return handler(arg1, *args, **kw)
##########################################################################
######################## LIST-BASED FUNCTIONS ########################
##########################################################################
### Define these regardless
####################################
####### CENTRAL TENDENCY #########
####################################
def lgeometricmean(inlist):
    """
    Calculates the geometric mean of the values in the passed list.
    That is:  n-th root of (x1 * x2 * ... * xn).  Assumes a '1D' list.

    Usage:   lgeometricmean(inlist)
    """
    # Take the n-th root of each factor first to reduce overflow risk.
    root = 1.0 / len(inlist)
    product = 1.0
    for value in inlist:
        product *= pow(value, root)
    return product
def lharmonicmean(inlist):
    """
    Calculates the harmonic mean of the values in the passed list.
    That is:  n / (1/x1 + 1/x2 + ... + 1/xn).  Assumes a '1D' list.

    Usage:   lharmonicmean(inlist)
    """
    reciprocal_total = 0.0
    for value in inlist:
        reciprocal_total += 1.0 / value
    return len(inlist) / reciprocal_total
def lmean(inlist):
    """
    Returns the arithematic mean of the values in the passed list.
    Assumes a '1D' list, but will function on the 1st dim of an array(!).

    Usage:   lmean(inlist)
    """
    total = 0
    for value in inlist:
        total += value
    return total / float(len(inlist))
def lmedian (inlist,numbins=1000):
    """
    Returns the computed median value of a list of numbers, given the
    number of bins to use for the histogram (more bins brings the computed value
    closer to the median score, default number of bins = 1000).  See G.W.
    Heiman's Basic Stats (1st Edition), or CRC Probability & Statistics.

    Usage:   lmedian (inlist, numbins=1000)
    """
    (hist, smallest, binsize, extras) = histogram(inlist,numbins,[min(inlist),max(inlist)]) # make histog
    cumhist = cumsum(hist)              # make cumulative histogram
    for i in range(len(cumhist)):        # get 1st(!) index holding 50%ile score
        if cumhist[i]>=len(inlist)/2.0:
            cfbin = i
            break
    # NOTE(review): cfbin would be unbound if no bin reached 50% — in practice the
    # cumulative histogram covers all points, but confirm for pathological input.
    LRL = smallest + binsize*cfbin        # get lower read limit of that bin
    cfbelow = cumhist[cfbin-1]  # cumulative count in all bins below the median bin
    freq = float(hist[cfbin])                # frequency IN the 50%ile bin
    # linear interpolation within the median bin
    median = LRL + ((len(inlist)/2.0 - cfbelow)/float(freq))*binsize # median formula
    return median
def lmedianscore(inlist):
    """
    Returns the 'middle' score of the passed list.  If there is an even
    number of scores, the mean of the 2 middle scores is returned.

    Usage:   lmedianscore(inlist)
    """
    newlist = copy.deepcopy(inlist)
    newlist.sort()
    n = len(newlist)
    # BUG FIX: use floor division — on Python 3 the old `len(...) / 2`
    # produced a float, and list indexing with a float raises TypeError.
    index = n // 2
    if n % 2 == 0:  # if even number of scores, average middle 2
        return float(newlist[index] + newlist[index - 1]) / 2
    # odd count: the middle element (counting from 0)
    return newlist[index]
def lmode(inlist):
    """
    Returns a list of the modal (most common) score(s) in the passed
    list.  If there is more than one such score, all are returned.  The
    bin-count for the mode(s) is also returned.

    Usage:   lmode(inlist)
    Returns: bin-count for mode(s), a list of modal value(s)
    """
    scores = pstat.unique(inlist)
    scores.sort()
    # Count occurrences of each distinct score, then keep all score(s)
    # tied at the maximum frequency (in sorted order).
    counts = [inlist.count(item) for item in scores]
    maxfreq = max(counts)
    mode = [item for item, c in zip(scores, counts) if c == maxfreq]
    return maxfreq, mode
####################################
############ MOMENTS #############
####################################
def lmoment(inlist, moment=1):
    """
    Calculates the nth moment about the mean for a sample (defaults to
    the 1st moment).  Used to calculate coefficients of skewness and kurtosis.

    Usage:   lmoment(inlist,moment=1)
    Returns: appropriate moment (r) from ... 1/n * SUM((inlist(i)-mean)**r)
    """
    if moment == 1:
        # the first moment about the mean is zero by definition
        return 0.0
    mn = mean(inlist)
    total = 0
    for score in inlist:
        total += (score - mn) ** moment
    return total / float(len(inlist))
def lvariation(inlist):
    """
    Returns the coefficient of variation, as defined in CRC Standard
    Probability and Statistics, p.6.

    Usage:   lvariation(inlist)
    """
    sd = samplestdev(inlist)
    return 100.0 * sd / float(mean(inlist))
def lskew(inlist):
    """
    Returns the skewness of a distribution, as defined in Numerical
    Recipies (alternate defn in CRC Standard Probability and Statistics, p.6.)

    Usage:   lskew(inlist)
    """
    second = moment(inlist, 2)
    third = moment(inlist, 3)
    # third moment normalized by the 3/2 power of the variance
    return third / pow(second, 1.5)
def lkurtosis(inlist):
    """
    Returns the kurtosis of a distribution, as defined in Numerical
    Recipies (alternate defn in CRC Standard Probability and Statistics, p.6.)

    Usage:   lkurtosis(inlist)
    """
    second = moment(inlist, 2)
    # fourth moment normalized by the squared variance
    return moment(inlist, 4) / pow(second, 2.0)
def ldescribe(inlist):
    """
    Returns some descriptive statistics of the passed list (assumed to be 1D).

    Usage:   ldescribe(inlist)
    Returns: n, (min, max), mean, standard deviation, skew, kurtosis
    """
    n = len(inlist)
    extremes = (min(inlist), max(inlist))
    return n, extremes, mean(inlist), stdev(inlist), skew(inlist), kurtosis(inlist)
####################################
####### FREQUENCY STATS ##########
####################################
def litemfreq(inlist):
    """
    Returns a list of pairs.  Each pair consists of one of the scores in inlist
    and it's frequency count.  Assumes a 1D list is passed.

    Usage:   litemfreq(inlist)
    Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
    """
    scores = pstat.unique(inlist)
    scores.sort()
    counts = [inlist.count(item) for item in scores]
    return pstat.abut(scores, counts)
def lscoreatpercentile (inlist, percent):
    """
    Returns the score at a given percentile relative to the distribution
    given by inlist.

    Usage:   lscoreatpercentile(inlist,percent)
    """
    # Accept percentiles given as either a fraction (0-1) or a percentage (>1).
    if percent > 1:
        print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
        percent = percent / 100.0
    targetcf = percent*len(inlist)  # target cumulative frequency
    h, lrl, binsize, extras = histogram(inlist)
    cumhist = cumsum(copy.deepcopy(h))
    # find the first bin whose cumulative count reaches the target
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    # linear interpolation within the bin that crosses the target
    score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
    return score
def lpercentileofscore (inlist, score,histbins=10,defaultlimits=None):
    """
    Returns the percentile value of a score relative to the distribution
    given by inlist.  Formula depends on the values used to histogram the data(!).

    Usage:   lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
    """
    h, lrl, binsize, extras = histogram(inlist,histbins,defaultlimits)
    cumhist = cumsum(copy.deepcopy(h))
    i = int((score - lrl)/float(binsize))  # index of the bin containing `score`
    # count below the bin plus the interpolated fraction within the bin,
    # expressed as a percentage of all scores
    pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inlist)) * 100
    return pct
def lhistogram(inlist, numbins=10, defaultreallimits=None, printextras=0):
    """
    Returns (i) a list of histogram bin counts, (ii) the smallest value
    of the histogram binning, and (iii) the bin width (the last 2 are not
    necessarily integers).  Default number of bins is 10.  If no sequence object
    is given for defaultreallimits, the routine picks (usually non-pretty) bins
    spanning all the numbers in the inlist.

    Usage:   lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
    Returns: list of bin values, lowerreallimit, binsize, extrapoints
    """
    if defaultreallimits is not None:
        # BUG FIX: ListType/TupleType no longer exist in Python 3's `types`
        # module; use isinstance with the builtin types instead.
        if not isinstance(defaultreallimits, (list, tuple)):
            # a bare number: treat it as the lower limit, compute the upper one
            lowerreallimit = defaultreallimits
            upperreallimit = 1.000001 * max(inlist)
        elif len(defaultreallimits) == 1:
            # BUG FIX: a 1-element sequence previously left lowerreallimit as the
            # sequence itself, crashing the binsize arithmetic below.
            lowerreallimit = defaultreallimits[0]
            upperreallimit = 1.000001 * max(inlist)
        else:  # assume both limits given
            lowerreallimit = defaultreallimits[0]
            upperreallimit = defaultreallimits[1]
        binsize = (upperreallimit - lowerreallimit) / float(numbins)
    else:  # no limits given for histogram, both must be calc'd
        estbinwidth = (max(inlist) - min(inlist)) / float(numbins) + 1e-6  # 1=>cover all
        binsize = ((max(inlist) - min(inlist) + estbinwidth)) / float(numbins)
        lowerreallimit = min(inlist) - binsize / 2  # lower real limit, 1st bin
    bins = [0] * numbins
    extrapoints = 0
    for num in inlist:
        try:
            if (num - lowerreallimit) < 0:
                extrapoints = extrapoints + 1
            else:
                bintoincrement = int((num - lowerreallimit) / float(binsize))
                bins[bintoincrement] = bins[bintoincrement] + 1
        except Exception:  # out-of-range / non-numeric values count as extras
            extrapoints = extrapoints + 1
    if extrapoints > 0 and printextras == 1:
        print('\nPoints outside given histogram range =', extrapoints)
    return (bins, lowerreallimit, binsize, extrapoints)
def lcumfreq(inlist, numbins=10, defaultreallimits=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.

    Usage:   lcumfreq(inlist,numbins=10,defaultreallimits=None)
    Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    counts, lowerlimit, binsize, extrapoints = histogram(inlist, numbins, defaultreallimits)
    cumcounts = cumsum(copy.deepcopy(counts))
    return cumcounts, lowerlimit, binsize, extrapoints
def lrelfreq(inlist, numbins=10, defaultreallimits=None):
    """
    Returns a relative frequency histogram, using the histogram function.

    Usage:   lrelfreq(inlist,numbins=10,defaultreallimits=None)
    Returns: list of relfreq bin values, lowerreallimit, binsize, extrapoints
    """
    counts, lowerlimit, binsize, extrapoints = histogram(inlist, numbins, defaultreallimits)
    n = float(len(inlist))
    # normalize each bin count to a proportion of all scores
    for idx in range(len(counts)):
        counts[idx] = counts[idx] / n
    return counts, lowerlimit, binsize, extrapoints
####################################
##### VARIABILITY FUNCTIONS ######
####################################
def lobrientransform(*args):
    """
    Computes a transform on input data (any number of columns).  Used to
    test for homogeneity of variance prior to running one-way stats.  From
    Maxwell and Delaney, p.112.

    Usage:   lobrientransform(*args)
    Returns: transformed data for use in an ANOVA
    """
    TINY = 1e-10
    k = len(args)
    n = [0.0] * k   # per-column counts (as floats, used in the formula)
    v = [0.0] * k   # per-column variances
    m = [0.0] * k   # per-column means
    nargs = []
    for i in range(k):
        nargs.append(copy.deepcopy(args[i]))
        n[i] = float(len(nargs[i]))
        v[i] = var(nargs[i])
        m[i] = mean(nargs[i])
    for j in range(k):
        # BUG FIX: n[j] is a float; range() requires an int on Python 3,
        # so iterate over the column length directly.
        for i in range(len(nargs[j])):
            t1 = (n[j] - 1.5) * n[j] * (nargs[j][i] - m[j]) ** 2
            t2 = 0.5 * v[j] * (n[j] - 1.0)
            t3 = (n[j] - 1.0) * (n[j] - 2.0)
            nargs[j][i] = (t1 - t2) / float(t3)
    # sanity check: transformed column means should equal original variances
    check = 1
    for j in range(k):
        if v[j] - mean(nargs[j]) > TINY:
            check = 0
    if check != 1:
        raise ValueError('Problem in obrientransform.')
    else:
        return nargs
def lsamplevar(inlist):
    """
    Returns the variance of the values in the passed list using
    N for the denominator (i.e., DESCRIBES the sample variance only).

    Usage:   lsamplevar(inlist)
    """
    mn = mean(inlist)
    deviations = [item - mn for item in inlist]
    return ss(deviations) / float(len(inlist))
def lsamplestdev(inlist):
    """
    Returns the standard deviation of the values in the passed list using
    N for the denominator (i.e., DESCRIBES the sample stdev only).

    Usage:   lsamplestdev(inlist)
    """
    variance = samplevar(inlist)
    return math.sqrt(variance)
def lcov(x, y, keepdims=0):
    """
    Returns the estimated covariance of the paired values in x and y,
    using N-1 in the denominator.  (keepdims is accepted for signature
    parity with the array version; it is not used here.)

    Usage:   lcov(x,y,keepdims=0)
    """
    n = len(x)
    xmn = mean(x)
    ymn = mean(y)
    # accumulate the cross-products of deviations from each mean
    total = 0.0
    for i in range(n):
        total = total + (x[i] - xmn) * (y[i] - ymn)
    return total / float(n - 1)
def lvar(inlist):
    """
    Returns the variance of the values in the passed list using N-1
    for the denominator (i.e., for estimating population variance).

    Usage:   lvar(inlist)
    """
    mn = mean(inlist)
    deviations = [score - mn for score in inlist]
    return ss(deviations) / float(len(inlist) - 1)
def lstdev(inlist):
    """
    Returns the standard deviation of the values in the passed list
    using N-1 in the denominator (i.e., to estimate population stdev).

    Usage:   lstdev(inlist)
    """
    variance = var(inlist)
    return math.sqrt(variance)
def lsterr(inlist):
    """
    Returns the standard error of the values in the passed list using N-1
    in the denominator (i.e., to estimate population standard error).

    Usage:   lsterr(inlist)
    """
    n = len(inlist)
    return stdev(inlist) / float(math.sqrt(n))
def lsem(inlist):
    """
    Returns the estimated standard error of the mean (sx-bar) of the
    values in the passed list.  sem = stdev / sqrt(n)

    Usage:   lsem(inlist)
    """
    return stdev(inlist) / math.sqrt(len(inlist))
def lz(inlist, score):
    """
    Returns the z-score for a given input score, given that score and the
    list from which that score came.  Not appropriate for population calculations.

    Usage:   lz(inlist, score)
    """
    deviation = score - mean(inlist)
    return deviation / samplestdev(inlist)
def lzs(inlist):
    """
    Returns a list of z-scores, one for each score in the passed list.

    Usage:   lzs(inlist)
    """
    return [z(inlist, item) for item in inlist]
####################################
####### TRIMMING FUNCTIONS #######
####################################
def ltrimboth(l, proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
    10% of scores.  Assumes list is sorted by magnitude.  Slices off LESS if
    proportion results in a non-integer slice index (i.e., conservatively
    slices off proportiontocut).

    Usage:   ltrimboth (l,proportiontocut)
    Returns: trimmed version of list l
    """
    # int() truncates, so a fractional cut point trims conservatively
    cut = int(proportiontocut * len(l))
    return l[cut:len(l) - cut]
def ltrim1(l, proportiontocut, tail='right'):
    """
    Slices off the passed proportion of items from ONE end of the passed
    list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
    10% of scores).  Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off proportiontocut).

    Usage:   ltrim1 (l,proportiontocut,tail='right')  or set tail='left'
    Returns: trimmed version of list l
    """
    cut = int(proportiontocut * len(l))  # truncation = conservative trim
    if tail == 'right':
        lowercut, uppercut = 0, len(l) - cut
    elif tail == 'left':
        lowercut, uppercut = cut, len(l)
    return l[lowercut:uppercut]
####################################
##### CORRELATION FUNCTIONS ######
####################################
def lpaired(x,y):
    """
    Interactively determines the type of data and then runs the
    appropriated statistic for paired group data.

    Usage:   lpaired(x,y)
    Returns: appropriate statistic name, value, and probability
    """
    samples = ''
    # prompt until the user picks independent/related samples or correlation
    while samples not in ['i','r','I','R','c','C']:
        print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
        samples = input()
    if samples in ['i','I','r','R']:
        print('\nComparing variances ...', end=' ')
        # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
        r = obrientransform(x,y)
        f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
        if p<0.05:
            vartype='unequal, p='+str(round(p,4))
        else:
            vartype='equal'
        print(vartype)
        if samples in ['i','I']:
            if vartype[0]=='e':
                t,p = ttest_ind(x,y,0)
                print('\nIndependent samples t-test: ', round(t,4),round(p,4))
            else:
                # unequal variances: fall back to non-parametric tests
                if len(x)>20 or len(y)>20:
                    z,p = ranksums(x,y)
                    print('\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4))
                else:
                    u,p = mannwhitneyu(x,y)
                    print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4))
        else:  # RELATED SAMPLES
            if vartype[0]=='e':
                t,p = ttest_rel(x,y,0)
                print('\nRelated samples t-test: ', round(t,4),round(p,4))
            else:
                # NOTE(review): this calls ranksums() but labels the result a
                # Wilcoxon T-test; wilcoxont() looks like the intended call — confirm.
                t,p = ranksums(x,y)
                print('\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4))
    else:  # CORRELATION ANALYSIS
        corrtype = ''
        # prompt for the measurement level of the data
        while corrtype not in ['c','C','r','R','d','D']:
            print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
            corrtype = input()
        if corrtype in ['c','C']:
            m,b,r,p,see = linregress(x,y)
            print('\nLinear regression for continuous variables ...')
            lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
            pstat.printcc(lol)
        elif corrtype in ['r','R']:
            r,p = spearmanr(x,y)
            print('\nCorrelation for ranked variables ...')
            print("Spearman's r: ",round(r,4),round(p,4))
        else:  # DICHOTOMOUS
            r,p = pointbiserialr(x,y)
            print('\nAssuming x contains a dichotomous variable ...')
            print('Point Biserial r: ',round(r,4),round(p,4))
    print('\n\n')
    return None
def lpearsonr(x, y):
    """
    Calculates a Pearson correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (2nd), p.195.

    Usage:   lpearsonr(x,y)      where x and y are equal-length lists
    Returns: Pearson's r value, two-tailed p-value
    """
    TINY = 1.0e-30  # keeps the t denominator non-zero when |r| == 1
    if len(x) != len(y):
        raise ValueError('Input values not paired in pearsonr. Aborting.')
    n = len(x)
    x = list(map(float, x))
    y = list(map(float, y))
    # (removed dead code: xmean/ymean were computed here but never used)
    r_num = n * (summult(x, y)) - sum(x) * sum(y)
    r_den = math.sqrt((n * ss(x) - square_of_sums(x)) * (n * ss(y) - square_of_sums(y)))
    r = (r_num / r_den)  # denominator already a float
    df = n - 2
    # t statistic for r, then two-tailed probability via the incomplete beta
    t = r * math.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
    prob = betai(0.5 * df, 0.5, df / float(df + t * t))
    return r, prob
def llincc(x,y):
    """
    Calculates Lin's concordance correlation coefficient.

    Usage:   alincc(x,y)    where x, y are equal-length arrays
    Returns: Lin's CC
    """
    covar = lcov(x,y)*(len(x)-1)/float(len(x))  # correct denom to n
    xvar = lvar(x)*(len(x)-1)/float(len(x))  # correct denom to n
    yvar = lvar(y)*(len(y)-1)/float(len(y))  # correct denom to n
    # NOTE(review): this list-based function calls amean (the NumPy variant);
    # it will fail with NameError if the array functions aren't defined —
    # confirm whether mean() was intended here.
    lincc = (2 * covar) / ((xvar+yvar) +((amean(x)-amean(y))**2))
    return lincc
def lspearmanr(x,y):
    """
    Calculates a Spearman rank-order correlation coefficient.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.

    Usage:   lspearmanr(x,y)      where x and y are equal-length lists
    Returns: Spearman's r, two-tailed p-value
    """
    TINY = 1e-30  # NOTE(review): defined but never used in this function
    if len(x) != len(y):
        raise ValueError('Input values not paired in spearmanr. Aborting.')
    n = len(x)
    rankx = rankdata(x)
    ranky = rankdata(y)
    dsq = sumdiffsquared(rankx,ranky)  # sum of squared rank differences
    rs = 1 - 6*dsq / float(n*(n**2-1))  # classic Spearman formula
    # t statistic used to derive the two-tailed probability
    t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
    df = n-2
    probrs = betai(0.5*df,0.5,df/(df+t*t))  # t already a float
    # probability values for rs are from part 2 of the spearman function in
    # Numerical Recipies, p.510.  They are close to tables, but not exact. (?)
    return rs, probrs
def lpointbiserialr(x,y):
    """
    Calculates a point-biserial correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (1st), p.194.

    Usage:   lpointbiserialr(x,y)   where x,y are equal-length lists and x
             holds the dichotomous variable (exactly 2 distinct values)
    Returns: Point-biserial r, two-tailed p-value
    """
    TINY = 1e-30   # keeps the t computation finite when r == +/-1
    if len(x) != len(y):
        raise ValueError('INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.')
    data = pstat.abut(x,y)               # pair the two lists row-wise
    categories = pstat.unique(x)
    if len(categories) != 2:
        raise ValueError("Exactly 2 categories required for pointbiserialr().")
    else:   # there are 2 categories, continue
        # NOTE(review): codemap/recoded appear unused below -- the analysis
        # proceeds on 'data' split by the raw category values.  Confirm
        # pstat.recode has no side effects before deleting these two lines.
        codemap = pstat.abut(categories,list(range(2)))
        recoded = pstat.recode(data,codemap,0)
        x = pstat.linexand(data,0,categories[0])   # rows in category 0
        y = pstat.linexand(data,0,categories[1])   # rows in category 1
        xmean = mean(pstat.colex(x,1))             # mean score within each category
        ymean = mean(pstat.colex(y,1))
        n = len(data)
        # sqrt(p*q) factor from the category proportions
        adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
        rpb = (ymean - xmean)/samplestdev(pstat.colex(data,1))*adjust
        # two-tailed significance via the t distribution with n-2 df
        df = n-2
        t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
        prob = betai(0.5*df,0.5,df/(df+t*t))  # t already a float
        return rpb, prob
def lkendalltau(x,y):
    """
    Calculates Kendall's tau ... correlation of ordinal data.  Adapted
    from function kendl1 in Numerical Recipies.  Needs good test-routine.@@@

    Usage:   lkendalltau(x,y)
    Returns: Kendall's tau, two-tailed p-value
    """
    n1 = 0    # number of pairs not tied on x
    n2 = 0    # number of pairs not tied on y
    iss = 0   # concordant-minus-discordant pair count
    for j in range(len(x)-1):
        # BUGFIX: start at j+1 so each unordered pair is examined exactly
        # once.  The old range(j,...) also compared every point with itself
        # (a1 == a2 == 0), wrongly counting len(x)-1 self-"ties" into n2 and
        # deflating tau.  NR's kendl1 loops k = j+1 .. n.
        for k in range(j+1,len(y)):
            a1 = x[j] - x[k]
            a2 = y[j] - y[k]
            aa = a1 * a2
            if (aa):             # neither list has a tie
                n1 = n1 + 1
                n2 = n2 + 1
                if aa > 0:
                    iss = iss + 1
                else:
                    iss = iss -1
            else:
                if (a1):         # tied on y only
                    n1 = n1 + 1
                else:            # tied on x (or both)
                    n2 = n2 + 1
    tau = iss / math.sqrt(n1*n2)
    # normal approximation for the significance of tau
    svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
    z = tau / math.sqrt(svar)
    prob = erfcc(abs(z)/1.4142136)
    return tau, prob
def llinregress(x,y):
    """
    Calculates a regression line on x,y pairs.

    Usage:   llinregress(x,y)      x,y are equal-length lists of x-y coordinates
    Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
    """
    TINY = 1.0e-20   # keeps the t computation finite when r == +/-1
    if len(x) != len(y):
        raise ValueError('Input values not paired in linregress. Aborting.')
    n = len(x)
    x = list(map(float,x))
    y = list(map(float,y))
    xmean = mean(x)
    ymean = mean(y)
    # computational formula for r (no explicit centering required)
    r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
    r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
    r = r_num / r_den
    # (removed an unused local: the Fisher z-transform of r was computed
    # here but never referenced)
    df = n-2
    # two-tailed significance of r via the t distribution
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = betai(0.5*df,0.5,df/(df+t*t))
    slope = r_num / float(n*ss(x) - square_of_sums(x))
    intercept = ymean - slope*xmean
    sterrest = math.sqrt(1-r*r)*samplestdev(y)
    return slope, intercept, r, prob, sterrest
####################################
##### INFERENTIAL STATISTICS #####
####################################
def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
    """
    Calculates the t-obtained for the independent samples T-test on ONE group
    of scores a, given a population mean.  If printit=1, results are printed
    to the screen.  If printit='filename', the results are output to 'filename'
    using the given writemode (default=append).  Returns t-value, and prob.

    Usage:   lttest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
    Returns: t-value, two-tailed prob
    """
    xbar = mean(a)
    s2 = var(a)
    n = len(a)
    df = n-1
    svar = ((n-1)*s2)/float(df)   # equals s2; kept for parity with the formula
    t = (xbar-popmean)/math.sqrt(svar*(1.0/n))
    prob = betai(0.5*df,0.5,float(df)/(df+t*t))
    if printit != 0:
        outputpairedstats(printit,writemode,
                          'Population','--',popmean,0,0,0,
                          name,n,xbar,s2,min(a),max(a),
                          'Single-sample T-test.',t,prob)
    return t,prob
def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
    """
    Calculates the t-obtained T-test on TWO INDEPENDENT samples of
    scores a, and b.  From Numerical Recipies, p.483.  If printit=1, results
    are printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Returns t-value,
    and prob.

    Usage:   lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
    Returns: t-value, two-tailed prob
    """
    mean1, mean2 = mean(a), mean(b)
    var1, var2 = stdev(a)**2, stdev(b)**2
    na, nb = len(a), len(b)
    df = na+nb-2
    # pooled variance of the two samples
    pooled = ((na-1)*var1 + (nb-1)*var2) / float(df)
    t = (mean1-mean2) / math.sqrt(pooled*(1.0/na + 1.0/nb))
    prob = betai(0.5*df, 0.5, df/(df+t*t))
    if printit != 0:
        outputpairedstats(printit,writemode,
                          name1,na,mean1,var1,min(a),max(a),
                          name2,nb,mean2,var2,min(b),max(b),
                          'Independent samples T-test.',t,prob)
    return t,prob
def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO RELATED samples of scores,
    a and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output to
    'filename' using the given writemode (default=append).  Returns t-value,
    and prob.

    Usage:   lttest_rel(a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a')
    Returns: t-value, two-tailed prob
    """
    if len(a)!=len(b):
        raise ValueError('Unequal length lists in ttest_rel.')
    mean1, mean2 = mean(a), mean(b)
    var1, var2 = var(a), var(b)
    n = len(a)
    df = n-1
    # sample covariance of the paired scores (n-1 denominator)
    cov = sum([(x1-mean1)*(x2-mean2) for x1,x2 in zip(a,b)]) / float(df)
    # standard error of the mean difference, accounting for the pairing
    sd = math.sqrt((var1+var2 - 2.0*cov)/float(n))
    t = (mean1-mean2)/sd
    prob = betai(0.5*df,0.5,df/(df+t*t))
    if printit != 0:
        outputpairedstats(printit,writemode,
                          name1,n,mean1,var1,min(a),max(a),
                          name2,n,mean2,var2,min(b),max(b),
                          'Related samples T-test.',t,prob)
    return t, prob
def lchisquare(f_obs,f_exp=None):
    """
    Calculates a one-way chi square for list of observed frequencies and returns
    the result.  If no expected frequencies are given, the total N is assumed to
    be equally distributed across all groups.

    Usage:   lchisquare(f_obs, f_exp=None)   f_obs = list of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """
    k = len(f_obs)                 # number of groups
    # identity test ('is None'); '== None' misfires on array-like arguments
    if f_exp is None:
        f_exp = [sum(f_obs)/float(k)] * len(f_obs)  # create k bins with = freq.
    chisq = 0
    for obs, exp in zip(f_obs, f_exp):
        chisq = chisq + (obs-exp)**2 / float(exp)
    return chisq, chisqprob(chisq, k-1)
def lks_2samp (data1,data2):
    """
    Computes the Kolmogorov-Smirnof statistic on 2 samples.  From
    Numerical Recipies in C, page 493.

    Usage:   lks_2samp(data1,data2)   data1&2 are lists of values for 2 conditions
    Returns: KS D-value (signed), associated p-value
    """
    j1 = 0
    j2 = 0
    fn1 = 0.0
    fn2 = 0.0
    n1 = len(data1)
    n2 = len(data2)
    en1 = n1
    en2 = n2
    d = 0.0
    # sort local copies -- the old code sorted the caller's lists in place
    data1 = sorted(data1)
    data2 = sorted(data2)
    while j1 < n1 and j2 < n2:
        d1=data1[j1]
        d2=data2[j2]
        if d1 <= d2:
            # BUGFIX: after consuming element j1 the empirical CDF is
            # (j1+1)/n, not j1/n.  The old code lagged one step behind,
            # e.g. lks_2samp([1],[2]) returned D == 0 instead of 1.
            j1 = j1 + 1
            fn1 = j1/float(en1)
        if d2 <= d1:
            j2 = j2 + 1
            fn2 = j2/float(en2)
        dt = (fn2-fn1)
        if math.fabs(dt) > math.fabs(d):
            d = dt
    try:
        en = math.sqrt(en1*en2/float(en1+en2))
        prob = ksprob((en+0.12+0.11/en)*abs(d))
    except:
        prob = 1.0
    return d, prob
def lmannwhitneyu(x,y):
    """
    Calculates a Mann-Whitney U statistic on the provided scores and
    returns the result.  Use only when the n in each condition is < 20 and
    you have 2 independent samples of ranks.  NOTE: Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U found in the tables.  Equivalent to Kruskal-Wallis H with
    just 2 groups.

    Usage:   lmannwhitneyu(x,y)
    Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
    """
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(x+y)
    rankx = ranked[0:n1]          # get the x-ranks (y-ranks are not needed;
                                  # removed the unused 'ranky' local)
    u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx)  # calc U for x
    u2 = n1*n2 - u1                            # remainder is U for y
    bigu = max(u1,u2)
    smallu = min(u1,u2)
    T = math.sqrt(tiecorrect(ranked))  # correction factor for tied scores
    if T == 0:
        raise ValueError('All numbers are identical in lmannwhitneyu')
    sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
    z = abs((bigu-n1*n2/2.0) / sd)     # normal approximation for prob calc
    return smallu, 1.0 - zprob(z)
def ltiecorrect(rankvals):
    """
    Corrects for ties in Mann Whitney U and Kruskal Wallis H tests.  See
    Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
    New York: McGraw-Hill.  Code adapted from |Stat rankind.c code.

    Usage:   ltiecorrect(rankvals)
    Returns: T correction factor for U or H
    """
    # idiom fix: the sorted values used to be bound to a local named
    # 'sorted', shadowing the builtin; the sort-index vector is unused.
    svec, _ = shellsort(rankvals)
    n = len(svec)
    T = 0.0
    i = 0
    while (i<n-1):
        if svec[i] == svec[i+1]:
            # walk to the end of this run of tied values
            nties = 1
            while (i<n-1) and (svec[i] == svec[i+1]):
                nties = nties +1
                i = i +1
            T = T + nties**3 - nties   # accumulate (t^3 - t) per tie group
        i = i+1
    T = T / float(n**3-n)
    return 1.0 - T
def lranksums(x,y):
    """
    Calculates the rank sums statistic on the provided scores and
    returns the result.  Use only when the n in each condition is > 20 and you
    have 2 independent samples of ranks.

    Usage:   lranksums(x,y)
    Returns: a z-statistic, two-tailed p-value
    """
    nx, ny = len(x), len(y)
    ranked = rankdata(x+y)
    xranks = ranked[:nx]                  # ranks belonging to the x sample
    observed = sum(xranks)
    expected = nx*(nx+ny+1) / 2.0         # mean rank sum under H0
    z = (observed - expected) / math.sqrt(nx*ny*(nx+ny+1)/12.0)
    prob = 2*(1.0 - zprob(abs(z)))
    return z, prob
def lwilcoxont(x,y):
    """
    Calculates the Wilcoxon T-test for related samples and returns the
    result.  A non-parametric T-test.

    Usage:   lwilcoxont(x,y)
    Returns: a t-statistic, two-tail probability estimate
    """
    if len(x) != len(y):
        raise ValueError('Unequal N in wilcoxont. Aborting.')
    # keep only the non-zero pairwise differences
    diffs = [xi - yi for xi, yi in zip(x, y) if xi - yi != 0]
    count = len(diffs)
    ranks = rankdata(list(map(abs, diffs)))
    r_plus = 0.0
    r_minus = 0.0
    for diff, rank in zip(diffs, ranks):
        if diff < 0:
            r_minus = r_minus + rank
        else:
            r_plus = r_plus + rank
    wt = min(r_plus, r_minus)
    # normal approximation to the distribution of T
    mn = count * (count+1) * 0.25
    se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
    z = math.fabs(wt-mn) / se
    prob = 2*(1.0 - zprob(abs(z)))
    return wt, prob
def lkruskalwallish(*args):
    """
    The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
    groups, requiring at least 5 subjects in each group.  This function
    calculates the Kruskal-Wallis H-test for 3 or more independent samples
    and returns the result.

    Usage:   lkruskalwallish(*args)
    Returns: H-statistic (corrected for ties), associated p-value
    """
    args = list(args)
    n = list(map(len,args))   # group sizes (dead [0]*len(args) init removed)
    # pool all scores; 'alldata' replaces a local that shadowed builtin all()
    alldata = []
    for group in args:
        alldata = alldata + group
    ranked = rankdata(alldata)
    T = tiecorrect(ranked)
    # slice the pooled ranks back into their original groups
    for i in range(len(args)):
        args[i] = ranked[0:n[i]]
        del ranked[0:n[i]]
    rsums = []
    for i in range(len(args)):
        rsums.append(sum(args[i])**2)
        rsums[i] = rsums[i] / float(n[i])
    ssbn = sum(rsums)
    totaln = sum(n)
    h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
    df = len(args) - 1
    if T == 0:
        raise ValueError('All numbers are identical in lkruskalwallish')
    h = h / float(T)          # tie correction
    return h, chisqprob(h,df)
def lfriedmanchisquare(*args):
    """
    Friedman Chi-Square is a non-parametric, one-way within-subjects
    ANOVA.  This function calculates the Friedman Chi-square test for repeated
    measures and returns the result, along with the associated probability
    value.  It assumes 3 or more repeated measures.  Only 3 levels requires a
    minimum of 10 subjects in the study.  Four levels requires 5 subjects per
    level(??).

    Usage:   lfriedmanchisquare(*args)
    Returns: chi-square statistic, associated p-value
    """
    k = len(args)
    if k < 3:
        raise ValueError('Less than 3 levels. Friedman test not appropriate.')
    n = len(args[0])
    data = pstat.abut(*tuple(args))
    # rank each subject's scores across the k conditions
    for i in range(len(data)):
        data[i] = rankdata(data[i])
    # BUGFIX: the statistic needs the squared RANK sums per condition
    # (column sums of the ranked data); the old code summed the raw scores
    # in args[i], which is not the Friedman statistic.
    ssbn = 0
    for i in range(k):
        ssbn = ssbn + sum([row[i] for row in data])**2
    chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
    return chisq, chisqprob(chisq,k-1)
####################################
#### PROBABILITY CALCULATIONS ####
####################################
def lchisqprob(chisq,df):
    """
    Returns the (1-tailed) probability value associated with the provided
    chi-square value and df.  Adapted from chisq.c in Gary Perlman's |Stat.

    Usage:   lchisqprob(chisq,df)
    """
    BIG = 20.0
    def ex(x):
        # exp() that underflows to 0.0 rather than evaluating exp of a
        # very negative argument
        BIG = 20.0
        if x < -BIG:
            return 0.0
        else:
            return math.exp(x)
    if chisq <=0 or df < 1:
        return 1.0
    a = 0.5 * chisq
    if df%2 == 0:
        even = 1
    else:
        even = 0
    if df > 1:
        y = ex(-a)
    # series starting value (exact result for df <= 2)
    if even:
        s = y
    else:
        s = 2.0 * zprob(-math.sqrt(chisq))
    if (df > 2):
        chisq = 0.5 * (df - 1.0)   # the series index runs up to this bound
        if even:
            z = 1.0
        else:
            z = 0.5
        if a > BIG:
            # work in log space to avoid floating-point underflow/overflow
            if even:
                e = 0.0
            else:
                e = math.log(math.sqrt(math.pi))
            c = math.log(a)
            while (z <= chisq):
                e = math.log(z) + e
                s = s + ex(c*z-a-e)
                z = z + 1.0
            return s
        else:
            # direct evaluation of the series
            if even:
                e = 1.0
            else:
                e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
            c = 0.0
            while (z <= chisq):
                e = e * (a/float(z))
                c = c + e
                z = z + 1.0
            return (c*y+s)
    else:
        return s
def lerfcc(x):
    """
    Returns the complementary error function erfc(x) with fractional
    error everywhere less than 1.2e-7.  Adapted from Numerical Recipies.

    Usage:   lerfcc(x)
    """
    # Chebyshev-fit coefficients, listed outermost-first so a Horner loop
    # reproduces the original nested-polynomial evaluation exactly
    coeffs = (1.00002368, 0.37409196, 0.09678418, -0.18628806, 0.27886807,
              -1.13520398, 1.48851587, -0.82215223, 0.17087277)
    z = abs(x)
    t = 1.0 / (1.0 + 0.5*z)
    poly = 0.0
    for c in reversed(coeffs):
        poly = c + t*poly
    ans = t * math.exp(-z*z - 1.26551223 + t*poly)
    # the fit is for x >= 0; use erfc(-x) = 2 - erfc(x) for negative x
    if x >= 0:
        return ans
    else:
        return 2.0 - ans
def lzprob(z):
    """
    Returns the area under the normal curve 'to the left of' the given z value.
    Thus,
        for z<0, zprob(z) = 1-tail probability
        for z>0, 1.0-zprob(z) = 1-tail probability
        for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
    Adapted from z.c in Gary Perlman's |Stat.

    Usage:   lzprob(z)
    """
    Z_MAX = 6.0    # maximum meaningful z-value
    if z == 0.0:
        x = 0.0
    else:
        y = 0.5 * math.fabs(z)
        if y >= (Z_MAX*0.5):
            x = 1.0
        elif (y < 1.0):
            # polynomial approximation for |z| < 2
            w = y*y
            x = ((((((((0.000124818987 * w
                -0.001075204047) * w +0.005198775019) * w
                -0.019198292004) * w +0.059054035642) * w
                -0.151968751364) * w +0.319152932694) * w
                -0.531923007300) * w +0.797884560593) * y * 2.0
        else:
            # polynomial approximation for 2 <= |z| < 6
            y = y - 2.0
            x = (((((((((((((-0.000045255659 * y
                +0.000152529290) * y -0.000019538132) * y
                -0.000676904986) * y +0.001390604284) * y
                -0.000794620820) * y -0.002034254874) * y
                +0.006549791214) * y -0.010557625006) * y
                +0.011630447319) * y -0.009279453341) * y
                +0.005353579108) * y -0.002141268741) * y
                +0.000535310849) * y +0.999936657524
    # x now approximates P(-|z| < Z < |z|); convert to the left-tail area
    if z > 0.0:
        prob = ((x+1.0)*0.5)
    else:
        prob = ((1.0-x)*0.5)
    return prob
def lksprob(alam):
    """
    Computes a Kolmolgorov-Smirnov t-test significance level.  Adapted from
    Numerical Recipies.

    Usage:   lksprob(alam)
    """
    exponent = -2.0*alam*alam
    total = 0.0
    sign = 2.0        # alternating +2/-2 series coefficient
    prevterm = 0.0
    for j in range(1,201):
        term = sign*math.exp(exponent*j*j)
        total = total + term
        # series has converged when the terms become negligible
        if math.fabs(term) <= (0.001*prevterm) or math.fabs(term) < (1.0e-8*total):
            return total
        sign = -sign
        prevterm = math.fabs(term)
    return 1.0  # Get here only if fails to converge; was 0.0!!
def lfprob (dfnum, dfden, F):
    """
    Returns the (1-tailed) significance level (p-value) of an F
    statistic given the degrees of freedom for the numerator (dfR-dfF) and
    the degrees of freedom for the denominator (dfF).

    Usage:   lfprob(dfnum, dfden, F)        where usually dfnum=dfbn, dfden=dfwn
    """
    # upper tail of the F distribution via the incomplete beta function
    return betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
def lbetacf(a,b,x):
"""
This function evaluates the continued fraction form of the incomplete
Beta function, betai. (Adapted from: Numerical Recipies in C.)
Usage: lbetacf(a,b,x)
"""
ITMAX = 200
EPS = 3.0e-7
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
if (abs(az-aold)<(EPS*abs(az))):
return az
print('a or b too big, or ITMAX too small in Betacf.')
def lgammln(xx):
    """
    Returns the gamma function of xx.
        Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
    (Adapted from: Numerical Recipies in C.)

    Usage:   lgammln(xx)
    """
    # Lanczos-approximation series coefficients
    coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
             0.120858003e-2, -0.536382e-5]
    x = xx - 1.0
    tmp = x + 5.5
    tmp = tmp - (x+0.5)*math.log(tmp)
    ser = 1.0
    for term in coeff:      # iterate the coefficients directly
        x = x + 1
        ser = ser + term/x
    return -tmp + math.log(2.50662827465*ser)
def lbetai(a,b,x):
    """
    Returns the incomplete beta function:

        I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.  The continued fraction formulation is implemented here,
    using the betacf function.  (Adapted from: Numerical Recipies in C.)

    Usage:   lbetai(a,b,x)
    """
    if x < 0.0 or x > 1.0:
        raise ValueError('Bad x in lbetai')
    if x == 0.0 or x == 1.0:
        bt = 0.0
    else:
        # the prefactor common to both symmetric forms below
        log_bt = (gammln(a+b) - gammln(a) - gammln(b)
                  + a*math.log(x) + b*math.log(1.0-x))
        bt = math.exp(log_bt)
    # evaluate the continued fraction on whichever side converges rapidly,
    # using the symmetry relation I_x(a,b) = 1 - I_(1-x)(b,a) otherwise
    if x < (a+1.0)/(a+b+2.0):
        return bt*betacf(a,b,x)/float(a)
    return 1.0 - bt*betacf(b,a,1.0-x)/float(b)
####################################
####### ANOVA CALCULATIONS #######
####################################
def lF_oneway(*lists):
    """
    Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups.  From Heiman, pp.394-7.

    Usage:   F_oneway(*lists)    where *lists is any number of lists, one per
                                 treatment group
    Returns: F value, one-tailed p-value
    """
    a = len(lists)  # ANOVA on 'a' groups, each in it's own list
    # (removed dead code: tmp/means/vars/ns were computed but never used,
    # and 'vars' plus the loop variable 'list' shadowed builtins)
    alldata = []
    for group in lists:
        alldata = alldata + group
    alldata = N.array(alldata)
    bign = len(alldata)
    # total sum of squares about the grand mean
    sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
    # between-groups sum of squares
    ssbn = 0
    for group in lists:
        ssbn = ssbn + asquare_of_sums(N.array(group))/float(len(group))
    ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
    sswn = sstot-ssbn                 # within-groups (error) sum of squares
    dfbn = a-1
    dfwn = bign - a
    msb = ssbn/float(dfbn)
    msw = sswn/float(dfwn)
    f = msb/msw
    prob = fprob(dfbn,dfwn,f)
    return f, prob
def lF_value (ER,EF,dfnum,dfden):
    """
    Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfR-dfF = degrees of freedom of the numerator
        dfF = degrees of freedom associated with the denominator/Full model

    Usage: lF_value(ER,EF,dfnum,dfden)
    """
    ms_num = (ER-EF)/float(dfnum)   # mean square gained by the restriction
    ms_den = EF/float(dfden)        # mean square error of the full model
    return ms_num / ms_den
####################################
######## SUPPORT FUNCTIONS #######
####################################
def writecc (listoflists,file,writetype='w',extra=2):
    """
    Writes a list of lists to a file in columns, customized by the max
    size of items within the columns (max size of items in col, +2 characters)
    to specified file.  File-overwrite is the default.  Rows equal to '\n'
    (or ['\n']) emit a blank line; rows equal to 'dashes' (or ['dashes'])
    emit a separator row of dashes.

    Usage:   writecc (listoflists,file,writetype='w',extra=2)
    Returns: None
    """
    # promote a flat list to a one-row table
    if type(listoflists[0]) not in [ListType,TupleType]:
        listoflists = [listoflists]
    outfile = open(file,writetype)
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    # collect the separator rows so column widths are computed without them
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
            rowstokill = rowstokill + [i]
    rowstokill.reverse()   # delete from the end so earlier indices stay valid
    for row in rowstokill:
        del list2print[row]
    # per-column width = widest stringified item + 'extra' padding chars
    maxsize = [0]*len(list2print[0])
    for col in range(len(list2print[0])):
        items = pstat.colex(list2print,col)
        items = list(map(pstat.makestr,items))
        maxsize[col] = max(list(map(len,items))) + extra
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            dashes = [0]*len(maxsize)
            for j in range(len(maxsize)):
                dashes[j] = '-'*(maxsize[j]-2)
            outfile.write(pstat.lineincustcols(dashes,maxsize))
        else:
            outfile.write(pstat.lineincustcols(row,maxsize))
        outfile.write('\n')
    outfile.close()
    return None
def lincr(l,cap):  # to increment a list up to a max-list of 'cap'
    """
    Simulate a counting system from an n-dimensional list.

    Usage:   lincr(l,cap)   l=list to increment, cap=max values for each list pos'n
    Returns: next set of values for list l, OR -1 (if overflow)
    """
    l[0] = l[0] + 1  # e.g., [0,0,0] --> [2,4,3] (=cap)
    lastpos = len(l) - 1
    for pos in range(len(l)):
        if l[pos] <= cap[pos]:        # this digit did not overflow
            continue
        if pos < lastpos:             # carry into the next digit
            l[pos] = 0
            l[pos+1] = l[pos+1] + 1
        else:                         # overflow past the last digit: finished
            l = -1
    return l
def lsum (inlist):
    """
    Returns the sum of the items in the passed list.

    Usage:   lsum(inlist)
    """
    total = 0
    for value in inlist:
        total += value
    return total
def lcumsum (inlist):
    """
    Returns a list consisting of the cumulative sum of the items in the
    passed list.

    Usage:   lcumsum(inlist)
    """
    running = copy.deepcopy(inlist)   # deep copy, preserving historical behavior
    for idx in range(1, len(running)):
        running[idx] += running[idx-1]
    return running
def lss(inlist):
    """
    Squares each value in the passed list, adds up these squares and
    returns the result.

    Usage:   lss(inlist)
    """
    # a list (not a generator) is passed to sum so the module-level
    # Dispatch version of sum still accepts it
    return sum([value*value for value in inlist])
def lsummult (list1,list2):
    """
    Multiplies elements in list1 and list2, element by element, and
    returns the sum of all resulting multiplications.  Must provide equal
    length lists.

    Usage:   lsummult(list1,list2)
    """
    if len(list1) != len(list2):
        raise ValueError("Lists not equal length in summult.")
    total = 0
    for first, second in zip(list1, list2):   # lengths verified above
        total = total + first*second
    return total
def lsumdiffsquared(x,y):
    """
    Takes pairwise differences of the values in lists x and y, squares
    these differences, and returns the sum of these squares.

    Usage:   lsumdiffsquared(x,y)
    Returns: sum[(x[i]-y[i])**2]
    """
    total = 0
    # indexed over len(x), as before (so a too-short y still raises IndexError)
    for idx in range(len(x)):
        total = total + (x[idx]-y[idx])**2
    return total
def lsquare_of_sums(inlist):
    """
    Adds the values in the passed list, squares the sum, and returns
    the result.

    Usage:   lsquare_of_sums(inlist)
    Returns: sum(inlist[i])**2
    """
    total = sum(inlist)
    squared = float(total) * total   # float() guarantees a float result
    return squared
def lshellsort(inlist):
    """
    Shellsort algorithm.  Sorts a 1D-list.

    Usage:   lshellsort(inlist)
    Returns: sorted-inlist, sorting-index-vector (for original list)
    """
    n = len(inlist)
    svec = copy.deepcopy(inlist)
    ivec = list(range(n))
    # BUGFIX: '/' yields a float in Python 3, making range(gap,n) raise a
    # TypeError; the gap schedule needs floor division (as the original
    # "integer division needed" comments already noted).
    gap = n // 2
    while gap > 0:
        for i in range(gap,n):
            for j in range(i-gap,-1,-gap):
                # the while runs at most once per j: after a swap the
                # condition is false, so it acts as an 'if'
                while j>=0 and svec[j]>svec[j+gap]:
                    temp = svec[j]
                    svec[j] = svec[j+gap]
                    svec[j+gap] = temp
                    itemp = ivec[j]
                    ivec[j] = ivec[j+gap]
                    ivec[j+gap] = itemp
        gap = gap // 2
    # svec is now sorted inlist, and ivec has the order svec[i] = vec[ivec[i]]
    return svec, ivec
def lrankdata(inlist):
    """
    Ranks the data in inlist, dealing with ties appropritely.  Assumes
    a 1D inlist.  Adapted from Gary Perlman's |Stat ranksort.

    Usage:   lrankdata(inlist)
    Returns: a list of length equal to inlist, containing rank scores
    """
    n = len(inlist)
    svec, ivec = shellsort(inlist)
    ranks = [0]*n
    sumranks = 0
    dupcount = 0
    for i in range(n):
        sumranks += i
        dupcount += 1
        # at the end of a run of equal values (or of the list), hand the
        # average rank back to every member of the run
        if i == n-1 or svec[i] != svec[i+1]:
            averank = sumranks / float(dupcount) + 1
            for j in range(i-dupcount+1, i+1):
                ranks[ivec[j]] = averank
            sumranks = 0
            dupcount = 0
    return ranks
def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):
    """
    Prints or write to a file stats for two groups, using the name, n,
    mean, sterr, min and max for each group, as well as the statistic name,
    its value, and the associated p-value.

    Usage:   outputpairedstats(fname,writemode,
                               name1,n1,mean1,stderr1,min1,max1,
                               name2,n2,mean2,stderr2,min2,max2,
                               statname,stat,prob)
    Returns: None
    """
    suffix = ''                       # for *s after the p-value
    # unwrap prob if it arrived as a rank-1 array; the bare except keeps
    # plain floats (which have no .shape) working
    try:
        x = prob.shape
        prob = prob[0]
    except:
        pass
    # conventional significance stars
    if prob < 0.001:  suffix = ' ***'
    elif prob < 0.01:  suffix = ' **'
    elif prob < 0.05:  suffix = ' *'
    title = [['Name','N','Mean','SD','Min','Max']]
    # NOTE(review): se1/se2 appear to hold variances here -- sqrt converts
    # them to SDs for display; confirm against the lttest_* callers
    lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
                  [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
    if type(fname)!=StringType or len(fname)==0:   # no filename: print to screen
        print()
        print(statname)
        print()
        pstat.printcc(lofl)
        print()
        # unwrap 0-d array stat/prob values before rounding
        try:
            if stat.shape == ():
                stat = stat[0]
            if prob.shape == ():
                prob = prob[0]
        except:
            pass
        print('Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix)
        print()
    else:                                          # append/write to the file
        file = open(fname,writemode)
        file.write('\n'+statname+'\n\n')
        file.close()
        writecc(lofl,fname,'a')
        file = open(fname,'a')
        try:
            if stat.shape == ():
                stat = stat[0]
            if prob.shape == ():
                prob = prob[0]
        except:
            pass
        file.write(pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n']))
        file.close()
    return None
def lfindwithin (data):
    """
    Returns an integer representing a binary vector, where 1=within-
    subject factor, 0=between.  Input equals the entire data 2D list (i.e.,
    column 0=random factor, column -1=measured values (those two are skipped).
    Note: input data is in |Stat format ... a list of lists ("2D list") with
    one row per measured value, first column=subject identifier, last column=
    score, one in-between column per factor (these columns contain level
    designations on each factor).  See also stats.anova.__doc__.

    Usage:   lfindwithin(data)     data in |Stat format
    """
    numfact = len(data[0])-1
    withinvec = 0
    for col in range(1,numfact):
        # take any single level of this factor and collect the subjects
        # observed at that level
        examplelevel = pstat.unique(pstat.colex(data,col))[0]
        rows = pstat.linexand(data,col,examplelevel)  # get 1 level of this factor
        factsubjs = pstat.unique(pstat.colex(rows,0))
        allsubjs = pstat.unique(pstat.colex(data,0))
        # every subject appearing within one level means the factor varies
        # within subjects; record it as a set bit at this column position
        if len(factsubjs) == len(allsubjs):  # fewer Ss than scores on this factor?
            withinvec = withinvec + (1 << col)
    return withinvec
#########################################################
#########################################################
####### DISPATCH LISTS AND TUPLES TO ABOVE FCNS #########
#########################################################
#########################################################
## CENTRAL TENDENCY:
# Each name below is a Dispatch instance that routes a call to the
# list-version (l-prefixed) implementation above when the first argument is
# a list or tuple; the probability functions dispatch on int/float instead.
# The array (a-prefixed) versions are registered further down when Numeric
# support is available.
geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)), )
harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)), )
mean = Dispatch ( (lmean, (ListType, TupleType)), )
median = Dispatch ( (lmedian, (ListType, TupleType)), )
medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)), )
mode = Dispatch ( (lmode, (ListType, TupleType)), )
## MOMENTS:
moment = Dispatch ( (lmoment, (ListType, TupleType)), )
variation = Dispatch ( (lvariation, (ListType, TupleType)), )
skew = Dispatch ( (lskew, (ListType, TupleType)), )
kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)), )
describe = Dispatch ( (ldescribe, (ListType, TupleType)), )
## FREQUENCY STATISTICS:
itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)), )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)), )
percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)), )
histogram = Dispatch ( (lhistogram, (ListType, TupleType)), )
cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)), )
relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)), )
## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)), )
samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)), )
samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)), )
var = Dispatch ( (lvar, (ListType, TupleType)), )
stdev = Dispatch ( (lstdev, (ListType, TupleType)), )
sterr = Dispatch ( (lsterr, (ListType, TupleType)), )
sem = Dispatch ( (lsem, (ListType, TupleType)), )
z = Dispatch ( (lz, (ListType, TupleType)), )
zs = Dispatch ( (lzs, (ListType, TupleType)), )
## TRIMMING FCNS:
trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)), )
trim1 = Dispatch ( (ltrim1, (ListType, TupleType)), )
## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (ListType, TupleType)), )
pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)), )
spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)), )
pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)), )
kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)), )
linregress = Dispatch ( (llinregress, (ListType, TupleType)), )
## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)), )
ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)), )
ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)), )
chisquare = Dispatch ( (lchisquare, (ListType, TupleType)), )
ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)), )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)), )
ranksums = Dispatch ( (lranksums, (ListType, TupleType)), )
tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)), )
wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)), )
kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)), )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)), )
## PROBABILITY CALCS:
# these dispatch on scalar (int/float) first arguments
chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)), )
zprob = Dispatch ( (lzprob, (IntType, FloatType)), )
ksprob = Dispatch ( (lksprob, (IntType, FloatType)), )
fprob = Dispatch ( (lfprob, (IntType, FloatType)), )
betacf = Dispatch ( (lbetacf, (IntType, FloatType)), )
betai = Dispatch ( (lbetai, (IntType, FloatType)), )
erfcc = Dispatch ( (lerfcc, (IntType, FloatType)), )
gammln = Dispatch ( (lgammln, (IntType, FloatType)), )
## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)), )
F_value = Dispatch ( (lF_value, (ListType, TupleType)), )
## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (ListType, TupleType)), )
# NOTE: 'sum' below intentionally shadows the builtin within this module
sum = Dispatch ( (lsum, (ListType, TupleType)), )
cumsum = Dispatch ( (lcumsum, (ListType, TupleType)), )
ss = Dispatch ( (lss, (ListType, TupleType)), )
summult = Dispatch ( (lsummult, (ListType, TupleType)), )
square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)), )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)), )
shellsort = Dispatch ( (lshellsort, (ListType, TupleType)), )
rankdata = Dispatch ( (lrankdata, (ListType, TupleType)), )
findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)), )
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
import numpy as N
import numpy.linalg as LA
#####################################
######## ACENTRAL TENDENCY ########
#####################################
def ageometricmean (inarray,dimension=None,keepdims=0):
    """
    Calculates the geometric mean of the values in the passed array.
    That is: n-th root of (x1 * x2 * ... * xn).  Defaults to ALL values in
    the passed array.  Use dimension=None to flatten array first.  REMEMBER: if
    dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
    if dimension is a sequence, it collapses over all specified dimensions.  If
    keepdims is set to 1, the resulting array will have as many dimensions as
    inarray, with only 1 'level' per dim that was collapsed over.

    Usage:   ageometricmean(inarray,dimension=None,keepdims=0)
    Returns: geometric mean computed over dim(s) listed in dimension
    """
    # N.float64 instead of the deprecated N.float_ (removed in numpy 2.0).
    inarray = N.array(inarray,N.float64)
    if dimension == None:
        inarray = N.ravel(inarray)
        size = len(inarray)
        # nth-root each element first, then multiply, to limit overflow.
        mult = N.power(inarray,1.0/size)
        mult = N.multiply.reduce(mult)
    elif type(dimension) in [IntType,FloatType]:
        size = inarray.shape[dimension]
        mult = N.power(inarray,1.0/size)
        mult = N.multiply.reduce(mult,dimension)
        if keepdims == 1:
            shp = list(inarray.shape)
            shp[dimension] = 1
            # BUG FIX: original reshaped the builtin `sum` instead of `mult`,
            # so keepdims=1 on this branch produced garbage / a TypeError.
            mult = N.reshape(mult,shp)
    else: # must be a SEQUENCE of dims to average over
        dims = list(dimension)
        dims.sort()
        dims.reverse()
        size = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float64)
        mult = N.power(inarray,1.0/size)
        for dim in dims:
            mult = N.multiply.reduce(mult,dim)
        if keepdims == 1:
            shp = list(inarray.shape)
            for dim in dims:
                shp[dim] = 1
            mult = N.reshape(mult,shp)
    return mult
def aharmonicmean (inarray,dimension=None,keepdims=0):
    """
    Calculates the harmonic mean of the values in the passed array.
    That is:  n / (1/x1 + 1/x2 + ... + 1/xn).  Defaults to ALL values in
    the passed array.  Use dimension=None to flatten array first.  REMEMBER: if
    dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
    if dimension is a sequence, it collapses over all specified dimensions.  If
    keepdims is set to 1, the resulting array will have as many dimensions as
    inarray, with only 1 'level' per dim that was collapsed over.

    Usage:   aharmonicmean(inarray,dimension=None,keepdims=0)
    Returns: harmonic mean computed over dim(s) in dimension
    """
    inarray = inarray.astype(N.float_)
    if dimension == None:
        # Flat case: sum of reciprocals over the whole (raveled) array.
        inarray = N.ravel(inarray)
        size = len(inarray)
        s = N.add.reduce(1.0 / inarray)
    elif type(dimension) in [IntType,FloatType]:
        # Single-dimension case.
        size = float(inarray.shape[dimension])
        s = N.add.reduce(1.0/inarray, dimension)
        if keepdims == 1:
            shp = list(inarray.shape)
            shp[dimension] = 1
            s = N.reshape(s,shp)
    else: # must be a SEQUENCE of dims to average over
        dims = list(dimension)
        dims.sort()
        # nondims = the dimensions NOT collapsed over (kept in the result).
        nondims = []
        for i in range(len(inarray.shape)):
            if i not in dims:
                nondims.append(i)
        tinarray = N.transpose(inarray,nondims+dims) # put keep-dims first
        idx = [0] *len(nondims)
        if idx == []:
            # Collapsing over ALL dimensions: same as the flat case.
            size = len(N.ravel(inarray))
            s = asum(1.0 / inarray)
            if keepdims == 1:
                s = N.reshape([s],N.ones(len(inarray.shape)))
        else:
            # Walk every index combination of the kept dims with incr()
            # (an odometer-style index incrementer defined elsewhere in
            # this module) and sum reciprocals of each collapsed slice.
            idx[0] = -1
            loopcap = N.array(tinarray.shape[0:len(nondims)]) -1
            s = N.zeros(loopcap+1,N.float_)
            while incr(idx,loopcap) != -1:
                s[idx] = asum(1.0/tinarray[idx])
            size = N.multiply.reduce(N.take(inarray.shape,dims))
        if keepdims == 1:
            shp = list(inarray.shape)
            for dim in dims:
                shp[dim] = 1
            s = N.reshape(s,shp)
    return size / s
def amean (inarray,dimension=None,keepdims=0):
    """
    Calculates the arithmatic mean of the values in the passed array.
    That is:  1/n * (x1 + x2 + ... + xn).  Defaults to ALL values in the
    passed array.  Use dimension=None to flatten array first.  REMEMBER: if
    dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
    if dimension is a sequence, it collapses over all specified dimensions.  If
    keepdims is set to 1, the resulting array will have as many dimensions as
    inarray, with only 1 'level' per dim that was collapsed over.

    Usage:   amean(inarray,dimension=None,keepdims=0)
    Returns: arithematic mean calculated over dim(s) in dimension
    """
    # Promote integer dtypes to float so the division below is exact.
    if inarray.dtype in [N.int_, N.short,N.ubyte]:
        inarray = inarray.astype(N.float_)
    if dimension == None:
        inarray = N.ravel(inarray)
        sum = N.add.reduce(inarray)
        denom = float(len(inarray))
    elif type(dimension) in [IntType,FloatType]:
        # asum() is this module's array-sum helper (defined elsewhere).
        sum = asum(inarray,dimension)
        denom = float(inarray.shape[dimension])
        if keepdims == 1:
            shp = list(inarray.shape)
            shp[dimension] = 1
            sum = N.reshape(sum,shp)
    else: # must be a TUPLE of dims to average over
        # Collapse highest dims first so earlier dim indices stay valid.
        dims = list(dimension)
        dims.sort()
        dims.reverse()
        sum = inarray *1.0
        for dim in dims:
            sum = N.add.reduce(sum,dim)
        denom = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)
        if keepdims == 1:
            shp = list(inarray.shape)
            for dim in dims:
                shp[dim] = 1
            sum = N.reshape(sum,shp)
    return sum/denom
def amedian (inarray,numbins=1000):
    """
    Calculates the COMPUTED median value of an array of numbers, given the
    number of bins to use for the histogram (more bins approaches finding the
    precise median value of the array; default number of bins = 1000).  From
    G.W. Heiman's Basic Stats, or CRC Probability & Statistics.
    NOTE:  THIS ROUTINE ALWAYS uses the entire passed array (flattens it first).

    Usage:   amedian(inarray,numbins=1000)
    Returns: median calculated over ALL values in inarray
    """
    inarray = N.ravel(inarray)
    # Bin the data over its full range; ahistogram is defined below.
    (hist, smallest, binsize, extras) = ahistogram(inarray,numbins,[min(inarray),max(inarray)])
    cumhist = N.cumsum(hist)            # make cumulative histogram
    # Flag every bin at/after the 50th percentile, then find the first one.
    otherbins = N.greater_equal(cumhist,len(inarray)/2.0)
    otherbins = list(otherbins)         # list of 0/1s, 1s start at median bin
    cfbin = otherbins.index(1)                # get 1st(!) index holding 50%ile score
    LRL = smallest + binsize*cfbin        # get lower read limit of that bin
    cfbelow = N.add.reduce(hist[0:cfbin])        # cum. freq. below bin
    freq = hist[cfbin]                        # frequency IN the 50%ile bin
    # Linear interpolation within the median bin.
    median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN
    return median
def amedianscore (inarray,dimension=None):
    """
    Returns the 'middle' score of the passed array.  If there is an even
    number of scores, the mean of the 2 middle scores is returned.  Can function
    with 1D arrays, or on the FIRST dimension of 2D arrays (i.e., dimension can
    be None, to pre-flatten the array, or else dimension must equal 0).

    Usage:   amedianscore(inarray,dimension=None)
    Returns: 'middle' score of the array, or the mean of the 2 middle scores
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    inarray = N.sort(inarray,dimension)
    if inarray.shape[dimension] % 2 == 0:   # if even number of elements
        # BUG FIX: was "/ 2", which under Python 3 yields a float and
        # breaks array indexing; "//" restores the intended integer index.
        indx = inarray.shape[dimension] // 2  # integer division correct
        median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0
    else:
        indx = inarray.shape[dimension] // 2  # integer division correct
        median = N.take(inarray,[indx],dimension)
        if median.shape == (1,):
            median = median[0]
    return median
def amode(a, dimension=None):
    """
    Returns an array of the modal (most common) score in the passed array.
    If there is more than one such score, ONLY THE FIRST is returned.
    The bin-count for the modal values is also returned.  Operates on whole
    array (dimension=None), or on a given dimension.

    Usage:   amode(a, dimension=None)
    Returns: array of bin-counts for mode(s), array of corresponding modal values
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    scores = pstat.aunique(N.ravel(a))       # get ALL unique values
    testshape = list(a.shape)
    testshape[dimension] = 1
    oldmostfreq = N.zeros(testshape)
    oldcounts = N.zeros(testshape)
    # One pass per unique value: count its occurrences along `dimension`
    # and keep it wherever it beats the current best count.
    for score in scores:
        template = N.equal(a,score)
        counts = asum(template,dimension,1)   # 1 = keepdims
        mostfrequent = N.where(counts>oldcounts,score,oldmostfreq)
        oldcounts = N.where(counts>oldcounts,counts,oldcounts)
        oldmostfreq = mostfrequent
    return oldcounts, mostfrequent
def atmean(a,limits=None,inclusive=(1,1)):
    """
    Returns the arithmetic mean of all values in an array, ignoring values
    strictly outside the sequence passed to 'limits'.   Note: either limit
    in the sequence, or the value of limits itself, can be set to None.  The
    inclusive list/tuple determines whether the lower and upper limiting bounds
    (respectively) are open/exclusive (0) or closed/inclusive (1).

    Usage:   atmean(a,limits=None,inclusive=(1,1))
    """
    if a.dtype in [N.int_, N.short,N.ubyte]:
        a = a.astype(N.float_)
    if limits == None:
        # NOTE(review): delegates to the module-level `mean` dispatcher,
        # which is bound elsewhere in this file -- confirm it resolves here.
        return mean(a)
    assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atmean"
    # Pick comparison functions per the inclusive/exclusive flags.
    if inclusive[0]:    lowerfcn = N.greater_equal
    else:               lowerfcn = N.greater
    if inclusive[1]:    upperfcn = N.less_equal
    else:               upperfcn = N.less
    if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
        raise ValueError("No array values within given limits (atmean).")
    elif limits[0]==None and limits[1]!=None:
        mask = upperfcn(a,limits[1])
    elif limits[0]!=None and limits[1]==None:
        mask = lowerfcn(a,limits[0])
    elif limits[0]!=None and limits[1]!=None:
        mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
    # Masked mean: sum of in-limits values over their count.
    s = float(N.add.reduce(N.ravel(a*mask)))
    n = float(N.add.reduce(N.ravel(mask)))
    return s/n
def atvar(a,limits=None,inclusive=(1,1)):
    """
    Returns the sample variance of values in an array, (i.e., using N-1),
    ignoring values strictly outside the sequence passed to 'limits'.
    Note: either limit in the sequence, or the value of limits itself,
    can be set to None.  The inclusive list/tuple determines whether the lower
    and upper limiting bounds (respectively) are open/exclusive (0) or
    closed/inclusive (1). ASSUMES A FLAT ARRAY (OR ELSE PREFLATTENS).

    Usage:   atvar(a,limits=None,inclusive=(1,1))
    """
    a = a.astype(N.float_)
    if limits == None or limits == [None,None]:
        return avar(a)
    assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atvar"
    # Pick comparison functions per the inclusive/exclusive flags.
    if inclusive[0]:    lowerfcn = N.greater_equal
    else:               lowerfcn = N.greater
    if inclusive[1]:    upperfcn = N.less_equal
    else:               upperfcn = N.less
    if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
        raise ValueError("No array values within given limits (atvar).")
    elif limits[0]==None and limits[1]!=None:
        mask = upperfcn(a,limits[1])
    elif limits[0]!=None and limits[1]==None:
        mask = lowerfcn(a,limits[0])
    elif limits[0]!=None and limits[1]!=None:
        mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
    a = N.compress(mask,a)  # squish out excluded values
    return avar(a)
def atmin(a,lowerlimit=None,dimension=None,inclusive=1):
    """
    Returns the minimum value of a, along dimension, including only values less
    than (or equal to, if inclusive=1) lowerlimit.  If the limit is set to None,
    all values in the array are used.

    Usage:   atmin(a,lowerlimit=None,dimension=None,inclusive=1)
    """
    # NOTE(review): the greater/greater_equal pairing below looks inverted
    # relative to the docstring's inclusive semantics, but it is preserved
    # exactly as the original behaved -- confirm before changing.
    cmpfcn = N.greater if inclusive else N.greater_equal
    if dimension is None:
        a = N.ravel(a)
        dimension = 0
    if lowerlimit is None:
        # Sentinel limit below every element so nothing is excluded.
        lowerlimit = N.minimum.reduce(N.ravel(a)) - 11
    sentinel = N.maximum.reduce(N.ravel(a))
    # Replace out-of-limit entries with the array max so they cannot win.
    masked = N.where(cmpfcn(a, lowerlimit), a, sentinel)
    return N.minimum.reduce(masked, dimension)
def atmax(a,upperlimit,dimension=None,inclusive=1):
    """
    Returns the maximum value of a, along dimension, including only values greater
    than (or equal to, if inclusive=1) upperlimit.  If the limit is set to None,
    a limit larger than the max value in the array is used.

    Usage:   atmax(a,upperlimit,dimension=None,inclusive=1)
    """
    # NOTE(review): as in atmin, the less/less_equal pairing is preserved
    # from the original even though it reads inverted vs. the docstring.
    cmpfcn = N.less if inclusive else N.less_equal
    if dimension is None:
        a = N.ravel(a)
        dimension = 0
    if upperlimit is None:
        # Sentinel limit above every element so nothing is excluded.
        upperlimit = N.maximum.reduce(N.ravel(a)) + 1
    sentinel = N.minimum.reduce(N.ravel(a))
    # Replace out-of-limit entries with the array min so they cannot win.
    masked = N.where(cmpfcn(a, upperlimit), a, sentinel)
    return N.maximum.reduce(masked, dimension)
def atstdev(a,limits=None,inclusive=(1,1)):
    """
    Returns the standard deviation of all values in an array, ignoring values
    strictly outside the sequence passed to 'limits'.   Note: either limit
    in the sequence, or the value of limits itself, can be set to None.  The
    inclusive list/tuple determines whether the lower and upper limiting bounds
    (respectively) are open/exclusive (0) or closed/inclusive (1).

    Usage:   atstdev(a,limits=None,inclusive=(1,1))
    """
    # CONSISTENCY FIX: call the array implementation atvar() directly.
    # The original called the list/array dispatcher `tvar`, which is only
    # bound later in the module; for an ndarray it dispatches to atvar anyway.
    return N.sqrt(atvar(a,limits,inclusive))
def atsem(a,limits=None,inclusive=(1,1)):
    """
    Returns the standard error of the mean for the values in an array,
    (i.e., using N for the denominator), ignoring values strictly outside
    the sequence passed to 'limits'.   Note: either limit in the sequence,
    or the value of limits itself, can be set to None.  The inclusive list/tuple
    determines whether the lower and upper limiting bounds (respectively) are
    open/exclusive (0) or closed/inclusive (1).

    Usage:   atsem(a,limits=None,inclusive=(1,1))
    """
    # NOTE(review): `tstdev` is the list/array dispatcher bound later in
    # this module -- confirm it resolves to atstdev here.
    sd = tstdev(a,limits,inclusive)
    if limits == None or limits == [None,None]:
        n = float(len(N.ravel(a)))
        # Widen limits so the mask below includes every element.
        limits = [min(a)-1, max(a)+1]
    assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atsem"
    if inclusive[0]:    lowerfcn = N.greater_equal
    else:               lowerfcn = N.greater
    if inclusive[1]:    upperfcn = N.less_equal
    else:               upperfcn = N.less
    if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
        raise ValueError("No array values within given limits (atsem).")
    elif limits[0]==None and limits[1]!=None:
        mask = upperfcn(a,limits[1])
    elif limits[0]!=None and limits[1]==None:
        mask = lowerfcn(a,limits[0])
    elif limits[0]!=None and limits[1]!=None:
        mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
    # NOTE(review): term1 is computed but never used below -- dead code.
    term1 = N.add.reduce(N.ravel(a*a*mask))
    n = float(N.add.reduce(N.ravel(mask)))
    return sd/math.sqrt(n)
#####################################
############ AMOMENTS #############
#####################################
def amoment(a,moment=1,dimension=None):
    """
    Calculates the nth moment about the mean for a sample (defaults to the
    1st moment).  Generally used to calculate coefficients of skewness and
    kurtosis.  Dimension can equal None (ravel array first), an integer
    (the dimension over which to operate), or a sequence (operate over
    multiple dimensions).

    Usage:   amoment(a,moment=1,dimension=None)
    Returns: appropriate moment along given dimension
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    if moment == 1:
        # First moment about the mean is zero by definition.
        return 0.0
    else:
        mn = amean(a,dimension,1)  # 1=keepdims
        s = N.power((a-mn),moment)
        return amean(s,dimension)
def avariation(a,dimension=None):
    """
    Returns the coefficient of variation, as defined in CRC Standard
    Probability and Statistics, p.6. Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).

    Usage:   avariation(a,dimension=None)
    """
    # Coefficient of variation = sample stdev as a percentage of the mean.
    spread = asamplestdev(a,dimension)
    center = amean(a,dimension)
    return (spread / center) * 100.0
def askew(a,dimension=None):
    """
    Returns the skewness of a distribution (normal ==> 0.0; >0 means extra
    weight in left tail).  Use askewtest() to see if it's close enough.
    Dimension can equal None (ravel array first), an integer (the
    dimension over which to operate), or a sequence (operate over multiple
    dimensions).

    Usage:   askew(a, dimension=None)
    Returns: skew of vals in a along dimension, returning ZERO where all vals equal
    """
    # skew = m3 / m2^1.5; guard against zero second moment (constant data).
    denom = N.power(amoment(a,2,dimension),1.5)
    zero = N.equal(denom,0)
    if type(denom) == N.ndarray and asum(zero) != 0:
        print("Number of zeros in askew: ",asum(zero))
    denom = denom + zero  # prevent divide-by-zero
    return N.where(zero, 0, amoment(a,3,dimension)/denom)
def akurtosis(a,dimension=None):
    """
    Returns the kurtosis of a distribution (normal ==> 3.0; >3 means
    heavier in the tails, and usually more peaked).  Use akurtosistest()
    to see if it's close enough.  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).

    Usage:   akurtosis(a,dimension=None)
    Returns: kurtosis of values in a along dimension, and ZERO where all vals equal
    """
    # kurtosis = m4 / m2^2; guard against zero second moment (constant data).
    denom = N.power(amoment(a,2,dimension),2)
    zero = N.equal(denom,0)
    if type(denom) == N.ndarray and asum(zero) != 0:
        print("Number of zeros in akurtosis: ",asum(zero))
    denom = denom + zero  # prevent divide-by-zero
    return N.where(zero,0,amoment(a,4,dimension)/denom)
def adescribe(inarray,dimension=None):
    """
    Returns several descriptive statistics of the passed array.  Dimension
    can equal None (ravel array first), an integer (the dimension over
    which to operate), or a sequence (operate over multiple dimensions).

    Usage:   adescribe(inarray,dimension=None)
    Returns: n, (min,max), mean, standard deviation, skew, kurtosis
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    n = inarray.shape[dimension]
    mm = (N.minimum.reduce(inarray),N.maximum.reduce(inarray))
    m = amean(inarray,dimension)
    sd = astdev(inarray,dimension)
    skew = askew(inarray,dimension)
    kurt = akurtosis(inarray,dimension)
    return n, mm, m, sd, skew, kurt
#####################################
######## NORMALITY TESTS ##########
#####################################
def askewtest(a,dimension=None):
    """
    Tests whether the skew is significantly different from a normal
    distribution.  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).

    Usage:   askewtest(a,dimension=None)
    Returns: z-score and 2-tail z-probability
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    # D'Agostino-style transform of sample skew to an approximate z-score.
    b2 = askew(a,dimension)
    n = float(a.shape[dimension])
    y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )
    beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )
    W2 = -1 + N.sqrt(2*(beta2-1))
    delta = 1/N.sqrt(N.log(N.sqrt(W2)))
    alpha = N.sqrt(2/(W2-1))
    # Avoid log(0) below when the transformed skew is exactly zero.
    y = N.where(y==0,1,y)
    Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))
    return Z, (1.0-zprob(Z))*2
def akurtosistest(a,dimension=None):
    """
    Tests whether a dataset has normal kurtosis (i.e.,
    kurtosis=3(n-1)/(n+1)) Valid only for n>20.  Dimension can equal None
    (ravel array first), an integer (the dimension over which to operate),
    or a sequence (operate over multiple dimensions).

    Usage:   akurtosistest(a,dimension=None)
    Returns: z-score and 2-tail z-probability, returns 0 for bad pixels
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    n = float(a.shape[dimension])
    if n<20:
        print("akurtosistest only valid for n>=20 ... continuing anyway, n=",n)
    # Anscombe/Glynn-style transform of sample kurtosis to a z-score.
    b2 = akurtosis(a,dimension)
    E = 3.0*(n-1) /(n+1)
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
    x = (b2-E)/N.sqrt(varb2)
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5))/
                                                       (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 -2/(9.0*A)
    denom = 1 +x*N.sqrt(2/(A-4.0))
    # Mark negative denominators with the sentinel 99, zero them out of
    # the cube root, then report Z=0 for those "bad" entries at the end.
    denom = N.where(N.less(denom,0), 99, denom)
    term2 = N.where(N.equal(denom,0), term1, N.power((1-2.0/A)/denom,1/3.0))
    Z = ( term1 - term2 ) / N.sqrt(2/(9.0*A))
    Z = N.where(N.equal(denom,99), 0, Z)
    return Z, (1.0-zprob(Z))*2
def anormaltest(a,dimension=None):
    """
    Tests whether skew and/OR kurtosis of dataset differs from normal
    curve.  Can operate over multiple dimensions.  Dimension can equal
    None (ravel array first), an integer (the dimension over which to
    operate), or a sequence (operate over multiple dimensions).

    Usage:   anormaltest(a,dimension=None)
    Returns: z-score and 2-tail probability
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    # Omnibus statistic k2 = zskew^2 + zkurt^2, chi-square with 2 df.
    s,p = askewtest(a,dimension)
    k,p = akurtosistest(a,dimension)
    k2 = N.power(s,2) + N.power(k,2)
    return k2, achisqprob(k2,2)
#####################################
###### AFREQUENCY FUNCTIONS #######
#####################################
def aitemfreq(a):
    """
    Returns a 2D array of item frequencies.  Column 1 contains item values,
    column 2 contains their respective counts.  Assumes a 1D array is passed.
    @@@sorting OK?

    Usage:   aitemfreq(a)
    Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
    """
    scores = pstat.aunique(a)
    scores = N.sort(scores)
    freq = N.zeros(len(scores))
    # Count occurrences of each unique score.
    for i in range(len(scores)):
        freq[i] = N.add.reduce(N.equal(a,scores[i]))
    return N.array(pstat.aabut(scores, freq))
def ascoreatpercentile (inarray, percent):
    """
    Usage:   ascoreatpercentile(inarray,percent)   0<percent<100
    Returns: score at given percentile, relative to inarray distribution
    """
    percent = percent / 100.0
    targetcf = percent*len(inarray)
    # histogram/cumsum are the module-level dispatchers bound elsewhere.
    h, lrl, binsize, extras = histogram(inarray)
    cumhist = cumsum(h*1)
    # Find the first bin whose cumulative count reaches the target.
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    # Linear interpolation within that bin.
    score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
    return score
def apercentileofscore (inarray,score,histbins=10,defaultlimits=None):
    """
    Note: result of this function depends on the values used to histogram
    the data(!).

    Usage:   apercentileofscore(inarray,score,histbins=10,defaultlimits=None)
    Returns: percentile-position of score (0-100) relative to inarray
    """
    # histogram/cumsum are the module-level dispatchers bound elsewhere.
    h, lrl, binsize, extras = histogram(inarray,histbins,defaultlimits)
    cumhist = cumsum(h*1)
    # Locate score's bin, then interpolate within it.
    i = int((score - lrl)/float(binsize))
    pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inarray)) * 100
    return pct
def ahistogram (inarray,numbins=10,defaultlimits=None,printextras=1):
    """
    Returns (i) an array of histogram bin counts, (ii) the smallest value
    of the histogram binning, and (iii) the bin width (the last 2 are not
    necessarily integers).  Default number of bins is 10.  Defaultlimits
    can be None (the routine picks bins spanning all the numbers in the
    inarray) or a 2-sequence (lowerlimit, upperlimit).  Returns all of the
    following: array of bin values, lowerreallimit, binsize, extrapoints.

    Usage:   ahistogram(inarray,numbins=10,defaultlimits=None,printextras=1)
    Returns: (array of bin counts, bin-minimum, min-width, #-points-outside-range)
    """
    inarray = N.ravel(inarray)  # flatten any >1D arrays
    if (defaultlimits != None):
        lowerreallimit = defaultlimits[0]
        upperreallimit = defaultlimits[1]
        binsize = (upperreallimit-lowerreallimit) / float(numbins)
    else:
        Min = N.minimum.reduce(inarray)
        Max = N.maximum.reduce(inarray)
        # Pad the range slightly so the max value falls inside the last bin.
        estbinwidth = float(Max - Min)/float(numbins) + 1e-6
        binsize = (Max-Min+estbinwidth)/float(numbins)
        lowerreallimit = Min - binsize/2.0  # lower real limit, 1st bin
    bins = N.zeros(numbins)
    extrapoints = 0
    for num in inarray:
        try:
            if (num-lowerreallimit) < 0:
                extrapoints = extrapoints + 1
            else:
                bintoincrement = int((num-lowerreallimit) / float(binsize))
                bins[bintoincrement] = bins[bintoincrement] + 1
        # BUG FIX: was a bare `except:` that swallowed every exception.
        # Only the errors produced by a point above the binning range (or
        # a degenerate binsize) should be counted as out-of-range points.
        except (IndexError, ValueError, OverflowError, ZeroDivisionError):
            extrapoints = extrapoints + 1
    if (extrapoints > 0 and printextras == 1):
        print('\nPoints outside given histogram range =',extrapoints)
    return (bins, lowerreallimit, binsize, extrapoints)
def acumfreq(a,numbins=10,defaultreallimits=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.
    Defaultreallimits can be None (use all data), or a 2-sequence containing
    lower and upper limits on values to include.

    Usage:   acumfreq(a,numbins=10,defaultreallimits=None)
    Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    # histogram/cumsum are the module-level dispatchers bound elsewhere.
    h,l,b,e = histogram(a,numbins,defaultreallimits)
    cumhist = cumsum(h*1)
    return cumhist,l,b,e
def arelfreq(a,numbins=10,defaultreallimits=None):
    """
    Returns a relative frequency histogram, using the histogram function.
    Defaultreallimits can be None (use all data), or a 2-sequence containing
    lower and upper limits on values to include.

    Usage:   arelfreq(a,numbins=10,defaultreallimits=None)
    Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    # Normalize bin counts by the length of a's first dimension.
    h,l,b,e = histogram(a,numbins,defaultreallimits)
    h = N.array(h/float(a.shape[0]))
    return h,l,b,e
#####################################
###### AVARIABILITY FUNCTIONS #####
#####################################
def aobrientransform(*args):
    """
    Computes a transform on input data (any number of columns).  Used to
    test for homogeneity of variance prior to running one-way stats.  Each
    array in *args is one level of a factor.  If an F_oneway() run on the
    transformed data and found significant, variances are unequal.   From
    Maxwell and Delaney, p.112.

    Usage:   aobrientransform(*args)    *args = 1D arrays, one per level of factor
    Returns: transformed data for use in an ANOVA
    """
    TINY = 1e-10
    k = len(args)
    n = N.zeros(k,N.float64)
    v = N.zeros(k,N.float64)
    m = N.zeros(k,N.float64)
    nargs = []
    for i in range(k):
        nargs.append(args[i].astype(N.float64))
        n[i] = float(len(nargs[i]))
        v[i] = var(nargs[i])
        m[i] = mean(nargs[i])
    for j in range(k):
        # BUG FIX: n is a float array, and range() rejects floats under
        # Python 3 -- cast the group size to int for iteration.
        for i in range(int(n[j])):
            t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
            t2 = 0.5*v[j]*(n[j]-1.0)
            t3 = (n[j]-1.0)*(n[j]-2.0)
            nargs[j][i] = (t1-t2) / float(t3)
    # Sanity check: each transformed group's mean must equal the group's
    # original variance (to within TINY).
    check = 1
    for j in range(k):
        if v[j] - mean(nargs[j]) > TINY:
            check = 0
    if check != 1:
        raise ValueError('Lack of convergence in obrientransform.')
    else:
        return N.array(nargs)
def asamplevar (inarray,dimension=None,keepdims=0):
    """
    Returns the sample variance of the values in the passed
    array (i.e., using N).  Dimension can equal None (ravel array first),
    an integer (the dimension over which to operate), or a sequence
    (operate over multiple dimensions).  Set keepdims=1 to return an array
    with the same number of dimensions as inarray.

    Usage:   asamplevar(inarray,dimension=None,keepdims=0)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    if dimension == 1:
        # BUG FIX: was N.NewAxis, a Numeric-era name that does not exist
        # in numpy; the numpy spelling is N.newaxis.
        mn = amean(inarray,dimension)[:,N.newaxis]
    else:
        mn = amean(inarray,dimension,keepdims=1)
    deviations = inarray - mn
    if type(dimension) == ListType:
        # Multiple dims collapsed: n = product of the collapsed sizes.
        n = 1
        for d in dimension:
            n = n*inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    # ass() is this module's sum-of-squares helper (defined elsewhere).
    svar = ass(deviations,dimension,keepdims) / float(n)
    return svar
def asamplestdev (inarray, dimension=None, keepdims=0):
    """
    Returns the sample standard deviation of the values in the passed
    array (i.e., using N).  Dimension can equal None (ravel array first),
    an integer (the dimension over which to operate), or a sequence
    (operate over multiple dimensions).  Set keepdims=1 to return an array
    with the same number of dimensions as inarray.

    Usage:   asamplestdev(inarray,dimension=None,keepdims=0)
    """
    # Simply the square root of the sample variance.
    svar = asamplevar(inarray, dimension, keepdims)
    return N.sqrt(svar)
def asignaltonoise(instack,dimension=0):
    """
    Calculates signal-to-noise.  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).

    Usage:   asignaltonoise(instack,dimension=0):
    Returns: array containing the value of (mean/stdev) along dimension,
             or 0 when stdev=0
    """
    # mean/stdev are the module-level dispatchers bound elsewhere.
    m = mean(instack,dimension)
    sd = stdev(instack,dimension)
    # Guard against divide-by-zero where the stdev is 0.
    return N.where(sd==0,0,m/sd)
def acov (x,y, dimension=None,keepdims=0):
    """
    Returns the estimated covariance of the values in the passed
    array (i.e., N-1).  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:   acov(x,y,dimension=None,keepdims=0)
    """
    if dimension == None:
        x = N.ravel(x)
        y = N.ravel(y)
        dimension = 0
    xmn = amean(x,dimension,1)  # keepdims
    xdeviations = x - xmn
    ymn = amean(y,dimension,1)  # keepdims
    ydeviations = y - ymn
    if type(dimension) == ListType:
        # Multiple dims collapsed: n = product of the collapsed sizes.
        n = 1
        for d in dimension:
            n = n*x.shape[d]
    else:
        n = x.shape[dimension]
    # NOTE(review): N.sum with no axis argument sums over ALL elements,
    # which ignores `dimension` for the non-None case -- verify intent.
    covar = N.sum(xdeviations*ydeviations)/float(n-1)
    return covar
def avar (inarray, dimension=None,keepdims=0):
    """
    Returns the estimated population variance of the values in the passed
    array (i.e., N-1).  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:   avar(inarray,dimension=None,keepdims=0)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    mn = amean(inarray,dimension,1)  # keepdims
    deviations = inarray - mn
    if type(dimension) == ListType:
        # Multiple dims collapsed: n = product of the collapsed sizes.
        n = 1
        for d in dimension:
            n = n*inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    # ass() is this module's sum-of-squares helper (defined elsewhere).
    var = ass(deviations,dimension,keepdims)/float(n-1)
    return var
def astdev (inarray, dimension=None, keepdims=0):
    """
    Returns the estimated population standard deviation of the values in
    the passed array (i.e., N-1).  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).  Set keepdims=1 to return
    an array with the same number of dimensions as inarray.

    Usage:   astdev(inarray,dimension=None,keepdims=0)
    """
    # Square root of the N-1 (population-estimate) variance.
    variance = avar(inarray, dimension, keepdims)
    return N.sqrt(variance)
def asterr (inarray, dimension=None, keepdims=0):
    """
    Returns the estimated population standard error of the values in the
    passed array (i.e., N-1).  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).  Set keepdims=1 to return
    an array with the same number of dimensions as inarray.

    Usage:   asterr(inarray,dimension=None,keepdims=0)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    # Standard error = stdev / sqrt(n) along the chosen dimension.
    return astdev(inarray,dimension,keepdims) / float(N.sqrt(inarray.shape[dimension]))
def asem (inarray, dimension=None, keepdims=0):
    """
    Returns the standard error of the mean (i.e., using N) of the values
    in the passed array.  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:   asem(inarray,dimension=None, keepdims=0)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    if type(dimension) == ListType:
        # Multiple dims collapsed: n = product of the collapsed sizes.
        n = 1
        for d in dimension:
            n = n*inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    # sample stdev (N denominator) divided by sqrt(n-1).
    s = asamplestdev(inarray,dimension,keepdims) / N.sqrt(n-1)
    return s
def az (a, score):
    """
    Returns the z-score of a given input score, given thearray from which
    that score came.  Not appropriate for population calculations, nor for
    arrays > 1D.

    Usage:   az(a, score)
    """
    # (score - mean) / sample stdev of the reference array.
    z = (score-amean(a)) / asamplestdev(a)
    return z
def azs (a):
    """
    Returns a 1D array of z-scores, one for each score in the passed array,
    computed relative to the passed array.

    Usage:   azs(a)
    """
    # CONSISTENCY FIX: call the array z-score function az() (defined just
    # above) directly.  The original called the dispatcher `z`, which is
    # only bound later in the module; for an ndarray it resolves to az.
    zscores = []
    for item in a:
        zscores.append(az(a,item))
    return N.array(zscores)
def azmap (scores, compare, dimension=0):
    """
    Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to
    array passed to compare (e.g., [time,x,y]).  Assumes collapsing over dim 0
    of the compare array.

    Usage:   azs(scores, compare, dimension=0)
    """
    # Standardize `scores` against the mean/stdev of `compare`.
    # NOTE(review): the stdev is taken over dim 0 regardless of the
    # `dimension` argument -- confirm that is intended.
    mns = amean(compare,dimension)
    sstd = asamplestdev(compare,0)
    return (scores - mns) / sstd
#####################################
####### ATRIMMING FUNCTIONS #######
#####################################
## deleted around() as it's in numpy now
def athreshold(a,threshmin=None,threshmax=None,newval=0):
    """
    Like Numeric.clip() except that values <threshmid or >threshmax are replaced
    by newval instead of by threshmin/threshmax (respectively).

    Usage:   athreshold(a,threshmin=None,threshmax=None,newval=0)
    Returns: a, with values <threshmin or >threshmax replaced with newval
    """
    # Build a 0/1 mask of out-of-bounds positions, then substitute newval.
    mask = N.zeros(a.shape)
    # IDIOM FIX: compare to None with identity (`is not None`), not `!=`.
    if threshmin is not None:
        mask = mask + N.where(a<threshmin,1,0)
    if threshmax is not None:
        mask = mask + N.where(a>threshmax,1,0)
    # clip in case a value violated both bounds' masks.
    mask = N.clip(mask,0,1)
    return N.where(mask,newval,a)
def atrimboth (a,proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    array (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
    'rightmost' 10% of scores.  You must pre-sort the array if you want
    "proper" trimming.  Slices off LESS if proportion results in a
    non-integer slice index (i.e., conservatively slices off
    proportiontocut).

    Usage:   atrimboth (a,proportiontocut)
    Returns: trimmed version of array a
    """
    # int() truncation makes the trim conservative on both ends.
    cut = int(proportiontocut * len(a))
    return a[cut : len(a) - cut]
def atrim1 (a,proportiontocut,tail='right'):
    """
    Slices off the passed proportion of items from ONE end of the passed
    array (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
    10% of scores).  Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off proportiontocut).

    Usage:   atrim1(a,proportiontocut,tail='right')  or set tail='left'
    Returns: trimmed version of array a
    Raises:  ValueError if tail is neither 'right' nor 'left'
    """
    # BUG FIX: string.lower() was removed in Python 3; use the str method.
    if tail.lower() == 'right':
        lowercut = 0
        uppercut = len(a) - int(proportiontocut*len(a))
    elif tail.lower() == 'left':
        lowercut = int(proportiontocut*len(a))
        uppercut = len(a)
    else:
        # ROBUSTNESS: the original fell through to a NameError here.
        raise ValueError("tail must be 'right' or 'left' in atrim1")
    return a[lowercut:uppercut]
#####################################
##### ACORRELATION FUNCTIONS ######
#####################################
def acovariance(X):
    """
    Computes the covariance matrix of a matrix X.  Requires a 2D matrix input.

    Usage:   acovariance(X)
    Returns: covariance matrix of X
    """
    if len(X.shape) != 2:
        raise TypeError("acovariance requires 2D matrices")
    # Cov = E[X'X] - outer(mean, mean), using the biased (1/n) estimator.
    nrows = X.shape[0]
    colmeans = amean(X,0)
    second_moment = N.dot(N.transpose(X), X) / float(nrows)
    return second_moment - N.multiply.outer(colmeans, colmeans)
def acorrelation(X):
    """
    Computes the correlation matrix of a matrix X.  Requires a 2D matrix input.

    Usage:   acorrelation(X)
    Returns: correlation matrix of X
    """
    cov = acovariance(X)
    variances = N.diagonal(cov)
    # normalize each covariance by the product of the two std deviations
    return cov / N.sqrt(N.multiply.outer(variances, variances))
def apaired(x,y):
    """
    Interactively determines the type of data in x and y, and then runs the
    appropriated statistic for paired group data.

    Usage:   apaired(x,y)     x,y = the two arrays of values to be compared
    Returns: appropriate statistic name, value, and probability
    """
    # prompt until the user picks independent, related, or correlation
    samples = ''
    while samples not in ['i','r','I','R','c','C']:
        print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
        samples = input()

    if samples in ['i','I','r','R']:
        print('\nComparing variances ...', end=' ')
        # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
        r = obrientransform(x,y)
        f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
        if p<0.05:
            vartype='unequal, p='+str(round(p,4))
        else:
            vartype='equal'
        print(vartype)
        if samples in ['i','I']:
            if vartype[0]=='e':
                # equal variances: standard independent-samples t-test
                t,p = ttest_ind(x,y,None,0)
                print('\nIndependent samples t-test: ', round(t,4),round(p,4))
            else:
                # unequal variances: fall back to a nonparametric test,
                # chosen by sample size
                if len(x)>20 or len(y)>20:
                    z,p = ranksums(x,y)
                    print('\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4))
                else:
                    u,p = mannwhitneyu(x,y)
                    print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4))
        else:  # RELATED SAMPLES
            if vartype[0]=='e':
                t,p = ttest_rel(x,y,0)
                print('\nRelated samples t-test: ', round(t,4),round(p,4))
            else:
                # NOTE(review): labelled a Wilcoxon T-test but calls ranksums —
                # confirm this is intended for the related/unequal-variance case
                t,p = ranksums(x,y)
                print('\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4))
    else:  # CORRELATION ANALYSIS
        # prompt until the user picks a data type for the correlation
        corrtype = ''
        while corrtype not in ['c','C','r','R','d','D']:
            print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
            corrtype = input()
        if corrtype in ['c','C']:
            m,b,r,p,see = linregress(x,y)
            print('\nLinear regression for continuous variables ...')
            lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
            pstat.printcc(lol)
        elif corrtype in ['r','R']:
            r,p = spearmanr(x,y)
            print('\nCorrelation for ranked variables ...')
            print("Spearman's r: ",round(r,4),round(p,4))
        else:  # DICHOTOMOUS
            r,p = pointbiserialr(x,y)
            print('\nAssuming x contains a dichotomous variable ...')
            print('Point Biserial r: ',round(r,4),round(p,4))
    print('\n\n')
    return None
def dices(x,y):
    """
    Calculates Dice's coefficient ... (2*number of common terms)/(number of terms in x +
    number of terms in y). Returns a value between 0 (orthogonal) and 1.

    Usage:  dices(x,y)
    """
    # the 'sets' module was removed in Python 3; the builtin set type is a
    # drop-in replacement for sets.Set here
    x = set(x)
    y = set(y)
    common = len(x.intersection(y))
    total = float(len(x) + len(y))
    return 2*common/total
def icc(x,y=None,verbose=0):
    """
    Calculates intraclass correlation coefficients using simple, Type I sums of squares.
    If only one variable is passed, assumed it's an Nx2 matrix

    Usage:   icc(x,y=None,verbose=0)
    Returns: icc rho, prob ####PROB IS A GUESS BASED ON PEARSON
    """
    TINY = 1.0e-20  # keeps the t denominator nonzero when rho = +/-1
    # NOTE(review): 'if y:' is ambiguous if y is an array — presumably callers
    # pass either None or a nonempty sequence; confirm
    if y:
        # two separate arrays passed: stack them into one Nx2 matrix
        all = N.concatenate([x,y],0)
    else:
        # single Nx2 matrix passed; '+0' forces a copy before slicing
        all = x+0
        x = all[:,0]
        y = all[:,1]
    totalss = ass(all-mean(all))
    pairmeans = (x+y)/2.
    withinss = ass(x-pairmeans) + ass(y-pairmeans)
    withindf = float(len(x))
    betwdf = float(len(x)-1)
    withinms = withinss / withindf
    betweenms = (totalss-withinss) / betwdf
    rho = (betweenms-withinms)/(withinms+betweenms)
    # convert rho to a t statistic, then to a two-tailed p (Pearson-style)
    t = rho*math.sqrt(betwdf/((1.0-rho+TINY)*(1.0+rho+TINY)))
    prob = abetai(0.5*betwdf,0.5,betwdf/(betwdf+t*t),verbose)
    return rho, prob
def alincc(x,y):
    """
    Calculates Lin's concordance correlation coefficient (agreement of the
    two arrays around the 45-degree line through the origin).

    Usage:   alincc(x,y)    where x, y are equal-length arrays
    Returns: Lin's CC
    """
    x = N.ravel(x)
    y = N.ravel(y)
    # rescale the sample (n-1 denominator) statistics to population (n) form
    covar = acov(x,y)*(len(x)-1)/float(len(x))
    xvar = avar(x)*(len(x)-1)/float(len(x))
    yvar = avar(y)*(len(y)-1)/float(len(y))
    location_shift = (amean(x)-amean(y))**2
    return (2 * covar) / ((xvar + yvar) + location_shift)
def apearsonr(x,y,verbose=1):
    """
    Calculates a Pearson correlation coefficient and returns p.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (2nd), p.195.

    Usage:   apearsonr(x,y,verbose=1)   where x,y are equal length arrays
    Returns: Pearson's r, two-tailed p-value
    """
    TINY = 1.0e-20  # keeps the t denominator nonzero when r = +/-1
    n = len(x)
    xmean = amean(x)  # NOTE(review): computed but unused below
    ymean = amean(y)  # NOTE(review): computed but unused below
    # computational (raw-score) formula for r
    r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
    r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
    r = (r_num / r_den)
    df = n-2
    # significance via the t distribution with n-2 df
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t),verbose)
    return r,prob
def aspearmanr(x,y):
    """
    Calculates a Spearman rank-order correlation coefficient.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.

    Usage:   aspearmanr(x,y)   where x,y are equal-length arrays
    Returns: Spearman's r, two-tailed p-value
    """
    TINY = 1e-30  # NOTE(review): defined but never used in this function
    n = len(x)
    rankx = rankdata(x)
    ranky = rankdata(y)
    dsq = N.add.reduce((rankx-ranky)**2)   # sum of squared rank differences
    rs = 1 - 6*dsq / float(n*(n**2-1))     # classic Spearman formula
    # significance via the t distribution with n-2 df
    t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
    df = n-2
    probrs = abetai(0.5*df,0.5,df/(df+t*t))
    # probability values for rs are from part 2 of the spearman function in
    # Numerical Recipies, p.510.  They close to tables, but not exact.(?)
    return rs, probrs
def apointbiserialr(x,y):
    """
    Calculates a point-biserial correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (1st), p.194.

    Usage:   apointbiserialr(x,y)      where x,y are equal length arrays
    Returns: Point-biserial r, two-tailed p-value
    """
    TINY = 1e-30  # keeps the t denominator nonzero when rpb = +/-1
    categories = pstat.aunique(x)
    data = pstat.aabut(x,y)
    if len(categories) != 2:
        raise ValueError("Exactly 2 categories required (in x) for pointbiserialr().")
    else:  # there are 2 categories, continue
        codemap = pstat.aabut(categories,N.arange(2))
        recoded = pstat.arecode(data,codemap,0)  # NOTE(review): result unused
        x = pstat.alinexand(data,0,categories[0])  # rows in the 1st category
        y = pstat.alinexand(data,0,categories[1])  # rows in the 2nd category
        xmean = amean(pstat.acolex(x,1))
        ymean = amean(pstat.acolex(y,1))
        n = len(data)
        # adjustment for the relative sizes of the two groups
        adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
        rpb = (ymean - xmean)/asamplestdev(pstat.acolex(data,1))*adjust
        df = n-2
        # significance via the t distribution with n-2 df
        t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
        prob = abetai(0.5*df,0.5,df/(df+t*t))
        return rpb, prob
def akendalltau(x,y):
    """
    Calculates Kendall's tau ... correlation of ordinal data.  Adapted
    from function kendl1 in Numerical Recipies.  Needs good test-cases.@@@

    Usage:   akendalltau(x,y)
    Returns: Kendall's tau, two-tailed p-value
    """
    n1 = 0   # count of pairs not tied in x
    n2 = 0   # count of pairs not tied in y
    iss = 0  # concordant minus discordant pairs
    for j in range(len(x)-1):
        # NOTE(review): inner loop starts at k=j, so each item is compared
        # with itself (a guaranteed tie); NR's kendl1 starts at j+1 — confirm
        for k in range(j,len(y)):
            a1 = x[j] - x[k]
            a2 = y[j] - y[k]
            aa = a1 * a2
            if (aa):  # neither array has a tie
                n1 = n1 + 1
                n2 = n2 + 1
                if aa > 0:
                    iss = iss + 1   # concordant pair
                else:
                    iss = iss -1    # discordant pair
            else:
                # tie in at least one array: credit the array that differs
                if (a1):
                    n1 = n1 + 1
                else:
                    n2 = n2 + 1
    tau = iss / math.sqrt(n1*n2)
    # normal approximation for the significance of tau
    svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
    z = tau / math.sqrt(svar)
    prob = erfcc(abs(z)/1.4142136)  # two-tailed p via complementary error fn
    return tau, prob
def alinregress(*args):
    """
    Calculates a regression line on two arrays, x and y, corresponding to x,y
    pairs.  If a single 2D array is passed, alinregress finds dim with 2 levels
    and splits data into x,y pairs along that dim.

    Usage:   alinregress(*args)    args=2 equal-length arrays, or one 2D array
    Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
    """
    TINY = 1.0e-20  # keeps log/t denominators nonzero when r = +/-1
    if len(args) == 1:  # more than 1D array?
        args = args[0]
        if len(args) == 2:
            # first dimension has the 2 levels: rows are x and y
            x = args[0]
            y = args[1]
        else:
            # otherwise assume the 2 levels are columns
            x = args[:,0]
            y = args[:,1]
    else:
        x = args[0]
        y = args[1]
    n = len(x)
    xmean = amean(x)
    ymean = amean(y)
    # computational (raw-score) formula for r
    r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
    r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
    r = r_num / r_den
    z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))  # Fisher z (NOTE(review): unused)
    df = n-2
    # significance of r via the t distribution with n-2 df
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t))
    slope = r_num / (float(n)*ass(x) - asquare_of_sums(x))
    intercept = ymean - slope*xmean
    sterrest = math.sqrt(1-r*r)*asamplestdev(y)  # standard error of estimate
    return slope, intercept, r, prob, sterrest, n
def amasslinregress(*args):
    """
    Calculates a regression line on one 1D array (x) and one N-D array (y).

    Usage:   amasslinregress(*args)  args=2 arrays (x 1D, y N-D), or one 2D array
    Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
    """
    TINY = 1.0e-20  # keeps log/t denominators nonzero when r = +/-1
    if len(args) == 1:  # more than 1D array?
        args = args[0]
        if len(args) == 2:
            x = N.ravel(args[0])
            y = args[1]
        else:
            x = N.ravel(args[:,0])
            y = args[:,1]
    else:
        x = args[0]
        y = args[1]
    x = x.astype(N.float_)
    y = y.astype(N.float_)
    n = len(x)
    xmean = amean(x)
    ymean = amean(y,0)
    # reshape x to broadcast along y's first dimension; the shape must be a
    # sequence of ints (the old N.ones(...) produced floats, which numpy
    # rejects for shape assignment); debug print also removed here
    shp = [1] * len(y.shape)
    shp[0] = len(x)
    x.shape = shp
    # computational (raw-score) formula for r, elementwise over y's trailing dims
    r_num = n*(N.add.reduce(x*y,0)) - N.add.reduce(x)*N.add.reduce(y,0)
    r_den = N.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y,0)-asquare_of_sums(y,0)))
    zerodivproblem = N.equal(r_den,0)
    r_den = N.where(zerodivproblem,1,r_den)  # avoid zero-division in 1st place
    r = r_num / r_den  # need to do this nicely for matrix division
    r = N.where(zerodivproblem,0.0,r)        # r = 0 where variance was zero
    z = 0.5*N.log((1.0+r+TINY)/(1.0-r+TINY))
    df = n-2
    t = r*N.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t))
    ss = float(n)*ass(x)-asquare_of_sums(x)
    s_den = N.where(ss==0,1,ss)  # avoid zero-division in 1st place
    slope = r_num / s_den
    intercept = ymean - slope*xmean
    sterrest = N.sqrt(1-r*r)*asamplestdev(y,0)
    return slope, intercept, r, prob, sterrest, n
#####################################
##### AINFERENTIAL STATISTICS #####
#####################################
def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
    """
    Calculates the t-obtained for the independent samples T-test on ONE group
    of scores a, given a population mean.  If printit=1, results are printed
    to the screen.  If printit='filename', the results are output to 'filename'
    using the given writemode (default=append).  Returns t-value, and prob.

    Usage:   attest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
    Returns: t-value, two-tailed prob
    """
    if type(a) != N.ndarray:
        a = N.array(a)
    x = amean(a)   # sample mean
    v = avar(a)    # sample variance
    n = len(a)
    df = n-1
    # pooled-variance form; with one sample this equals v, kept for
    # parallelism with attest_ind
    svar = ((n-1)*v) / float(df)
    t = (x-popmean)/math.sqrt(svar*(1.0/n))
    prob = abetai(0.5*df,0.5,df/(df+t*t))

    if printit != 0:
        statname = 'Single-sample T-test.'
        outputpairedstats(printit,writemode,
                          'Population','--',popmean,0,0,0,
                          name,n,x,v,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          statname,t,prob)
    return t,prob
def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores
    a, and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Dimension
    can equal None (ravel array first), or an integer (the dimension over
    which to operate on a and b).

    Usage:   attest_ind (a,b,dimension=None,printit=0,
                         Name1='Samp1',Name2='Samp2',writemode='a')
    Returns: t-value, two-tailed p-value
    """
    if dimension == None:
        a = N.ravel(a)
        b = N.ravel(b)
        dimension = 0
    x1 = amean(a,dimension)
    x2 = amean(b,dimension)
    v1 = avar(a,dimension)
    v2 = avar(b,dimension)
    n1 = a.shape[dimension]
    n2 = b.shape[dimension]
    df = n1+n2-2
    # pooled variance estimate
    svar = ((n1-1)*v1+(n2-1)*v2) / float(df)
    zerodivproblem = N.equal(svar,0)
    svar = N.where(zerodivproblem,1,svar)  # avoid zero-division in 1st place
    t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2))  # N-D COMPUTATION HERE!!!!!!
    t = N.where(zerodivproblem,1.0,t)  # replace NaN/wrong t-values with 1.0
    probs = abetai(0.5*df,0.5,float(df)/(df+t*t))

    if type(t) == N.ndarray:
        probs = N.reshape(probs,t.shape)
    if probs.shape == (1,):
        probs = probs[0]   # collapse a length-1 result to a scalar

    if printit != 0:
        # printing path reports only the first element of array results
        if type(t) == N.ndarray:
            t = t[0]
        if type(probs) == N.ndarray:
            probs = probs[0]
        statname = 'Independent samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),
                          N.maximum.reduce(N.ravel(b)),
                          statname,t,probs)
        # NOTE(review): printit mode returns None here instead of (t, probs) —
        # appears deliberate (print-only mode), but confirm callers expect it
        return
    return t, probs
def ap2t(pval,df):
    """
    Tries to compute a t-value from a p-value (or pval array) and associated df.
    SLOW for large numbers of elements(!) as it re-computes p-values 20 times
    (smaller step-sizes) at which point it decides it's done. Keeps the signs
    of the input array. Returns 1000 (or -1000) if t>100.

    Usage:  ap2t(pval,df)
    Returns: an array of t-values with the shape of pval
    """
    pval = N.array(pval)
    signs = N.sign(pval)   # remember input signs; the search uses |p|
    pval = abs(pval)
    # bisection on t: start at t=50 with step 25, halve the step each pass
    t = N.ones(pval.shape,N.float_)*50
    step = N.ones(pval.shape,N.float_)*25
    print("Initial ap2t() prob calc")
    prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
    print('ap2t() iter: ', end=' ')
    for i in range(10):
        print(i,' ', end=' ')
        # move t up where the current p is still too large, down otherwise
        t = N.where(pval<prob,t+step,t-step)
        prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
        step = step/2
    print()
    # since this is an ugly hack, we get ugly boundaries
    t = N.where(t>99.9,1000,t)      # hit upper-boundary
    # NOTE(review): ADDS the sign (+/-1) to t rather than multiplying by it;
    # confirm this is the intended way to restore the input's sign
    t = t+signs
    return t #, prob, pval
def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO RELATED samples of scores, a
    and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Dimension
    can equal None (ravel array first), or an integer (the dimension over
    which to operate on a and b).

    Usage:   attest_rel(a,b,dimension=None,printit=0,
                        name1='Samp1',name2='Samp2',writemode='a')
    Returns: t-value, two-tailed p-value
    """
    if dimension == None:
        a = N.ravel(a)
        b = N.ravel(b)
        dimension = 0
    if len(a)!=len(b):
        raise ValueError('Unequal length arrays.')
    x1 = amean(a,dimension)
    x2 = amean(b,dimension)
    v1 = avar(a,dimension)
    v2 = avar(b,dimension)
    n = a.shape[dimension]
    df = float(n-1)
    d = (a-b).astype('d')   # paired differences, as doubles

    # standard error of the mean difference
    denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)
    zerodivproblem = N.equal(denom,0)
    denom = N.where(zerodivproblem,1,denom)  # avoid zero-division in 1st place
    t = N.add.reduce(d,dimension) / denom  # N-D COMPUTATION HERE!!!!!!
    t = N.where(zerodivproblem,1.0,t)  # replace NaN/wrong t-values with 1.0
    probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
    if type(t) == N.ndarray:
        probs = N.reshape(probs,t.shape)
    if probs.shape == (1,):
        probs = probs[0]   # collapse a length-1 result to a scalar

    if printit != 0:
        statname = 'Related samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),
                          N.maximum.reduce(N.ravel(b)),
                          statname,t,probs)
        # NOTE(review): printit mode returns None here instead of (t, probs) —
        # same convention as attest_ind; confirm callers expect it
        return
    return t, probs
def achisquare(f_obs,f_exp=None):
    """
    Calculates a one-way chi square for array of observed frequencies and returns
    the result.  If no expected frequencies are given, the total N is assumed to
    be equally distributed across all groups.
    @@@NOT RIGHT??

    Usage:   achisquare(f_obs, f_exp=None)   f_obs = array of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """
    k = len(f_obs)
    # identity test ('is None') instead of '== None': an equality test against
    # an array argument is elementwise and makes the if-condition ambiguous
    if f_exp is None:
        # default: expect the total N spread evenly over the k cells
        f_exp = N.array([sum(f_obs)/float(k)] * len(f_obs),N.float_)
    f_exp = f_exp.astype(N.float_)
    chisq = N.add.reduce((f_obs-f_exp)**2 / f_exp)
    return chisq, achisqprob(chisq, k-1)
def aks_2samp (data1,data2):
    """
    Computes the Kolmogorov-Smirnof statistic on 2 samples.  Modified from
    Numerical Recipies in C, page 493.  Returns KS D-value, prob.  Not ufunc-
    like.

    Usage:   aks_2samp(data1,data2)   where data1 and data2 are 1D arrays
    Returns: KS D-value, p-value
    """
    j1 = 0    # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE
    j2 = 0    # N.zeros(data2.shape[1:])
    fn1 = 0.0 # N.zeros(data1.shape[1:],N.float_)  -- empirical CDF of data1
    fn2 = 0.0 # N.zeros(data2.shape[1:],N.float_)  -- empirical CDF of data2
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    en1 = n1*1
    en2 = n2*1
    d = N.zeros(data1.shape[1:],N.float_)   # running max CDF difference
    data1 = N.sort(data1,0)
    data2 = N.sort(data2,0)
    # merge-walk both sorted samples, tracking the largest gap between
    # the two empirical CDFs
    while j1 < n1 and j2 < n2:
        d1=data1[j1]
        d2=data2[j2]
        if d1 <= d2:
            fn1 = (j1)/float(en1)
            j1 = j1 + 1
        if d2 <= d1:
            fn2 = (j2)/float(en2)
            j2 = j2 + 1
        dt = (fn2-fn1)
        if abs(dt) > abs(d):
            d = dt
    # asymptotic significance of D; en is the effective sample size
    en = math.sqrt(en1*en2/float(en1+en2))
    prob = aksprob((en+0.12+0.11/en)*N.fabs(d))
    return d, prob
def amannwhitneyu(x,y):
    """
    Calculates a Mann-Whitney U statistic on the provided scores and
    returns the result.  Use only when the n in each condition is < 20 and
    you have 2 independent samples of ranks.  REMEMBER: Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U.

    Usage:   amannwhitneyu(x,y)     where x,y are arrays of values for 2 conditions
    Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
    """
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(N.concatenate((x,y)))   # rank the pooled scores
    rankx = ranked[0:n1]       # get the x-ranks
    ranky = ranked[n1:]        # the rest are y-ranks (NOTE(review): unused)
    u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx)  # calc U for x
    u2 = n1*n2 - u1                            # remainder is U for y
    bigu = max(u1,u2)
    smallu = min(u1,u2)
    T = math.sqrt(tiecorrect(ranked))  # correction factor for tied scores
    if T == 0:
        raise ValueError('All numbers are identical in amannwhitneyu')
    sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
    z = abs((bigu-n1*n2/2.0) / sd)  # normal approximation for prob calc
    return smallu, 1.0 - azprob(z)
def atiecorrect(rankvals):
    """
    Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
    See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
    Sciences.  New York: McGraw-Hill.  Code adapted from |Stat rankind.c
    code.

    Usage:   atiecorrect(rankvals)
    Returns: T correction factor for U or H
    """
    sorted,posn = ashellsort(N.array(rankvals))
    n = len(sorted)
    T = 0.0
    i = 0
    # walk the sorted ranks, accumulating (t^3 - t) for each run of ties
    while (i<n-1):
        if sorted[i] == sorted[i+1]:
            nties = 1
            # count the full length of this run of equal values
            while (i<n-1) and (sorted[i] == sorted[i+1]):
                nties = nties +1
                i = i +1
            T = T + nties**3 - nties
        i = i+1
    T = T / float(n**3-n)
    return 1.0 - T
def aranksums(x,y):
    """
    Calculates the rank sums statistic on the provided scores and returns
    the result.

    Usage:   aranksums(x,y)     where x,y are arrays of values for 2 conditions
    Returns: z-statistic, two-tailed p-value
    """
    n1 = len(x)
    n2 = len(y)
    # rank the pooled data; the first n1 ranks belong to x
    pooled_ranks = arankdata(N.concatenate((x,y)))
    observed = sum(pooled_ranks[:n1])
    expected = n1*(n1+n2+1) / 2.0
    z = (observed - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
    return z, 2*(1.0 - azprob(abs(z)))
def awilcoxont(x,y):
    """
    Calculates the Wilcoxon T-test for related samples and returns the
    result.  A non-parametric T-test.

    Usage:   awilcoxont(x,y)     where x,y are equal-length arrays for 2 conditions
    Returns: t-statistic, two-tailed p-value
    """
    if len(x) != len(y):
        raise ValueError('Unequal N in awilcoxont.  Aborting.')
    d = x-y
    d = N.compress(N.not_equal(d,0),d)  # Keep all non-zero differences
    count = len(d)
    absd = abs(d)
    absranked = arankdata(absd)
    r_plus = 0.0
    r_minus = 0.0
    # split the rank sum by the sign of the original difference
    for i in range(len(absd)):
        if d[i] < 0:
            r_minus = r_minus + absranked[i]
        else:
            r_plus = r_plus + absranked[i]
    wt = min(r_plus, r_minus)   # Wilcoxon T = smaller of the two rank sums
    mn = count * (count+1) * 0.25
    se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
    # normal approximation for significance (duplicate z assignment removed)
    z = math.fabs(wt-mn) / se
    prob = 2*(1.0 -zprob(abs(z)))
    return wt, prob
def akruskalwallish(*args):
    """
    The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
    groups, requiring at least 5 subjects in each group.  This function
    calculates the Kruskal-Wallis H and associated p-value for 3 or more
    independent samples.

    Usage:   akruskalwallish(*args)     args are separate arrays for 3+ conditions
    Returns: H-statistic (corrected for ties), associated p-value
    """
    # accept 3 OR MORE groups, as documented (was: exactly == 3)
    assert len(args) >= 3, "Need at least 3 groups in stats.akruskalwallish()"
    args = list(args)
    n = list(map(len,args))
    all = []
    for i in range(len(args)):
        all = all + args[i].tolist()
    ranked = rankdata(all)    # rank the pooled data
    T = tiecorrect(ranked)    # tie-correction factor
    # give each group back its own slice of the pooled ranks
    for i in range(len(args)):
        args[i] = ranked[0:n[i]]
        del ranked[0:n[i]]
    rsums = []
    for i in range(len(args)):
        rsums.append(sum(args[i])**2)
        rsums[i] = rsums[i] / float(n[i])   # squared rank-sum / group size
    ssbn = sum(rsums)
    totaln = sum(n)
    h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
    df = len(args) - 1
    if T == 0:
        raise ValueError('All numbers are identical in akruskalwallish')
    h = h / float(T)   # apply the tie correction
    return h, chisqprob(h,df)
def afriedmanchisquare(*args):
    """
    Friedman Chi-Square is a non-parametric, one-way within-subjects
    ANOVA.  This function calculates the Friedman Chi-square test for
    repeated measures and returns the result, along with the associated
    probability value.  It assumes 3 or more repeated measures.  Only 3
    levels requires a minimum of 10 subjects in the study.  Four levels
    requires 5 subjects per level(??).

    Usage:   afriedmanchisquare(*args)   args are separate arrays for 2+ conditions
    Returns: chi-square statistic, associated p-value
    """
    k = len(args)     # number of conditions
    if k < 3:
        raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
    n = len(args[0])  # number of subjects
    data = pstat.aabut(*args)      # rows = subjects, columns = conditions
    data = data.astype(N.float_)
    for i in range(len(data)):
        data[i] = arankdata(data[i])   # rank within each subject (row)
    # Friedman's statistic uses the per-condition sums of the within-subject
    # RANKS; previously this summed the raw scores in 'args' instead of the
    # ranked 'data', which is incorrect
    ssbn = asum(asum(data,0)**2)
    chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
    return chisq, achisqprob(chisq,k-1)
#####################################
#### APROBABILITY CALCULATIONS ####
#####################################
def achisqprob(chisq,df):
    """
    Returns the (1-tail) probability value associated with the provided chi-square
    value and df.  Heavily modified from chisq.c in Gary Perlman's |Stat.  Can
    handle multiple dimensions.

    Usage:   achisqprob(chisq,df)    chisq=chisquare stat., df=degrees of freedom
    """
    BIG = 200.0
    def ex(x):
        # exp() with the exponent clipped so it cannot underflow/overflow
        BIG = 200.0
        exponents = N.where(N.less(x,-BIG),-BIG,x)
        return N.exp(exponents)

    if type(chisq) == N.ndarray:
        arrayflag = 1
    else:
        arrayflag = 0
        chisq = N.array([chisq])   # promote scalars to a length-1 array
    if df < 1:
        return N.ones(chisq.shape,N.float)  # NOTE(review): N.float (no underscore)
                                            # is inconsistent with N.float_ below
                                            # and gone in modern numpy — confirm
    probs = N.zeros(chisq.shape,N.float_)
    probs = N.where(N.less_equal(chisq,0),1.0,probs)  # set prob=1 for chisq<0
    a = 0.5 * chisq
    if df > 1:
        y = ex(-a)
    # seed the series: even df starts from exp(-a), odd df from the
    # normal-curve tail of sqrt(chisq)
    if df%2 == 0:
        even = 1
        s = y*1
        s2 = s*1
    else:
        even = 0
        s = 2.0 * azprob(-N.sqrt(chisq))
        s2 = s*1
    if (df > 2):
        # series expansion; elements are "frozen" into the result as they
        # converge, tracked per-element via 'mask'
        chisq = 0.5 * (df - 1.0)   # reused as the series iteration limit
        if even:
            z = N.ones(probs.shape,N.float_)
        else:
            z = 0.5 *N.ones(probs.shape,N.float_)
        if even:
            e = N.zeros(probs.shape,N.float_)
        else:
            e = N.log(N.sqrt(N.pi)) *N.ones(probs.shape,N.float_)
        c = N.log(a)
        mask = N.zeros(probs.shape)
        a_big = N.greater(a,BIG)                      # which elements need the
        a_big_frozen = -1 *N.ones(probs.shape,N.float_)  # log-space branch
        totalelements = N.multiply.reduce(N.array(probs.shape))
        # branch 1: a > BIG — accumulate the series in log space
        while asum(mask)!=totalelements:
            e = N.log(z) + e
            s = s + ex(c*z-a-e)
            z = z + 1.0
            newmask = N.greater(z,chisq)
            a_big_frozen = N.where(newmask*N.equal(mask,0)*a_big, s, a_big_frozen)
            mask = N.clip(newmask+mask,0,1)
        # branch 2: a <= BIG — accumulate the series directly
        if even:
            z = N.ones(probs.shape,N.float_)
            e = N.ones(probs.shape,N.float_)
        else:
            z = 0.5 *N.ones(probs.shape,N.float_)
            e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape,N.float_)
        c = 0.0
        mask = N.zeros(probs.shape)
        a_notbig_frozen = -1 *N.ones(probs.shape,N.float_)
        while asum(mask)!=totalelements:
            e = e * (a/z.astype(N.float_))
            c = c + e
            z = z + 1.0
            newmask = N.greater(z,chisq)
            a_notbig_frozen = N.where(newmask*N.equal(mask,0)*(1-a_big),
                                      c*y+s2, a_notbig_frozen)
            mask = N.clip(newmask+mask,0,1)
        # merge: keep prob=1 elements, otherwise pick the branch by a's size
        probs = N.where(N.equal(probs,1),1,
                        N.where(N.greater(a,BIG),a_big_frozen,a_notbig_frozen))
        return probs
    else:
        return s   # df of 1 or 2: the seed value is already the answer
def aerfcc(x):
    """
    Returns the complementary error function erfc(x) with fractional error
    everywhere less than 1.2e-7.  Adapted from Numerical Recipies.  Can
    handle multiple dimensions.

    Usage:   aerfcc(x)
    """
    z = abs(x)
    t = 1.0 / (1.0 + 0.5*z)
    # Horner evaluation of the NR Chebyshev-fit polynomial in t
    poly = 0.17087277
    for coeff in (-0.82215223, 1.48851587, -1.13520398, 0.27886807,
                  -0.18628806, 0.09678418, 0.37409196, 1.00002368):
        poly = poly*t + coeff
    ans = t * N.exp(-z*z - 1.26551223 + t*poly)
    # reflect for negative arguments: erfc(-x) = 2 - erfc(x)
    return N.where(N.greater_equal(x,0), ans, 2.0-ans)
def azprob(z):
    """
    Returns the area under the normal curve 'to the left of' the given z value.
    Thus,
        for z<0, zprob(z) = 1-tail probability
        for z>0, 1.0-zprob(z) = 1-tail probability
        for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
    Adapted from z.c in Gary Perlman's |Stat.  Can handle multiple dimensions.

    Usage:   azprob(z)    where z is a z-value
    """
    def _tail_poly(y):
        # polynomial for |z| >= 2 (argument is y-2); Horner form
        acc = -0.000045255659
        for coeff in (0.000152529290, -0.000019538132, -0.000676904986,
                      0.001390604284, -0.000794620820, -0.002034254874,
                      0.006549791214, -0.010557625006, 0.011630447319,
                      -0.009279453341, 0.005353579108, -0.002141268741,
                      0.000535310849, 0.999936657524):
            acc = acc * y + coeff
        return acc

    def _center_poly(w):
        # polynomial for |z| < 2 (argument is y*y); Horner form
        acc = 0.000124818987
        for coeff in (-0.001075204047, 0.005198775019, -0.019198292004,
                      0.059054035642, -0.151968751364, 0.319152932694,
                      -0.531923007300, 0.797884560593):
            acc = acc * w + coeff
        return acc * N.sqrt(w) * 2.0

    Z_MAX = 6.0  # maximum meaningful z-value
    half = 0.5 * N.fabs(z)
    # pick the central or tail polynomial per element
    x = N.where(N.less(half, 1.0), _center_poly(half*half), _tail_poly(half-2.0))
    x = N.where(N.greater(half, Z_MAX*0.5), 1.0, x)  # saturate huge |z|
    # x approximates 2*Phi(|z|)-1; fold back by the sign of z
    return N.where(N.greater(z, 0), (x+1)*0.5, (1-x)*0.5)
def aksprob(alam):
    """
    Returns the probability value for a K-S statistic computed via ks_2samp.
    Adapted from Numerical Recipies.  Can handle multiple dimensions.

    Usage:   aksprob(alam)
    """
    if type(alam) == N.ndarray:
        frozen = -1 *N.ones(alam.shape,N.float64)  # -1 marks "not yet converged"
        alam = alam.astype(N.float64)
        arrayflag = 1
    else:
        frozen = N.array(-1.)
        alam = N.array(alam,N.float64)
        arrayflag = 1   # NOTE(review): the scalar branch also sets arrayflag=1,
                        # so the [0]-indexing else-branch below is unreachable
    mask = N.zeros(alam.shape)                 # 1 where an element has converged
    fac = 2.0 *N.ones(alam.shape,N.float_)     # alternating-sign series factor
    sum = N.zeros(alam.shape,N.float_)
    termbf = N.zeros(alam.shape,N.float_)      # |previous term|
    a2 = N.array(-2.0*alam*alam,N.float64)
    totalelements = N.multiply.reduce(N.array(mask.shape))
    # alternating series for the asymptotic KS distribution, evaluated
    # per-element until every element has converged (or 200 terms)
    for j in range(1,201):
        if asum(mask) == totalelements:
            break
        exponents = (a2*j*j)
        overflowmask = N.less(exponents,-746)  # exp() would underflow to zero
        frozen = N.where(overflowmask,0,frozen)
        mask = mask+overflowmask
        term = fac*N.exp(exponents)
        sum = sum + term
        # converged when the new term is negligible vs the last term or total
        newmask = N.where(N.less_equal(abs(term),(0.001*termbf)) +
                          N.less(abs(term),1.0e-8*sum), 1, 0)
        frozen = N.where(newmask*N.equal(mask,0), sum, frozen)
        mask = N.clip(mask+newmask,0,1)
        fac = -fac
        termbf = abs(term)
    if arrayflag:
        return N.where(N.equal(frozen,-1), 1.0, frozen)  # 1.0 if doesn't converge
    else:
        return N.where(N.equal(frozen,-1), 1.0, frozen)[0]  # 1.0 if doesn't converge
def afprob (dfnum, dfden, F):
    """
    Returns the 1-tailed significance level (p-value) of an F statistic
    given the degrees of freedom for the numerator (dfR-dfF) and the degrees
    of freedom for the denominator (dfF).  Can handle multiple dims for F.

    Usage:   afprob(dfnum, dfden, F)   where usually dfnum=dfbn, dfden=dfwn
    """
    half_den = 0.5 * dfden
    half_num = 0.5 * dfnum
    # p = I_x(dfden/2, dfnum/2) with x = dfden / (dfden + dfnum*F)
    if type(F) == N.ndarray:
        x = dfden / (1.0 * dfden + dfnum * F)
    else:
        x = dfden / float(dfden + dfnum * F)
    return abetai(half_den, half_num, x)
def abetacf(a,b,x,verbose=1):
    """
    Evaluates the continued fraction form of the incomplete Beta function,
    betai.  (Adapted from: Numerical Recipies in C.)  Can handle multiple
    dimensions for x.

    Usage:   abetacf(a,b,x,verbose=1)
    """
    ITMAX = 200    # maximum continued-fraction iterations
    EPS = 3.0e-7   # relative convergence tolerance per element
    arrayflag = 1
    if type(x) == N.ndarray:
        frozen = N.ones(x.shape,N.float_) *-1  #start out w/ -1s, should replace all
    else:
        arrayflag = 0
        frozen = N.array([-1])
        x = N.array([x])   # promote scalar to a length-1 array
    mask = N.zeros(x.shape)   # 1 where an element has converged
    bm = az = am = 1.0
    qab = a+b
    qap = a+1.0
    qam = a-1.0
    bz = 1.0-qab*x/qap
    for i in range(ITMAX+1):
        if N.sum(N.ravel(N.equal(frozen,-1)))==0:
            break   # every element has converged
        em = float(i+1)
        tem = em + em
        # even step of the continued-fraction recurrence
        d = em*(b-em)*x/((qam+tem)*(a+tem))
        ap = az + d*am
        bp = bz+d*bm
        # odd step
        d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
        app = ap+d*az
        bpp = bp+d*bz
        aold = az*1   # keep a copy to test convergence against
        am = ap/bpp
        bm = bp/bpp
        az = app/bpp
        bz = 1.0
        # freeze elements whose estimate changed by less than EPS (relative)
        newmask = N.less(abs(az-aold),EPS*abs(az))
        frozen = N.where(newmask*N.equal(mask,0), az, frozen)
        mask = N.clip(mask+newmask,0,1)
    noconverge = asum(N.equal(frozen,-1))
    if noconverge != 0 and verbose:
        print('a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements')
    if arrayflag:
        return frozen
    else:
        return frozen[0]   # scalar input: unwrap the length-1 array
def agammln(xx):
    """
    Returns the gamma function of xx.
        Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
    Adapted from: Numerical Recipies in C.  Can handle multiple dims ... but
    probably doesn't normally have to.

    Usage:   agammln(xx)
    """
    # Lanczos-style series coefficients from Numerical Recipes
    series_coeffs = (76.18009173, -86.50532033, 24.01409822,
                     -1.231739516, 0.120858003e-2, -0.536382e-5)
    x = xx - 1.0
    tmp = x + 5.5
    tmp = tmp - (x + 0.5) * N.log(tmp)
    ser = 1.0
    for coeff in series_coeffs:
        x = x + 1
        ser = ser + coeff / x
    return -tmp + N.log(2.50662827465 * ser)
def abetai(a,b,x,verbose=1):
    """
    Returns the incomplete beta function:

        I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.  The continued fraction formulation is implemented
    here, using the betacf function.  (Adapted from: Numerical Recipies in
    C.)  Can handle multiple dimensions.

    Usage:   abetai(a,b,x,verbose=1)
    """
    TINY = 1e-15
    if type(a) == N.ndarray:
        if asum(N.less(x,0)+N.greater(x,1)) != 0:
            raise ValueError('Bad x in abetai')
    # nudge exact 0/1 away from the endpoints to avoid log(0) below
    x = N.where(N.equal(x,0),TINY,x)
    x = N.where(N.equal(x,1.0),1-TINY,x)

    bt = N.where(N.equal(x,0)+N.equal(x,1), 0, -1)  # NOTE(review): dead —
                                                    # overwritten below
    # prefactor x^a (1-x)^b / B(a,b), computed in log space
    exponents = ( gammln(a+b)-gammln(a)-gammln(b)+a*N.log(x)+b*
                  N.log(1.0-x) )
    # 746 (below) is the MAX POSSIBLE BEFORE OVERFLOW
    exponents = N.where(N.less(exponents,-740),-740,exponents)
    bt = N.exp(exponents)
    if type(x) == N.ndarray:
        # use the continued fraction on whichever side of the symmetry
        # relation converges fastest, per element
        ans = N.where(N.less(x,(a+1)/(a+b+2.0)),
                      bt*abetacf(a,b,x,verbose)/float(a),
                      1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b))
    else:
        if x<(a+1)/(a+b+2.0):
            ans = bt*abetacf(a,b,x,verbose)/float(a)
        else:
            ans = 1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b)
    return ans
#####################################
####### AANOVA CALCULATIONS #######
#####################################
import LinearAlgebra, operator
LA = LinearAlgebra
def aglm(data,para):
    """
    Calculates a linear model fit ... anova/ancova/lin-regress/t-test/etc.  Taken
    from:
        Peterson et al. Statistical limitations in functional neuroimaging
        I. Non-inferential methods and statistical models.  Phil Trans Royal Soc
        Lond B 354: 1239-1260.

    Usage:   aglm(data,para)
    Returns: statistic, p-value ???
    """
    if len(para) != len(data):
        print("data and para must be same length in aglm")
        return
    n = len(para)
    p = pstat.aunique(para)
    x = N.zeros((n,len(p)))  # design matrix
    for l in range(len(p)):
        x[:,l] = N.equal(para,p[l])   # one indicator column per level of para
    # OLS estimate: b = inv(X'X) X' Y
    b = N.dot(N.dot(LA.inv(N.dot(N.transpose(x),x)),  # i.e., b=inv(X'X)X'Y
                    N.transpose(x)),
              data)
    diffs = (data - N.dot(x,b))                              # residuals
    s_sq = 1./(n-len(p)) * N.dot(N.transpose(diffs), diffs)  # residual variance

    if len(p) == 2:  # ttest_ind
        c = N.array([1,-1])   # contrast between the two levels
        df = n-2
        fact = asum(1.0/asum(x,0))  # i.e., 1/n1 + 1/n2 + 1/n3 ...
        t = N.dot(c,b) / N.sqrt(s_sq*fact)
        probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
        return t, probs
    # NOTE(review): when len(p) != 2 the function falls through and returns
    # None — no statistic is computed for 3+ levels; confirm intended
def aF_oneway(*args):
    """
    Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups.  From Heiman, pp.394-7.

    Usage:   aF_oneway (*args)    where *args is 2 or more arrays, one per
                                  treatment group
    Returns: f-value, probability
    """
    na = len(args)  # ANOVA on 'na' groups, each in it's own array
    # (dead locals removed: per-group means/vars/ns were computed via map()
    # but never used below)
    alldata = N.concatenate(args)
    bign = len(alldata)
    # total sum-of-squares about the grand mean
    sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
    ssbn = 0
    for a in args:
        ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a))
    ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))   # between-groups SS
    sswn = sstot-ssbn                                      # within-groups SS
    dfbn = na-1
    dfwn = bign - na
    msb = ssbn/float(dfbn)   # between-groups mean square
    msw = sswn/float(dfwn)   # within-groups mean square
    f = msb/msw
    prob = fprob(dfbn,dfwn,f)
    return f, prob
def aF_value (ER,EF,dfR,dfF):
    """
    Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfR = degrees of freedom the Restricted model
        dfF = degrees of freedom associated with the Restricted model
    """
    # F = [ (ER - EF) / (dfR - dfF) ] / [ EF / dfF ]
    numerator = (ER - EF) / float(dfR - dfF)
    denominator = EF / float(dfF)
    return numerator / denominator
def outputfstats(Enum, Eden, dfnum, dfden, f, prob):
    """
    Pretty-prints an ANOVA-style table (EF/ER, df, mean square, F, p) via
    pstat.printcc, appending significance stars to the p-value.
    """
    Enum = round(Enum,3)
    Eden = round(Eden,3)
    dfnum = round(dfnum,3)   # bug fix: was round(Enum,3), clobbering dfnum
    dfden = round(dfden,3)
    f = round(f,3)
    prob = round(prob,3)
    suffix = ''  # for *s after the p-value
    if prob < 0.001:
        suffix = ' ***'
    elif prob < 0.01:
        suffix = ' **'
    elif prob < 0.05:
        suffix = ' *'
    title = [['EF/ER','DF','Mean Square','F-value','prob','']]
    lofl = title+[[Enum, dfnum, round(Enum/float(dfnum),3), f, prob, suffix],
                  [Eden, dfden, round(Eden/float(dfden),3),'','','']]
    pstat.printcc(lofl)
    return
def F_value_multivariate(ER, EF, dfnum, dfden):
    """
    Returns an F-statistic given the following:

    ER    = error (matrix or scalar) for the null hypothesis (Restricted model)
    EF    = error (matrix or scalar) for the alternate hypothesis (Full model)
    dfnum = numerator degrees of freedom
    dfden = denominator degrees of freedom

    ER and EF are matrices from a multivariate F calculation; bare scalars
    are promoted to 1x1 matrices so determinants can be taken.
    """
    if type(ER) in [IntType, FloatType]:
        ER = N.array([[ER]])
    if type(EF) in [IntType, FloatType]:
        EF = N.array([[EF]])
    numerator = (LA.det(ER) - LA.det(EF)) / float(dfnum)
    denominator = LA.det(EF) / float(dfden)
    return numerator / denominator
#####################################
####### ASUPPORT FUNCTIONS ########
#####################################
def asign(a):
    """
    Usage:   asign(a)
    Returns: array of the shape of a, with -1 where a<0, 0 where a==0,
             and +1 where a>0 (the old docstring wrongly claimed +1 for a>=0)
    """
    a = N.asarray(a)
    # N.asarray always yields an ndarray (0-d for scalars), so the old
    # scalar-type branch could never execute and has been removed; the
    # array expression below covers scalars via 0-d arrays.
    return N.zeros(N.shape(a)) - N.less(a, 0) + N.greater(a, 0)
def asum(a, dimension=None, keepdims=0):
    """
    An alternative to the Numeric.add.reduce function, which allows one to
    (1) collapse over multiple dimensions at once, and/or (2) retain all
    dimensions in the original array (squashing summed ones down to size 1).

    Dimension can equal None (ravel array first), an integer (the axis over
    which to operate), or a sequence (operate over multiple axes).  If
    keepdims=1, the resulting array has as many dimensions as the input.

    Usage:   asum(a, dimension=None, keepdims=0)
    Returns: array summed along 'dimension'(s), same _number_ of dims if keepdims=1
    """
    # Promote small integer dtypes to float so the reduction can't overflow.
    if type(a) == N.ndarray and a.dtype in [N.int_, N.short, N.ubyte]:
        a = a.astype(N.float_)
    if dimension == None:
        return N.sum(N.ravel(a))
    if type(dimension) in [IntType, FloatType]:
        s = N.add.reduce(a, dimension)
        if keepdims == 1:
            shp = list(a.shape)
            shp[dimension] = 1
            s = N.reshape(s, shp)
        return s
    # 'dimension' is a sequence of axes: reduce the highest axis first so
    # the remaining axis numbers stay valid as the array loses dimensions.
    axes = sorted(list(dimension), reverse=True)
    s = a * 1.0
    for axis in axes:
        s = N.add.reduce(s, axis)
    if keepdims == 1:
        shp = list(a.shape)
        for axis in axes:
            shp[axis] = 1
        s = N.reshape(s, shp)
    return s
def acumsum(a, dimension=None):
    """
    Returns an array consisting of the cumulative sum of the items in the
    passed array.  Dimension can equal None (ravel array first), an integer
    axis, or a sequence of axes (accumulated one after another, which only
    barely makes sense).

    Usage:   acumsum(a, dimension=None)
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    if type(dimension) not in [ListType, TupleType, N.ndarray]:
        # Single axis: one accumulation pass.
        return N.add.accumulate(a, dimension)
    # Sequence of axes: accumulate over each, highest axis first.
    axes = sorted(list(dimension), reverse=True)
    for axis in axes:
        a = N.add.accumulate(a, axis)
    return a
def ass(inarray, dimension=None, keepdims=0):
    """
    Squares each value in the passed array and returns the sum of the
    squares.  Unfortunate function name. :-)  Defaults to ALL values in
    the array.  Dimension can equal None (ravel array first), an integer
    axis, or a sequence of axes.  Set keepdims=1 to maintain the original
    number of dimensions.

    Usage:   ass(inarray, dimension=None, keepdims=0)
    Returns: sum-along-'dimension' for (inarray*inarray)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    squared = inarray * inarray
    return asum(squared, dimension, keepdims)
def asummult(array1, array2, dimension=None, keepdims=0):
    """
    Multiplies elements in array1 and array2, element by element, and
    returns the sum (along 'dimension') of the products.  Dimension can
    equal None (ravel arrays first), an integer axis, or a sequence of
    axes.  A trivial function, but included for completeness.

    Usage:   asummult(array1, array2, dimension=None, keepdims=0)
    """
    if dimension == None:
        array1 = N.ravel(array1)
        array2 = N.ravel(array2)
        dimension = 0
    products = array1 * array2
    return asum(products, dimension, keepdims)
def asquare_of_sums(inarray, dimension=None, keepdims=0):
    """
    Adds the values in the passed array, squares that sum, and returns the
    result.  Dimension can equal None (ravel array first), an integer axis,
    or a sequence of axes.  If keepdims=1, the returned array has the same
    NUMBER of dimensions as the original.

    Usage:   asquare_of_sums(inarray, dimension=None, keepdims=0)
    Returns: the square of the sum over dim(s) in dimension
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    total = asum(inarray, dimension, keepdims)
    if type(total) == N.ndarray:
        # Promote to float before squaring to avoid integer overflow.
        return total.astype(N.float_) * total
    return float(total) * total
def asumdiffsquared(a, b, dimension=None, keepdims=0):
    """
    Takes pairwise differences of the values in arrays a and b, squares
    these differences, and returns the sum of these squares.  Dimension
    can equal None (sum over ALL elements), an integer (the axis over
    which to operate), or a sequence (operate over multiple axes).
    keepdims=1 means the return shape = len(a.shape) = len(b.shape)

    Usage:   asumdiffsquared(a, b, dimension=None, keepdims=0)
    Returns: sum[ravel(a-b)**2] when dimension is None
    """
    diffs_sq = (a - b) ** 2
    if dimension == None:
        # BUG FIX: the old code raveled 'a' into an unused temporary and
        # then summed only along axis 0; ravel the squared differences so
        # the sum really runs over ALL elements, as documented.
        diffs_sq = N.ravel(diffs_sq)
        dimension = 0
    return asum(diffs_sq, dimension, keepdims)
def ashellsort(inarray):
    """
    Shellsort algorithm.  Sorts a 1D-array.

    Usage:   ashellsort(inarray)
    Returns: sorted-inarray, sorting-index-vector (for original array)
    """
    n = len(inarray)
    svec = inarray * 1.0          # float working copy; input left untouched
    ivec = list(range(n))         # permutation tracking original positions
    gap = n // 2   # BUG FIX: '/' is float division in Python 3 and range()
                   # requires an int; the original comment even said
                   # "integer division needed"
    while gap > 0:
        for i in range(gap, n):
            for j in range(i - gap, -1, -gap):
                while j >= 0 and svec[j] > svec[j + gap]:
                    temp = svec[j]
                    svec[j] = svec[j + gap]
                    svec[j + gap] = temp
                    itemp = ivec[j]
                    ivec[j] = ivec[j + gap]
                    ivec[j + gap] = itemp
        gap = gap // 2   # BUG FIX: integer division (was float '/')
    # svec is now sorted input vector, ivec has the order svec[i] = vec[ivec[i]]
    return svec, ivec
def arankdata(inarray):
    """
    Ranks the data in inarray, dealing with ties appropriately.  Assumes
    a 1D inarray.  Adapted from Gary Perlman's |Stat ranksort.

    Usage:   arankdata(inarray)
    Returns: array of length equal to inarray, containing rank scores
    """
    n = len(inarray)
    svec, ivec = ashellsort(inarray)
    newarray = N.zeros(n, N.float_)
    sumranks = 0
    dupcount = 0
    for i in range(n):
        sumranks += i
        dupcount += 1
        # At the end of a run of tied values (or at the last element),
        # assign every member of the run the average of the ranks the
        # run occupies (1-based).
        if i == n - 1 or svec[i] != svec[i + 1]:
            averank = sumranks / float(dupcount) + 1
            for j in range(i - dupcount + 1, i + 1):
                newarray[ivec[j]] = averank
            sumranks = 0
            dupcount = 0
    return newarray
def afindwithin(data):
    """
    Returns a binary vector, 1=within-subject factor, 0=between.  Input
    equals the entire data array (i.e., column 1=random factor, last
    column = measured values).

    Usage:   afindwithin(data)     data in |Stat format
    """
    numfact = len(data[0]) - 2
    withinvec = []
    for col in range(1, numfact + 1):
        # Restrict the data to a single level of this factor ...
        rows = pstat.linexand(data, col, pstat.unique(pstat.colex(data, 1))[0])
        # ... if there are fewer distinct subjects than scores on this
        # factor, subjects repeat, so the factor is within-subjects.
        is_within = len(pstat.unique(pstat.colex(rows, 0))) < len(rows)
        withinvec.append(1 if is_within else 0)
    return withinvec
#########################################################
#########################################################
###### RE-DEFINE DISPATCHES TO INCLUDE ARRAYS #########
#########################################################
#########################################################
#   Each Dispatch object below routes a call either to the list/tuple
#   implementation (l-prefixed name) or to the array implementation
#   (a-prefixed name) based on the type of the first argument, giving a
#   single type-agnostic public name for every statistic.
## CENTRAL TENDENCY:
geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)),
                           (ageometricmean, (N.ndarray,)) )
harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)),
                          (aharmonicmean, (N.ndarray,)) )
mean = Dispatch ( (lmean, (ListType, TupleType)),
                  (amean, (N.ndarray,)) )
median = Dispatch ( (lmedian, (ListType, TupleType)),
                    (amedian, (N.ndarray,)) )
medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)),
                         (amedianscore, (N.ndarray,)) )
mode = Dispatch ( (lmode, (ListType, TupleType)),
                  (amode, (N.ndarray,)) )
#   Trimmed statistics exist only for arrays.
tmean = Dispatch ( (atmean, (N.ndarray,)) )
tvar = Dispatch ( (atvar, (N.ndarray,)) )
tstdev = Dispatch ( (atstdev, (N.ndarray,)) )
tsem = Dispatch ( (atsem, (N.ndarray,)) )

## VARIATION:
moment = Dispatch ( (lmoment, (ListType, TupleType)),
                    (amoment, (N.ndarray,)) )
variation = Dispatch ( (lvariation, (ListType, TupleType)),
                       (avariation, (N.ndarray,)) )
skew = Dispatch ( (lskew, (ListType, TupleType)),
                  (askew, (N.ndarray,)) )
kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)),
                      (akurtosis, (N.ndarray,)) )
describe = Dispatch ( (ldescribe, (ListType, TupleType)),
                      (adescribe, (N.ndarray,)) )

## DISTRIBUTION TESTS
#   NOTE: list/tuple inputs are routed to the array implementations here —
#   presumably no l* versions of these tests exist; verify before changing.
skewtest = Dispatch ( (askewtest, (ListType, TupleType)),
                      (askewtest, (N.ndarray,)) )
kurtosistest = Dispatch ( (akurtosistest, (ListType, TupleType)),
                          (akurtosistest, (N.ndarray,)) )
normaltest = Dispatch ( (anormaltest, (ListType, TupleType)),
                        (anormaltest, (N.ndarray,)) )

## FREQUENCY STATS:
itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)),
                      (aitemfreq, (N.ndarray,)) )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)),
                               (ascoreatpercentile, (N.ndarray,)) )
percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)),
                               (apercentileofscore, (N.ndarray,)) )
histogram = Dispatch ( (lhistogram, (ListType, TupleType)),
                       (ahistogram, (N.ndarray,)) )
cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)),
                     (acumfreq, (N.ndarray,)) )
relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)),
                     (arelfreq, (N.ndarray,)) )

## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)),
                             (aobrientransform, (N.ndarray,)) )
samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)),
                       (asamplevar, (N.ndarray,)) )
samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)),
                         (asamplestdev, (N.ndarray,)) )
signaltonoise = Dispatch( (asignaltonoise, (N.ndarray,)),)
var = Dispatch ( (lvar, (ListType, TupleType)),
                 (avar, (N.ndarray,)) )
stdev = Dispatch ( (lstdev, (ListType, TupleType)),
                   (astdev, (N.ndarray,)) )
sterr = Dispatch ( (lsterr, (ListType, TupleType)),
                   (asterr, (N.ndarray,)) )
sem = Dispatch ( (lsem, (ListType, TupleType)),
                 (asem, (N.ndarray,)) )
z = Dispatch ( (lz, (ListType, TupleType)),
               (az, (N.ndarray,)) )
zs = Dispatch ( (lzs, (ListType, TupleType)),
                (azs, (N.ndarray,)) )

## TRIMMING FCNS:
threshold = Dispatch( (athreshold, (N.ndarray,)),)
trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)),
                      (atrimboth, (N.ndarray,)) )
trim1 = Dispatch ( (ltrim1, (ListType, TupleType)),
                   (atrim1, (N.ndarray,)) )

## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (ListType, TupleType)),
                    (apaired, (N.ndarray,)) )
lincc = Dispatch ( (llincc, (ListType, TupleType)),
                   (alincc, (N.ndarray,)) )
pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)),
                      (apearsonr, (N.ndarray,)) )
spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)),
                       (aspearmanr, (N.ndarray,)) )
pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)),
                            (apointbiserialr, (N.ndarray,)) )
kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)),
                        (akendalltau, (N.ndarray,)) )
linregress = Dispatch ( (llinregress, (ListType, TupleType)),
                        (alinregress, (N.ndarray,)) )

## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)),
                         (attest_1samp, (N.ndarray,)) )
ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)),
                       (attest_ind, (N.ndarray,)) )
ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)),
                       (attest_rel, (N.ndarray,)) )
chisquare = Dispatch ( (lchisquare, (ListType, TupleType)),
                       (achisquare, (N.ndarray,)) )
ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)),
                      (aks_2samp, (N.ndarray,)) )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)),
                          (amannwhitneyu, (N.ndarray,)) )
tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)),
                        (atiecorrect, (N.ndarray,)) )
ranksums = Dispatch ( (lranksums, (ListType, TupleType)),
                      (aranksums, (N.ndarray,)) )
wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)),
                       (awilcoxont, (N.ndarray,)) )
kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)),
                            (akruskalwallish, (N.ndarray,)) )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)),
                               (afriedmanchisquare, (N.ndarray,)) )

## PROBABILITY CALCS:
#   These accept scalars (routed to l* versions) or arrays (a* versions).
chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)),
                       (achisqprob, (N.ndarray,)) )
zprob = Dispatch ( (lzprob, (IntType, FloatType)),
                   (azprob, (N.ndarray,)) )
ksprob = Dispatch ( (lksprob, (IntType, FloatType)),
                    (aksprob, (N.ndarray,)) )
fprob = Dispatch ( (lfprob, (IntType, FloatType)),
                   (afprob, (N.ndarray,)) )
betacf = Dispatch ( (lbetacf, (IntType, FloatType)),
                    (abetacf, (N.ndarray,)) )
betai = Dispatch ( (lbetai, (IntType, FloatType)),
                   (abetai, (N.ndarray,)) )
erfcc = Dispatch ( (lerfcc, (IntType, FloatType)),
                   (aerfcc, (N.ndarray,)) )
gammln = Dispatch ( (lgammln, (IntType, FloatType)),
                    (agammln, (N.ndarray,)) )

## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)),
                      (aF_oneway, (N.ndarray,)) )
F_value = Dispatch ( (lF_value, (ListType, TupleType)),
                     (aF_value, (N.ndarray,)) )

## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (ListType, TupleType, N.ndarray)), )
sum = Dispatch ( (lsum, (ListType, TupleType)),
                 (asum, (N.ndarray,)) )
cumsum = Dispatch ( (lcumsum, (ListType, TupleType)),
                    (acumsum, (N.ndarray,)) )
ss = Dispatch ( (lss, (ListType, TupleType)),
                (ass, (N.ndarray,)) )
summult = Dispatch ( (lsummult, (ListType, TupleType)),
                     (asummult, (N.ndarray,)) )
square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)),
                            (asquare_of_sums, (N.ndarray,)) )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)),
                            (asumdiffsquared, (N.ndarray,)) )
shellsort = Dispatch ( (lshellsort, (ListType, TupleType)),
                       (ashellsort, (N.ndarray,)) )
rankdata = Dispatch ( (lrankdata, (ListType, TupleType)),
                      (arankdata, (N.ndarray,)) )
findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)),
                        (afindwithin, (N.ndarray,)) )
except ImportError:
pass
| karrtikr/ete | ete3/clustering/stats.py | Python | gpl-3.0 | 159,124 | 0.014555 |
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2012, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""Routines for IPv4 and IPv6 addresses, subnets and ranges."""
import sys as _sys
import re as _re
from netaddr.core import AddrFormatError, AddrConversionError, num_bits, \
DictDotLookup, NOHOST, N, INET_PTON, P, ZEROFILL, Z
from netaddr.strategy import ipv4 as _ipv4, ipv6 as _ipv6
from netaddr.compat import _sys_maxint, _iter_range, _is_str, _int_type, \
_str_type
#-----------------------------------------------------------------------------
# Pre-compiled regexen used by cidr_merge() function.
RE_CIDR_ADJACENT = _re.compile(r'^([01]+)0 \1[1]$')
RE_CIDR_WITHIN = _re.compile(r'^([01]+) \1[10]+$')
RE_VALID_CIDR_BITS = _re.compile('^[01]+$')
#-----------------------------------------------------------------------------
class BaseIP(object):
    """
    An abstract base class for common operations shared between various IP
    related subclasses.
    """
    #   Fixed attribute set: the integer address value and the strategy
    #   module (_ipv4 or _ipv6) that interprets it.
    __slots__ = ('_value', '_module')

    def __init__(self):
        """Constructor."""
        self._value = None
        self._module = None

    def _set_value(self, value):
        #   Validate type and range before storing the raw integer value.
        if not isinstance(value, _int_type):
            raise TypeError('int argument expected, not %s' % type(value))
        if not 0 <= value <= self._module.max_int:
            raise AddrFormatError('value out of bounds for an %s address!' \
                % self._module.family_name)
        self._value = value

    value = property(lambda self: self._value, _set_value,
        doc='a positive integer representing the value of IP address/subnet.')

    def key(self):
        """
        :return: a key tuple that uniquely identifies this IP address.
        """
        #   Subclasses are expected to override this.
        return NotImplemented

    def sort_key(self):
        """
        :return: A key tuple used to compare and sort this `IPAddress`
            correctly.
        """
        #   Subclasses are expected to override this.
        return NotImplemented

    def __hash__(self):
        """
        :return: A hash value uniquely indentifying this IP object.
        """
        return hash(self.key())

    def __eq__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.

        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            equivalent to ``other``, ``False`` otherwise.
        """
        try:
            return self.key() == other.key()
        except (AttributeError, TypeError):
            return NotImplemented

    def __ne__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.

        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            not equivalent to ``other``, ``False`` otherwise.
        """
        try:
            return self.key() != other.key()
        except (AttributeError, TypeError):
            return NotImplemented

    def __lt__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.

        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            less than ``other``, ``False`` otherwise.
        """
        try:
            return self.sort_key() < other.sort_key()
        except (AttributeError, TypeError):
            return NotImplemented

    def __le__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.

        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            less than or equal to ``other``, ``False`` otherwise.
        """
        try:
            return self.sort_key() <= other.sort_key()
        except (AttributeError, TypeError):
            return NotImplemented

    def __gt__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.

        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            greater than ``other``, ``False`` otherwise.
        """
        try:
            return self.sort_key() > other.sort_key()
        except (AttributeError, TypeError):
            return NotImplemented

    def __ge__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.

        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            greater than or equal to ``other``, ``False`` otherwise.
        """
        try:
            return self.sort_key() >= other.sort_key()
        except (AttributeError, TypeError):
            return NotImplemented

    def is_unicast(self):
        """:return: ``True`` if this IP is unicast, ``False`` otherwise"""
        return not self.is_multicast()

    def is_multicast(self):
        """:return: ``True`` if this IP is multicast, ``False`` otherwise"""
        if self._module == _ipv4:
            return self in IPV4_MULTICAST
        elif self._module == _ipv6:
            return self in IPV6_MULTICAST
        #   NB - implicitly returns None if _module is neither strategy.

    def is_loopback(self):
        """
        :return: ``True`` if this IP is loopback address (not for network
            transmission), ``False`` otherwise.
            References: RFC 3330 and 4291.
        """
        if self.version == 4:
            return self in IPV4_LOOPBACK
        elif self.version == 6:
            return self == IPV6_LOOPBACK

    def is_private(self):
        """
        :return: ``True`` if this IP is for internal/private use only
            (i.e. non-public), ``False`` otherwise. Reference: RFCs 1918,
            3330, 4193, 3879 and 2365.
        """
        if self.version == 4:
            for cidr in IPV4_PRIVATE:
                if self in cidr:
                    return True
        elif self.version == 6:
            for cidr in IPV6_PRIVATE:
                if self in cidr:
                    return True
        #   Link-local addresses also count as private.
        if self.is_link_local():
            return True
        return False

    def is_link_local(self):
        """
        :return: ``True`` if this IP is link-local address ``False`` otherwise.
            Reference: RFCs 3927 and 4291.
        """
        if self.version == 4:
            return self in IPV4_LINK_LOCAL
        elif self.version == 6:
            return self in IPV6_LINK_LOCAL

    def is_reserved(self):
        """
        :return: ``True`` if this IP is in IANA reserved range, ``False``
            otherwise. Reference: RFCs 3330 and 3171.
        """
        if self.version == 4:
            for cidr in IPV4_RESERVED:
                if self in cidr:
                    return True
        elif self.version == 6:
            for cidr in IPV6_RESERVED:
                if self in cidr:
                    return True
        return False

    def is_ipv4_mapped(self):
        """
        :return: ``True`` if this IP is an IPv4-mapped IPv6 address
            (``::ffff:x.x.x.x``), ``False`` otherwise.
        """
        #   The upper 96 bits of an IPv4-mapped address are ::ffff.
        return self.version == 6 and (self._value >> 32) == 0xffff

    def is_ipv4_compat(self):
        """
        :return: ``True`` if this IP is an IPv4-compatible IPv6 address
            (``::x.x.x.x``), ``False`` otherwise.
        """
        #   The upper 96 bits of an IPv4-compatible address are all zero.
        return self.version == 6 and (self._value >> 32) == 0

    @property
    def info(self):
        """
        A record dict containing IANA registration details for this IP address
        if available, None otherwise.
        """
        #   Lazy loading of IANA data structures.
        from netaddr.ip.iana import query
        return DictDotLookup(query(self))

    @property
    def version(self):
        """the IP protocol version represented by this IP object."""
        return self._module.version
#-----------------------------------------------------------------------------
class IPAddress(BaseIP):
    """
    An individual IPv4 or IPv6 address without a net mask or subnet prefix.

    To support these and other network based operations, see `IPNetwork`.
    """
    __slots__ = ()

    def __init__(self, addr, version=None, flags=0):
        """
        Constructor.

        :param addr: an IPv4 or IPv6 address which may be represented in an
            accepted string format, as an unsigned integer or as another
            IPAddress object (copy construction).

        :param version: (optional) optimizes version detection if specified
            and distinguishes between IPv4 and IPv6 for addresses with an
            equivalent integer value.

        :param flags: (optional) decides which rules are applied to the
            interpretation of the addr value. Supported constants are
            INET_PTON and ZEROFILL. See the netaddr.core docs for further
            details.
        """
        super(IPAddress, self).__init__()

        if isinstance(addr, BaseIP):
            #   Copy constructor.
            if version is not None and version != addr._module.version:
                raise ValueError('cannot switch IP versions using '
                                 'copy constructor!')
            self._value = addr._value
            self._module = addr._module
        else:
            #   Explicit IP address version.
            if version is not None:
                if version == 4:
                    self._module = _ipv4
                elif version == 6:
                    self._module = _ipv6
                else:
                    raise ValueError('%r is an invalid IP version!' % version)

            #   hasattr(addr, 'upper') is a duck-typed "is a string" test.
            has_upper = hasattr(addr, 'upper')
            if has_upper and '/' in addr:
                raise ValueError('%s() does not support netmasks or subnet' \
                    ' prefixes! See documentation for details.'
                    % self.__class__.__name__)

            if self._module is None:
                #   IP version is implicit, detect it from addr.
                if isinstance(addr, _int_type):
                    try:
                        #   Integer input: pick the family by value range.
                        if 0 <= int(addr) <= _ipv4.max_int:
                            self._value = int(addr)
                            self._module = _ipv4
                        elif _ipv4.max_int < int(addr) <= _ipv6.max_int:
                            self._value = int(addr)
                            self._module = _ipv6
                    except ValueError:
                        pass
                else:
                    #   String input: try each family's parser in turn.
                    for module in _ipv4, _ipv6:
                        try:
                            self._value = module.str_to_int(addr, flags)
                        except:
                            continue
                        else:
                            self._module = module
                            break
                if self._module is None:
                    raise AddrFormatError('failed to detect a valid IP ' \
                        'address from %r' % addr)
            else:
                #   IP version is explicit.
                if has_upper:
                    try:
                        self._value = self._module.str_to_int(addr, flags)
                    except AddrFormatError:
                        raise AddrFormatError('base address %r is not IPv%d'
                            % (addr, self._module.version))
                else:
                    if 0 <= int(addr) <= self._module.max_int:
                        self._value = int(addr)
                    else:
                        raise AddrFormatError('bad address format: %r' % addr)

    def __getstate__(self):
        """:returns: Pickled state of an `IPAddress` object."""
        return self._value, self._module.version

    def __setstate__(self, state):
        """
        :param state: data used to unpickle a pickled `IPAddress` object.
        """
        value, version = state
        self._value = value
        if version == 4:
            self._module = _ipv4
        elif version == 6:
            self._module = _ipv6
        else:
            raise ValueError('unpickling failed for object state: %s' \
                % str(state))

    def is_hostmask(self):
        """
        :return: ``True`` if this IP address host mask, ``False`` otherwise.
        """
        #   A host mask is a contiguous run of set low-order bits, so
        #   value+1 must be a power of two.
        int_val = self._value + 1
        return (int_val & (int_val - 1) == 0)

    def is_netmask(self):
        """
        :return: ``True`` if this IP address network mask, ``False`` otherwise.
        """
        #   A netmask is a contiguous run of set high-order bits, so the
        #   bitwise complement plus one must be a power of two.
        int_val = (self._value ^ self._module.max_int) + 1
        return (int_val & (int_val - 1) == 0)

    def __iadd__(self, num):
        """
        Increases the numerical value of this IPAddress by num.

        An IndexError is raised if result exceeds maximum IP address value or
        is less than zero.

        :param num: size of IP address increment.
        """
        new_value = self._value + num
        if 0 <= new_value <= self._module.max_int:
            self._value = new_value
            return self
        raise IndexError('result outside valid IP address boundary!')

    def __isub__(self, num):
        """
        Decreases the numerical value of this IPAddress by num.

        An IndexError is raised if result is less than zero or exceeds maximum
        IP address value.

        :param num: size of IP address decrement.
        """
        new_value = self._value - num
        if 0 <= new_value <= self._module.max_int:
            self._value = new_value
            return self
        raise IndexError('result outside valid IP address boundary!')

    def __add__(self, num):
        """
        Add the numerical value of this IP address to num and provide the
        result as a new IPAddress object.

        :param num: size of IP address increase.

        :return: a new IPAddress object with its numerical value increased by num.
        """
        new_value = self._value + num
        if 0 <= new_value <= self._module.max_int:
            return self.__class__(new_value, self.version)
        raise IndexError('result outside valid IP address boundary!')

    __radd__ = __add__

    def __sub__(self, num):
        """
        Subtract the numerical value of this IP address from num providing
        the result as a new IPAddress object.

        :param num: size of IP address decrease.

        :return: a new IPAddress object with its numerical value decreased by num.
        """
        new_value = self._value - num
        if 0 <= new_value <= self._module.max_int:
            return self.__class__(new_value, self.version)
        raise IndexError('result outside valid IP address boundary!')

    def __rsub__(self, num):
        """
        Subtract num (lvalue) from the numerical value of this IP address
        (rvalue) providing the result as a new IPAddress object.

        :param num: size of IP address decrease.

        :return: a new IPAddress object with its numerical value decreased by num.
        """
        new_value = num - self._value
        if 0 <= new_value <= self._module.max_int:
            return self.__class__(new_value, self.version)
        raise IndexError('result outside valid IP address boundary!')

    def key(self):
        """
        :return: a key tuple that uniquely identifies this IP address.
        """
        #   NB - we return the value here twice because this IP Address may
        #   be sorted with a list of networks and it should still end up
        #   in the expected order.
        return self.version, self._value

    def sort_key(self):
        """:return: A key tuple used to compare and sort this `IPAddress` correctly."""
        return self.version, self._value, self._module.width

    def __int__(self):
        """:return: the value of this IP address as an unsigned integer"""
        return self._value

    def __long__(self):
        """:return: the value of this IP address as an unsigned integer"""
        return self._value

    def __oct__(self):
        """:return: an octal string representation of this IP address."""
        #   Python 2.x only.
        if self._value == 0:
            return '0'
        return '0%o' % self._value

    def __hex__(self):
        """:return: a hexadecimal string representation of this IP address."""
        #   Python 2.x only.
        return '0x%x' % self._value

    def __index__(self):
        """
        :return: return the integer value of this IP address when called by \
            hex(), oct() or bin().
        """
        #   Python 3.x.
        return self._value

    def bits(self, word_sep=None):
        """
        :param word_sep: (optional) the separator to insert between words.
            Default: None - use default separator for address type.

        :return: the value of this IP address as a binary digit string."""
        return self._module.int_to_bits(self._value, word_sep)

    @property
    def packed(self):
        """The value of this IP address as a packed binary string."""
        return self._module.int_to_packed(self._value)

    @property
    def words(self):
        """
        A list of unsigned integer words (octets for IPv4, hextets for IPv6)
        found in this IP address.
        """
        return self._module.int_to_words(self._value)

    @property
    def bin(self):
        """
        The value of this IP adddress in standard Python binary
        representational form (0bxxx). A back port of the format provided by
        the builtin bin() function found in Python 2.6.x and higher.
        """
        return self._module.int_to_bin(self._value)

    @property
    def reverse_dns(self):
        """The reverse DNS lookup record for this IP address"""
        return self._module.int_to_arpa(self._value)

    def ipv4(self):
        """
        Raises an `AddrConversionError` if IPv6 address cannot be converted
        to IPv4.

        :return: A numerically equivalent version 4 `IPAddress` object.
        """
        ip = None
        klass = self.__class__
        if self.version == 4:
            ip = klass(self._value, 4)
        elif self.version == 6:
            if 0 <= self._value <= _ipv4.max_int:
                #   IPv4-compatible IPv6 address (::x.x.x.x).
                ip = klass(self._value, 4)
            elif _ipv4.max_int <= self._value <= 0xffffffffffff:
                #   NOTE(review): values between 2**32 and 0xffff00000000
                #   satisfy this test but make the subtraction below go
                #   negative, so the constructor raises - confirm only
                #   IPv4-mapped (::ffff:x.x.x.x) values are expected here.
                ip = klass(self._value - 0xffff00000000, 4)
            else:
                raise AddrConversionError('IPv6 address %s unsuitable for ' \
                    'conversion to IPv4!' % self)
        return ip

    def ipv6(self, ipv4_compatible=False):
        """
        .. note:: The IPv4-mapped IPv6 address format is now considered \
            deprecated. See RFC 4291 or later for details.

        :param ipv4_compatible: If ``True`` returns an IPv4-compatible
            (::x.x.x.x) address, an IPv4-mapped (::ffff:x.x.x.x) address
            otherwise. Default: False (IPv4-mapped).

        :return: A numerically equivalent version 6 `IPAddress` object.
        """
        ip = None
        klass = self.__class__
        if self.version == 6:
            if ipv4_compatible and \
                (0xffff00000000 <= self._value <= 0xffffffffffff):
                #   Strip the ::ffff prefix from an IPv4-mapped address.
                ip = klass(self._value - 0xffff00000000, 6)
            else:
                ip = klass(self._value, 6)
        elif self.version == 4:
            #   IPv4-Compatible IPv6 address
            ip = klass(self._value, 6)
            if not ipv4_compatible:
                #   IPv4-Mapped IPv6 address
                ip = klass(0xffff00000000 + self._value, 6)
        return ip

    def format(self, dialect=None):
        """
        Only relevant for IPv6 addresses. Has no effect for IPv4.

        :param dialect: An ipv6_* dialect class.

        :return: an alternate string representation for this IP address.
        """
        if dialect is not None:
            if not hasattr(dialect, 'word_fmt'):
                raise TypeError(
                    'custom dialects should subclass ipv6_verbose!')
        return self._module.int_to_str(self._value, dialect=dialect)

    def __or__(self, other):
        """
        :param other: An `IPAddress` object (or other int-like object).

        :return: bitwise OR (x | y) between the integer value of this IP
            address and ``other``.
        """
        return self.__class__(self._value | int(other), self.version)

    def __and__(self, other):
        """
        :param other: An `IPAddress` object (or other int-like object).

        :return: bitwise AND (x & y) between the integer value of this IP
            address and ``other``.
        """
        return self.__class__(self._value & int(other), self.version)

    def __xor__(self, other):
        """
        :param other: An `IPAddress` object (or other int-like object).

        :return: bitwise exclusive OR (x ^ y) between the integer value of
            this IP address and ``other``.
        """
        return self.__class__(self._value ^ int(other), self.version)

    def __lshift__(self, numbits):
        """
        :param numbits: size of bitwise shift.

        :return: an `IPAddress` object based on this one with its integer
            value left shifted by ``numbits``.
        """
        return self.__class__(self._value << numbits, self.version)

    def __rshift__(self, numbits):
        """
        :param numbits: size of bitwise shift.

        :return: an `IPAddress` object based on this one with its integer
            value right shifted by ``numbits``.
        """
        return self.__class__(self._value >> numbits, self.version)

    def __nonzero__(self):
        """:return: ``True`` if the numerical value of this IP address is not \
            zero, ``False`` otherwise."""
        #   Python 2.x.
        return bool(self._value)

    __bool__ = __nonzero__  #   Python 3.x.

    def __str__(self):
        """:return: IP address in presentational format"""
        return self._module.int_to_str(self._value)

    def __repr__(self):
        """:return: Python statement to create an equivalent object"""
        return "%s('%s')" % (self.__class__.__name__, self)
#-----------------------------------------------------------------------------
class IPListMixin(object):
    """
    A mixin class providing shared list-like functionality to classes
    representing groups of IP addresses.
    """
    def __iter__(self):
        """
        :return: An iterator providing access to all `IPAddress` objects
            within range represented by this ranged IP object.
        """
        start_ip = IPAddress(self.first, self.version)
        end_ip = IPAddress(self.last, self.version)
        return iter_iprange(start_ip, end_ip)

    @property
    def size(self):
        """
        The total number of IP addresses within this ranged IP object.
        """
        return int(self.last - self.first + 1)

    def __len__(self):
        """
        :return: the number of IP addresses in this ranged IP object. Raises
            an `IndexError` if size > system max int (a Python 2.x
            limitation). Use the .size property for subnets of any size.
        """
        size = self.size
        if size > _sys_maxint:
            raise IndexError(("range contains more than %d (index size max) "
                "IP addresses! Use the .size property instead." % _sys_maxint))
        return size

    def __getitem__(self, index):
        """
        :return: The IP address(es) in this `IPNetwork` object referenced by
            index or slice. As slicing can produce large sequences of objects
            an iterator is returned instead of the more usual `list`.
        """
        item = None
        if hasattr(index, 'indices'):
            #   Duck-typed "is a slice" test: slice objects have .indices().
            if self._module.version == 6:
                raise TypeError('IPv6 slices are not supported!')
            (start, stop, step) = index.indices(self.size)
            if (start + step < 0) or (step > stop):
                #   step value exceeds start and stop boundaries.
                item = iter([IPAddress(self.first, self.version)])
            else:
                start_ip = IPAddress(self.first + start, self.version)
                end_ip = IPAddress(self.first + stop - step, self.version)
                item = iter_iprange(start_ip, end_ip, step)
        else:
            try:
                index = int(index)
                if (- self.size) <= index < 0:
                    #   negative index.
                    item = IPAddress(self.last + index + 1, self.version)
                elif 0 <= index <= (self.size - 1):
                    #   Positive index or zero index.
                    item = IPAddress(self.first + index, self.version)
                else:
                    raise IndexError('index out range for address range size!')
            except ValueError:
                raise TypeError('unsupported index type %r!' % index)
        return item

    def __contains__(self, other):
        """
        :param other: an `IPAddress` or ranged IP object.

        :return: ``True`` if other falls within the boundary of this one,
            ``False`` otherwise.
        """
        if self.version != other.version:
            return False
        if hasattr(other, '_value') and not hasattr(other, '_prefixlen'):
            #   Bare IPAddress: compare its integer value directly.
            return other._value >= self.first and other._value <= self.last
        #   Ranged object: it must lie entirely within this range.
        return other.first >= self.first and other.last <= self.last

    def __nonzero__(self):
        """
        Ranged IP objects always represent a sequence of at least one IP
        address and are therefore always True in the boolean context.
        """
        #   Python 2.x.
        return True

    __bool__ = __nonzero__  #   Python 3.x.
#-----------------------------------------------------------------------------
def parse_ip_network(module, addr, implicit_prefix=False, flags=0):
    """
    Parse an IP network specification into an (integer value, prefix length)
    tuple.

    :param module: version-specific module (``_ipv4`` or ``_ipv6``) supplying
        constants and converters for parsing.

    :param addr: either a ``(value, prefix)`` integer tuple or a CIDR-like
        string (``'addr/prefix'``, ``'addr/netmask'``, ``'addr/hostmask'`` or
        a bare address).

    :param implicit_prefix: if True, string input is first expanded from the
        abbreviated classful IPv4 form (e.g. ``'192.168/16'``).

    :param flags: NOHOST strips the host bits from the returned value.

    :return: tuple of (integer address value, integer prefix length).

    :raises AddrFormatError: for values/prefixes out of range or unparseable.
    :raises TypeError: if addr is neither a tuple nor a string.
    """
    if isinstance(addr, tuple):
        # CIDR integer tuple
        try:
            val1, val2 = addr
        except ValueError:
            raise AddrFormatError('invalid %s tuple!' % module.family_name)
        if 0 <= val1 <= module.max_int:
            value = val1
            if 0 <= val2 <= module.width:
                prefixlen = val2
            else:
                raise AddrFormatError('invalid prefix for %s tuple!' \
                    % module.family_name)
        else:
            raise AddrFormatError('invalid address value for %s tuple!' \
                % module.family_name)
    elif isinstance(addr, _str_type):
        # CIDR-like string subnet
        if implicit_prefix:
            #TODO: deprecate this option in netaddr 0.8.x
            addr = cidr_abbrev_to_verbose(addr)
        try:
            if '/' in addr:
                # maxsplit=1 guarantees exactly two parts here.
                val1, val2 = addr.split('/', 1)
            else:
                val1 = addr
                val2 = None
        except ValueError:
            raise AddrFormatError('invalid IPNetwork address %s!' % addr)
        try:
            ip = IPAddress(val1, module.version, flags=INET_PTON)
        except AddrFormatError:
            if module.version == 4:
                # Try a partial IPv4 network address...
                expanded_addr = _ipv4.expand_partial_address(val1)
                ip = IPAddress(expanded_addr, module.version, flags=INET_PTON)
            else:
                raise AddrFormatError('invalid IPNetwork address %s!' % addr)
        value = ip._value
        # NOTE: the two except clauses below rely on int()'s behaviour:
        # int(None) raises TypeError (no prefix given at all), while
        # int('255.255.255.0') raises ValueError (a mask-style prefix).
        try:
            # Integer CIDR prefix.
            prefixlen = int(val2)
        except TypeError:
            if val2 is None:
                # No prefix was specified.
                prefixlen = module.width
        except ValueError:
            # Not an integer prefix, try a netmask/hostmask prefix.
            mask = IPAddress(val2, module.version, flags=INET_PTON)
            if mask.is_netmask():
                prefixlen = module.netmask_to_prefix[mask._value]
            elif mask.is_hostmask():
                prefixlen = module.hostmask_to_prefix[mask._value]
            else:
                raise AddrFormatError('addr %r is not a valid IPNetwork!' \
                    % addr)
        if not 0 <= prefixlen <= module.width:
            raise AddrFormatError('invalid prefix for %s address!' \
                % module.family_name)
    else:
        raise TypeError('unexpected type %s for addr arg' % type(addr))
    if flags & NOHOST:
        # Remove host bits.
        netmask = module.prefix_to_netmask[prefixlen]
        value = value & netmask
    return value, prefixlen
#-----------------------------------------------------------------------------
class IPNetwork(BaseIP, IPListMixin):
    """
    An IPv4 or IPv6 network or subnet.

    A combination of an IP address and a network mask.

    Accepts CIDR and several related variants :

    a) Standard CIDR::

        x.x.x.x/y -> 192.0.2.0/24
        x::/y -> fe80::/10

    b) Hybrid CIDR format (netmask address instead of prefix), where 'y' \
address represent a valid netmask::

        x.x.x.x/y.y.y.y -> 192.0.2.0/255.255.255.0
        x::/y:: -> fe80::/ffc0::

    c) ACL hybrid CIDR format (hostmask address instead of prefix like \
Cisco's ACL bitmasks), where 'y' address represent a valid netmask::

        x.x.x.x/y.y.y.y -> 192.0.2.0/0.0.0.255
        x::/y:: -> fe80::/3f:ffff:ffff:ffff:ffff:ffff:ffff:ffff

    d) Abbreviated CIDR format (as of netaddr 0.7.x this requires the \
optional constructor argument ``implicit_prefix=True``)::

        x       -> 192
        x/y     -> 10/8
        x.x/y   -> 192.168/16
        x.x.x/y -> 192.168.0/24

    which are equivalent to::

        x.0.0.0/y -> 192.0.0.0/24
        x.0.0.0/y -> 10.0.0.0/8
        x.x.0.0/y -> 192.168.0.0/16
        x.x.x.0/y -> 192.168.0.0/24
    """
    __slots__ = ('_prefixlen',)

    def __init__(self, addr, implicit_prefix=False, version=None, flags=0):
        """
        Constructor.

        :param addr: an IPv4 or IPv6 address with optional CIDR prefix,
            netmask or hostmask. May be an IP address in presentation
            (string) format, an tuple containing and integer address and a
            network prefix, or another IPAddress/IPNetwork object (copy
            construction).

        :param implicit_prefix: (optional) if True, the constructor uses
            classful IPv4 rules to select a default prefix when one is not
            provided. If False it uses the length of the IP address version.
            (default: False)

        :param version: (optional) optimizes version detection if specified
            and distinguishes between IPv4 and IPv6 for addresses with an
            equivalent integer value.

        :param flags: (optional) decides which rules are applied to the
            interpretation of the addr value. Currently only supports the
            NOHOST option. See the netaddr.core docs for further details.
        """
        super(IPNetwork, self).__init__()

        value, prefixlen, module = None, None, None

        if hasattr(addr, '_prefixlen'):
            # IPNetwork object copy constructor
            value = addr._value
            module = addr._module
            prefixlen = addr._prefixlen
        elif hasattr(addr, '_value'):
            # IPAddress object copy constructor
            value = addr._value
            module = addr._module
            prefixlen = module.width
        elif version == 4:
            value, prefixlen = parse_ip_network(_ipv4, addr,
                implicit_prefix=implicit_prefix, flags=flags)
            module = _ipv4
        elif version == 6:
            value, prefixlen = parse_ip_network(_ipv6, addr,
                implicit_prefix=implicit_prefix, flags=flags)
            module = _ipv6
        else:
            if version is not None:
                raise ValueError('%r is an invalid IP version!' % version)
            # No version given - try IPv4 first, fall back to IPv6.
            try:
                module = _ipv4
                value, prefixlen = parse_ip_network(module, addr,
                    implicit_prefix, flags)
            except AddrFormatError:
                try:
                    module = _ipv6
                    value, prefixlen = parse_ip_network(module, addr,
                        implicit_prefix, flags)
                except AddrFormatError:
                    pass
                if value is None:
                    raise AddrFormatError('invalid IPNetwork %s' % addr)

        self._value = value
        self._prefixlen = prefixlen
        self._module = module

    def __getstate__(self):
        """:return: Pickled state of an `IPNetwork` object."""
        return self._value, self._prefixlen, self._module.version

    def __setstate__(self, state):
        """
        :param state: data used to unpickle a pickled `IPNetwork` object.
        """
        value, prefixlen, version = state

        self._value = value

        if version == 4:
            self._module = _ipv4
        elif version == 6:
            self._module = _ipv6
        else:
            raise ValueError('unpickling failed for object state %s' \
                % str(state))

        if 0 <= prefixlen <= self._module.width:
            self._prefixlen = prefixlen
        else:
            raise ValueError('unpickling failed for object state %s' \
                % str(state))

    def _set_prefixlen(self, value):
        if not isinstance(value, _int_type):
            raise TypeError('int argument expected, not %s' % type(value))
        if not 0 <= value <= self._module.width:
            raise AddrFormatError('invalid prefix for an %s address!' \
                % self._module.family_name)
        self._prefixlen = value

    prefixlen = property(lambda self: self._prefixlen, _set_prefixlen,
        doc='size of the bitmask used to separate the network from the host bits')

    @property
    def ip(self):
        """
        The IP address of this `IPNetwork` object. This is may or may not be
        the same as the network IP address which varies according to the value
        of the CIDR subnet prefix.
        """
        return IPAddress(self._value, self.version)

    @property
    def network(self):
        """The network address of this `IPNetwork` object."""
        return IPAddress(self._value & int(self.netmask), self.version)

    @property
    def broadcast(self):
        """The broadcast address of this `IPNetwork` object"""
        return IPAddress(self._value | self.hostmask._value, self.version)

    @property
    def first(self):
        """
        The integer value of first IP address found within this `IPNetwork`
        object.
        """
        return self._value & (self._module.max_int ^ self.hostmask._value)

    @property
    def last(self):
        """
        The integer value of last IP address found within this `IPNetwork`
        object.
        """
        hostmask = (1 << (self._module.width - self._prefixlen)) - 1
        return self._value | hostmask

    @property
    def netmask(self):
        """The subnet mask of this `IPNetwork` object."""
        netmask = self._module.max_int ^ self.hostmask._value
        return IPAddress(netmask, self.version)

    @property
    def hostmask(self):
        """The host mask of this `IPNetwork` object."""
        hostmask = (1 << (self._module.width - self._prefixlen)) - 1
        return IPAddress(hostmask, self.version)

    @property
    def cidr(self):
        """
        The true CIDR address for this `IPNetwork` object which omits any
        host bits to the right of the CIDR subnet prefix.
        """
        ip = IPAddress(self._value & int(self.netmask), self.version)
        cidr = IPNetwork("%s/%d" % (ip, self.prefixlen))
        return cidr

    def __iadd__(self, num):
        """
        Increases the value of this `IPNetwork` object by the current size
        multiplied by ``num``.

        An `IndexError` is raised if result exceeds maximum IP address value
        or is less than zero.

        :param num: (optional) number of `IPNetwork` blocks to increment \
            this IPNetwork's value by.
        """
        new_value = int(self.network) + (self.size * num)

        if (new_value + (self.size - 1)) > self._module.max_int:
            raise IndexError('increment exceeds address boundary!')
        if new_value < 0:
            raise IndexError('increment is less than zero!')

        self._value = new_value
        return self

    def __isub__(self, num):
        """
        Decreases the value of this `IPNetwork` object by the current size
        multiplied by ``num``.

        An `IndexError` is raised if result is less than zero or exceeds
        maximum IP address value.

        :param num: (optional) number of `IPNetwork` blocks to decrement \
            this IPNetwork's value by.
        """
        new_value = int(self.network) - (self.size * num)

        if new_value < 0:
            raise IndexError('decrement is less than zero!')
        if (new_value + (self.size - 1)) > self._module.max_int:
            raise IndexError('decrement exceeds address boundary!')

        self._value = new_value
        return self

    def key(self):
        """
        :return: A key tuple used to uniquely identify this `IPNetwork`.
        """
        return self.version, self.first, self.last

    def sort_key(self):
        """
        :return: A key tuple used to compare and sort this `IPNetwork` correctly.
        """
        net_size_bits = self._module.width - num_bits(self.size)
        host_bits = self._value - self.first
        return self.version, self.first, net_size_bits, host_bits

    def ipv4(self):
        """
        :return: A numerically equivalent version 4 `IPNetwork` object. \
            Raises an `AddrConversionError` if IPv6 address cannot be \
            converted to IPv4.
        """
        ip = None
        klass = self.__class__

        if self.version == 4:
            ip = klass('%s/%d' % (self.ip, self.prefixlen))
        elif self.version == 6:
            if 0 <= self._value <= _ipv4.max_int:
                addr = _ipv4.int_to_str(self._value)
                ip = klass('%s/%d' % (addr, self.prefixlen - 96))
            elif _ipv4.max_int <= self._value <= 0xffffffffffff:
                # NOTE(review): for values between 2**32 and 0xffff00000000
                # this subtraction goes negative before formatting - looks
                # like only the IPv4-mapped range is really supported here;
                # confirm against _ipv4.int_to_str behaviour.
                addr = _ipv4.int_to_str(self._value - 0xffff00000000)
                ip = klass('%s/%d' % (addr, self.prefixlen - 96))
            else:
                raise AddrConversionError('IPv6 address %s unsuitable for ' \
                    'conversion to IPv4!' % self)
        return ip

    def ipv6(self, ipv4_compatible=False):
        """
        .. note:: the IPv4-mapped IPv6 address format is now considered \
            deprecated. See RFC 4291 or later for details.

        :param ipv4_compatible: If ``True`` returns an IPv4-mapped address
            (::ffff:x.x.x.x), an IPv4-compatible (::x.x.x.x) address
            otherwise. Default: False (IPv4-mapped).

        :return: A numerically equivalent version 6 `IPNetwork` object.
        """
        ip = None
        klass = self.__class__

        if self.version == 6:
            if ipv4_compatible and \
                (0xffff00000000 <= self._value <= 0xffffffffffff):
                ip = klass((self._value - 0xffff00000000, self._prefixlen),
                    version=6)
            else:
                ip = klass((self._value, self._prefixlen), version=6)
        elif self.version == 4:
            if ipv4_compatible:
                # IPv4-Compatible IPv6 address
                ip = klass((self._value, self._prefixlen + 96), version=6)
            else:
                # IPv4-Mapped IPv6 address
                ip = klass((0xffff00000000 + self._value,
                    self._prefixlen + 96), version=6)

        return ip

    def previous(self, step=1):
        """
        :param step: the number of IP subnets between this `IPNetwork` object
            and the expected subnet. Default: 1 (the previous IP subnet).

        :return: The adjacent subnet preceding this `IPNetwork` object.
        """
        # Pass version as a keyword - the second positional argument of the
        # constructor is implicit_prefix, not version.
        ip_copy = self.__class__('%s/%d' % (self.network, self.prefixlen),
            version=self.version)
        ip_copy -= step
        return ip_copy

    def next(self, step=1):
        """
        :param step: the number of IP subnets between this `IPNetwork` object
            and the expected subnet. Default: 1 (the next IP subnet).

        :return: The adjacent subnet succeeding this `IPNetwork` object.
        """
        # Pass version as a keyword - the second positional argument of the
        # constructor is implicit_prefix, not version.
        ip_copy = self.__class__('%s/%d' % (self.network, self.prefixlen),
            version=self.version)
        ip_copy += step
        return ip_copy

    def supernet(self, prefixlen=0):
        """
        Provides a list of supernets for this `IPNetwork` object between the
        size of the current prefix and (if specified) an endpoint prefix.

        :param prefixlen: (optional) a CIDR prefix for the maximum supernet.
            Default: 0 - returns all possible supernets.

        :return: a list of supernet `IPNetwork` objects.
        """
        if not 0 <= prefixlen <= self._module.width:
            raise ValueError('CIDR prefix /%d invalid for IPv%d!' \
                % (prefixlen, self.version))

        # Use a copy of self as we'll be editing it.
        supernet = self.cidr
        supernets = []
        while supernet.prefixlen > prefixlen:
            supernet.prefixlen -= 1
            supernets.append(supernet.cidr)

        return list(reversed(supernets))

    def subnet(self, prefixlen, count=None, fmt=None):
        """
        A generator that divides up this IPNetwork's subnet into smaller
        subnets based on a specified CIDR prefix.

        :param prefixlen: a CIDR prefix indicating size of subnets to be
            returned.

        :param count: (optional) number of consecutive IP subnets to be
            returned.

        :param fmt: (optional) unused; retained for backwards compatibility.

        :return: an iterator containing IPNetwork subnet objects.
        """
        # Validate the requested prefix (not self.prefixlen, which the
        # constructor already guarantees to be valid).
        if not 0 <= prefixlen <= self._module.width:
            raise ValueError('CIDR prefix /%d invalid for IPv%d!' \
                % (prefixlen, self.version))

        if not self.prefixlen <= prefixlen:
            # Requested subnets are larger than this one - nothing to yield.
            # (PEP 479: 'raise StopIteration' inside a generator becomes a
            # RuntimeError from Python 3.7 onwards, so use a bare return.)
            return

        # Calculate number of subnets to be returned.
        width = self._module.width
        max_subnets = 2 ** (width - self.prefixlen) // 2 ** (width - prefixlen)

        if count is None:
            count = max_subnets

        if not 1 <= count <= max_subnets:
            raise ValueError('count outside of current IP subnet boundary!')

        base_subnet = self._module.int_to_str(self.first)
        for i in range(count):
            subnet = self.__class__('%s/%d' % (base_subnet, prefixlen),
                version=self.version)
            subnet.value += (subnet.size * i)
            subnet.prefixlen = prefixlen
            yield subnet

    def iter_hosts(self):
        """
        An generator that provides all the IP addresses that can be assigned
        to hosts within the range of this IP object's subnet.

        - for IPv4, the network and broadcast addresses are always excluded. \
          Any subnet that contains less than 4 IP addresses yields an empty list.

        - for IPv6, only the unspecified address '::' is excluded from any \
          yielded IP addresses.

        :return: an IPAddress iterator
        """
        it_hosts = iter([])

        if self.version == 4:
            # IPv4 logic.
            if self.size >= 4:
                it_hosts = iter_iprange(IPAddress(self.first+1, self.version),
                                        IPAddress(self.last-1, self.version))
        else:
            # IPv6 logic.
            if self.first == 0:
                if self.size != 1:
                    # Don't return '::'.
                    it_hosts = iter_iprange(
                        IPAddress(self.first+1, self.version),
                        IPAddress(self.last, self.version))
            else:
                it_hosts = iter(self)

        return it_hosts

    def __str__(self):
        """:return: this IPNetwork in CIDR format"""
        addr = self._module.int_to_str(self._value)
        return "%s/%s" % (addr, self.prefixlen)

    def __repr__(self):
        """:return: Python statement to create an equivalent object"""
        return "%s('%s')" % (self.__class__.__name__, self)
#-----------------------------------------------------------------------------
class IPRange(BaseIP, IPListMixin):
    """
    An arbitrary IPv4 or IPv6 address range.

    Formed from a lower and upper bound IP address. The upper bound IP cannot
    be numerically smaller than the lower bound and the IP version of both
    must match.
    """
    __slots__ = ('_start', '_end')

    def __init__(self, start, end, flags=0):
        """
        Constructor.

        :param start: an IPv4 or IPv6 address that forms the lower
            boundary of this IP range.

        :param end: an IPv4 or IPv6 address that forms the upper
            boundary of this IP range.

        :param flags: (optional) decides which rules are applied to the
            interpretation of the start and end values. Supported constants
            are INET_PTON and ZEROFILL. See the netaddr.core docs for further
            details.
        """
        self._start = IPAddress(start, flags=flags)
        self._module = self._start._module
        # The end address must share the version detected for the start.
        self._end = IPAddress(end, self._module.version, flags=flags)
        if int(self._end) < int(self._start):
            raise AddrFormatError('lower bound IP greater than upper bound!')

    def __getstate__(self):
        """:return: Pickled state of an `IPRange` object."""
        return self._start.value, self._end.value, self._module.version

    def __setstate__(self, state):
        """
        :param state: data used to unpickle a pickled `IPRange` object.
        """
        lower, upper, version = state
        self._start = IPAddress(lower, version)
        self._module = self._start._module
        self._end = IPAddress(upper, version)

    @property
    def first(self):
        """The integer value of first IP address in this `IPRange` object."""
        return int(self._start)

    @property
    def last(self):
        """The integer value of last IP address in this `IPRange` object."""
        return int(self._end)

    def key(self):
        """
        :return: A key tuple used to uniquely identify this `IPRange`.
        """
        return self.version, self.first, self.last

    def sort_key(self):
        """
        :return: A key tuple used to compare and sort this `IPRange` correctly.
        """
        net_size_bits = self._module.width - num_bits(self.size)
        return self.version, self.first, net_size_bits

    def cidrs(self):
        """
        The list of CIDR addresses found within the lower and upper bound
        addresses of this `IPRange`.
        """
        return iprange_to_cidrs(self._start, self._end)

    def __str__(self):
        """:return: this `IPRange` in a common representational format."""
        return '%s-%s' % (self._start, self._end)

    def __repr__(self):
        """:return: Python statement to create an equivalent object"""
        return "%s('%s', '%s')" % (type(self).__name__,
            self._start, self._end)
#-----------------------------------------------------------------------------
def iter_unique_ips(*args):
    """
    :param args: A list of IP addresses and subnets passed in as arguments.

    :return: A generator that flattens out IP subnets, yielding unique
        individual IP addresses (no duplicates).
    """
    # Merging first removes duplicates and overlaps, so each address is
    # produced exactly once.
    merged = cidr_merge(args)
    for subnet in merged:
        for address in subnet:
            yield address
#-----------------------------------------------------------------------------
def cidr_abbrev_to_verbose(abbrev_cidr):
    """
    A function that converts abbreviated IPv4 CIDRs to their more verbose
    equivalent.

    :param abbrev_cidr: an abbreviated CIDR.

    Uses the old-style classful IP address rules to decide on a default
    subnet prefix if one is not explicitly provided.

    Only supports IPv4 addresses.

    Examples ::

        10      - 10.0.0.0/8
        10/16   - 10.0.0.0/16
        128     - 128.0.0.0/16
        128/8   - 128.0.0.0/8
        192.168 - 192.168.0.0/16

    :return: A verbose CIDR from an abbreviated CIDR or old-style classful \
        network address, The original value if it was not recognised as a \
        supported abbreviation.
    """
    # Internal function that returns a prefix value based on the old IPv4
    # classful network scheme that has been superseded (almost) by CIDR.
    # NOTE: raises IndexError (not ValueError) for out-of-range octets; that
    # IndexError is absorbed by the outer handler below, which then returns
    # the original input unchanged.
    def classful_prefix(octet):
        octet = int(octet)
        if not 0 <= octet <= 255:
            raise IndexError('Invalid octet: %r!' % octet)
        if 0 <= octet <= 127: # Legacy class 'A' classification.
            return 8
        elif 128 <= octet <= 191: # Legacy class 'B' classification.
            return 16
        elif 192 <= octet <= 223: # Legacy class 'C' classification.
            return 24
        elif 224 <= octet <= 239: # Multicast address range.
            return 4
        return 32 # Default.
    start = ''
    tokens = []
    prefix = None
    if _is_str(abbrev_cidr):
        # IPv6 input is never abbreviated - hand it back untouched.
        if ':' in abbrev_cidr:
            return abbrev_cidr
        try:
            # Single octet partial integer or string address.
            i = int(abbrev_cidr)
            tokens = [str(i), '0', '0', '0']
            return "%s%s/%s" % (start, '.'.join(tokens), classful_prefix(i))
        except ValueError:
            # Multi octet partial string address with optional prefix.
            part_addr = abbrev_cidr
            tokens = []
            if part_addr == '':
                # Not a recognisable format.
                return abbrev_cidr
            if '/' in part_addr:
                (part_addr, prefix) = part_addr.split('/', 1)
            # Check prefix for validity.
            # NOTE: the ValueError raised for an out-of-range prefix is
            # deliberately caught by this same try/except, so both malformed
            # and out-of-range prefixes return the input unchanged.
            if prefix is not None:
                try:
                    if not 0 <= int(prefix) <= 32:
                        raise ValueError('prefixlen in address %r out of range' \
                            ' for IPv4!' % abbrev_cidr)
                except ValueError:
                    return abbrev_cidr
            if '.' in part_addr:
                tokens = part_addr.split('.')
            else:
                tokens = [part_addr]
            if 1 <= len(tokens) <= 4:
                # Pad out missing octets with zeroes.
                for i in range(4 - len(tokens)):
                    tokens.append('0')
            else:
                # Not a recognisable format.
                return abbrev_cidr
            if prefix is None:
                try:
                    prefix = classful_prefix(tokens[0])
                except ValueError:
                    return abbrev_cidr
            return "%s%s/%s" % (start, '.'.join(tokens), prefix)
        # Fall through to the default return for non-string int() failures
        # (TypeError) and out-of-range octets from classful_prefix
        # (IndexError).
        except TypeError:
            pass
        except IndexError:
            pass
    # Not a recognisable format.
    return abbrev_cidr
#-----------------------------------------------------------------------------
def cidr_merge(ip_addrs):
    """
    A function that accepts an iterable sequence of IP addresses and subnets
    merging them into the smallest possible list of CIDRs. It merges adjacent
    subnets where possible, those contained within others and also removes
    any duplicates.

    :param ip_addrs: an iterable sequence of IP addresses and subnets.

    :return: a summarized list of `IPNetwork` objects.
    """
    # Reject non-iterables and mappings (a dict would silently iterate keys).
    if not hasattr(ip_addrs, '__iter__') or hasattr(ip_addrs, 'keys'):
        raise ValueError('A sequence or iterator is expected!')

    # Start off using set as we'll remove any duplicates at the start.
    ipv4_bit_cidrs = set()
    ipv6_bit_cidrs = set()

    # Convert IP addresses and subnets into their CIDR bit strings.
    ipv4_match_all_found = False
    ipv6_match_all_found = False

    for ip in ip_addrs:
        cidr = IPNetwork(ip)
        # Only the network bits up to the prefix identify the CIDR.
        bits = cidr.network.bits(word_sep='')[0:cidr.prefixlen]
        if cidr.version == 4:
            if bits == '':
                # A /0 covers everything - collapse to a single entry.
                ipv4_match_all_found = True
                ipv4_bit_cidrs = set(['']) # Clear all other IPv4 values.
            if not ipv4_match_all_found:
                ipv4_bit_cidrs.add(bits)
        else:
            if bits == '':
                ipv6_match_all_found = True
                ipv6_bit_cidrs = set(['']) # Clear all other IPv6 values.
            if not ipv6_match_all_found:
                ipv6_bit_cidrs.add(bits)

    # Merge binary CIDR addresses where possible.
    def _reduce_bit_cidrs(cidrs):
        new_cidrs = []

        cidrs.sort()

        # Multiple passes are required to obtain precise results.
        while 1:
            finished = True
            while (cidrs):
                if not new_cidrs:
                    new_cidrs.append(cidrs.pop(0))
                if not cidrs:
                    break
                # lhs and rhs are same size and adjacent.
                (new_cidr, subs) = RE_CIDR_ADJACENT.subn(
                    r'\1', '%s %s' % (new_cidrs[-1], cidrs[0]))
                if subs:
                    # merge lhs with rhs.
                    new_cidrs[-1] = new_cidr
                    cidrs.pop(0)
                    finished = False
                else:
                    # lhs contains rhs.
                    (new_cidr, subs) = RE_CIDR_WITHIN.subn(
                        r'\1', '%s %s' % (new_cidrs[-1], cidrs[0]))
                    if subs:
                        # keep lhs, discard rhs.
                        new_cidrs[-1] = new_cidr
                        cidrs.pop(0)
                        finished = False
                    else:
                        # no matches - accept rhs.
                        new_cidrs.append(cidrs.pop(0))
            if finished:
                break
            else:
                # still seeing matches, reset.
                cidrs = new_cidrs
                new_cidrs = []

        if new_cidrs == ['0', '1']:
            # Special case where summary CIDR result is '0.0.0.0/0' or
            # '::/0' i.e. the whole IPv4 or IPv6 address space.
            new_cidrs = ['']

        return new_cidrs

    new_cidrs = []

    def _bits_to_cidr(bits, module):
        if bits == '':
            # Match-all: the whole address space for this IP version.
            # Pass version as a keyword - the second positional argument of
            # IPNetwork is implicit_prefix, not version.
            if module.version == 4:
                return IPNetwork('0.0.0.0/0', version=4)
            else:
                return IPNetwork('::/0', version=6)

        if RE_VALID_CIDR_BITS.match(bits) is None:
            raise ValueError('%r is an invalid bit string!' % bits)

        # Pad the significant network bits out to a full address width.
        num_bits = len(bits)
        bits = bits + '0' * (module.width - num_bits)
        return IPNetwork((module.bits_to_int(bits), num_bits),
            version=module.version)

    # Reduce and format lists of reduced CIDRs.
    for bits in _reduce_bit_cidrs(list(ipv4_bit_cidrs)):
        new_cidrs.append(_bits_to_cidr(bits, _ipv4))

    for bits in _reduce_bit_cidrs(list(ipv6_bit_cidrs)):
        new_cidrs.append(_bits_to_cidr(bits, _ipv6))

    return new_cidrs
#-----------------------------------------------------------------------------
def cidr_exclude(target, exclude):
    """
    Removes an exclude IP address or subnet from target IP subnet.

    :param target: the target IP address or subnet to be divided up.

    :param exclude: the IP address or subnet to be removed from target.

    :return: list of `IPNetwork` objects remaining after exclusion.
    """
    cidrs = []
    target = IPNetwork(target)
    exclude = IPNetwork(exclude)
    if exclude.last < target.first:
        # Exclude subnet's upper bound address less than target
        # subnet's lower bound.
        return [target.cidr]
    elif target.last < exclude.first:
        # Exclude subnet's lower bound address greater than target
        # subnet's upper bound.
        return [target.cidr]
    new_prefixlen = target.prefixlen + 1
    # Repeatedly split the target in half: keep the half that does NOT
    # contain the exclude subnet and descend into the half that does,
    # until the split reaches the exclude subnet's own prefix size.
    if new_prefixlen <= target._module.width:
        i_lower = target.first
        i_upper = target.first + (2 ** (target._module.width - new_prefixlen))
        lower = IPNetwork((i_lower, new_prefixlen))
        upper = IPNetwork((i_upper, new_prefixlen))
        while exclude.prefixlen >= new_prefixlen:
            if exclude in lower:
                matched = i_lower
                unmatched = i_upper
            elif exclude in upper:
                matched = i_upper
                unmatched = i_lower
            else:
                # Exclude subnet not within target subnet.
                cidrs.append(target.cidr)
                break
            # The non-matching half survives intact.
            ip = IPNetwork((unmatched, new_prefixlen))
            cidrs.append(ip)
            new_prefixlen += 1
            if new_prefixlen > target._module.width:
                break
            # Re-split the matching half at the next prefix size down.
            i_lower = matched
            i_upper = matched + (2 ** (target._module.width - new_prefixlen))
            lower = IPNetwork((i_lower, new_prefixlen))
            upper = IPNetwork((i_upper, new_prefixlen))
    cidrs.sort()
    return cidrs
#-----------------------------------------------------------------------------
def spanning_cidr(ip_addrs):
    """
    Function that accepts a sequence of IP addresses and subnets returning
    a single `IPNetwork` subnet that is large enough to span the lower and
    upper bound IP addresses with a possible overlap on either end.

    :param ip_addrs: sequence of IP addresses and subnets.

    :return: a single spanning `IPNetwork` subnet.
    """
    networks = sorted(IPNetwork(ip) for ip in ip_addrs)
    if len(networks) < 2:
        raise ValueError('IP sequence must contain at least 2 elements!')

    lowest = networks[0]
    highest = networks[-1]
    if lowest.version != highest.version:
        raise TypeError('IP sequence cannot contain both IPv4 and IPv6!')

    # Start from the highest subnet and widen the prefix until the lowest
    # subnet falls inside it as well.
    span = highest.cidr
    while span.prefixlen > 0:
        if lowest in span or highest not in span:
            break
        span.prefixlen -= 1

    return span.cidr
#-----------------------------------------------------------------------------
def iter_iprange(start, end, step=1):
    """
    A generator that produces IPAddress objects between an arbitrary start
    and stop IP address with intervals of step between them. Sequences
    produce are inclusive of boundary IPs.

    :param start: start IP address.

    :param end: end IP address.

    :param step: (optional) size of step between IP addresses. Default: 1

    :return: an iterator of one or more `IPAddress` objects.
    """
    start = IPAddress(start)
    end = IPAddress(end)

    if start.version != end.version:
        raise TypeError('start and stop IP versions do not match!')
    version = start.version

    step = int(step)
    if step == 0:
        raise ValueError('step argument cannot be zero')

    # Work with plain integers from here on; both bounds are inclusive.
    current = int(start)
    stop = int(end)

    if step < 0:
        while current >= stop:
            yield IPAddress(current, version)
            current += step
    else:
        while current <= stop:
            yield IPAddress(current, version)
            current += step
#-----------------------------------------------------------------------------
def iprange_to_cidrs(start, end):
    """
    A function that accepts an arbitrary start and end IP address or subnet
    and returns a list of CIDR subnets that fit exactly between the boundaries
    of the two with no overlap.

    :param start: the start IP address or subnet.

    :param end: the end IP address or subnet.

    :return: a list of one or more IP addresses and subnets.
    """
    cidr_list = []
    start = IPNetwork(start)
    end = IPNetwork(end)
    iprange = [start.first, end.last]
    # Get spanning CIDR covering both addresses.
    cidr_span = spanning_cidr([start, end])
    if cidr_span.first == iprange[0] and cidr_span.last == iprange[-1]:
        # Spanning CIDR matches start and end exactly.
        cidr_list = [cidr_span]
    elif cidr_span.last == iprange[-1]:
        # Spanning CIDR matches end exactly.
        # Trim the front: exclude everything below the start address and
        # keep only the remainder subnets from the start address onwards.
        ip = IPAddress(start)
        first_int_val = int(ip)
        ip -= 1
        cidr_remainder = cidr_exclude(cidr_span, ip)
        first_found = False
        for cidr in cidr_remainder:
            if cidr.first == first_int_val:
                first_found = True
            if first_found:
                cidr_list.append(cidr)
    elif cidr_span.first == iprange[0]:
        # Spanning CIDR matches start exactly.
        # Trim the back: exclude everything above the end address and keep
        # subnets up to and including the one ending at the end address.
        ip = IPAddress(end)
        last_int_val = int(ip)
        ip += 1
        cidr_remainder = cidr_exclude(cidr_span, ip)
        last_found = False
        for cidr in cidr_remainder:
            cidr_list.append(cidr)
            if cidr.last == last_int_val:
                break
    elif cidr_span.first <= iprange[0] and cidr_span.last >= iprange[-1]:
        # Spanning CIDR overlaps start and end: trim both sides.
        ip = IPAddress(start)
        first_int_val = int(ip)
        ip -= 1
        cidr_remainder = cidr_exclude(cidr_span, ip)
        # Fix start.
        first_found = False
        for cidr in cidr_remainder:
            if cidr.first == first_int_val:
                first_found = True
            if first_found:
                cidr_list.append(cidr)
        # Fix end.
        ip = IPAddress(end)
        last_int_val = int(ip)
        ip += 1
        cidr_remainder = cidr_exclude(cidr_list.pop(), ip)
        # NOTE(review): last_found is assigned but never read in this
        # function (in both branches that declare it).
        last_found = False
        for cidr in cidr_remainder:
            cidr_list.append(cidr)
            if cidr.last == last_int_val:
                break
    return cidr_list
#-----------------------------------------------------------------------------
def smallest_matching_cidr(ip, cidrs):
    """
    Matches an IP address or subnet against a given sequence of IP addresses
    and subnets.

    :param ip: a single IP address or subnet.

    :param cidrs: a sequence of IP addresses and/or subnets.

    :return: the smallest (most specific) matching IPAddress or IPNetwork
        object from the provided sequence, None if there was no match.
    """
    if not hasattr(cidrs, '__iter__'):
        raise TypeError('IP address/subnet sequence expected, not %r!'
            % cidrs)

    ip = IPAddress(ip)
    match = None

    # Sorted order places more specific (smaller) subnets after the larger
    # ones that contain them, so the last hit is the most specific match.
    for candidate in sorted(IPNetwork(cidr) for cidr in cidrs):
        if ip in candidate:
            match = candidate
        elif match is not None:
            # Past the run of matching subnets - no better match can follow.
            break

    return match
#-----------------------------------------------------------------------------
def largest_matching_cidr(ip, cidrs):
    """
    Matches an IP address or subnet against a given sequence of IP addresses
    and subnets.

    :param ip: a single IP address or subnet.

    :param cidrs: a sequence of IP addresses and/or subnets.

    :return: the largest (least specific) matching IPAddress or IPNetwork
        object from the provided sequence, None if there was no match.
    """
    if not hasattr(cidrs, '__iter__'):
        raise TypeError('IP address/subnet sequence expected, not %r!'
            % cidrs)

    ip = IPAddress(ip)

    # Sorted order puts the least specific (largest) subnets first, so the
    # first hit is the largest match.
    candidates = sorted(IPNetwork(cidr) for cidr in cidrs)
    return next((cidr for cidr in candidates if ip in cidr), None)
#-----------------------------------------------------------------------------
def all_matching_cidrs(ip, cidrs):
    """
    Matches an IP address or subnet against a given sequence of IP addresses
    and subnets.
    :param ip: a single IP address.
    :param cidrs: a sequence of IP addresses and/or subnets.
    :return: all matching IPAddress and/or IPNetwork objects from the provided
        sequence, an empty list if there was no match.
    """
    if not hasattr(cidrs, '__iter__'):
        raise TypeError('IP address/subnet sequence expected, not %r!'
            % cidrs)
    address = IPAddress(ip)
    hits = []
    for candidate in sorted(IPNetwork(entry) for entry in cidrs):
        if address in candidate:
            hits.append(candidate)
        elif hits:
            # Matches are contiguous in sorted order; stop at the first miss
            # after at least one hit.
            break
    return hits
#-----------------------------------------------------------------------------
# Cached IPv4 address range lookups.
#-----------------------------------------------------------------------------
# Loopback block (127/8).
IPV4_LOOPBACK = IPNetwork('127.0.0.0/8')
# Ranges this module classifies as "private".
# NOTE(review): 192.0.2.0/24 (Test-Net) and 239/8 (administrative multicast)
# are grouped here although they are not RFC 1918 private-use blocks --
# confirm this matches the intended semantics of callers using IPV4_PRIVATE.
IPV4_PRIVATE = (
    IPNetwork('10.0.0.0/8'),    #   Private-Use Networks
    IPNetwork('172.16.0.0/12'),     #   Private-Use Networks
    IPNetwork('192.0.2.0/24'),      #   Test-Net
    IPNetwork('192.168.0.0/16'),    #   Private-Use Networks
    IPRange('239.0.0.0', '239.255.255.255'),    #   Administrative Multicast
)
# Link-local autoconfiguration range (169.254/16).
IPV4_LINK_LOCAL = IPNetwork('169.254.0.0/16')
# Whole IPv4 multicast class (224/4).
IPV4_MULTICAST = IPNetwork('224.0.0.0/4')
IPV4_6TO4 = IPNetwork('192.88.99.0/24') #   6to4 Relay Anycast
# Assorted reserved ranges (allocation-pending blocks, future use, and the
# multicast sub-ranges not covered by IPV4_PRIVATE above).
IPV4_RESERVED = (
    IPNetwork('128.0.0.0/16'),      #   Reserved but subject to allocation
    IPNetwork('191.255.0.0/16'),    #   Reserved but subject to allocation
    IPNetwork('192.0.0.0/24'),      #   Reserved but subject to allocation
    IPNetwork('223.255.255.0/24'),  #   Reserved but subject to allocation
    IPNetwork('240.0.0.0/4'),       #   Reserved for Future Use
    #   Reserved multicast
    IPRange('234.0.0.0', '238.255.255.255'),
    IPRange('225.0.0.0', '231.255.255.255'),
)
#-----------------------------------------------------------------------------
#   Cached IPv6 address range lookups.
#-----------------------------------------------------------------------------
# Single loopback address (not a network) on IPv6.
IPV6_LOOPBACK = IPAddress('::1')
IPV6_PRIVATE = (
    IPNetwork('fc00::/7'),  #   Unique Local Addresses (ULA)
    IPNetwork('fec0::/10'), #   Site Local Addresses (deprecated - RFC 3879)
)
# Link-local unicast prefix.
IPV6_LINK_LOCAL = IPNetwork('fe80::/10')
# Whole IPv6 multicast prefix.
IPV6_MULTICAST = IPNetwork('ff00::/8')
# Prefixes not currently assigned for global unicast use.
IPV6_RESERVED = (
    IPNetwork('ff00::/12'), IPNetwork('::/8'),
    IPNetwork('0100::/8'), IPNetwork('0200::/7'),
    IPNetwork('0400::/6'), IPNetwork('0800::/5'),
    IPNetwork('1000::/4'), IPNetwork('4000::/3'),
    IPNetwork('6000::/3'), IPNetwork('8000::/3'),
    IPNetwork('A000::/3'), IPNetwork('C000::/3'),
    IPNetwork('E000::/4'), IPNetwork('F000::/5'),
    IPNetwork('F800::/6'), IPNetwork('FE00::/9'),
)
| ecolitan/fatics | venv/lib/python2.7/site-packages/netaddr/ip/__init__.py | Python | agpl-3.0 | 66,411 | 0.001837 |
import datetime
from django.conf import settings
from django.utils import timezone
from account.models import SignupCode
from waitinglist.models import WaitingListEntry
# NOTE(review): AUTH_USER_MODEL is a dotted-path *string* (e.g. "auth.User"),
# not a model class, so it cannot be used for ORM queries directly.  The name
# is kept for backward compatibility with any importers of this module.
User = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')


def stats():
    """Return aggregate metrics about the waiting list.

    Returns a dict with:
      - total number of entries,
      - entries created in the last 7 and last 30 days,
      - entries still to invite (no signup code issued and no existing
        user account with the same email).
    """
    # BUGFIX: the module-level ``User`` above is a string, so
    # ``User.objects`` would raise AttributeError.  Resolve the actual
    # model class lazily so the Django app registry is ready.
    from django.contrib.auth import get_user_model

    user_model = get_user_model()
    # Capture one instant so both time windows are computed consistently.
    now = timezone.now()
    waiting_list = WaitingListEntry.objects
    return {
        "waiting_list_entries": waiting_list.count(),
        "waitinglist_added_last_seven_days":
            waiting_list.filter(created__gt=now - datetime.timedelta(days=7)).count(),
        "waitinglist_added_last_thirty_days":
            waiting_list.filter(created__gt=now - datetime.timedelta(days=30)).count(),
        "waiting_list_entries_to_invite":
            waiting_list.exclude(email__in=SignupCode.objects.values("email"))
            .exclude(email__in=user_model.objects.values("email")).count()
    }
| pinax/django-waitinglist | waitinglist/stats.py | Python | mit | 842 | 0.002375 |
#!/usr/bin/env python
"""Filter LightDock final swarm results depending on the compatibility with the membrane"""
from __future__ import print_function
import sys
import os
import argparse
import shutil
import re
from prody.measure.contacts import Contacts
from prody import parsePDB, confProDy
from lightdock.util.logger import LoggingManager
from lightdock.util.analysis import read_ranking_file
from lightdock.pdbutil.PDBIO import parse_complex_from_file
from lightdock.structure.complex import Complex
# Reduce ProDy console output.
# NOTE(review): the original comment said "Disable ProDy output" but the
# verbosity level is set to 'info', which still emits messages -- confirm
# the intended level.
confProDy(verbosity='info')
# Directory where structures that pass the membrane filter are copied.
filtered_folder = 'filtered'
# Module-level logger for this command-line tool.
log = LoggingManager.get_logger('lgd_filter_membrane')
def get_structures(ranking, base_path='.'):
    """Map ranking entries to the paths of their predicted structure files.

    Each entry yields ``<base_path>/swarm_<id_cluster>/lightdock_<id_glowworm>.pdb``.

    :param ranking: iterable of ranking entries exposing ``id_cluster`` and
        ``id_glowworm`` attributes.
    :param base_path: directory containing the ``swarm_*`` folders.
    :return: list of structure file paths, in ranking order.
    """
    return [os.path.join(base_path,
                         'swarm_{}'.format(entry.id_cluster),
                         'lightdock_{}.pdb'.format(entry.id_glowworm))
            for entry in ranking]
def get_restraints(restraints_file):
    """Parse a LightDock restraints file.

    Lines starting with 'R' add a receptor restraint and lines starting with
    'L' a ligand restraint; the restraint identifier is the last
    whitespace-separated token of the line.  Blank lines are skipped.

    :param restraints_file: path to the restraints file.
    :return: tuple ``(receptor_restraints, ligand_restraints)`` of two sets.
    """
    receptor_set = set()
    ligand_set = set()
    with open(restraints_file) as source:
        for raw_line in source:
            entry = raw_line.rstrip(os.linesep)
            if not entry:
                continue
            if entry.startswith('R'):
                receptor_set.add(entry.split()[-1])
            if entry.startswith('L'):
                ligand_set.add(entry.split()[-1])
    return receptor_set, ligand_set
def calculate_membrane_height(parsed_receptor_file, restraints):
    """Estimate the membrane plane height from receptor restraints.

    :param parsed_receptor_file: receptor PDB file already parsed by LightDock.
    :param restraints: iterable of restraint identifiers formatted as
        ``chain.residue_name.residue_number``.
    :return: the minimum Z coordinate among the C-alpha atoms of the
        restraint residues.
    """
    atoms, residues, chains = parse_complex_from_file(parsed_receptor_file)
    receptor = Complex(chains, atoms)
    heights = []
    for restraint in restraints:
        chain_id, residue_name, residue_number = restraint.split(".")
        residue = receptor.get_residue(chain_id, residue_name, residue_number)
        heights.append(residue.get_calpha().z)
    return min(heights)
def parse_command_line():
    """Define and evaluate this script's command-line arguments."""
    cli = argparse.ArgumentParser(prog='lgd_filter_restraints')
    # Positional arguments, in the order they must appear on the command line.
    for arg_name, description in (
            ("ranking_file", "Path of ranking to be used"),
            ("restraints_file", "File including restraints"),
            ("parsed_receptor_file", "Receptor PDB parsed by LightDock"),
            ("receptor_chains", "Chains on the receptor partner"),
            ("ligand_chains", "Chains on the receptor partner"),
    ):
        cli.add_argument(arg_name, help=description, metavar=arg_name)
    cli.add_argument("--cutoff", "-cutoff", "-c", help="Interaction cutoff",
                     dest="cutoff", type=float, default=1.0)
    return cli.parse_args()
if __name__ == '__main__':
    # Parse command line
    args = parse_command_line()
    log.info("Cutoff for membrane is {:3.1f}A".format(args.cutoff))

    # Get ranking
    ranking = read_ranking_file(args.ranking_file)

    # Get all the PDB structures in a given directory
    base_path = os.path.abspath(os.path.dirname(args.ranking_file))
    structures = get_structures(ranking, base_path)

    restraints_receptor, restraints_ligand = get_restraints(args.restraints_file)
    membrane_height_z = calculate_membrane_height(args.parsed_receptor_file,
                                                  restraints_receptor)

    if os.path.exists(filtered_folder):
        raise SystemExit("Folder {} already exists".format(filtered_folder))
    os.makedirs(filtered_folder)

    filter_passed = {}
    percentages = {}
    for pdb_file in structures:
        try:
            swarm_id = int(re.findall(r'swarm_\d+', pdb_file)[0].split('_')[-1])
            glowworm_id = int(re.findall(r'lightdock_\d+', pdb_file)[0].split('_')[-1])

            # Read molecule and select the ligand C-alpha atoms
            molecule = parsePDB(pdb_file)
            ca_ligand = molecule.select('protein and chain {} and calpha'.format(args.ligand_chains))

            # Fraction of ligand C-alphas at or above the membrane plane
            out = 0
            for ca in ca_ligand:
                coord = ca.getCoords()
                if coord[-1] >= membrane_height_z:
                    out += 1
            perc = out / float(len(ca_ligand))

            if perc >= args.cutoff:
                percentages[(swarm_id, glowworm_id)] = perc
                shutil.copyfile(pdb_file,
                                os.path.join(filtered_folder,
                                             'swarm_{}_{}.pdb'.format(swarm_id, glowworm_id)))
                # BUGFIX: replaced a try/except with a *bare* "except:" (which
                # would swallow any error, including KeyboardInterrupt) with
                # the idiomatic, exception-free setdefault().
                filter_passed.setdefault(swarm_id, []).append(glowworm_id)
                print("{:40s} {:5.3f}".format(pdb_file, perc))
        # BUGFIX: "except Exception, e" is Python-2-only syntax; the "as"
        # form below works on Python 2.6+ and Python 3 (this file already
        # imports print_function for forward compatibility).
        except Exception as e:
            log.error('Filtering has failed for structure {}. Please see error:'.format(pdb_file))
            log.error(str(e))

    # Write a ranking restricted to the structures that passed the filter,
    # annotated with their membrane-compatibility percentage.
    filtered_ranking = os.path.join(filtered_folder, 'rank_filtered.list')
    with open(filtered_ranking, 'w') as handle:
        for rank in ranking:
            if rank.id_cluster in filter_passed and rank.id_glowworm in filter_passed[rank.id_cluster]:
                handle.write('swarm_{}_{}.pdb {:5.3f} {:5.3f}'.format(rank.id_cluster,
                    rank.id_glowworm, rank.scoring,
                    percentages[(rank.id_cluster, rank.id_glowworm)]) + os.linesep)
| brianjimenez/lightdock | bin/post/lgd_filter_membrane.py | Python | gpl-3.0 | 5,399 | 0.00463 |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from elasticsearch_dsl import Q
def ExistsMatch(field_name):
    """Build an Elasticsearch DSL query matching documents where
    ``field_name`` is present."""
    query = Q('exists', field=field_name)
    return query
| gdestuynder/MozDef | mozdef_util/mozdef_util/query_models/exists_match.py | Python | mpl-2.0 | 369 | 0 |
# coding=utf-8
from pages.models import Page
def nav_pages(request):
    """Context processor exposing public, navigation-flagged pages under
    the ``nav_pages`` template variable."""
    visible_pages = Page.objects.filter(public=True, in_navigation=True)
    return {'nav_pages': visible_pages}
| wonderbeyond/ezlog | pages/context_processors.py | Python | bsd-2-clause | 150 | 0.013333 |
# Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import codecs
from specs.utils import a_category_with
from specs.utils import TmpDirTestCase
from timelinelib.calendar.gregorian import Gregorian
from timelinelib.db.exceptions import TimelineIOError
from timelinelib.db import db_open
from timelinelib.drawing.viewproperties import ViewProperties
import wx
# Fixture: raw content of a legacy Timeline 0.1.0 (line-based, pre-XML) file.
# Used to verify that db_open() rejects this old format (see
# test_raises_error_when_reading_non_xml_file below).
CONTENT_010 = u"""
# Written by Timeline 0.1.0 on 2009-11-15 19:28:7
PREFERRED-PERIOD:2009-10-17 22:38:32;2009-12-2 16:22:4
CATEGORY:Category 1;188,129,224;True
CATEGORY:Category 2;255,165,0;True
CATEGORY:Category 3;173,216,230;False
EVENT:2009-11-4 22:52:0;2009-11-11 22:52:0;Event 1;Category 1
""".strip()
# Fixture: raw content of a Timeline 0.10.0 XML file covering nested
# categories, a single event, a displayed period and a hidden category.
CONTENT_0100 = u"""
<?xml version="1.0" encoding="utf-8"?>
<timeline>
<version>0.10.0</version>
<categories>
<category>
<name>Category 1</name>
<color>188,129,224</color>
</category>
<category>
<name>Category 2</name>
<color>255,165,0</color>
<parent>Category 1</parent>
</category>
<category>
<name>Category 3</name>
<color>173,216,230</color>
<parent>Category 2</parent>
</category>
</categories>
<events>
<event>
<start>2009-11-4 22:52:0</start>
<end>2009-11-11 22:52:0</end>
<text>Event 1</text>
<category>Category 1</category>
<description>The first event.</description>
</event>
</events>
<view>
<displayed_period>
<start>2009-10-17 22:38:32</start>
<end>2009-12-2 16:22:4</end>
</displayed_period>
<hidden_categories>
<name>Category 3</name>
</hidden_categories>
</view>
</timeline>
""".strip()
class DbOpenSpec(TmpDirTestCase):
    """Specification of db_open(): rejecting legacy 0.1.0 files, reading
    0.10.0 XML files, and creating new timeline files."""

    def test_raises_error_when_reading_non_xml_file(self):
        self.writeContentToTmpFile(CONTENT_010)
        try:
            db_open(self.tmp_path)
            # BUGFIX: previously the test silently *passed* when db_open did
            # not raise at all; fail explicitly in that case.
            self.fail("TimelineIOError was not raised")
        # BUGFIX: "except TimelineIOError, e" is Python-2-only syntax; the
        # "as" form works on Python 2.6+ and Python 3.
        except TimelineIOError as e:
            self.assertTrue("old file with a new version" in str(e))

    def testRead0100File(self):
        self.writeContentToTmpFile(CONTENT_0100)
        db = db_open(self.tmp_path)
        # Assert event correctly loaded
        events = db.get_all_events()
        self.assertEqual(len(events), 1)
        event = events[0]
        self.assertTrue(event.has_id())
        self.assertEqual(event.get_text(), "Event 1")
        self.assertEqual(event.get_time_period().start_time,
                         Gregorian(2009, 11, 4, 22, 52, 0).to_time())
        self.assertEqual(event.get_time_period().end_time,
                         Gregorian(2009, 11, 11, 22, 52, 0).to_time())
        self.assertEqual(event.get_category().get_name(), "Category 1")
        self.assertEqual(event.get_data("description"), "The first event.")
        self.assertEqual(event.get_data("icon"), None)
        # Assert that correct view properties are loaded (category visibility
        # checked later)
        vp = ViewProperties()
        db.load_view_properties(vp)
        self.assertEqual(vp.displayed_period.start_time,
                         Gregorian(2009, 10, 17, 22, 38, 32).to_time())
        self.assertEqual(vp.displayed_period.end_time,
                         Gregorian(2009, 12, 2, 16, 22, 4).to_time())
        # Assert categories correctly loaded
        categories = db.get_categories()
        self.assertEqual(len(categories), 3)
        for cat in categories:
            self.assertTrue(cat.has_id())
            if cat.get_name() == "Category 1":
                self.assertEqual(cat.get_color(), (188, 129, 224))
                self.assertTrue(vp.is_category_visible(cat))
                self.assertEqual(cat.get_parent(), None)
            elif cat.get_name() == "Category 2":
                self.assertEqual(cat.get_color(), (255, 165, 0))
                self.assertTrue(vp.is_category_visible(cat))
                self.assertEqual(cat.get_parent().get_name(), "Category 1")
            elif cat.get_name() == "Category 3":
                self.assertEqual(cat.get_color(), (173, 216, 230))
                self.assertFalse(vp.is_category_visible(cat))
                self.assertEqual(cat.get_parent().get_name(), "Category 2")
            else:
                self.fail("Unknown category.")

    def test_creates_new_xml_file(self):
        new_db = db_open(self.tmp_path)
        new_db.save_category(a_category_with(name="work"))
        re_read_db = db_open(self.tmp_path)
        self.assertEqual(len(re_read_db.get_categories()), 1)
        self.assertEqual(re_read_db.get_categories()[0].get_name(), "work")

    def setUp(self):
        TmpDirTestCase.setUp(self)
        self.tmp_path = self.get_tmp_path("test.timeline")

    def writeContentToTmpFile(self, content):
        # codecs.open guarantees the fixture is written as UTF-8 on Python 2.
        f = codecs.open(self.tmp_path, "w", "utf-8")
        f.write(content)
        f.close()
| linostar/timeline-clone | test/specs/db/DbOpen.py | Python | gpl-3.0 | 5,595 | 0.000357 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sun Feb 8 12:30:31 2015
# by: The Resource Compiler for PyQt (Qt v4.8.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x0f\x0a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x06\x00\x00\x00\x5c\x72\xa8\x66\
\x00\x00\x0e\xd1\x49\x44\x41\x54\x78\xda\xed\x9d\x0b\xb0\x55\x55\
\x19\xc7\xe1\xf2\x94\xfb\x38\x7b\x9f\x0b\x5c\x10\x14\x49\x66\xa4\
\x10\x19\x51\x92\x4c\xc0\x88\x14\xe3\x21\xc8\x63\x50\x98\x72\x84\
\x51\xb2\x04\x72\x18\x04\x04\x1d\x42\x12\xd3\x42\x2d\x15\x0d\xb2\
\xc1\x34\x2d\x04\x95\x28\xdf\x51\x34\xc5\x14\x64\x8c\xa2\xa8\x40\
\x11\x84\x88\x10\xa9\xa8\x14\x0f\xfb\x16\x9c\x0b\x2a\x08\xf7\xdc\
\xbb\xd7\xb7\xd7\x5a\xfb\xf7\x9b\xf9\xa6\xa9\xb8\xf7\xee\xf5\xad\
\xff\xff\x7f\xf6\xde\x67\xef\xb5\xea\xd5\x03\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9f\x89\xe3\x38\x27\x9c\x15\
\x45\xd1\xc0\x5c\x1c\x8f\x8d\xa2\xfc\x4c\xf9\xcf\x7b\xa5\x66\x57\
\x44\xd1\x35\xb9\x5c\x7e\x48\x79\x3e\xff\x85\xd2\x16\x2d\x5a\xd1\
\x2d\x80\x30\x4c\x7f\xf2\x01\x73\x47\xf9\x67\x73\x51\xbc\x47\xea\
\xc3\x1a\xd6\x2a\xf9\xb9\xe9\xf2\xf3\x9d\xe9\x22\x80\x67\xc8\xa7\
\xf9\x50\x31\xfd\xca\x22\x0c\x7f\xac\x5a\x2f\x61\x30\x4e\x7e\x6d\
\x63\x3a\x0b\xe0\x30\x72\x7a\xdf\x53\x4e\xe9\x57\x24\x64\xfc\x23\
\x83\x20\x8e\x47\xc8\x9f\xa9\x4f\xa7\xc1\xdd\x4f\xbf\x28\x3e\x45\
\xea\x12\xa9\x91\x52\x9f\x95\x2a\x09\x7d\xcc\xe6\xba\x3d\x8a\xe2\
\x25\x96\x8c\xff\x89\xca\xaf\x94\xa0\xe9\x12\xa8\x76\x4a\xa4\x3a\
\x4a\x5d\x2a\x35\x44\xaa\x3d\x8e\xf2\x63\xe2\xba\x49\x3d\x29\xb5\
\xe3\x28\xa2\xdd\x25\xb5\x5c\xaa\x7f\x98\xa7\xfb\xb9\xb3\x65\x6c\
\x9b\x75\xcc\xff\x91\x9e\xca\x65\x46\x40\xfa\xe9\x5f\xd0\xc8\xbb\
\x47\x19\xeb\x4e\xa9\x67\xa4\xba\xe3\x34\xf7\x26\xae\xa9\xd4\x2d\
\x52\x7b\x6b\x28\xdc\x07\xa4\xf2\xa1\x8c\x5f\x4e\xc9\x47\xe6\x72\
\xf1\x07\xca\xe6\xaf\xae\xfd\xf2\x4d\xc2\x0c\x9f\x2f\x09\x8c\x16\
\x0a\x9a\xa8\xc9\x78\xf7\x49\xdd\x66\x34\x87\xf3\xdc\x98\xbc\x53\
\xa5\xd6\xd6\x42\xb8\x5b\xa5\xfa\x78\x3f\xfe\x38\xbe\x3e\x25\xe3\
\x7f\xbc\xe2\xf8\x11\x39\x9c\x12\x0f\xf5\xd3\xa7\xa0\x85\x62\xc7\
\xfc\xaa\xd1\x1e\x0e\x4c\xff\x5a\x6d\x79\x1d\x84\xbb\x5b\xaa\xaf\
\xc7\x9f\xfc\x23\x9c\x30\xff\xe1\x10\x98\xed\x99\x7e\xfa\x16\x34\
\x50\xdb\x31\x2f\xcf\xc2\xbd\x25\x97\x27\x70\x7c\x02\xc2\xf5\x32\
\x04\x0a\xd7\xfc\xef\x3b\x15\x00\x52\xe6\x72\x24\x23\xe6\xaf\xae\
\x09\x38\x31\x9d\x09\xec\x20\xf5\x5e\x42\xc2\xf5\x2a\x04\x9a\x35\
\x6f\xde\x3a\x85\x1b\x7e\x35\x2b\xb9\x17\x51\x51\x51\xf9\xf9\x8c\
\x98\xff\xc3\x82\x06\x3b\xe0\x48\xfd\x49\x9c\x9e\xb0\x78\xbd\x09\
\x01\xf9\x0a\xee\x51\x27\xcd\x7f\xa8\xa2\x57\xe5\x30\x1b\x66\xc0\
\xfc\xd5\x35\x1d\x47\xea\x4f\xe4\x62\x0b\xe2\x75\x3e\x04\x2a\xf2\
\xf9\xee\x6e\x9b\xff\xd0\xfd\x80\xb1\x19\x31\xbf\xa9\xc5\x38\x52\
\x7f\x32\x37\x5a\x12\xaf\xd3\x21\x50\x11\xc5\xbf\xf7\x22\x00\x72\
\xf1\x1b\x55\x55\x55\xa5\x19\x30\xbf\xa9\x8d\x38\x52\x77\x32\x23\
\xcb\x02\x76\x32\x04\xe4\xa5\x9c\x01\x5e\x98\xbf\xfa\x86\xa0\xbc\
\x48\x94\x01\xf3\x57\x57\x84\x33\xf5\x26\xb4\x81\xc2\x1d\x70\xe7\
\x42\x40\x8e\x67\x99\x4f\x01\x20\xb5\x5d\x0e\xbb\x41\x06\xcc\x6f\
\xb4\xd8\x00\x67\xea\x4e\xec\x0a\x05\x01\x3b\x13\x02\xe5\xe5\xe5\
\x95\x45\x3c\xe9\xe8\x4c\xc9\xfb\x02\xbd\x02\x37\xbf\xa9\x15\x38\
\x52\x7f\x72\xef\x56\x12\xb1\x13\x21\x20\x46\xfa\xba\x6f\xe6\x3f\
\x78\x19\x90\x9f\x13\xb8\xf9\x4d\xdd\x8d\x23\xf5\x27\xf8\x32\x45\
\x21\xa7\x1e\x02\xf2\xd5\xdf\x22\x1f\x03\x40\x6a\x43\xe0\xe6\x37\
\x75\x19\x8e\xd4\x9f\xe4\xba\x3e\x06\xec\x53\x08\x34\x4a\xf0\xa1\
\x27\xf5\x2a\xcb\xe7\x3f\x17\xb0\xf9\x79\x1c\x38\xc5\x10\xe8\xa0\
\x6c\x8c\x54\x42\xc0\x2c\xe9\xe5\xab\xf9\x0b\xf7\x01\x06\x06\x6a\
\x7e\x9e\x02\x74\x20\x04\xc6\x2b\x0b\x5a\x3d\x04\xcc\x22\x9d\x3e\
\x07\x80\x3c\x14\xf4\x8d\x00\xcd\x6f\x6a\x3c\x0e\x4c\x3f\x00\xea\
\x2b\xde\x10\x4c\x25\x04\xcc\x4a\xbd\x7e\x9f\x01\xe4\x67\x06\x68\
\x7e\xa3\x39\x96\x45\x73\x28\x04\xee\x09\x35\x04\x0e\xae\xe8\xeb\
\xf1\x19\x40\x14\xcd\xc7\xfc\x40\x08\xd4\xfa\x2b\xc0\xfc\x0c\x9f\
\x03\x40\x1e\x5f\x7e\x1c\xf3\x03\x21\xc0\x19\x00\xe6\x07\x42\x80\
\x7b\x00\x98\x1f\x08\x81\x9a\x9f\x01\xf8\xf2\x0a\xb0\xc2\xab\xc1\
\x98\x1f\x32\x17\x02\x3c\x07\x80\xf9\x21\xdb\x21\xd0\xf0\x53\xd6\
\xaa\xf7\xa2\xca\x2b\x2b\x3b\x62\x7e\x48\x33\x04\xe6\xfa\x1e\x02\
\x72\x23\x6d\xa1\xa7\x01\xb0\x0e\xf3\x03\x21\x50\xd7\xfb\x00\x71\
\x3c\xca\xd3\x6f\x00\x6e\xc3\xfc\x40\x08\xd4\xf9\x9b\x80\x5c\x5c\
\xe4\xd6\xde\x4e\x94\xdc\xbf\xe8\x81\xf9\x81\x10\x48\xe4\xf8\xf3\
\xcf\x79\x16\x00\xdb\xea\xd5\x72\xb7\x20\xcc\x0f\x84\xc0\x91\x97\
\x01\x17\x79\xf6\xf5\xdf\x14\xcc\x0f\x84\x40\x82\x21\x20\xbf\xe3\
\x79\x4f\x02\x60\x73\xdb\xb6\x6d\x4f\xc0\xfc\x40\x08\x24\x18\x02\
\x85\x2d\xc1\xf6\xbb\xbf\x2c\x78\x7e\x34\xe6\x07\x42\xc0\x46\x08\
\xc8\x4e\xbc\x6e\x3f\xf8\x13\xaf\xa9\x57\xe4\x6a\xc0\x98\x1f\xd2\
\x0c\x81\x7b\x7d\x0a\x81\xb2\xb2\xb2\x96\x16\x37\x46\xa9\xfb\x2a\
\x39\xb9\x5c\x57\xcc\x0f\x84\x80\xc5\x10\x90\xc7\x6b\xbb\xc8\xcf\
\xef\x72\xcc\xfc\xfb\xe5\xd4\x7f\x18\xe6\x07\x42\x40\x21\x04\x0a\
\x6f\x09\xee\x77\xe7\xd4\x3f\x3f\x03\xf3\x03\x21\xa0\x1b\x02\x13\
\x5d\x08\x01\xb9\xee\x5f\x20\x87\x53\x1f\xf3\x03\x21\xa0\x1f\x02\
\xc3\x52\x5c\x36\x7c\xbf\x2c\x58\x32\x8d\x4f\x7e\x20\x04\x52\x0d\
\x81\x5c\x57\xf9\xf9\x4d\xca\xc7\xbc\x4b\xee\x45\x0c\xc6\xfc\x40\
\x08\x38\x10\x02\xa5\x2d\x5a\xb4\x92\x53\xf1\x25\x3a\xc7\x9a\x5f\
\x69\x6e\x44\x62\x7e\x08\x3d\x04\xee\xf3\xed\x39\x01\x31\x66\x4f\
\x79\x56\xc0\xd6\xe6\xa9\xeb\xe5\x91\xe4\x11\xc5\x5c\xef\x63\x7e\
\x20\x04\xd2\x78\x6c\x38\x97\x1f\x6a\x3e\xa9\x13\x33\x7e\x14\x8d\
\x93\x5f\xdb\xb8\x16\x3d\xc4\xfc\x40\x08\xa4\xf5\x16\xa1\x59\x52\
\xec\xe0\xca\xc2\xf9\x67\x8b\x7c\xa5\x78\x95\xfc\xdc\x74\xf9\xf9\
\xce\x75\xe8\x1d\xe6\x07\x42\xc0\x95\x95\x85\xc4\xcc\x39\x73\xc3\
\x50\x4e\xe3\xfb\xc9\x19\xc2\x18\x73\x07\x5f\xbe\xbf\xbf\xe3\xc0\
\xde\x03\xb2\x85\x97\x5c\x3e\x0c\xaa\xa8\xa8\x3c\xc7\xdc\x4f\x48\
\xa0\x67\x98\x1f\x08\x01\xdf\xb7\x26\xc7\xfc\x00\x84\x00\xe6\x07\
\x20\x04\x30\x3f\x80\xdf\xdf\x0e\x44\x71\x0b\x29\xb3\xe5\xf8\x20\
\xa9\xab\xa4\x6e\x90\xfa\x9e\xd4\x75\x52\x63\x0a\xff\xfb\x69\xc5\
\x9a\x0a\xf3\x43\x16\x43\xe0\xc7\xae\x87\x80\xfc\x7b\xb3\xaf\x40\
\x0f\xa9\x59\xe6\xce\x7e\x11\xef\x0e\xec\x90\x5a\x2a\x35\x49\xaa\
\x2d\xe6\x07\xf0\x28\x04\xe4\xdf\x54\x48\x4d\x91\x7a\x33\x81\xbf\
\xb9\x4f\xea\x49\xa9\xe1\x52\x25\x98\x1f\xc0\xd1\x10\x90\xff\xbd\
\x52\x6a\xa6\xd4\x4e\x4b\x7f\xfb\x6f\x52\xbd\x31\x3f\x80\x63\x21\
\x20\xff\xfd\xa2\x84\x3e\xf1\x6b\x52\x4f\x63\x7e\x00\x07\x42\x40\
\xaa\x89\xd4\x1c\x2f\x16\x0c\xc5\xfc\x40\x08\x24\x1e\x02\x6b\x02\
\x36\x3e\xe6\x07\x42\x20\xc3\x85\xf9\xc1\xcb\x10\x98\x87\x79\x31\
\x3f\x10\x02\x18\x19\xf3\x03\x21\x40\x61\x7e\x20\x04\x28\xcc\x0f\
\x84\x00\x85\xf9\x81\x10\xa0\x30\x3f\x10\x02\x98\x1f\xf3\x03\x21\
\x80\xf9\x01\x32\x13\x02\xf3\x31\x3f\xe6\x07\x42\x00\xf3\x03\x10\
\x02\x98\x1f\x80\x10\xc0\xfc\x00\x84\x00\xe6\x07\x20\x04\x30\x3f\
\x00\x21\xe0\x7d\x6d\xc2\xfc\x00\x35\x0f\x81\x1f\x05\x16\x00\xdb\
\x99\x59\x80\xe2\x82\xa0\x9f\xd4\x96\x40\x02\x60\x0f\x33\x0a\x50\
\x7c\x08\xc4\x52\xbf\x0d\x24\x04\x9a\x30\xa3\x00\xc5\x87\xc0\xec\
\x40\x02\xa0\x29\xb3\x09\x40\x00\x00\x00\x01\x00\x00\x04\x00\x00\
\x10\x00\x00\x40\x00\x00\x00\x01\x00\x00\x04\x00\x00\x01\x40\x00\
\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\
\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\
\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\
\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\
\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\
\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\
\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\
\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\
\x00\x00\x40\x00\x00\x00\x01\x00\x00\x04\x00\x00\x10\x00\x00\x40\
\x00\x00\x00\x01\x00\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\
\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\
\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x00\x01\x40\x00\
\x00\x10\x00\x04\x00\x00\x01\x40\x00\x00\x10\x00\x04\x00\x80\x5f\
\x01\x70\x53\x20\x01\xd0\x98\xd9\x04\x28\x3e\x00\xae\x0e\xc0\xfc\
\xfb\x98\x49\x80\xda\x05\x40\xbf\x00\x02\xe0\x3d\x66\x12\xa0\x76\
\x01\xd0\x29\x90\x4b\x80\x1e\xcc\x26\x40\xf1\x01\x50\x16\x48\x00\
\x2c\x62\x36\x01\x6a\x17\x02\xdb\x03\x08\x80\xbd\x52\x2d\x99\x4d\
\x80\xe2\xcc\xdf\x4c\xea\x9d\x40\xce\x02\x46\x33\xa3\x00\xc5\x05\
\xc0\x15\x81\x98\xdf\xd4\x12\x66\x14\xa0\xb8\x00\x58\x11\x50\x00\
\x7c\x20\x55\xca\xac\x02\xd4\xcc\xfc\x5d\x02\x32\x7f\x75\x5d\xc2\
\xcc\x02\xd4\x2c\x00\xee\x0e\x30\x00\x16\x30\xb3\x00\xc7\x37\x7f\
\xa9\xd4\xdb\x01\x06\xc0\x0e\xa9\x86\xcc\x30\xc0\xb1\x03\x60\x4c\
\x80\xe6\xaf\xae\xde\xcc\x30\xc0\xb1\x03\xe0\x2f\x01\x07\xc0\x1d\
\xcc\x30\xc0\xa7\x9b\xff\xcc\x80\xcd\x6f\x6a\x23\xb3\x0c\xf0\xe9\
\x01\x30\x37\xf0\x00\x30\x75\x26\x33\x0d\x70\xa4\xf9\xcb\x02\x7a\
\xf2\xef\x58\x35\x83\xd9\x06\x38\x32\x00\xae\xcc\x80\xf9\x4d\xad\
\x66\xb6\x01\x8e\x0c\x80\x55\x19\x09\x00\x53\xed\x99\x71\x80\xc3\
\xe6\x3f\x2b\x43\xe6\x37\x35\x81\x59\x07\x38\x1c\x00\xf7\x65\x2c\
\x00\x96\x31\xeb\x00\x07\xcd\x5f\x2e\xf5\x6e\xc6\x02\xc0\xac\x11\
\x50\xc9\xec\x03\x01\x10\xc5\x63\x33\x66\xfe\xea\xba\x9c\xd9\x07\
\x02\x20\x8a\x5f\xc8\x68\x00\x2c\x66\xf6\x21\xeb\xe6\xef\xa6\x60\
\xb4\xb7\xa4\x7e\x28\xb5\x54\xea\x15\xa9\xdd\xc7\xf8\xb7\x3b\x0b\
\xff\x6e\xaa\xd4\xed\xb6\x57\x0c\x96\x3a\x01\x15\x40\x96\x03\x60\
\x9e\x42\x00\xdc\xf2\x89\xbf\x59\x5f\xaa\xad\x54\x4f\xa9\xc1\x52\
\xbd\xa4\x4e\x97\x6a\x6d\xfe\xbf\x8f\xfc\xbb\x93\x14\x8e\xed\x62\
\x54\x00\x59\x35\x7f\x85\xd4\x2e\xcb\x06\xdb\x2f\x75\x6a\x1d\x8e\
\x71\xa5\xe5\xe3\xbb\x1f\x25\x40\x56\x03\x40\x63\xe7\x9f\x67\xea\
\x78\x8c\xd3\x14\x2e\x4f\x1a\xa0\x06\xc8\x62\x00\xac\x56\x08\x80\
\xa1\x75\x3c\xc6\xd3\x15\x8e\xb1\x27\x6a\x80\xac\x99\xbf\xbb\x82\
\xb1\xb6\x4a\x35\x4a\xe0\x58\xd7\x59\x3e\xce\x1f\xa0\x08\xc8\x5a\
\x00\xfc\x44\x21\x00\x6e\x4e\xe8\x58\xbf\x6f\xf9\x38\x37\xa0\x08\
\xc8\x92\xf9\x73\x85\xaf\xc0\x6c\xdf\xfc\xfb\x4c\x42\xc7\xdb\x43\
\x21\xac\xce\x40\x19\x90\x95\x00\xf8\x96\x82\xa1\x9e\x4a\xf0\x78\
\x4b\xa4\xb6\x59\x3e\xde\x1b\x50\x06\x64\x25\x00\x5e\xf4\x6d\x0d\
\x7e\xf9\x7d\xf3\x2d\x1f\xef\x2a\x94\x01\x59\x30\xff\xb9\x0a\xe6\
\x7f\x23\xe9\xe5\xb7\xe5\xf7\x0d\x50\x38\xee\x93\x51\x08\x84\x1e\
\x00\x3f\x55\x30\xd2\x2c\x0b\xc7\xdd\x54\xe1\xa1\xa5\x6b\x50\x08\
\x84\x6c\xfe\x48\xea\x7d\xcb\x26\xda\x27\x75\x8a\xa5\xe3\x5f\x64\
\xf9\xd8\x9f\x43\x25\x10\x72\x00\x8c\x53\xf8\xf4\xff\x8d\xc5\xe3\
\xff\x9a\xe5\x63\xdf\x23\x15\xa3\x14\x08\x35\x00\x5e\x52\x08\x80\
\xc1\x16\x8f\x3f\x5f\x30\xa9\xcd\xe3\x1f\x85\x52\x20\x44\xf3\x9f\
\xa7\x60\xfe\x2d\xb6\xf7\xde\x93\xdf\xff\xbc\xe5\x31\x2c\x44\x2d\
\x10\x62\x00\x2c\x50\x08\x80\x99\x01\x5c\xc6\x98\x1b\x8d\x4d\x51\
\x0c\x84\x64\x7e\x73\xea\xfc\x81\xc2\xcd\xbf\x76\x0a\x63\x69\xa7\
\x10\x64\xfd\x51\x0d\x84\x14\x00\x13\x14\x4c\xb3\x54\x71\x3c\xb6\
\x97\x30\x9b\x87\x6a\x20\xa4\x00\x78\x39\xa4\x95\x75\xe4\x6f\xdd\
\x68\x79\x2c\x6f\x9a\xc7\x8f\x51\x0e\x84\x60\xfe\x9e\x0a\xe6\xdf\
\xac\xb9\xa8\x86\xfc\xad\x2e\x0a\x63\xfa\x22\xea\x81\x10\x02\xe0\
\xc1\x10\x37\xdb\x94\xbf\xf9\x77\xcb\x63\xba\x15\xf5\x80\xef\xe6\
\xaf\x3c\xce\x0a\xbc\x49\xdd\xfc\x3b\x29\x85\xb1\xcd\xb1\x3c\xae\
\xd7\x51\x10\xf8\x1e\x00\xd7\x2a\x7c\xfa\xff\x2a\xa5\xb1\xf5\x52\
\x18\x5b\x27\x54\x04\x3e\x07\xc0\x5a\x05\x93\x0c\x48\x69\x6c\x0d\
\xa4\xb6\x5b\x1e\xdb\x54\x54\x04\xbe\x9a\xff\x7c\x05\xf3\x6f\x4a\
\x73\x45\x5d\x85\x37\x1b\xff\x8c\x92\xc0\xd7\x00\xf8\xb9\x42\x00\
\xdc\x98\xf2\x18\x07\x29\x2c\x6b\xd6\x06\x35\x81\x6f\xe6\x6f\x2e\
\xf5\x5f\x85\xdd\x75\xdb\xa6\x3c\xce\x66\x0a\xaf\x37\x5f\x8d\xa2\
\xc0\xb7\x00\x98\xa8\xf0\xe9\xff\x84\x23\x63\x7d\xcc\xf2\x38\x9f\
\x46\x51\xe0\x93\xf9\xcd\xbe\x7b\xaf\x29\x04\x40\x3f\x47\xc6\x7b\
\xb9\xe5\x71\xfe\xcf\xac\xa2\x8c\xb2\xc0\x97\x00\xe8\xad\x60\xfe\
\x8d\xae\x3c\x2a\x5b\xb8\xdc\xd9\x6b\x79\xbc\x97\xa2\x2c\xf0\x25\
\x00\x1e\x51\x08\x80\xe9\x8e\x8d\xf9\x77\x96\xc7\xfb\x08\xca\x02\
\x1f\xcc\xdf\x52\xe9\xe6\xdf\x89\x8e\x8d\xfb\xdb\x96\xc7\xfc\x8e\
\x54\x63\x14\x06\xae\x07\xc0\x24\x85\x4f\xff\xc7\x1c\x1c\x77\x7b\
\x85\x71\x5f\x84\xc2\xc0\xf5\x9b\x7f\xeb\xb2\x6a\x04\x85\x9d\x8e\
\xe7\xa2\x32\x70\x39\x00\xfa\x28\x98\xff\x1f\xae\xbe\x27\x6f\xde\
\x48\x54\xd8\xec\xa4\x3e\x4a\x03\x57\x03\xe0\x97\x0a\x01\x70\xbd\
\xc3\xe3\xef\xaa\x30\xfe\xee\x28\x0d\x5c\x14\x7f\x55\xe1\xfb\x6a\
\xdb\x6b\xe6\xb7\x76\xbc\x0f\x1b\x2d\xf7\x60\x36\x6a\x03\x17\x85\
\x3f\x59\xe1\xd3\x6f\x91\x07\x7d\xb8\xd3\x72\x0f\xd6\xa2\x36\x70\
\xf1\xe6\xdf\x7a\x85\x00\xb8\xd0\x83\x5e\x68\x3c\x04\xd5\x11\xd5\
\x81\x4b\xa2\xbf\x40\x41\xf4\x1b\x7c\xb8\x01\x66\x36\x25\x91\xfa\
\xb7\xe5\x5e\x5c\x87\xea\xc0\x25\xd1\x3f\xaa\x10\x00\x53\x3d\xea\
\x87\xed\x0d\x50\xfe\x84\xea\xc0\x15\xb1\xb7\x52\xd8\x2b\xcf\xdc\
\x5c\x6c\xe5\x51\x4f\x86\x28\xac\x11\xd0\x1a\xf5\x81\x0b\x62\x9f\
\xaa\xf0\xe9\xbf\xd0\xb3\x9e\x94\x2a\xec\x82\x74\x15\xea\x83\xb4\
\x85\x5e\xa2\xb0\x34\xb6\xa9\xaf\x78\xd8\x9b\x25\xbe\x6e\x81\x0e\
\x50\x53\x91\xf7\x55\x30\xff\x7a\x1f\x9f\x7e\x93\x63\x1e\x6d\xb9\
\x2f\xe6\x85\xab\x72\x54\x08\x69\x8a\x7c\xb1\x42\x00\x4c\xf6\xb4\
\x37\x2d\x0b\xfb\x15\xd8\xec\xcd\x70\x54\x08\x69\x09\xfc\x44\xa5\
\x9b\x7f\x55\x1e\xf7\x68\xb9\xe5\xfe\x3c\x84\x12\x21\x2d\x71\x4f\
\x53\xf8\xf4\xff\x85\xe7\x3d\xb2\xbd\x2e\xe2\x7f\xa4\x1a\xa1\x46\
\x48\xe3\xe6\xdf\x46\x85\x00\xf8\xb2\xe7\x7d\xea\xa0\xd0\xa3\x0b\
\x50\x24\x68\x0b\xfb\xab\x0a\xc2\x7e\x3d\x84\x57\x5f\x65\x0c\x2f\
\x59\xee\xd3\x5d\x28\x12\xb4\x45\xfd\xb8\x42\x00\x4c\x0a\xa4\x57\
\x37\x29\x6c\x8b\xce\x1a\x01\xa0\x26\xe8\x36\x0a\x2b\xe0\x9a\xaf\
\xb8\x5a\x04\xd2\xaf\xb3\x15\xc2\xb2\x1b\xca\x04\x2d\x41\xdf\xa8\
\x20\xe8\x87\x03\xea\x57\xfd\xc2\xfe\x85\x36\xfb\x35\x0b\x65\x82\
\x86\x98\xcd\x6e\xb8\xff\x54\x08\x80\x2f\x05\xd6\xb7\xbb\x2c\xf7\
\x6b\x0d\xea\x04\x0d\x21\xf7\x57\x30\xff\x6b\xa1\x5d\xd3\x9a\x47\
\x99\x15\xfa\xd6\x01\x85\x82\x6d\x21\x2f\x51\x10\xf2\xc4\x00\xfb\
\xd6\xa8\xf0\x9d\x3d\x7d\x03\x6f\x45\x7c\x92\xd2\xcd\xbf\xe6\x81\
\xf6\xef\x41\xcb\xbd\xfb\x03\x2a\x05\x9b\x02\x9e\xa1\xf0\xe9\xff\
\x50\xc0\xfd\x1b\x66\xb9\x77\xe6\xbd\x83\x96\x28\x15\x6c\xdd\xfc\
\xdb\xac\x10\x00\xe7\x07\xdc\xc3\x72\xa9\xdd\x96\xfb\x37\x06\xb5\
\x82\x0d\xf1\x0e\x54\x30\xff\xda\x0c\xf4\xf1\xd7\x96\x7b\xb8\x04\
\xb5\x82\x0d\xe1\x2e\x55\x08\x80\x6b\x33\xd0\xc7\x2b\x2d\xf7\xd0\
\xac\x42\x54\x8a\x62\x21\x49\xd1\x9e\xac\xf0\x5e\xbb\x39\x35\xae\
\xcc\x40\x2f\x5b\x15\xd6\xf3\xb3\xd9\xcb\x21\xa8\x16\x92\x14\xad\
\xc6\x93\x7f\x3f\xcb\x50\x3f\xff\x68\xb9\x97\x0f\xa0\x5a\x48\x52\
\xb0\x4f\x29\x04\x40\xcf\x0c\xf5\xd3\xf6\xf6\xe9\x66\x4f\x82\x86\
\x28\x17\x92\x12\xec\x56\xcb\x82\x7d\x39\x63\xfd\x3c\x4d\x21\x50\
\x7b\xa3\x5c\x48\xea\x9a\xd5\xb6\x58\x27\x64\xb0\xaf\xaf\x58\xee\
\xe9\x9d\xa8\x17\x92\x10\xea\x85\x0a\x77\xad\xf3\x19\xec\xeb\xcd\
\xb6\xdf\xa7\x40\xbd\x90\x84\x50\x87\x71\xc3\xca\x4a\x5f\xcf\x51\
\x78\xa4\x9a\x45\x42\xa0\xce\x42\x6d\x67\x59\xa8\xe7\x65\xb4\xaf\
\x66\x8d\x80\x2d\x96\x7b\xdb\x1c\x05\x83\xcb\x37\x01\xd7\x64\xbc\
\xaf\xf7\x58\x0e\x80\x33\x50\x2f\x24\x21\xd4\x27\x2c\x09\x74\x5c\
\xc6\xfb\x6a\x7b\x57\xa5\xbe\xa8\x17\x92\x10\xea\x04\x0b\xe2\xdc\
\x21\x15\x65\xbc\xaf\x8d\xa5\xde\xb6\x18\x00\xdd\x51\x2f\x24\x21\
\x54\xb3\x98\xc5\x0b\x09\x8b\x73\x2c\x9d\x3d\xd0\xdb\x87\x2d\x99\
\x7f\x9b\xd9\xbb\x81\x0e\x43\x52\x42\xed\x5c\xb8\xb3\x9c\x84\x38\
\xff\x8a\x38\x0f\xf5\x75\x90\xa5\x00\xb8\x9f\xee\x42\xd2\x62\x9d\
\x9c\x80\x30\x77\x4a\x75\xa5\x9b\x87\x7a\x6a\xbe\x0d\x78\x91\x17\
\x82\xc0\x07\xb1\x9a\x45\x41\x6e\xaf\xc3\x9b\x81\x66\x35\xe1\x4e\
\x74\xf2\x88\xbe\x0e\x4f\xd8\xfc\x66\xcb\xb6\x32\x3a\x0b\xb6\x04\
\x7b\x6e\x2d\x1e\x65\x35\x6f\xc0\xb5\xa1\x7b\x47\xed\x67\x49\x82\
\x67\x01\xef\x73\x86\x05\x1a\xa2\x6d\x22\xf5\x5d\xa9\xb7\x8e\x23\
\xc8\x65\x66\x49\x6c\x3a\x76\xdc\x7e\x9a\x07\xae\x36\x24\x10\x00\
\xa3\xe8\x26\x68\x8b\xd7\xac\x16\x7c\x71\x61\xd1\xd0\x79\x52\x53\
\xcc\x35\xa8\x54\x47\xba\x53\x74\x1f\xd7\xd5\xc1\xfc\xb7\xd2\x45\
\x00\xbf\x43\xc0\xec\xbb\xb8\xa8\x48\xe3\xaf\x66\x8b\x70\x80\xb0\
\x82\xa0\x6b\x0d\xd6\x61\xfc\x97\xd4\x15\x7c\xa5\x0a\x10\x6e\x10\
\x54\x99\x7d\x12\xa5\xbe\x69\xde\xf1\x97\xfa\x8e\xd4\xc8\xc2\x8e\
\xc3\x4d\xe9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\xc0\xc7\xf8\x3f\xd6\x72\xa8\x52\xc2\xfb\xeb\xae\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\xde\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x80\x08\x03\x00\x00\x00\xf4\xe0\x91\xf9\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x11\xb7\x00\x00\x11\xb7\x01\
\x9a\x63\x53\x2b\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\
\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x87\x50\x4c\x54\
\x45\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x8f\xa4\x9f\xb3\x00\x00\x00\x2c\
\x74\x52\x4e\x53\x00\x01\x03\x0d\x13\x1b\x21\x24\x26\x2a\x2c\x45\
\x54\x5d\x5f\x64\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70\x79\x7e\x80\x93\
\x97\xa9\xaa\xb4\xba\xbb\xe0\xe9\xec\xee\xf0\xf2\xf4\xf5\xf9\xfe\
\x05\x86\xc2\x45\x00\x00\x01\x91\x49\x44\x41\x54\x78\xda\xed\x96\
\xd9\x6e\xc2\x30\x10\x45\x4d\x81\x86\xb2\x87\x7d\x6b\xd9\x5a\x28\
\xe0\xff\xff\xbe\x4a\x45\x42\x04\x4f\x62\x27\x1e\x8f\x5f\xee\x79\
\x9e\x9b\x73\x24\x14\x14\xa5\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\xf0\xa2\xd6\x6a\xd5\x22\xea\x93\xed\x59\xeb\xf3\x36\x89\
\xe5\x4f\x2f\xfa\x9f\x4b\x1a\xc7\xbf\xd2\x0f\x56\x91\xfd\x51\x0a\
\x32\xfe\x08\x05\x2f\x7e\xf1\x02\xc3\x2f\x5c\x40\xf8\x45\x0b\x48\
\xbf\x60\x41\x8e\x5f\xac\x20\xd7\x2f\x54\x50\xe0\x17\x29\x28\xf4\
\x0b\x14\x58\xfc\xc1\x0b\xac\xfe\xc0\x05\xa6\xff\x76\x93\x2c\x20\
\xfc\xc3\xa1\x60\x01\xe5\x57\x4a\xae\x80\xf6\xcb\x15\xe4\xf9\xa5\
\x0a\x08\xff\xe3\x4b\x2c\x15\x28\x28\xf2\x4b\x14\x10\xef\x7f\xe6\
\x4b\x34\x0d\xfc\x7f\x60\xf3\x87\x2e\xb0\xfb\xc3\x16\x10\xfe\x91\
\x79\x35\x0a\x56\xe0\xe6\x0f\x57\xe0\xea\x0f\x55\xe0\xee\x0f\x53\
\x50\xc6\x1f\xa2\x80\xf0\x8f\x8b\xee\xc7\xcc\x05\x65\xfd\xdc\x05\
\xe5\xfd\xbc\x05\x55\xfc\x9c\x05\xd5\xfc\x7c\x05\x84\x7f\xe2\xb6\
\x9c\xb0\x14\x54\xf7\xf3\x14\xf8\xf8\x39\x0a\xfc\xfc\xfe\x05\xbe\
\x7e\xdf\x02\xc2\x3f\x2d\xfb\x1b\x4e\x3d\x0a\x38\xfc\x3e\x05\x4b\
\x16\x3f\x59\xb0\x74\xd9\x75\x98\xfc\x64\x41\xc7\xbe\xaa\x1f\xb8\
\xfc\x54\xc1\xa1\x6e\x1d\xf5\x8d\xd1\x4c\x55\x66\x66\x3c\xac\x6f\
\xdd\xac\x19\xfd\x44\xc1\xda\x3a\xd9\x73\xfa\xcd\x82\xbd\x75\xb1\
\x61\xf5\x1b\x05\x1b\xeb\x60\xc0\xeb\x7f\x2d\x18\x58\xef\x1b\xc7\
\xa7\xf3\xb9\x62\x60\xfe\xf4\xc0\x63\xc3\x7e\xdf\x65\xf6\x67\x0a\
\xba\x2e\xf7\x8b\xeb\xfd\xf8\xd4\x53\x4c\xf4\x4e\xf7\x27\x5e\x17\
\x6e\xf7\xed\xdd\x59\xeb\xef\xcf\xa6\x62\xa3\xf9\xf5\xa3\xf5\xef\
\xbe\xed\x3c\xa8\x25\x8c\xf6\x3b\xef\x1f\x6f\x0a\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x8a\x3f\x66\xd8\x22\x5c\xc5\xf4\x17\
\x37\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x26\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x11\xb0\x00\x00\x11\xb0\
\x01\xc8\x27\xf5\x54\x00\x00\x04\xd8\x49\x44\x41\x54\x78\x9c\xed\
\xdc\x31\x6b\x1c\x47\x18\x87\xf1\x67\x4e\x01\x43\x42\x08\x41\x02\
\x17\xa9\x42\x42\xaa\x14\x86\x34\x29\x67\xd0\xe5\x03\x04\x92\x74\
\x2e\x5d\xc4\x76\x8c\x93\xce\x95\x65\x17\x2e\x23\x6c\x4b\xfe\x00\
\x2e\x0d\x69\xdd\x04\xb1\x0b\x2a\xd2\x38\x5d\x0c\x26\x9d\x8b\x94\
\xa9\x8c\x48\xa3\x65\x52\x68\x8d\x65\x1c\x2b\xd2\xcd\xbb\x77\xbb\
\xf3\x7f\x9f\x4a\x9c\x77\x67\xd7\xfb\xfe\xe0\xc4\x69\xf7\x42\xdb\
\xb6\x78\xba\xcd\x56\x7d\x02\xde\x6a\x73\x00\xe2\x39\x00\xf1\x1c\
\x80\x78\x0e\x40\x3c\x07\x20\x9e\x03\x10\xcf\x01\x88\xe7\x00\xc4\
\x73\x00\xe2\x39\x00\xf1\x1c\x80\x78\x0e\x40\x3c\x07\x20\x9e\x03\
\x10\xcf\x01\x88\xe7\x00\xc4\x73\x00\xe2\x39\x00\xf1\x1c\x80\x78\
\x0e\x40\x3c\x07\x20\x9e\x03\x10\xcf\x01\x88\xe7\x00\xc4\x73\x00\
\xe2\x39\x00\xf1\x1c\x80\x78\x0e\x40\x3c\x07\x20\x9e\x03\x10\xcf\
\x01\x88\xe7\x00\xc4\x73\x00\xe2\x39\x00\xf1\xde\x19\x62\xd1\x10\
\x42\x88\x31\x7e\x32\x9b\xcd\xfe\xd9\xdb\xdb\xfb\x6b\x88\x63\xa8\
\x14\x63\xfc\x78\x36\x9b\xbd\xbb\xb1\xb1\xf1\xec\xd1\xa3\x47\x9d\
\xf5\xfa\xc1\xf2\x4b\xa2\x52\x4a\x9f\xe7\x9c\x7f\x06\xbe\x04\xde\
\xef\x5f\xfe\x1b\xd8\x5b\x5b\x5b\xfb\xc9\x31\x9c\xae\xcd\xcd\xcd\
\x8f\xba\xae\xdb\x06\xe6\xc0\x87\xfd\xcb\x07\xc0\x6f\x21\x84\x1f\
\x9b\xa6\xf9\xc3\xea\x58\x66\x6f\x01\x31\xc6\x1b\x39\xe7\x27\xc0\
\x57\xbc\x1a\x3e\xc0\x3a\xf0\x5d\xd7\x75\x4f\x63\x8c\x17\xad\x8e\
\x57\x6b\x31\xc6\x8b\x5d\xd7\x3d\x05\xbe\xe5\xd5\xf0\x01\xde\x03\
\xe6\x39\xe7\x27\x31\xc6\x1b\x56\xc7\x33\x01\x90\x52\xfa\x06\xb8\
\x03\x9c\x3b\x61\xb3\x0f\x80\x87\x29\xa5\xeb\x16\xc7\xac\xb1\xfe\
\xda\x3c\xe4\xe8\x5a\xbd\xad\x73\xc0\x9d\xfe\x9a\x17\x57\x0c\x60\
\x3e\x9f\xaf\xe7\x9c\x77\x4e\xbb\x7d\xce\x79\xdb\x11\xbc\x59\x4a\
\xe9\x7a\xce\x79\xfb\xb4\xdb\xe7\x9c\x77\xe6\xf3\xf9\x7a\xe9\x71\
\x8b\x01\x74\x5d\xf7\x35\x70\xfe\x2c\xfb\x38\x82\xd7\x3b\xeb\xf0\
\xfb\xce\xf7\xd7\xbe\xa8\x62\x00\x39\xe7\x2f\x16\xdc\xcf\x11\xb0\
\xf0\xf0\x81\xc5\xaf\xfd\xf1\x2c\x7e\x07\xf8\x74\xd1\x1d\xd5\x11\
\x94\x0c\xbf\x6f\xe1\x6b\xff\x32\x0b\x00\xcf\x4a\x76\x56\x45\x60\
\x30\x7c\x28\xbc\xf6\x60\x00\x20\x84\xf0\x7b\xe9\x1a\x6a\x08\x8c\
\x86\x6f\x72\xed\x8b\x01\x1c\x1e\x1e\xfe\x02\x3c\x2f\x5d\x27\xe7\
\xbc\x1d\x63\xbc\x56\xba\xce\xd8\x8b\x31\x5e\xb3\x18\x3e\xf0\xbc\
\xbf\xf6\x45\x15\x03\xd8\xdf\xdf\x7f\x01\x5c\x2a\x5d\xa7\xef\x6e\
\xcd\x08\xfa\xff\xdb\x5d\xa3\xe5\x2e\xf5\xd7\xbe\x28\x93\x0f\x82\
\xda\xb6\xfd\x15\xb8\x69\xb1\x16\x95\x22\x30\x1e\xfe\xcd\xfe\x9a\
\x17\x67\xf6\x51\x70\xdb\xb6\xb7\x43\x08\xb7\x8c\x96\xab\x0a\x81\
\xe5\xf0\x43\x08\xb7\xda\xb6\xbd\x6d\xb1\x16\x18\xff\x39\xb8\x69\
\x9a\x2d\x47\xf0\x7a\xd6\xc3\x6f\x9a\x66\xcb\x62\xad\x97\x99\xdf\
\x0f\x30\x00\x82\x1f\x8c\xd6\x5a\x7a\xfd\xb9\x8f\x76\xf8\x30\xd0\
\x0d\x21\xc6\x08\xee\x4d\x11\x41\x7f\xce\xf7\x2c\xd6\x1a\x6a\xf8\
\x30\xe0\x1d\x41\xca\x08\xa6\x32\x7c\x18\xf8\x96\x30\x45\x04\x53\
\x1a\x3e\x2c\xe1\x9e\x40\x25\x04\x53\x1b\x3e\x2c\xe9\xa6\x50\x6b\
\x04\x29\xa5\xab\x46\x6b\x99\xd5\x9f\xd3\xa4\x86\x0f\x4b\xbc\x2b\
\xd8\x12\x41\xce\xf9\xfe\x98\x10\xa4\x94\xae\xe6\x9c\xef\x5b\xac\
\xb5\xcc\xe1\xc3\x92\x6f\x0b\xaf\x11\xc1\x94\x87\x0f\x2b\x78\x2e\
\xa0\x26\x04\x53\x1f\x3e\xac\xe8\xc1\x90\x1a\x10\xd4\x30\x7c\x58\
\xe1\x93\x41\x53\x46\x50\xcb\xf0\x61\xc5\x8f\x86\x4d\x11\x41\x4d\
\xc3\x87\x11\x3c\x1b\x38\x25\x04\xb5\x0d\x1f\x46\x00\x00\xa6\x81\
\xa0\xc6\xe1\xc3\x48\x00\xc0\xb8\x11\xd4\x3a\x7c\x18\x11\x00\xb0\
\x47\x10\x63\xbc\x5c\xba\x4e\x8c\xf1\x72\xad\xc3\x87\x91\x01\x00\
\xf3\x8f\x8d\x77\x4b\x10\xf4\xfb\xee\x5a\x9c\xc8\x18\x87\x0f\x23\
\x04\x00\xe3\x40\xa0\x30\x7c\x18\x29\x00\x30\x47\xb0\x73\x16\x04\
\xfd\xb6\xa7\x7e\xe0\xf5\xa4\xc6\x3c\x7c\x18\x31\x00\x30\x45\x10\
\x38\x25\x82\x63\xc3\x0f\xc5\x07\x1d\xf9\xf0\x61\xe4\x00\x60\x10\
\x04\xdf\xbf\x6d\x83\xfe\xdf\x64\x86\x0f\x13\x00\x00\xe6\x08\x76\
\xff\x0b\x41\xff\xda\x2e\x42\xc3\x87\x89\x00\x80\x61\x11\xa8\x0e\
\x1f\x8c\xbf\x24\x6a\x19\xa5\x94\xb6\x72\xce\x16\x4f\x21\x65\xe0\
\x4a\xff\xb3\xe4\xf0\x61\x82\x00\xc0\x1c\x01\x88\x0e\x1f\x26\xf4\
\x16\x70\x3c\xe3\xb7\x03\xd9\xe1\xc3\x44\x01\x80\xf9\xe7\x04\x0b\
\x37\xe5\xe1\xc3\x84\x01\xc0\xea\x11\x4c\x7d\xf8\x30\x71\x00\xb0\
\x3a\x04\x35\x0c\x1f\x2a\x00\x00\xcb\x47\x50\xcb\xf0\xa1\x12\x00\
\xb0\x3c\x04\x35\x0d\x1f\x2a\x02\x00\xc3\x23\xa8\x6d\xf8\x50\x19\
\x00\x18\x0e\x41\x8d\xc3\x87\x0a\x01\x80\x3d\x82\x5a\x87\x0f\x95\
\x02\x00\x3b\x04\x35\x0f\x1f\x2a\x06\x00\xe5\x08\x6a\x1f\x3e\x54\
\x0e\x00\x16\x47\xa0\x30\x7c\x10\x00\x00\x67\x47\xa0\x32\x7c\x10\
\x01\x00\x47\x08\x38\xfa\xf3\xef\xc1\x09\x9b\x1d\x00\x57\x54\x86\
\x0f\x42\x00\x00\xda\xb6\x7d\x10\x42\xb8\x00\x3c\x06\x8e\x7f\xcd\
\xea\x0b\xe0\x71\x08\xe1\x42\xdb\xb6\x0f\x56\x73\x76\xab\x69\x92\
\xf7\x03\x58\x14\x42\x08\x29\xa5\xcf\x00\x9a\xa6\xf9\x33\xe7\x9c\
\xff\x6f\x9f\x1a\x93\x05\xe0\x1d\x25\xf5\x16\xe0\xbd\x99\x03\x10\
\xcf\x01\x88\xe7\x00\xc4\x73\x00\xe2\x39\x00\xf1\x1c\x80\x78\x0e\
\x40\x3c\x07\x20\x9e\x03\x10\xcf\x01\x88\xe7\x00\xc4\x73\x00\xe2\
\x39\x00\xf1\x1c\x80\x78\x0e\x40\x3c\x07\x20\x9e\x03\x10\xcf\x01\
\x88\xe7\x00\xc4\x73\x00\xe2\x39\x00\xf1\x1c\x80\x78\x0e\x40\x3c\
\x07\x20\x9e\x03\x10\xcf\x01\x88\xe7\x00\xc4\x73\x00\xe2\x39\x00\
\xf1\x1c\x80\x78\x0e\x40\x3c\x07\x20\x9e\x03\x10\xcf\x01\x88\xf7\
\x2f\x8e\x2b\x7a\x5d\x47\x6c\xf4\xe4\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x04\xcd\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x03\x00\x00\x00\x6b\xac\x58\x54\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x23\x6f\x00\x00\x23\x6f\x01\
\x35\xfc\x07\xfb\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\
\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x26\x50\x4c\x54\
\x45\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x39\xc2\x50\x14\x00\x00\x00\x61\x74\
\x52\x4e\x53\x00\x01\x03\x04\x06\x09\x0a\x0d\x0e\x10\x12\x14\x18\
\x1a\x1b\x1d\x20\x21\x22\x23\x2d\x2e\x30\x37\x38\x3f\x40\x44\x45\
\x48\x4d\x5c\x5d\x5e\x60\x64\x6a\x6c\x70\x75\x78\x7c\x80\x82\x88\
\x8c\x8f\x92\x93\x94\x95\x98\x9a\x9e\xa0\xa8\xa9\xab\xb0\xb1\xb2\
\xb6\xb7\xb8\xb9\xba\xc1\xc5\xc8\xcc\xce\xd0\xd1\xd3\xd8\xdc\xdd\
\xe0\xe1\xe4\xe5\xe6\xe7\xe8\xe9\xee\xf0\xf1\xf2\xf3\xf5\xf7\xf9\
\xfa\xfc\xfd\xfe\xec\xce\xc6\x96\x00\x00\x02\xac\x49\x44\x41\x54\
\x78\xda\xed\xdc\xeb\x52\x13\x41\x10\x86\xe1\x59\xe4\xb4\x82\x18\
\x41\x20\xa8\x51\x11\x01\x45\x08\x82\x8a\x18\x4f\x44\x45\x88\x28\
\x27\x63\x50\x30\x92\xef\xfe\x6f\xc2\x1b\x08\x5b\xa5\x35\x5d\x54\
\x67\xde\xf7\x02\x76\xab\x9f\xaa\xad\xed\x3f\x33\x21\x10\x11\x11\
\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\xa5\xda\xd8\
\xdc\xda\x46\xf4\xaa\xf7\x72\x2f\xf3\xcf\x9e\xc8\xa4\xc3\x8a\x8f\
\xf9\x67\x3a\x32\xea\x74\xc2\x05\xc0\x8e\xcc\xda\xf4\x30\xff\xd0\
\xb9\x1d\xc0\xb1\x07\x80\x49\x49\x5a\x1f\x8d\xde\x96\x24\x0d\x3b\
\x00\x98\x92\xa4\xe5\xf8\xcf\xad\x49\x52\x0e\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x70\x09\x8d\x4b\xd2\xee\xb3\xe8\xed\x4b\
\x6a\xf7\x3b\x00\xc8\x5a\x76\x67\x86\xb6\x83\x87\x56\xed\x00\xe6\
\x5d\x00\x0c\x36\xac\xe6\xaf\x67\x2e\x00\xc2\xc0\xca\x81\xc5\xf8\
\x7b\x8b\x7d\xc1\x4d\x23\xa5\xc2\xea\x92\xf4\xa4\xf4\x4f\xe5\xa1\
\x87\xaa\x49\xd2\x42\x48\x37\x00\x00\x28\x02\xa8\xbc\xf8\xfc\x25\
\x7a\x1f\x9f\xde\x74\x02\x90\x3d\x37\xfa\x89\xfe\x7e\xe8\x03\x60\
\xde\x6c\x8d\xfa\x33\xed\x02\xa0\x69\xb7\x48\xbe\xf6\x00\x70\x5d\
\x92\x7e\x7e\x8b\x5e\x5b\xd2\xa1\x07\x80\x69\x49\x5a\x8a\xff\xca\
\xf7\x92\xe4\x01\xa0\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xbd\x0a\xd0\xa8\
\x75\xeb\x43\x3a\x00\x05\x01\x00\x00\x00\xbd\x0d\x70\x77\xe1\xc2\
\xaa\x49\x00\x14\x54\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x92\x01\xb8\x21\x49\x47\x9f\xa2\xf7\x4b\x52\xd3\x03\x40\
\xf8\x61\x77\x68\xea\x9d\x0b\x80\xc7\x76\x00\x77\x5c\x00\x5c\x79\
\x63\x34\x7e\x67\x29\x38\xe9\xc1\xdb\xa3\x56\xf7\xce\x24\xa9\xdd\
\xfa\x9f\xbe\xbe\xba\x15\xfc\xf7\x48\x92\x5e\x86\x74\x03\x00\x00\
\x00\x00\x28\x04\xc8\x4b\xd1\x1b\xf1\x03\xd0\xb7\xb8\x67\xb1\x3f\
\x1c\xac\x0c\xf8\x00\xc8\xea\x56\x1b\x64\x63\xd0\x05\x80\xdd\xd5\
\x13\x5a\x75\x01\xb0\x6d\x07\xd0\xca\x1c\x00\xf4\xb7\x25\xed\xc7\
\xbf\xc6\x6f\x57\x92\xc6\x1d\x00\xe4\x92\x54\x8b\xff\xce\x65\x49\
\x9a\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\xe0\x12\x01\xce\xbe\x77\xad\x99\x0c\x40\
\x51\x00\x00\x00\x40\x4f\x03\x5c\x2b\x5f\xdc\xed\x14\x00\x8a\xca\
\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x52\x01\
\x18\x96\xa4\xad\xd1\xe8\xad\x4b\xd2\xa4\x03\x80\x70\x6c\x77\x66\
\xe8\x7c\xc8\x03\xc0\xa6\x1d\xc0\x8e\x87\xf9\xc3\xc4\xa9\xd5\xfc\
\x9d\x19\x17\x00\xa1\x62\xf4\x11\x9c\xcc\x06\x27\x5d\xbd\x5f\xdd\
\x88\xde\xda\xdc\x58\x20\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\
\x22\x22\x22\x22\x22\x4a\xb5\xbf\x0c\x2e\xa6\x5a\xc1\x16\x4e\x36\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\x18\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x89\x08\x06\x00\x00\x00\xe4\x31\x30\x03\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x51\x21\x00\x00\x51\x21\
\x01\x42\xd6\x73\x50\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x02\x95\x49\x44\
\x41\x54\x78\x9c\xed\xdd\x31\x8a\x14\x41\x18\x47\xf1\x7f\xad\xa3\
\xab\x99\xa1\x0b\x22\x78\x91\x15\x3d\x80\xb7\xf0\x0e\x26\xc6\x9e\
\xc6\xc4\xc8\x4c\x23\x59\xf0\x12\x9a\x6d\x66\xa2\xac\xbb\x8b\x7e\
\x26\x93\xba\x34\x6c\x59\x35\xdd\xef\xfd\xd2\x81\xaa\x6f\xe0\x41\
\xd1\x3d\x4c\x77\x4b\x52\x39\x0c\x4f\xab\xea\xeb\xec\x21\x68\x8e\
\x66\x0f\xa0\xb9\x0c\x00\xce\x00\xe0\x0c\x00\xce\x00\xe0\x0c\x00\
\xae\xe5\x70\x2e\x03\xdf\x25\xf9\xd1\x69\xad\x57\x55\x75\xdd\x69\
\xad\x4d\x3b\xa4\x00\x7a\x7a\x50\x55\xbf\x66\x0f\xb1\x06\x1e\x01\
\x70\x06\x00\x67\x00\x70\x06\x00\x67\x00\x70\x06\x00\x67\x00\x70\
\x06\x00\x67\x00\x70\x06\x00\x67\x00\x70\x06\x00\x67\x00\x70\x06\
\x00\x67\x00\x70\x06\x00\x67\x00\x70\x06\x00\x67\x00\x70\x06\x00\
\x67\x00\x70\x06\x00\x67\x00\x70\x06\x00\x67\x00\x70\x06\x00\x67\
\x00\x70\x06\x00\x67\x00\x70\x06\x00\xb7\x9b\x3d\xc0\x5a\xb4\xd6\
\x8e\x92\x9c\xce\x9e\x63\xef\x73\x55\x5d\xf5\x58\xc8\xe7\x03\x2c\
\xd4\x5a\xbb\x9f\xe4\xa2\xe7\x9a\xb7\x70\x52\x55\xe7\x3d\x16\xf2\
\x08\x80\xdb\x25\x79\xde\x71\xbd\x17\x49\xde\xfc\xe3\xb3\xab\x24\
\xcf\x3a\xee\x75\x93\xcb\x41\xfb\xac\xde\xae\xaa\x3e\xf5\x5a\xac\
\xb5\xf6\xe8\x86\x8f\xab\xaa\xce\x7a\xed\xa5\x3e\x3c\x02\xe0\x0c\
\x00\xce\x00\xe0\xbc\x0f\xb0\x4e\x5f\x5a\x6b\xbf\x7b\x2c\x64\x00\
\xeb\xf4\xb8\xd7\x42\x1e\x01\x70\x06\x00\x67\x00\x70\x06\x00\x67\
\x00\x70\x06\x00\x67\x00\x70\x06\x00\x67\x00\x70\x06\x00\x37\xf4\
\x56\x70\x6b\xed\xe1\xc8\xfd\x3a\x3b\x9e\x3d\xc0\xff\x30\x32\x80\
\xe3\x24\xdf\x07\xee\xa7\x05\x3c\x02\xe0\x0c\x00\xce\x00\xe0\x0c\
\x00\xce\x00\xe0\x0c\x00\x6e\xe4\x65\xe0\x75\x92\x97\x03\xf7\xeb\
\xed\x6e\x92\xf7\xb3\x87\xe8\x6d\x64\x00\x7f\xaa\xea\xc3\xc0\xfd\
\xba\xda\xff\x37\x70\x73\x3c\x02\xe0\x0c\x00\xce\x00\xe0\x0c\x00\
\xce\x00\xe0\x0c\x00\xce\x00\xe0\x0c\x00\xce\x00\xe0\x0c\x00\xce\
\x00\xe0\x0c\x00\xce\x07\x44\x2c\x57\xd9\xe0\xe3\xe7\x0c\x60\xa1\
\xaa\xba\x4c\xb2\xb9\x5f\x04\x3d\x02\xe0\x0c\x00\xce\x00\xe0\x0c\
\x00\xce\x00\xe0\x0c\x00\xce\x00\xe0\x0c\x00\xce\x00\xe0\x0c\x00\
\xce\x00\xe0\xfc\x2d\x60\xa1\xad\xbe\x36\xce\x00\x96\xbb\x97\xe4\
\xe3\xec\x21\xf6\x4e\x92\xf8\xda\x38\xdd\x9e\x01\xc0\x19\x00\x9c\
\x01\xc0\x19\x00\x9c\x01\xc0\x79\x19\xb8\x4e\xbe\x36\x0e\xce\xd7\
\xc6\xa9\x0f\x03\x80\x33\x00\x38\x03\x80\x33\x00\x38\x03\x80\x33\
\x00\x38\x03\x80\x1b\x79\x23\xe8\x4e\x6b\xed\xf5\xc0\xfd\x7a\xdb\
\xe4\x4d\xb3\x91\x5f\x6a\x97\xe4\xed\xc0\xfd\xb4\x80\x47\x00\x9c\
\x01\xc0\x19\x00\x9c\x01\xc0\x19\x00\x5c\xef\xab\x80\x9f\x49\xbe\
\x75\x5e\xf3\x50\xb4\x24\x4f\x66\x0f\xd1\x5b\xab\xaa\xd9\x33\xac\
\xc2\xfe\x9d\x41\x17\xb3\xe7\xe8\xcd\x23\x00\xce\x00\xe0\x0c\x00\
\xce\x00\xe0\x0c\x00\xce\x00\xe0\x0c\x00\xce\x00\xe0\x0c\x00\xce\
\x00\xe0\x0c\x00\xce\x00\xe0\x0c\x00\xce\x00\xe0\x0c\x00\xce\x00\
\xe0\xfe\x02\xb2\xf0\x4c\x4d\x49\x3e\x21\x26\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0e\x27\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x01\x2a\x08\x03\x00\x00\x00\xea\x10\xa8\xc5\
\x00\x00\x02\xf1\x50\x4c\x54\x45\xff\xff\xff\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\
\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\
\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\
\x01\x00\x02\x01\x00\x02\x01\x00\x02\x7e\xa8\x28\x5d\x00\x00\x00\
\xfa\x74\x52\x4e\x53\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\
\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\
\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\
\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\
\x3c\x3d\x3e\x3f\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\
\x4c\x4d\x4e\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\
\x5d\x5e\x5f\x60\x61\x62\x63\x64\x65\x66\x67\x68\x6a\x6b\x6c\x6d\
\x6e\x6f\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\
\x7e\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\
\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\
\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\
\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb8\xb9\xba\xbb\xbc\xbd\xbe\
\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\
\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\
\xdf\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\
\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\x12\
\x9b\xa1\x80\x00\x00\x09\xeb\x49\x44\x41\x54\x78\xda\xc5\xdd\xf9\
\x7f\x54\xd5\x19\xc7\xf1\x6f\x26\x03\x24\x61\x11\x6a\x64\x91\xc5\
\x46\xd6\x0a\xee\xe0\x06\x14\xb0\x52\x08\x8a\x40\x59\x44\x28\xa2\
\xc4\x0a\x6e\xd0\x58\x71\x83\xd6\x62\xab\x55\x91\x82\xca\xa2\x80\
\xfb\x82\x42\x51\x51\xa9\xa0\x75\xc3\xe2\x82\x88\x0b\x0a\x45\x24\
\x0a\x04\xd9\x82\x64\x21\xc9\xcc\xf7\xa7\xbe\x5e\x41\x99\xe0\xeb\
\x3c\x33\xf7\x9c\x7b\xe6\xc9\xfb\x0f\xc8\xfd\x24\x33\x99\xfb\xdc\
\x73\xee\x4d\x90\x5e\xad\x26\xdc\xfd\xc4\xea\x2f\xf6\x95\x7d\xfd\
\xee\x73\xb3\x0b\xbb\x41\xd9\xaf\x6e\xfa\x6f\x9c\xb5\x6d\x99\xd9\
\x3b\x13\x6a\x7e\xbb\x8e\x06\x3b\x26\x46\xa1\xe2\xcc\xd5\x14\x6c\
\x1a\x8e\xf4\xcb\x7d\x3a\x4e\xd9\xda\x53\x91\x66\xdd\xb6\x32\xa9\
\xd2\xdf\x21\xad\x2e\xfe\x81\x29\xc4\xa7\x67\x20\x7d\x6e\x8d\x33\
\xb5\xe7\xb2\x91\x2e\x53\x18\xc8\xb3\x19\x48\x8f\x81\x31\x06\x33\
\x0d\x69\xd1\xa5\x84\x01\xc5\x87\x21\x0d\x9a\x6e\x66\x60\xa5\xa7\
\xc0\xbf\x59\xb4\xb0\x06\xde\xe5\x1d\xa2\x8d\x21\xf0\xed\x19\x5a\
\xf9\x32\x0a\xbf\x7a\xc4\x69\xe7\x4a\xf8\xf5\x32\x2d\x15\x45\xe0\
\x53\x93\x4a\xda\x3a\x17\x3e\x8d\xa4\xb5\xbb\xe1\xd3\x53\xb4\xb6\
\x19\x1e\xd5\xdb\x4f\x7b\x5d\xe1\x4f\x4f\x3a\x28\x84\x3f\xa3\xe9\
\x60\x36\xfc\xf9\x23\x1d\x3c\x07\x7f\xee\xa1\x83\x77\xe0\xcf\x93\
\x74\xb0\x05\xfe\xbc\x4e\x07\x65\xf0\xe7\x55\x3a\xd8\x0f\x7f\x16\
\xd3\xc1\x46\xf8\x73\x07\x1d\xac\x86\x3f\x57\xd3\xc1\x13\xf0\x67\
\x18\x59\xb7\x67\xa3\xae\x74\x30\x01\x1e\x6d\xa5\xb5\x58\x0b\x95\
\x91\x58\xf6\x0e\x7c\xea\x4b\x6b\x53\xe0\x53\x74\x2f\x6d\xe5\xc1\
\xab\x07\x69\xe9\x6d\xf8\xd5\xea\x20\xed\xf4\x84\x67\x7f\xa1\x95\
\x97\xe0\x5b\xa3\x9d\xb4\x10\xeb\x0a\xef\xae\xa2\x85\xc5\xf0\x2f\
\x63\x39\x03\xfb\xaa\x29\x3c\xc8\xec\x7e\xf1\x98\xf1\x7d\x5b\x27\
\x5e\x84\x0d\x0c\x68\x5f\x27\x84\x16\x1d\xb5\xe2\x00\x6b\x6c\xfd\
\x7b\x17\x1c\x76\xc2\x2e\x06\x52\xdd\x1f\x61\x35\xbb\xb1\x88\x09\
\xb1\x05\xb9\xa8\xd1\xeb\x10\x83\xb8\x16\x21\x75\x7a\xb0\x94\x47\
\xdb\xdb\x0f\x35\xce\x2b\x66\x4a\x95\x13\x10\x4e\x9f\x97\xe3\x86\
\xaf\x3a\x16\x35\xda\xae\x63\x0a\xdf\xf7\x42\x28\xa7\xad\xa4\x51\
\x7c\x00\x6a\xe4\x2c\x61\x52\x1b\x4e\x40\x18\x79\x4f\xc5\x29\xd8\
\xdd\x06\x87\x5d\x56\x44\x51\xe9\x8c\x86\x08\xe1\xb8\xd9\x95\x94\
\x2d\xc5\x8f\xb2\x6f\x2a\xa1\x51\xf5\xc3\xc7\x23\x84\xac\x69\x07\
\x98\x4c\xbc\x73\x62\xc9\x7e\xe6\x4e\xc3\x77\xbf\xe4\x24\x84\xd1\
\xfb\x2b\xa6\x30\x17\x09\x91\x73\xee\xdc\xc8\x5a\x8a\x1f\xbe\x28\
\x1b\x61\x34\x99\x1b\xb7\x5e\x7c\x6c\x3f\x68\xc2\x6d\x73\x97\xbf\
\xb2\x70\xc6\xa4\x21\xa7\x47\x10\xce\xe0\x6f\x99\xda\x76\xa4\x4b\
\xf3\x67\x19\x44\xbc\x01\xd2\x63\xdc\x1e\x06\xd3\x11\xe9\xd0\x6c\
\x05\x83\xea\x8f\x34\x38\x79\x0b\x03\x1b\x0c\xff\x46\x97\x32\xb8\
\x0b\xe1\x5b\xf4\x3e\xda\xc8\x87\x67\xcd\xff\xc3\x3a\x0d\xe8\x51\
\x44\x3b\x23\xe0\x55\x41\x45\x9d\x5e\x6f\x67\xce\xab\xdb\xcb\xcd\
\xc8\x13\xb4\xf7\x67\x8f\xc7\x7f\x9c\x0e\xee\xf3\x77\xfc\xc7\xe8\
\x62\xa1\xc2\xf1\x55\x56\x9f\x23\x8f\xd2\xcd\x4a\x4f\xc7\x7f\x84\
\x8e\xd6\xfa\x39\xfe\x62\xba\xfa\x4e\xe7\xf8\xb2\x58\x3d\x84\x96\
\xb1\x90\x21\xb4\x43\x68\xb7\x30\x8c\x5e\x08\x2b\x3f\xc6\x30\x46\
\x23\xa4\x0e\xfb\x19\xca\x8d\x08\xa7\xf1\xe7\x0c\xe7\x7e\x84\x92\
\xb1\x8c\x21\xbd\x88\x50\x26\x33\xac\x8f\x11\x46\x97\x72\x86\xb5\
\x0f\x21\x44\xdf\x67\x78\xc7\xc3\xdd\x6d\xf4\xe0\x02\x38\x3b\xad\
\x92\x1e\x5c\x0f\x57\x0d\x3e\xa3\x0f\x0f\xc3\xd5\x54\x7a\xf1\x1e\
\x1c\x1d\x57\x42\x2f\x4a\xe0\xe8\x01\x7a\xd2\x06\x4e\x3a\x57\xd1\
\x93\x81\x70\xf2\x22\x7d\x29\x84\x8b\x3e\xf4\x66\x31\x5c\xbc\x47\
\x6f\x3e\x80\x83\x73\xe8\xcf\xc1\x08\xec\x2d\xa1\x47\xa7\xc0\x5a\
\xbb\x6a\x7a\x74\xb5\xce\x2d\x38\xb2\x25\xb0\xd5\x68\x3f\x7d\xda\
\x01\x5b\xd7\xd0\xaf\xf6\xb0\xf4\x29\xfd\x1a\x0f\x3b\x1d\xe8\xd9\
\x22\xd8\xb9\x91\xee\xca\x7d\xdc\xa7\xb8\x96\xce\x3e\x58\x4e\x93\
\x56\xb0\xd1\x3a\x4e\x67\x93\xaf\xa5\xc9\x08\xad\xdf\x81\x58\xab\
\xae\x34\x99\x03\x1b\x6f\xd0\xd9\x2a\xa0\x98\x06\xeb\x61\x21\xb7\
\x9a\xce\x2e\x17\x6e\xe7\x8e\x35\x45\x70\x97\xd3\x59\xc5\x31\xc0\
\x95\x34\x19\xa4\x73\x22\x5c\x0a\xa0\xbd\xb0\x7d\x17\xdc\x66\x3a\
\x1b\x06\x00\xdb\x68\x50\x9c\x89\xa0\x1a\xc7\xe9\x6a\x7f\x03\x00\
\x78\x84\x26\x7d\x11\xd4\x79\x61\x3f\x71\x7f\x4f\x93\x07\x11\xd4\
\x24\x3a\xfb\x0d\x00\xa0\x35\x4d\x76\x46\x10\xd0\x43\x74\xb5\x3d\
\x82\x1a\x9b\x68\xd2\x07\x01\x7d\xc0\x04\xb7\x95\xf9\x79\x34\x79\
\x00\xc1\x44\xcb\xe9\xea\x4c\x1c\x36\x22\xd4\x6b\x70\x12\x5d\x6d\
\xc2\x8f\x9a\x1e\xa2\xc9\xaf\x11\xc8\x28\x0a\x2c\x36\x67\x5e\x08\
\xb3\x5e\x77\x1d\x05\x16\xdb\xd4\x97\xd0\x64\x47\x04\x41\x4c\xf7\
\x70\x01\xd6\xb0\x94\x26\xbd\x11\xc4\x2c\x3a\x9a\x8c\x84\x25\x21\
\x86\x82\x47\x9d\x47\x11\x24\x0c\x4d\xfa\x39\x91\xdc\x0b\xee\xa3\
\x48\x42\xd6\x01\xf7\x95\xfb\xb7\xbc\x8c\xfe\x8f\xb9\x6f\xe1\x7d\
\xea\x3c\x8a\xd4\x96\x4f\x93\xb2\x66\x48\xad\xc8\x79\x14\xa9\xad\
\xde\x9e\x24\x6f\xd4\xe4\x0e\xd2\xc9\xd0\x40\xa7\xb4\x4d\x19\x48\
\xa9\xca\x7d\x14\xa9\xed\x7c\xe7\x75\xe3\xbd\x7e\x2e\xfe\x32\x77\
\xd2\xe4\x5f\x48\xe9\x1b\xba\x38\x1f\x3f\x37\x87\x26\xd5\x6d\x90\
\xca\x67\x6e\xa3\x48\xd0\x27\xcd\x66\xa4\x67\x79\x6e\xa6\x61\xb3\
\x69\x23\x4d\xf6\x36\x46\x0a\xff\x76\x1d\x45\x7e\xae\x80\x46\x53\
\x91\xc2\x52\xda\xfb\x0a\x06\x59\xc5\x34\xd9\x95\x83\xe4\x1e\xa1\
\xbd\xe9\x36\xfb\x3d\xd7\x23\xb9\x39\xb4\xd7\x01\x26\xc7\x96\xd2\
\xe4\xdb\x06\x48\xea\x6f\x0e\xa3\x88\xdd\x86\xc3\x1f\x90\xd4\xd5\
\xfe\x76\xa5\xda\xc7\x68\xb2\x35\x8a\x64\xfa\xb9\x8c\x22\x82\x65\
\x34\xba\x02\xc9\xb4\xa4\xad\xd7\x20\x39\x97\x46\x3b\x1a\xf9\x3d\
\x19\x8c\x87\x68\x0d\x8d\xee\x40\x32\xef\xd8\x8f\x22\xa2\xa1\x34\
\x2a\x6f\x87\x24\x16\xd0\xce\xf3\x90\x45\xfe\x47\xa3\x67\x90\xc4\
\xf5\xf6\xa3\x88\x6c\x92\xc3\x43\xee\xfd\xed\x47\x11\x59\xce\x1e\
\x1a\xad\xcd\x80\xa8\xb5\xd7\x9b\xd5\xfe\x4a\xb3\x31\x90\x95\xd8\
\x8f\x22\xb2\x16\xa5\x34\x2a\xce\x85\xe8\x75\x87\x51\x44\x76\x3b\
\xcd\x9e\x85\xe8\x66\x5a\x98\xe9\xfc\xb4\xd7\x70\x48\x7a\x58\x8d\
\x22\xce\x4f\x7b\x7d\xdf\x1c\x82\xcc\x7d\x0e\xa3\x88\x2c\xba\x51\
\xbc\x96\x91\x2c\x73\x19\x45\x64\x83\x29\x18\x05\xc1\x44\xa7\x51\
\x44\xf6\x26\xcd\x76\xb7\x80\x59\x47\x06\xf5\x3e\x82\xe8\x1e\xa7\
\xd9\xe3\x10\x7c\x63\x3b\x8a\x38\xce\x99\xb1\xce\x30\x5b\xc8\x60\
\x62\x2d\x11\x48\xee\x6e\x9a\x3d\x0d\xb3\x4b\xdc\x46\x11\xd9\x65\
\x34\xab\x12\x4e\x24\xc7\x55\xfb\xde\x10\x7d\xdd\x72\x63\xff\x15\
\x06\x51\xde\x04\x41\x75\xac\xb0\x3b\x27\x8d\x74\x1c\x45\x64\xd3\
\x68\x34\x09\x66\x59\xfb\xdc\x46\x11\x59\xfd\x8d\x76\x4f\xa0\xcc\
\x77\x1f\x45\xac\xde\xd8\x5d\xc5\x81\xda\xf3\x7d\xf3\xd1\xc2\x83\
\x34\x28\xae\x0f\xc9\x26\xa6\xd4\x0f\x81\x9d\xb5\xde\xfa\xe9\x87\
\x5b\x99\xca\x77\x11\x04\x94\x71\x5f\x4c\x98\xe8\x5b\x40\xd4\x2e\
\x6e\x3b\x8a\xc8\x66\x52\x50\x3d\x04\x21\x06\xb3\x33\x10\x50\x21\
\x45\x15\xfd\x20\x1a\x67\x37\x8a\xc8\x2e\x8d\x53\x76\x40\xfe\x36\
\xb2\x77\xd9\x8c\x22\xb2\xb3\x2b\x99\xcc\x97\xf5\x1d\xdf\x86\xf1\
\x0e\x08\xe6\x35\x26\x77\x2b\x24\xcd\x7e\x60\x12\xe5\xf5\x11\xc8\
\x59\x3c\x4a\xd5\xfa\xa5\x1b\xe3\xac\xad\x2c\x0f\x92\x7b\x99\xcc\
\x79\x08\xe4\x25\xd6\xf2\x56\xcf\x6c\x00\x8d\xfb\x6f\x08\x76\x8b\
\x4f\xeb\xca\xf0\x77\xed\x9f\xca\x84\xf2\x29\x11\x1c\x56\x7f\x46\
\x15\x8f\x28\xc9\x82\x64\x11\x93\x58\x81\x20\xe6\x09\x27\x9e\x02\
\x26\x0c\x83\xa4\x4b\x9c\xb2\x7d\x19\x08\xe0\x6d\xe9\x47\xbd\x32\
\xc8\xf5\x01\x96\x33\x89\x6e\x08\x60\x67\x62\x61\xa8\x09\x6a\x6b\
\x9b\xd8\xa2\xde\x06\xd1\xd9\x4c\x62\x22\x52\x6b\xc2\x23\x9e\xc4\
\xd1\xde\x4d\x7c\x20\x67\x5a\x5c\x52\x08\x13\xad\xe4\x0c\x79\xbb\
\xe8\x9f\x3c\xa2\x2d\x44\x83\x28\x2b\x42\x6a\xa3\xe4\x3d\xc3\xb1\
\x81\x7e\xa3\x1b\x54\x52\x76\x02\x52\x1a\xc8\x9f\xc4\x1a\xe1\x68\
\x03\x78\xc4\x00\xc8\x3e\xa2\x6c\x0c\x52\x6a\xce\x9f\x7c\x01\xb7\
\x80\xf9\x94\xdd\x6b\x73\x51\x36\xd6\x31\x60\x02\x65\xaf\x22\xb5\
\xa6\x9f\xb3\xc6\x62\x38\x06\x9c\x1a\xf2\x5d\x88\xac\x3b\xb7\xb1\
\xfa\xb3\xd1\x70\x0d\x88\x96\x51\x76\x0c\x02\xc9\x6d\x08\x38\x07\
\x60\x8d\x9f\xbf\xe9\xe9\x1e\x30\x9b\xb2\x02\x8d\x80\xb1\x94\xcd\
\xd2\x08\xe8\x42\xd9\x6b\x1a\x01\x91\x03\x14\x6d\x57\x08\x48\x7e\
\x3e\xfa\x85\x46\xc0\x7c\xca\x7a\x69\x04\xdc\x40\xd9\x38\x8d\x80\
\x21\x94\xdd\xa2\x11\x70\x32\x65\x73\x35\x02\x1a\x52\xf6\x92\x46\
\x00\xb6\x53\xb4\x5e\x25\xe0\x2d\x8a\x76\xab\x04\x2c\xa2\x2c\x4b\
\x23\xe0\x66\xca\xda\x6b\x04\x8c\xa4\xac\x8f\x46\x40\x2f\xca\xc6\
\x68\x04\x74\xa2\x6c\xaa\x46\x40\x33\xca\xee\xd7\x08\x40\x25\x45\
\xcb\x55\x02\xbe\xa3\xe8\x43\x95\x80\x75\x14\x6d\x53\x09\x78\x95\
\xa2\x3d\x2a\x01\x8f\x51\x54\xa1\x12\x70\x0f\x65\x11\x8d\x80\x3f\
\x51\xd6\x48\x23\x60\x1c\x65\x2d\x34\x02\x06\x52\xf6\x4b\x8d\x80\
\x5e\x94\x75\xd5\x08\x38\x9b\xb2\x1e\x1a\x01\xa7\x53\xd6\x47\x23\
\xa0\x1b\x65\x83\x34\x02\x3a\x53\x36\x5c\x23\xe0\x44\xca\x2e\xd3\
\x08\x68\x4b\xd9\x44\x8d\x80\x56\x94\x15\x6a\x04\xe4\x52\x36\x5d\
\x23\xe0\x18\xca\xee\xd2\x08\xc8\xa1\xec\x6e\x8d\x80\x7a\x94\xdd\
\xae\x11\x00\xca\xa6\xaa\x04\x54\x51\x74\x9d\x4a\x40\x35\x45\x05\
\x75\xfd\x26\xbc\x54\x23\xa0\x25\x65\x43\x35\x02\x3a\x50\x36\xa0\
\xae\xe7\x81\xde\x1a\x01\x7d\x28\xeb\xae\x11\x70\x11\x65\x27\x69\
\x04\x8c\xae\xeb\xa9\xf8\x2a\xca\x5a\x6a\x04\xdc\x40\x59\x13\x8d\
\x80\xdb\x29\x8b\x6a\x04\xcc\xa2\xa8\x0a\x1a\x01\x0b\x29\x2a\x51\
\x09\x58\x42\xd1\x8e\xba\x5e\x21\xf9\x5a\x25\x60\x0d\x45\xeb\xea\
\x7a\x95\x6c\x85\x46\x40\x4e\x9c\xa2\xf9\x1a\x01\x5d\x29\x9b\xa6\
\x11\x70\x31\x65\x57\x68\x04\x14\x52\x36\x40\x23\x60\x1e\x65\x27\
\x6b\x04\xac\xa2\xec\x58\x8d\x80\xad\x14\x95\x43\x21\xa0\x7e\x8c\
\xa2\x2d\x1a\x01\x9d\x28\x7b\x43\x23\x60\x10\x65\xf3\x35\x02\xae\
\xa3\x6c\xb2\x46\xc0\x6c\xca\xf2\x35\x02\x5e\xa1\x2c\x4f\x23\x60\
\x13\x45\x15\x11\x85\x80\xcc\x4a\x8a\x3e\x85\x42\x40\x47\xca\x9e\
\xd7\x08\x18\x4f\xd9\x0c\x8d\x80\x45\x94\x8d\xd5\x08\xd8\x4c\x59\
\x77\x85\x80\x56\x94\x55\x65\x2b\x04\x8c\xa0\xec\x63\x28\x04\xdc\
\x4f\xd9\x02\x8d\x80\x4f\x28\x2b\x50\x08\x68\x1a\xa3\xec\x14\x85\
\x80\x0b\x29\x2b\x8b\x2a\x04\xdc\x45\xd9\xbb\x50\x08\x58\x43\xd9\
\x2c\x85\x80\xec\x4a\xca\x46\x2b\x04\xf4\x65\x12\x1d\x14\x02\xa6\
\x51\xb6\x37\x43\x21\x60\x15\x65\xcb\x90\xfe\x80\xe8\x41\xca\x26\
\x2a\x04\x9c\xc5\x24\x3a\x2a\x04\x5c\x43\xd9\x37\x50\x08\x58\x4c\
\xd9\x42\x8d\x80\x8f\x28\xbb\x44\x23\x60\x03\x45\xf1\xe6\x1a\x01\
\x1f\x52\xf4\x09\x34\x02\x56\x51\x74\xaf\x4a\xc0\x3f\x28\x1a\xa0\
\x12\x30\x9c\x92\xb2\x86\x2a\x01\x8d\x0f\x50\x30\x07\x2a\x01\x98\
\x43\xb3\x43\x6d\x94\x02\xf2\xca\x68\xf4\x10\x94\x02\x30\x99\x26\
\x15\x79\x6a\x01\x91\x95\xa6\x17\x20\x1f\x6a\x01\xc8\x59\xcd\x9f\
\xab\x1c\x0c\xc5\x00\xe4\x3c\xc9\xa3\x55\x0c\x85\x6a\x00\x70\x51\
\x11\x13\xaa\x16\xb4\x81\x76\x00\xea\x8d\x7c\x33\xc6\x1a\xbb\x1e\
\x3d\x11\xbe\x5c\x60\xf5\xa7\x3b\x1b\x74\x19\x3c\xa5\xa0\xe7\xb1\
\xf0\xa8\xb5\xf0\xec\xa8\x9e\x6d\xfc\xd1\x4e\xd4\x8d\x51\xc2\x33\
\x02\x7a\x66\x0a\xff\x06\x54\x4f\xfe\x8a\xed\xdb\x57\xe4\xe3\x68\
\xff\x07\x48\x4b\x59\x05\xa3\x1c\x00\x83\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x24\x82\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x02\x00\x00\x00\x01\xf6\x08\x03\x00\x00\x00\x31\x85\xc4\x51\
\x00\x00\x03\x00\x50\x4c\x54\x45\xff\xff\xff\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x23\xb7\xe1\x00\x00\x00\xff\
\x74\x52\x4e\x53\x00\x80\x48\x01\xfe\xfd\x03\x02\x05\x0f\x0d\xfb\
\x10\x04\xfa\x0a\xf0\x06\xfc\xf9\x0b\x0c\x11\xf7\x0e\x07\xf3\xea\
\xf5\xf8\xc4\x14\x37\x12\xf1\xf6\x18\xcc\x09\xe6\x1d\xe9\x26\xcd\
\x08\x16\xe7\xec\xe2\xbe\x1f\x2c\x97\x1e\x27\x29\xee\x13\x1c\x19\
\xc5\xef\x1b\x15\xc6\xe0\xce\xde\xe1\x30\xe3\x2d\x36\x21\x31\xc9\
\xf2\xd4\xd0\x1a\xcb\x34\xdc\xed\xd7\xeb\x87\x99\x51\x23\xe8\xc3\
\xf4\xca\xaa\xa4\xdf\xd2\x95\x38\x3a\x22\xc7\x9e\x20\xd3\x88\x28\
\x5f\xac\xd6\x2e\x17\xa5\xc0\xd9\x4e\xc8\x43\xc1\x45\x2f\xdd\xb7\
\xe5\xb9\x44\x62\x55\xe4\x63\x3f\x8c\x3b\x6a\x41\x24\x8b\xcf\x91\
\xb2\x25\x6f\x35\x72\xa9\xb3\x90\xa6\x4b\xc2\xd1\xae\x3e\xa7\x7b\
\x9b\x75\x7a\x92\xbc\xbf\x2b\xb4\xda\x68\xb0\x82\x32\x9c\xba\x94\
\x7c\x96\x3c\x6e\x2a\xb6\x33\xb1\x98\xa8\xaf\x5c\x57\xbd\xa0\x8f\
\x5e\x69\x40\xa1\x81\x7d\x76\x39\x49\x4a\xa3\x53\x5b\xdb\xb5\x3d\
\x7f\x71\x54\x73\x83\x8d\x64\x9f\xd5\x61\xad\x47\x56\x89\x4c\xab\
\x78\xa2\x42\x4d\x93\x86\x77\x85\x8a\x58\x60\x6b\x67\x6c\x4f\x7e\
\x9a\x5a\x59\x8e\x46\x79\x65\x70\x9d\xbb\x5d\x52\xb8\xd8\x6d\x74\
\x84\x50\x66\x14\xa2\xf8\xaf\x00\x00\x20\x32\x49\x44\x41\x54\x78\
\x5e\xec\xd2\x59\x6b\x5d\x55\x1c\xc6\xe1\x17\x76\x72\xf6\x31\xc9\
\x31\x73\xaa\xa8\x19\x30\x0e\x2d\x27\x1d\x62\x9b\x7a\xaa\x36\x5a\
\x2f\x82\x41\x2b\xb4\x06\x5b\xa7\x90\x82\x74\xd0\xa4\x69\xa1\x5a\
\xb1\x17\x8e\x58\xd4\x20\x41\x89\x55\x8b\xa9\xa0\xc5\x81\x80\x53\
\x45\x0a\x52\x10\xa9\x0a\x4a\xeb\x14\x11\x11\x25\x7a\x23\xc4\x1b\
\xe9\x95\x14\x5e\x6f\x0a\xfb\xa6\x8d\x98\xfd\x5f\x87\xbd\xb2\xde\
\xe7\x2b\xfc\x7e\xf0\x4d\xfb\xf2\xc3\x7f\xf6\x20\x54\xd2\x5e\x47\
\x92\xcb\xb7\xfe\xd5\x85\x30\xa9\xff\x19\x15\xeb\x9f\xf8\xb1\x16\
\x01\x52\xff\x44\xe3\x8a\x57\x9f\x8b\x11\x20\xf5\x4f\x5c\x3d\xfc\
\xc6\x66\x84\x48\xfd\x13\x37\xce\x3e\x52\x42\x80\xd4\x3f\x51\x79\
\xd9\x8e\x07\x8a\x08\x90\xfa\x27\x2e\x7d\xfb\xd3\xfe\x08\x01\x52\
\xff\xc4\xc0\x91\xc7\xba\x11\x22\xf5\x4f\xbc\x3e\xb5\xa9\x06\x01\
\x52\xff\x44\xc3\x9e\xd3\x8f\xd7\x63\xc1\x90\xe3\xfc\xff\xee\x7a\
\xef\xeb\x3e\x2c\x0c\x92\xff\x80\xf3\xb2\x74\xf0\xfe\xb5\xf0\x9f\
\x20\x7f\x98\xf3\x35\x32\xf9\x6b\x15\x02\xa6\x03\xc8\xea\xd6\xaf\
\x9e\x2c\xc0\x6b\x92\xff\x88\xa9\xdc\xfc\xec\x3f\xab\xe1\x31\xc9\
\x1f\x60\x5a\x8f\x1e\xfd\x62\x14\x21\xd2\x01\x89\xa1\x6f\x3e\x6b\
\x82\x97\x24\x3f\x4d\x13\x8d\xbb\x5f\x69\xcf\xc1\x3f\x52\x98\xa6\
\x95\xeb\x0e\x7c\x32\x06\xdf\x48\x61\x98\x86\x6e\x38\xf8\x71\x1b\
\x02\xa6\x03\xc8\x8a\x75\x3f\xed\x2d\xc2\x1f\x52\xf8\x99\xd6\x16\
\x1d\x9a\x79\x3f\x86\x27\xa4\x70\x0f\x1d\x58\x36\xfe\xf7\x36\x84\
\x48\x07\x24\x36\xac\xfc\xbc\x05\xd9\x27\x85\x71\xba\x52\x79\xf7\
\xef\x4f\xd5\x23\x40\x3a\x20\x71\xf1\xc3\x27\x76\x46\xc8\x32\xc9\
\x1d\xa1\x5b\x8b\x77\xfd\xb0\x06\xd9\x25\xb9\x97\xe9\xdc\x10\x82\
\xa6\x03\x2e\x41\x86\x49\xee\x05\x3a\xd6\x88\xa0\xe9\x80\xf3\x91\
\x69\x92\xdb\x15\xf6\x00\x92\x1b\xa4\x4b\xd5\x08\x9a\x0e\x68\x40\
\xd6\x49\x7c\x9c\xee\x54\xc2\x03\x3a\x20\xec\x01\x24\xfe\x85\xae\
\x54\xc0\x03\x12\x1f\xa3\x2b\x08\x9b\x0e\x80\x17\x24\x3e\xaa\x01\
\x5c\xd0\x01\x11\xfc\x20\xf1\x77\x61\x0f\x20\xf1\x56\x3a\x10\x23\
\x68\x3a\x20\x07\x6f\x48\x74\x8a\xe6\x0a\xf0\x87\x44\x6f\x69\x00\
\x5b\x3a\x20\x0f\x9f\x48\x74\x90\xb6\xea\xe1\x19\x1d\x10\xf6\x00\
\x12\xcd\xd2\xd2\x85\xf0\x8c\x44\xb7\xd3\x50\x11\xfe\xd1\x01\x61\
\x0f\x20\xd1\x87\x34\x73\x39\xfc\x23\xd1\x4a\x5a\xa9\x85\x97\x74\
\x40\xd8\x03\x48\x34\x45\x1b\x4d\xf0\x92\x44\x5b\x68\xa2\x19\x9e\
\x12\x9b\x03\xaa\xe0\x2b\x79\x27\xf0\x01\xe4\x37\xa6\x77\x01\x82\
\xa6\x03\xce\x83\xc7\x64\x52\x03\xa4\xa3\x03\x6a\xe0\x35\x79\x86\
\xe9\xb4\xc0\x6f\xf2\x7c\xd6\x06\x90\xfd\x83\xb5\xfe\x1c\xd0\x01\
\x63\xb2\x64\x1f\x57\x94\xf3\x80\xce\x3a\xa6\x50\x82\x2d\xe9\xd9\
\x47\x96\xf3\x80\xce\xdb\x98\x46\x1b\x4c\xc9\x7d\x57\x90\xe5\x3c\
\x60\x74\x84\x19\x1a\x40\x56\x2f\x3d\x13\xe0\xb5\x5a\x2f\xfa\xb3\
\x17\x86\xe4\xdd\x3b\xc8\xe4\x00\x0f\xfa\xb3\x0b\x76\xe4\xaa\xc5\
\x64\x39\x0f\x18\xfd\x9e\x19\x1a\x40\xfa\x2e\x62\x82\xdc\xdd\x04\
\xb7\xba\xd2\xf7\xe7\x28\xac\xc8\xce\x01\xd2\xf6\x00\xf7\xfd\xd9\
\x09\x23\xf2\xd0\x32\xb2\x9c\x07\x74\xdd\xc2\x0c\x0d\x20\xfd\x1b\
\x49\xeb\x03\xdc\xf7\xe7\x35\xb0\x21\xad\x3c\x9b\x07\x5d\x1d\xd0\
\x6b\xd3\x9f\xb7\xc2\x86\xfc\xc1\x72\x1e\xd0\xbb\x9d\x36\xd6\xc2\
\x86\xec\xa5\xd9\x01\xf6\xfd\xdd\x0f\x20\xc5\x45\x3c\xbb\x89\xa6\
\x0c\xf7\xe7\x1a\x18\x91\x43\x3c\x87\x89\x66\xd8\x6a\x1b\xa2\x99\
\x2b\x61\x44\x66\x68\x76\x80\x7d\x7f\xf7\x03\xc8\x9b\x3c\xa7\x3b\
\x9b\xb3\xda\x9f\xab\x60\x44\xa2\x81\xb2\x1c\xd0\xf6\x22\x2d\x75\
\xc3\x8a\x8c\xd3\xec\x00\xfb\xfe\xee\x07\x90\x6f\xe9\xfe\x80\x92\
\x71\x7f\x6e\x83\x15\xe9\xe6\x5c\x5a\x2d\x0e\x28\xad\xa7\xb1\x9b\
\x60\x46\xbe\x9c\xfb\x80\x2a\x8b\xfe\x19\x1e\x40\xa6\xe8\xf6\x80\
\xd2\xd3\x34\x77\x2f\xcc\xc8\x26\xce\xed\xda\x74\x07\x74\x38\xe8\
\xcf\xcd\x30\x23\x35\x0d\xc6\x07\xb8\xef\xcf\xfd\xb0\x23\x7b\xe8\
\xee\x80\x8e\x75\xcc\xfa\x00\x72\x92\xb6\x07\xb8\xef\xcf\x31\xd8\
\x91\x97\xf8\x9f\xae\x9f\xdf\x01\x2d\x8e\xfa\x73\x09\xec\x48\xfe\
\x5f\x76\xee\xfc\xb9\xca\xf2\x0e\xe3\xf0\xad\x89\x27\x39\x64\x21\
\x89\x24\x84\x24\x02\x49\x08\x49\x08\xeb\x92\xb0\x46\x93\x90\x18\
\x08\x61\x0d\x68\x81\x04\x03\x04\x64\x55\x0a\x86\x5d\x54\x04\x8d\
\x55\x5c\xb0\x28\x2a\x75\x41\xc6\x05\x51\xa9\x15\xc4\x5a\xeb\x82\
\x62\x5d\xc6\xd1\xaa\xad\x1b\x75\x74\x0a\x63\x8b\xad\x4e\x15\x14\
\xa7\xcc\xdd\xf1\x87\x8e\xf3\x9e\x2d\xe4\x9c\xe7\x7d\xce\xfb\x9c\
\xf3\xbd\xfe\x81\xf3\xc3\xfd\x99\x39\xcb\x7b\xe6\xdb\xc8\x76\x55\
\x64\x06\xb3\x7f\x09\x4d\x08\x40\xbc\x41\x05\x05\xe8\xdc\x9f\x65\
\x50\x48\x7c\xcb\x33\xb0\x20\xd3\x39\xfb\x73\x09\x14\x12\x43\x68\
\x43\x01\x19\x05\x34\x25\x00\x51\xa8\xbe\x80\xc4\x02\xda\x68\x18\
\x54\x12\x47\xa8\xba\x80\xc4\xc9\xb4\xd3\x3e\xa8\x24\x4e\xf2\xcc\
\xb4\x66\x3a\x63\x7f\xee\x82\x4a\x62\x14\xcf\x50\xab\x5b\xc3\xfe\
\xfa\x03\x10\x13\x94\x16\x90\xd8\x44\x9b\x0d\x81\x52\xe2\x39\x9e\
\xa9\xf1\xed\x17\xe0\x6e\xa2\xdd\x6a\xa1\x94\xd8\x4e\x05\x05\x58\
\xf6\x37\x2a\x00\x91\x9e\xac\xac\x00\xf7\x78\xda\xef\x72\xa8\x25\
\x56\x53\x41\x01\xda\xf6\xe7\x5a\xa8\x25\x5e\x62\x07\x34\xf9\x2f\
\xc0\xdd\x4a\x13\x03\x10\xf3\xd8\x11\x4d\x89\xf0\x2d\xb3\x95\x5a\
\xd4\x40\x2d\xe1\x4a\x51\x50\x80\xb6\xfd\x59\x07\xc5\xc4\x0e\x76\
\xc8\x64\x5f\x05\x64\x2e\xa0\x26\x8f\x40\x31\x71\x1d\x83\x2b\xc0\
\xba\xbf\xb1\x01\x88\xad\x0c\xb5\x80\xcc\x0a\x6a\xf3\x14\x54\x13\
\x95\x21\x16\xd0\xa9\x82\xfa\x4c\x81\x6a\xe2\x51\x76\x54\x41\xa2\
\x75\x7f\xa3\x03\x10\x07\xd9\x61\x05\x19\x3f\xef\x3f\x9a\x3a\xdd\
\x02\xd5\x44\x8f\xd8\x60\x0b\xd0\xbf\x3f\xc7\x42\x39\x51\xc5\x60\
\x0a\xb0\xec\x6f\x72\x00\xe2\x23\x06\xa1\x24\x03\x40\x7a\x77\x6a\
\xd6\x13\xca\x89\x8d\x0c\xb2\x00\xfd\xfb\x73\x1e\xf4\x91\x9b\x91\
\x81\x95\x94\xce\x62\x44\x04\x20\x4e\x30\x28\xe7\x52\xbf\x43\x50\
\x4f\xbc\x4c\x63\xb4\x40\x3d\x51\x47\x63\x6c\x81\x06\x72\x33\x32\
\xca\x02\x10\xfb\x69\x8a\x27\x60\x03\xb1\x89\xa6\x78\x13\x36\x10\
\x83\xa3\x3c\x00\xb1\x9b\x86\xb8\x0b\x76\x10\x73\x69\x88\x6b\x61\
\x07\x71\x9a\x86\x38\x0a\x3b\x08\x77\x1a\xcd\x90\xb5\x05\x76\x10\
\x0b\x68\x88\xfa\x16\xd8\x40\x3c\x4c\x53\xd4\x1f\x82\x7a\xe2\x2e\
\x1a\x23\x6f\x1e\x94\x13\xf1\x59\x06\x15\xd0\x13\xca\x89\x07\x69\
\x8e\xbc\xb1\x50\x4d\x7c\x4b\x83\xa4\x28\x2f\x40\xec\xa2\x49\x52\
\x6e\x81\x62\xa2\x90\x26\x29\x9f\x02\xb5\xc4\x0b\x34\x4a\xf9\x53\
\x50\x4a\x9c\xa4\x59\x86\x3f\x02\x95\xc4\x28\x1a\x66\x78\x1d\x54\
\x12\x8b\x68\x98\x86\x1a\x28\x24\x9e\x63\x54\x17\x20\x5e\xa5\x71\
\x06\xac\x85\x32\xa2\x4b\xb2\x81\x05\x5c\x0e\x65\xc4\x6a\x9a\x67\
\x7a\x2d\x54\x11\x7b\x69\xa0\xec\x21\x50\x44\xf4\x64\x54\x17\x20\
\x5c\xe5\x34\x51\xf1\x2e\xa8\x21\x76\xd0\x48\xc5\xfb\xa0\x84\x78\
\x9e\x66\x9a\x39\x0c\x2a\x88\x36\x1a\x6a\xe6\x12\xa8\x20\x2a\x69\
\xa8\xfe\x4a\x0a\x10\x3b\x69\xaa\xc2\x32\x84\x4e\x7c\x48\x63\x15\
\xf6\x41\xc8\xc4\x88\x58\x1a\x6b\xc3\x56\x84\x4c\x94\x30\xaa\x0b\
\x10\x8b\x69\xb0\x6b\xda\x10\x22\xb1\x8a\x26\xbb\x70\x06\x14\x90\
\x9b\x91\x51\x5c\x80\xd8\x43\xa3\x0d\x9c\x8f\x90\x88\xdf\xd0\x6c\
\xd5\xe3\x10\x0a\x51\x47\xc3\x55\x2f\x44\x08\x44\x4c\x36\x0d\x57\
\x19\x52\x01\xe2\x6e\x9a\xae\x79\x30\x82\x27\xae\xa2\xf1\x9a\x4b\
\x11\x34\x51\x4a\xf3\x8d\x09\xa1\x00\xb1\x92\xe6\x5b\x57\x84\x60\
\x89\xc7\x18\x01\xd6\x8d\x44\x90\xc4\x69\x46\x82\xa9\x0a\x0a\x90\
\x9b\x91\x26\x7b\xfd\x7c\x04\x47\x54\x50\x0a\x90\x9b\x91\x11\x60\
\xf7\x24\x04\x43\xbc\xc9\x08\xb1\x72\x14\x14\x90\x9b\x91\xe6\x3a\
\x15\x54\x01\x62\x1b\x23\xc5\xa9\x5e\xe8\x38\x71\x33\x23\xc6\xb4\
\x20\x0a\x10\xfb\x18\x39\x2e\x5b\x8e\x0e\x13\x1b\x18\x39\xde\xef\
\x87\x8e\x12\x6f\xad\xbe\x63\x56\xf7\xd1\x15\x0b\x5a\xc7\x37\x4d\
\x2e\x28\xa9\xfa\xd5\xc4\x3b\xe7\x2c\x9a\xf0\xfe\x65\xd3\x4e\xad\
\xdc\x3d\x75\xdd\x98\xe6\xca\xea\x81\x17\x5e\xb3\xa1\xb0\xff\xcc\
\xe2\xec\xe9\x03\x1a\x86\x97\xa7\xe4\xd5\x67\x35\xe6\xe4\x77\x3d\
\x37\x39\x21\xed\x9c\x58\x3a\xcc\x04\xdd\x05\x88\x98\x38\x57\x52\
\x7c\x6a\xb7\xbe\xbd\x3b\x77\x49\xef\x94\xe9\x4e\xcc\xc8\x1d\xd1\
\xa3\xdf\xf2\x5e\xa3\x26\x9d\x3f\xb2\xa8\x74\xf0\xc2\x71\xf3\x67\
\xb4\x6d\xed\x33\x54\x5f\x01\x17\xc0\x79\xc4\xc8\xa9\xd4\x65\x91\
\x13\x0b\x10\x45\xeb\xa8\xcb\x9c\x1e\x88\x6a\x52\xc0\x9d\x4e\x2c\
\x40\x94\x8e\xa1\x2e\x13\x47\xc0\x79\x44\x69\x73\x74\x17\x20\x06\
\xeb\x2b\xe0\x57\xb9\x70\x1e\xb1\xb0\x92\xba\x54\x45\x79\x01\x52\
\x40\x49\x06\x9c\x47\x8c\xab\xa6\x2e\x05\x4e\x2c\x40\x8c\x1b\x48\
\x5d\x26\x27\xc2\x79\xc4\xfc\x28\x2f\x40\xcc\xb8\x90\xba\x34\xb9\
\x11\xd5\xa4\x80\xf1\x4e\x2c\x40\xb4\x5d\x43\x5d\x5a\x33\xe1\x3c\
\x62\xeb\x06\xea\xb2\x20\xca\x0b\x90\x02\x2a\x3a\xc1\x79\x44\x9f\
\x42\xea\x32\xda\x89\x05\x88\x32\x7d\x05\x74\x4f\x87\xf3\x88\xb2\
\xfe\xd4\x65\x96\x13\x0b\x10\x4b\xf4\x15\x70\x87\x13\x0b\x10\xc3\
\x66\xea\x2b\xa0\x0b\xa2\x9a\x14\xb0\xda\x89\x05\x88\x7d\xc5\xd4\
\x65\x45\x67\x38\x8f\xd8\xa5\xaf\x80\x07\x9c\x58\x80\x18\x92\x4d\
\x5d\x06\x45\x79\x01\x52\xc0\xb3\xbd\xe1\x3c\xa2\x76\x3a\x75\xb9\
\xdf\x89\x05\x88\xcb\xf5\x15\x70\xa2\x2f\x9c\x47\xac\x1d\x40\x5d\
\xf6\x98\x5f\xc0\xa1\xfd\x05\x77\x4e\x9c\xfb\x71\x67\x18\x66\xc9\
\xad\x27\x26\x54\x55\xdc\x9a\x19\xe6\x02\xae\x34\xbc\x80\x7e\xdd\
\xf9\x13\xf2\xbc\x2b\x6f\xee\x03\x53\x74\xde\x3e\xb7\x99\x3f\x21\
\xeb\x9f\x87\x0f\x35\x0d\xd4\xe5\x87\x6e\x30\x58\xc6\x44\xcb\x7d\
\xd4\x1b\x6b\xe0\x7c\x5d\x36\xfd\xd0\x95\x3f\xe3\x97\xf0\xa1\x4e\
\x5f\x01\xeb\x4d\x2e\x60\x3d\x3d\xb4\x1e\xeb\x0d\x47\x1b\xf2\x58\
\x1e\xad\x62\xaf\x87\x0f\x8f\x0c\xa7\x2e\xbf\x35\xb7\x80\x32\x7a\
\x1b\x7e\xfb\x30\x38\x55\xb7\x67\xba\xd3\xdb\x0e\x84\xb9\x80\x37\
\x52\x61\xa8\xcd\xf4\x69\xc5\xd1\x24\x38\xd0\x8c\xcd\x03\xe8\x4b\
\xc2\x28\xf8\xf2\x54\x39\x75\x79\xd0\xd4\x02\x06\xd1\x8f\x95\xdf\
\xc3\x69\x7a\x3d\x9d\x40\x3f\xd6\xc0\xa7\x29\xfa\x0a\xd8\x66\x68\
\x01\x27\xe8\x57\xc5\x13\x70\x92\x4e\x0f\x37\xd2\xaf\x6b\xe1\xdb\
\x2d\x29\xd4\xe5\x26\x33\x0b\x58\xcf\x00\x1e\x1a\x06\xa7\x88\xff\
\x31\x9b\x01\xcc\x83\x1f\x63\xf5\x15\xf0\x50\x3c\x0c\xf4\x23\x03\
\x49\xbb\x78\x14\x9c\x20\xe6\xe0\x54\x06\x52\x98\x0a\x07\x14\x70\
\xc0\xc4\x02\xd2\xcb\x19\x50\xce\x3b\x9d\x10\x76\xd7\x4f\x66\x60\
\xf7\xc0\xbf\x9e\x79\xd4\x65\x87\x89\x05\xfc\x83\xed\x28\x3e\x88\
\xf0\xea\x75\x80\xed\x78\x3d\x17\x01\xcc\xd3\x57\xc0\x5f\x92\x22\
\xf2\xb2\xf7\xec\x1e\x08\xa3\x83\x03\xd8\x8e\x53\x93\x10\xd0\xa1\
\x7a\xea\xb2\xcc\xc4\x02\xae\x98\xc5\x76\x14\x7f\x8f\x70\xe9\x31\
\x9b\xed\x48\xb9\x71\x39\xda\xd1\xa2\xaf\x80\x4f\x92\x60\xa0\xda\
\xc5\x73\x18\xd8\xaf\x13\xc3\x14\x67\x31\x03\xca\x3a\xf0\x41\x67\
\x00\x0e\x2a\x60\x76\x12\x8c\x34\xf2\x3f\x07\xb2\x18\xc0\xd4\x5a\
\xe8\x17\xb7\x98\x81\x4c\x7b\x77\x55\x2a\xce\xcc\x96\x2c\xea\x72\
\x9b\x0b\x86\x4a\x5d\xf5\x75\x36\xfd\x6a\x3c\x0a\xdd\x72\xf7\xd0\
\xbf\x07\xae\x1b\x87\x0e\x78\x42\x5f\x01\xfb\x5d\x30\x56\xb7\xa5\
\x25\xf4\x6b\xb3\xe6\xb4\x6b\x9a\xe9\x4f\xfe\x57\xb5\xe8\xa0\x37\
\x1b\xa9\xcb\xdd\x2e\x18\xac\x65\x7f\x02\xfd\x78\xb6\x07\x34\x7a\
\x26\x9f\x7e\x54\x7f\x91\x8b\x8e\xbb\x4b\x5f\x01\xbf\x74\xc1\x64\
\x93\xde\x69\xa0\x6f\xbb\x4b\xa1\xcd\xbd\xf4\xa3\xfb\x6b\x2e\x04\
\xe5\x5a\x7d\x05\x5c\xe2\x82\xd1\x72\xe7\xa6\xd1\xa7\x81\x65\xd0\
\xe4\x06\xfa\x56\xfd\x36\x82\xf6\xc7\x1c\xea\x72\x38\x0e\x66\xab\
\x1d\x44\x9f\x8a\x6b\xa0\x43\xdc\x4e\xfa\x94\x7f\x56\x6f\xc0\x88\
\x02\x8e\xc4\xc1\x70\x6f\x57\xd2\x97\xf2\x16\xd8\x2f\x7e\x36\x7d\
\x5a\x36\x18\xa1\xb9\x5e\x5f\x01\x2f\x18\x5f\x40\xef\xe3\x39\xf4\
\x21\x67\x8d\xfd\xaf\xbc\x9e\xbe\x4c\x58\x85\x90\xfd\x2d\x9f\xba\
\xfc\x3e\x0e\xa6\x2b\xfa\xc4\x67\x01\x3d\x61\x2f\xd7\xef\xe8\x43\
\xde\x93\x49\x50\x60\x95\xbe\x02\xde\x8b\x83\xf1\x3e\xc8\xa1\xb7\
\xe9\x7d\x60\xab\xa1\xf4\xa1\x69\x3e\xd4\xd8\xa8\xaf\x80\xef\x62\
\x60\xbc\x7d\xef\xd3\xdb\x98\x5e\xb0\xd1\x3d\xf4\x16\xbb\x39\x1e\
\xaa\xac\xe9\x4a\x5d\x1e\x8d\x80\x02\x3a\x5f\x4a\x6f\x55\x9d\x60\
\x9b\xa5\xf4\x96\xfd\x0a\x14\xfa\x46\x5f\x01\x3b\x63\x60\xbe\xeb\
\xd2\xe8\x65\x4f\x3c\x6c\xf2\x4d\x32\xbd\x2c\x2a\x82\x52\x7f\xd5\
\x57\xc0\x57\x91\x50\xc0\x67\x39\xf4\x72\x29\xec\xb1\x2f\x8b\x5e\
\x06\xb9\xa1\xd8\xd9\xe7\x51\x97\x8b\x23\xa1\x80\x29\x33\xe9\xe5\
\x19\xd8\xa1\xef\x1c\x7a\xb9\x24\x15\xca\xbd\xa2\xaf\x80\xa1\x91\
\x50\xc0\xc2\x69\xf4\x54\xdf\x06\x1b\xcc\xa5\x97\xcf\x63\x60\x83\
\x57\xf5\x15\xf0\x78\x24\x14\xd0\x6f\x25\x3d\x15\xa4\x42\xb9\xd3\
\xf4\x72\x1c\xf6\xd8\x7e\x2e\x75\x99\x8b\x08\x50\x54\x49\x4f\xef\
\x42\xb5\x91\x0d\xf4\x74\x03\x60\x7e\x01\x8f\x21\x02\xb4\xf5\xa7\
\x87\xd8\x57\xa0\x56\xdc\x0a\x7a\x1a\x0a\xfb\x7c\xac\xaf\x80\xfb\
\x10\x01\x86\x34\xd0\x43\x76\x2f\x28\xf5\x12\x3d\x1d\x89\x81\x8d\
\x3e\xd3\x57\xc0\x8d\x88\x00\x63\xbb\xd2\xc3\x61\xa8\xd4\xc7\x6b\
\x8f\x6d\x2e\xd8\xea\x8a\x64\xea\x72\x3b\x22\xc0\x55\xf4\x10\xdb\
\x62\xeb\xa9\x92\xa9\x6e\xd8\xec\x9f\xfa\x0a\x78\x0e\x11\x60\x27\
\x3d\x14\xc4\x41\x99\x2b\xe8\x21\x7f\x2d\x6c\x77\x5a\x5f\x01\xef\
\xc2\x7c\xdd\x0a\xe8\xe1\xcf\x50\xa5\xef\x3a\x7a\x38\x06\x0d\x7e\
\xa1\xaf\x80\xcd\x30\xdf\xe0\x06\x5a\x65\xbb\x6d\xfb\x04\xf8\x15\
\xb4\xf8\x3e\x81\xba\xdc\x00\xf3\x9d\x1d\x6b\xd3\x5b\x5b\x69\x3e\
\xad\xaa\xba\x41\x8f\xd7\xf4\x15\xf0\x77\x98\xef\x3b\x5a\x25\x0c\
\x83\x12\xcb\x68\x95\x56\x07\x5d\xde\xd6\x57\xc0\x62\x18\x2f\x37\
\x9b\x56\xb7\x41\x85\xba\x70\x7e\x64\xba\x5a\x5f\x01\x17\xc1\x78\
\x27\x69\x95\xb6\x10\x0a\x1c\xa6\x55\x75\x17\x68\xf4\x61\x1a\x75\
\xf9\x1c\xc6\xbb\xdf\x86\x1f\xb9\xce\x4f\xa6\xd5\xc7\xd0\xea\xa0\
\xbe\x02\xfe\x0b\xd3\xb5\x75\xa5\x45\x56\x22\x42\xf6\x11\xad\x96\
\x41\xb3\xa3\xfa\x0a\x78\x07\xa6\x3b\x4e\xab\x4f\x95\x1f\xaa\x6a\
\x9c\x04\xdd\xfe\xad\xaf\x80\x87\x61\xb8\xf4\x06\x5a\x6c\x48\x45\
\x88\x9e\x74\xc0\xd7\xa5\x7f\xe9\x2b\xe0\x2c\x18\x6e\x2f\xad\x96\
\x22\x34\xae\x31\xb4\xc8\x5f\x8e\x30\xf8\xd3\x39\xd4\xe5\x2d\x98\
\xcd\x5d\x4e\x8b\x39\x08\xcd\x6b\xce\x78\x72\x76\x52\x5f\x01\xc7\
\x61\xb6\x17\x69\xd5\x82\x80\x5c\x08\x6c\x0f\x2d\xce\x9b\x84\xf0\
\xf8\x83\xbe\x02\xf6\xc2\x68\x19\xf5\xb4\xf8\x1a\x3e\xc4\x94\x2d\
\xbd\xe8\xe9\xd9\xf7\x57\x55\xe6\x31\xb9\x78\xda\xe8\x6d\x47\x6e\
\xff\x74\x8d\x1b\x3e\x8c\x48\xa0\xc5\xe3\x08\x97\xa5\xfa\x0a\xb8\
\x07\x46\xfb\x9c\x16\x03\x63\x3c\x37\x7d\xf5\xc5\x2b\xcb\xe9\x2d\
\xf6\xb2\xf7\xbe\xac\x71\xc1\x6a\x13\x2d\x92\x4b\x11\x36\xc7\xf4\
\x15\x70\x2f\x4c\x76\x41\x32\x2d\x7a\xe2\x67\x28\xfa\x62\x0e\x03\
\x69\xdc\xbf\x3d\x29\xc0\xbd\xf2\xc3\x08\xa3\x0f\xf4\x15\xf0\x29\
\x4c\x76\x93\xbf\x47\xdd\xee\x4d\x0f\xc4\xb2\x5d\xd9\xf7\x8d\xc5\
\xff\x65\x78\xc4\xb4\x11\xe1\x74\x55\x2c\x75\xf9\x02\x06\xbb\x9a\
\x16\xcd\xf8\x09\xb0\x71\x76\x57\x9e\xa1\x95\x7b\x73\xf1\x13\x1c\
\xa3\xc5\x86\x38\x84\xd5\x26\x7d\x05\xbc\x0c\x73\xf5\x4d\xa1\xc5\
\x14\x00\xd8\xb2\x82\x1d\x91\xf7\x52\x3a\x00\x3c\xe8\xb0\x7f\x4d\
\xfc\x8f\xbd\x3b\xf1\x8e\xb2\x4a\xd3\x00\xfe\x40\x55\xa5\x42\x56\
\x92\x00\x21\x81\xb0\x93\x44\xc0\xb0\xef\xb2\x89\x09\x02\x0a\x06\
\xc4\x00\xca\xd2\x2c\x22\x20\xd0\x48\x1a\x05\x44\x40\x04\x5a\x45\
\x44\x06\x70\x00\x19\x01\x0d\x2e\xd3\x0d\x46\x5c\x1a\x1a\xb1\x5d\
\x18\xd1\x96\x46\xb1\x55\x7a\xec\x76\x19\xb5\x59\xec\x9e\x76\xeb\
\x69\x7b\x74\x7c\xe6\x1c\x4e\x3b\x73\xc2\x0d\x55\xf5\x7d\xf7\x7e\
\x6f\x7d\xdf\x57\xfc\xfe\x80\x9c\x93\x9c\xa7\x52\x77\x79\xdf\xfb\
\x1e\x97\x4b\xc0\xb7\xfe\xa9\x0b\xf8\x1d\x50\xfa\x10\xad\x6a\x72\
\xb4\x03\xea\xe5\xb0\x86\x79\x88\xb7\x2f\xe5\x12\x70\x0c\x9e\x75\
\x88\x35\x2c\x6e\xf7\x74\x90\x36\x94\x7d\xb9\x8c\x35\xdc\x8f\xf8\
\xfb\x5e\x2e\x01\xeb\xe1\x55\x81\x4e\xac\x21\x44\x9b\x42\x2e\xfc\
\xaf\x78\x4a\x2e\x01\x5f\xc1\xab\x66\xd1\x09\xa1\x91\x70\x83\x2a\
\xb9\x04\xac\x83\x47\x6d\xa1\x13\x2e\x85\x3b\xac\xa6\x98\xbd\xf0\
\xa6\xbc\x02\x5f\xf7\xcf\x9c\xa4\x98\xff\x86\x37\xdd\x4c\x07\x8c\
\x85\x5b\x9c\xa6\x98\x93\xf0\xa4\x47\x68\x5e\xb0\xb7\x9b\x06\x2a\
\x8a\x59\x0d\x2f\x5a\x48\xf3\x66\xc2\x45\xf6\x52\x4a\xf0\x39\x78\
\x50\x66\x86\xdf\x9f\xd2\x58\x47\x29\xc1\x2a\x78\xd0\x25\x34\x6e\
\x39\x5c\xe5\x2b\x4a\x09\x9e\x82\xf7\x5c\x43\xe3\x7e\x0c\x77\x59\
\x4f\x29\xc1\x2f\xe1\x39\xdd\x68\x5a\xc6\x10\xb8\xcc\x31\x4a\x09\
\x1e\x87\x43\xb2\x3c\xb4\x53\xee\xd4\x17\x8e\x08\xa7\xc3\xa6\x6f\
\x29\x25\x78\x3b\x8c\x6b\xb0\xed\xf1\xb9\xad\x82\x13\x5e\x28\x85\
\x79\x9b\xb2\xe9\x80\xb6\x0d\x60\x5c\xc3\xb1\x33\x2e\x0a\x15\x4d\
\x78\xe7\x36\xd8\xb1\x9d\x52\x82\xcb\x60\x56\xf2\xea\xf1\xfc\x87\
\x7d\x29\x30\xac\x34\x9f\x8e\xd8\x17\x86\x61\xef\x77\xe4\x3f\x2c\
\x2d\x85\x0d\x2f\x52\x4a\xe8\x6d\x98\x54\x38\x81\xff\x6f\x6e\x3b\
\x18\xd5\x7f\x85\x57\x9e\xd3\x3a\x9a\xad\xbb\xd2\xfa\x0f\x4a\x09\
\xdd\x08\x83\x26\xd5\x1c\xb1\x3d\x14\x06\x25\x35\xa3\x63\x7e\xe1\
\x60\x0b\x43\xce\x34\xd8\xf0\x05\xa5\x84\x0e\x3a\xb7\x83\xd9\x08\
\x83\xb6\xd1\x39\xbd\xfa\xc0\x9c\xee\xb9\xac\xa1\xa4\xbf\xc6\x0c\
\x4b\x01\xa1\xff\x82\x21\x0d\x0b\x58\x53\xf0\x49\x18\x53\x9e\x41\
\x07\xed\x0f\xc0\x98\xe7\xcd\x9c\x35\x6e\xa3\x94\xd0\xb3\x30\xe3\
\x6f\xea\x90\x0f\x98\x92\xb9\xc0\x2b\x95\x72\x5d\x43\x3c\xc7\x20\
\x8d\x7e\x58\x11\xa1\x23\x30\xe2\x2d\x9e\x2b\xd8\x57\xe6\x04\x48\
\x5f\xc1\x3c\x07\xcb\x96\x6c\xfe\xec\xcf\x28\x25\xfb\xcf\x30\xa1\
\x13\x15\xa7\x60\xc6\x4a\x3a\xad\x6d\x00\x66\x5c\x4d\xc5\x51\xd8\
\xf3\xba\x5c\x02\xfe\xd3\xa1\x5f\xbd\x1a\x46\x84\x2f\xa5\xe3\xfe\
\x08\x23\xda\x51\x55\x05\x9b\xea\xc8\x25\xe0\x17\xd0\xd7\x99\x8a\
\x82\x14\x98\xb0\x8c\xce\x2b\x6a\x08\x13\xb6\x53\x55\xa9\xb1\xa3\
\x94\x92\x7d\x02\xda\xe6\x3b\x75\xd9\x9a\x52\x42\x01\x1f\x3b\x76\
\x61\x7d\xaf\x4e\x0d\x94\x94\xd4\xe5\xd0\x55\x87\xaa\x49\x52\x97\
\xc0\xfa\x72\xbb\x40\xdf\xc5\x21\x2a\x42\x2d\x61\xdf\x77\x72\x09\
\xf8\x0d\x34\x95\x53\xd5\x2b\x1d\xda\xba\x14\x50\xc4\x56\xe8\xbb\
\x9d\xaa\x66\xd0\xf1\xb1\x5c\x02\x5e\x81\x9e\x40\x19\x55\xef\x43\
\xdb\x83\x94\x11\x1a\x07\x6d\x6b\xa9\xda\x0e\x2d\x1f\xc9\x25\xe0\
\x6f\xd0\x33\xc5\x91\xd9\x45\x2d\xaf\xf3\xce\xa4\xad\xcc\x5c\xaa\
\xee\xd1\xfe\x06\x94\x92\x31\x16\x5a\x3e\xa7\x6a\x10\x34\x08\x9f\
\x8a\x5f\xd7\x12\x9a\xc6\x52\x35\x17\xba\x66\xc9\x25\xe0\x7f\xa0\
\x23\xb9\x15\x55\xa5\xd0\x93\x57\x42\xef\x3c\xa2\x73\xc0\x99\xd7\
\xba\x3f\x91\x4b\xc0\x19\xe8\xd8\xea\xc0\x0b\x85\xb7\x53\x4e\x51\
\x16\xb4\x24\x75\xa4\x6a\x2a\xf4\xdd\x29\x97\x80\x4a\x68\xf8\xd4\
\x81\xde\xcb\x2b\x29\xe8\x59\xf3\xdd\xab\x25\x01\x18\x70\x07\xa5\
\xe4\x7c\x00\xfb\xea\x65\x50\xd5\x1d\x3a\x16\x51\xd2\x12\x68\xf9\
\x0b\x55\x15\x30\xe2\x4d\xb9\x04\x6c\x82\x7d\x4b\x8d\x77\xa2\x5e\
\x43\x49\xa9\xcd\xa1\x63\x31\x55\x87\x61\xc6\x5f\xe4\x12\x70\xd8\
\x6c\x8f\xfb\x0e\xe8\x18\x4c\x51\xa7\xa0\xa1\x2b\x55\xf9\x79\x30\
\xe4\x97\x94\x52\xf0\x32\xec\xea\x43\x55\x6a\x1a\xec\x6b\x41\x59\
\x3b\x4d\x57\xad\xbd\x07\x63\xee\x96\x4b\x40\x5d\xd8\x75\x15\x55\
\x7f\xd6\x8a\xbd\xac\x50\x5f\xd8\x37\xc1\xe8\xb2\x52\xf5\xa0\x5c\
\x02\xde\x05\xcc\xd5\x31\x4d\x84\x6d\x81\x5b\x29\x6c\x2f\x6c\xeb\
\x1f\xa4\x22\xb5\x11\x0c\xfa\x90\x52\x72\x3f\x87\x3d\x43\xa8\xba\
\x22\x0b\x76\x3d\x41\x69\x6b\x61\xdb\x73\x54\xed\x87\x51\xff\x2a\
\x97\x80\x5b\x0c\x2e\x84\xeb\xc2\xae\x2a\x4a\xeb\x15\x80\x5d\xdf\
\x08\x3c\xc7\x34\x45\x2e\x01\xb7\x99\xdb\xad\x74\xd3\xa8\x31\x11\
\x37\x04\x36\x35\xce\xa1\xaa\x0b\xcc\x0a\xfc\x9c\x52\x5a\x2f\x34\
\x56\xbe\x79\x6b\x00\x36\x95\x51\xdc\x49\xd8\x74\x82\xaa\x11\x30\
\x2d\xd0\x4d\x2e\x01\xef\x1b\x3b\x0e\xbf\x1c\xf6\x0c\xa5\xbc\xfb\
\x60\xd3\x46\x99\x61\x5d\x81\x0a\x4a\x69\xba\x06\x36\x1c\x30\x58\
\x6e\x77\x23\xe5\x95\xc0\x9e\xe4\xfa\x54\x0d\x83\x79\x81\x3f\xc9\
\x25\xe0\x90\xa1\x2b\xf1\x2b\x35\x16\xbd\xe2\xfa\xc0\x96\xdb\xa8\
\xba\x0c\x4e\x08\x3c\xe0\xea\x04\x34\xc8\xa5\x6a\x9c\xc6\xd5\x82\
\xb4\x35\xe6\xf6\xe8\x1f\xc2\x11\x81\xc7\x28\xe5\xba\x95\x66\xca\
\xe2\x8e\x69\x5c\x04\x48\xfb\xde\x58\x5f\x14\x17\xc2\x19\x81\xaf\
\xe5\x12\xb0\xc5\x48\x05\xc7\x66\xd8\x91\x15\xa2\x10\xfd\xf9\x21\
\x8f\x52\xd5\x2f\x19\x0e\x09\xcc\x90\x4b\xc0\xaf\x4d\x94\xc6\x67\
\x37\x87\x0d\x3d\x19\x0f\xfb\x8c\xf5\xf0\xcc\x87\x63\xc2\x6f\x51\
\x4a\xfe\x1e\x58\xd4\xcc\x54\xe7\x5d\x25\xa5\xe8\x17\x71\x8e\xa0\
\xea\x04\x9c\x13\x3e\x20\x97\x80\x55\x06\xda\xe3\x9e\xd1\x78\x71\
\x44\x58\x41\x18\xd6\xdd\x40\x55\x4e\x0a\x1c\x14\x7e\x9e\x52\xae\
\x98\xa6\xdf\x20\xdb\xb4\x50\xa3\x1a\x48\x58\x9a\xa1\x37\x7e\x97\
\xc0\x51\xe1\x3f\xc8\x25\xe0\x25\xed\x3e\x71\x56\x6a\xec\xac\x84\
\x15\x9b\xe9\x8c\x66\x15\x9c\x15\xfe\x37\x4a\x69\x35\x55\xfb\x93\
\xfb\x35\xac\x9b\xc1\xb8\xe8\x09\xcb\x1a\xa5\x52\x11\x1c\x09\x87\
\x25\x4d\x92\x4b\x40\x0b\x58\x30\x95\xaa\x26\x61\x58\x36\x91\x32\
\xf4\x6f\x2e\x9e\xa5\x6a\x38\x1c\x97\x34\x9f\x52\xea\x5b\xf9\xab\
\x04\x4a\xa8\x5a\x05\xcb\x5e\xa3\x0c\xfd\xe3\x9b\xf7\xe2\x34\xab\
\x3b\x69\xa3\x5c\x02\xee\x45\xec\x2a\xcc\x9c\xaf\xdc\xcc\xb8\x38\
\x03\xab\xf2\xf2\xa9\xda\x05\x01\x49\x5b\x29\xa5\x9f\x85\x04\x1c\
\xa6\xea\x61\x58\xd6\x96\x1a\x24\xdb\x83\x0e\x53\xf5\x2a\x44\x24\
\x3d\x2d\x97\x80\x72\xbd\x4f\x44\x4f\x58\x35\xb5\x6e\x5c\xf4\x87\
\x55\x15\x54\xdd\x01\x19\xe9\x13\xe5\x12\xf0\xa4\xd6\x77\xe2\xef\
\xe1\x29\xba\x6b\x9e\x5f\x43\x48\xfa\x7b\x94\xd2\xeb\x51\x9d\x55\
\xf1\xf5\xf0\xab\x16\x54\x8d\x49\x82\x94\xf4\x39\x72\x09\x18\xa6\
\xb3\x2f\x6e\x03\x9f\xba\x86\xaa\x19\x90\x93\xfe\x0e\xa5\x8c\x2e\
\x45\x6c\xf6\x53\xf5\x3d\x7c\x6a\x2e\x55\x67\xe0\xcf\x04\x74\x5c\
\x64\x7f\x02\xe2\x6b\xf0\xa7\x7b\xa8\x6a\x9d\x09\x49\xc9\xff\xec\
\xb6\x04\x74\xa1\xaa\xe0\x62\xf8\xd2\x67\x54\xed\x83\xac\xe4\x67\
\x28\x65\xcc\x3c\xdb\xf7\xe3\xd9\x97\x7c\xd6\x22\x0c\x5f\x69\x74\
\xe2\xeb\x15\x54\xf1\x6d\x08\x4b\xde\x2d\x97\x80\x21\x3a\xef\x1c\
\x8f\x7e\xfa\xed\x91\xf0\x87\xa4\xa9\x75\xae\x0f\xb1\x56\xa1\x01\
\x90\x96\xbc\x8f\x52\x9a\x74\x45\x74\xc3\x78\x7e\x3f\xba\xf3\xb6\
\x3c\x78\x5c\xff\x65\x13\x7b\xf1\xbc\xae\x85\xbc\xac\xb5\x72\x09\
\xd8\x85\xe8\x2e\x63\x24\x4d\x5f\xdb\xdb\x43\xed\x05\xae\xe3\x46\
\xeb\x70\xae\xbc\x5b\xee\xb8\x92\x11\x7d\x0b\x79\xc8\xda\x40\x29\
\x17\xed\x32\xf1\x9a\xc1\xe4\xa7\xc6\x36\xae\x11\x80\x17\xe8\x46\
\xd9\x35\x07\x4b\xf6\x58\x57\xdd\x9a\xd1\xfc\x14\xf1\x90\xf5\x90\
\x5c\x02\x9e\x30\x33\xeb\x3f\x75\xf3\xb6\xf2\x80\xd2\x51\xe0\x32\
\x87\xf0\x83\xc6\x63\x2b\x06\x31\x06\xbf\x45\x7c\xe4\xbd\x46\x29\
\xe3\x7b\x22\x8a\xf4\x7e\x8c\x4d\x93\xf9\x07\xdb\xe0\xac\x12\xba\
\x86\x7a\x89\x11\x28\xdf\x76\x6d\x2a\x63\xf3\x11\xe2\x24\xaf\x5a\
\x2e\x01\xb3\x11\xc5\x24\xc6\x2c\x78\xd7\xac\x35\x59\xe8\x43\x77\
\xda\x0d\xb4\x39\x38\x7f\x0c\x63\xd7\x02\xf1\x92\xb7\x84\x52\x4a\
\xa2\x25\x60\x39\x2d\xc9\x5f\xfb\x00\xdd\x69\xcc\xac\xbb\x82\xb4\
\xa2\x28\x80\xb8\xe9\xf0\x0d\xa5\x14\xf5\x40\x44\x29\x05\x4c\x50\
\x4f\x21\x8e\x3a\x2c\x95\x4b\x40\x3b\x44\xb4\x84\x09\xea\x65\xc4\
\x53\x87\xc7\x29\xe5\xd6\x7b\x10\xc9\x29\x26\xa6\x2b\xf2\x10\x57\
\x1d\xfe\xea\x92\x04\xf4\x0d\x32\x21\x4d\x44\x9c\x15\xee\xa0\x94\
\x15\x3f\x45\x04\xc3\x99\x90\x8e\x20\xde\x0a\xf7\x53\x4a\x37\x8d\
\x21\xf8\x3e\x95\x9a\x86\x44\x4a\x40\xa4\xa1\xb3\x43\xab\x5b\x33\
\xe1\x14\x7d\x04\x17\xc8\xec\x4c\x21\x4d\x17\xa1\x06\xe5\xda\x64\
\x26\x13\x47\xc1\x8e\xed\xf3\xe0\x0e\x99\x3b\x29\x64\x70\xcb\xa8\
\x17\xa7\xf7\x8d\x66\x02\x58\xf0\xe0\xa6\x4c\xb8\x47\x83\x9b\x28\
\xe4\x9b\x30\xa2\x09\x4f\x7d\x7d\x60\x36\x7d\xac\xd5\x33\x55\x43\
\xe1\x32\x0d\x6e\xa6\x90\x6b\x10\x8b\x46\xcb\x1f\x2b\xa3\x1f\x05\
\xdb\x7e\xbc\x25\x1d\x2e\xd4\x60\x33\x85\xdc\x8b\x18\xed\x5a\xbf\
\x34\x97\xbe\x32\xfe\x0f\x47\x06\xc0\xad\x1a\x5e\x4b\x19\xdf\x20\
\x76\x85\x75\x7f\x39\x97\xfe\x90\xb1\xf3\xf7\xc3\x02\x70\xb3\x86\
\xcd\x28\x63\x15\x2c\xe9\xfe\xe5\x9c\x7e\xf4\xb8\x51\x53\x2a\x1b\
\x42\x91\xa8\x09\xe8\x0c\xab\x92\x56\x3d\x32\x3c\x44\x8f\xca\x5f\
\x7b\x72\x1c\xbc\x21\xe5\x12\x6a\x6b\xbd\xb8\xd9\x92\xf6\x45\xd9\
\xe6\xdf\xd7\xed\xfd\xe9\x5b\x45\xf4\x02\xb5\x76\xc5\x3b\x52\x06\
\x52\x47\xb0\xd9\x17\xf3\x70\x56\xe1\xe1\x29\x9d\x78\x5e\xcd\x60\
\xd3\xbc\xed\x3b\x0a\xe8\x66\x6a\xf5\x9a\xc7\x34\xbe\x9e\xf6\x55\
\xd7\xe8\x06\x4e\xba\x71\xb0\xfe\xbb\x3a\xaa\xcc\x4d\x0f\x2e\xa0\
\x0b\xa9\xf5\xab\x4f\x06\xe0\x41\x8d\x7f\x42\x9b\x06\x6f\xc1\x39\
\x92\x8f\x66\x38\x53\x04\x33\xb4\x6a\x09\xdd\xac\x5f\x8d\x0a\xf6\
\x04\x49\xc0\xce\xe6\x50\xed\xb9\x88\xb5\xea\x04\x4d\xa5\x74\xb3\
\x9b\xe0\x65\xf5\x86\xd3\x86\x03\xe9\xa8\x4d\xf1\xdf\x59\xab\xae\
\xd0\xb3\x8a\x6e\x76\x15\x3c\xad\xde\x04\x1b\x99\x4f\x47\xed\x7a\
\xd4\x67\x6d\xb6\x43\xcf\xbb\x74\xb3\xab\xe1\x6d\x69\x57\xd1\xa2\
\xcb\x06\xe0\x7c\x36\x85\xf4\xa7\x2e\xab\x5e\xa1\x9b\x75\x82\xc7\
\xa5\xb5\xa5\x35\x2b\xad\x4e\xaf\xcc\x68\x0c\x2d\x07\xe9\x66\xbd\
\xe0\x75\x8d\xac\x25\xa0\x1a\x11\xb4\x69\x4a\x15\x2b\xa1\x65\x35\
\xdd\x2c\x03\x9e\xd7\xa8\x3d\x63\x17\x1c\x66\xfd\x1d\xff\xd3\xd0\
\xb2\x9d\xae\x96\x0c\xcf\x6b\x79\x29\x63\xb6\x19\x11\x15\x53\xc5\
\x37\xa0\xe5\x57\x74\xb5\x46\xf0\xbe\x96\x77\x31\x56\x5f\xd8\x18\
\x90\x53\x01\x2d\x6f\xd2\xd5\x8a\xe1\x03\xbd\x47\x30\x46\xa5\x88\
\xec\x3b\xaa\x76\x43\xcb\x53\x74\xb5\xd9\x48\xa4\x04\xe4\xda\x19\
\xe7\x35\x10\x5a\x26\xd1\xd5\xca\xe1\x0b\xbd\xef\x67\x2c\x06\x23\
\x8a\x72\xaa\x46\x41\xcb\x6e\xba\xda\x21\xf8\x43\xf3\x1f\x31\x06\
\x97\x20\x8a\xfe\x54\xb5\x82\x96\x0d\x74\xb5\x85\x48\xa4\x04\x54\
\x23\x8a\xc6\x54\x95\x41\xcb\x46\xba\xda\xe5\xf0\x8b\x01\x33\x19\
\x55\x7b\x44\x31\x8e\xaa\xe1\xbe\x5e\x04\xf6\x84\x6f\x0c\xb8\x92\
\xd1\x14\x21\x8a\x69\x54\x3d\x03\x2d\x9f\xd0\xd5\xfa\xc0\x3f\x2e\
\xfe\x2d\xa3\xc8\x2e\x44\x64\x9f\x52\x35\x05\x5a\x7e\x46\x57\xab\
\x87\x84\x4a\x40\xa5\x8d\x89\x8e\xdb\xa0\x65\x1d\x5d\x2d\x0c\x3f\
\xf9\xf1\x5c\x46\xf6\x18\x22\x0a\x8c\xa7\x6a\x19\xb4\xbc\x4d\x37\
\x6b\x0a\x7f\x69\x73\x35\x23\xba\x28\x0b\x91\xec\xa1\x8a\xef\xfa\
\xb9\x1e\xa0\x04\x09\x96\x80\x93\x88\xe4\x71\x07\xde\xc5\xf8\x9c\
\x6e\xf6\x30\xfc\xa6\xef\xdf\x19\x49\x51\x21\xce\x6f\x8d\x7e\x73\
\x90\x6a\x2a\xdd\xac\x3d\x7c\xa7\xef\x02\x46\xf2\x3a\xce\x2b\xaf\
\x3d\x55\x5c\x0f\x3d\x4f\x5c\xa8\x0a\x16\x36\xf2\x61\x46\x10\x3a\
\x8c\xf3\x79\x8b\xb5\x69\x07\x3d\xc5\x74\xb3\x0d\x48\xb8\x04\xb4\
\x9a\x8d\xda\x1d\x65\x6d\x16\x40\x53\x3d\xba\xd9\x46\xf8\x51\xff\
\x57\x19\xc1\x8a\x47\x51\x9b\x63\x21\xd6\xe6\x4d\x68\x0a\xd3\xcd\
\x2a\xe0\x4b\xfd\x47\x31\x82\xa6\xbf\x81\x22\xef\x00\x6b\xb7\x05\
\xba\xf2\xe9\x62\x77\xc2\x9f\xfa\x44\x4c\x40\xf0\x4f\x23\x51\xd3\
\x2d\x77\x45\xb8\x40\xd6\x92\x7c\x24\x97\x2e\xd6\xb9\x07\xfc\xa9\
\x78\x31\x23\x69\x3d\xab\x18\xff\x27\x69\x4d\x67\x2a\xcc\x8c\xca\
\x4e\x7b\x71\x05\xdd\x2d\xb8\xe1\x50\x42\x26\x80\xbc\xff\xa3\xca\
\x7b\xfb\xd4\x6b\xb7\xf2\xc6\xf9\xa3\xa9\x30\x33\x14\x76\xfa\xdd\
\xf9\xf4\x80\xf6\x47\x92\xe1\x43\xdd\x07\x53\x5b\x70\x11\xec\x9b\
\x36\x27\x44\x8f\x58\xf1\x62\x9a\x1f\x13\x30\x99\xba\xe6\xc3\xae\
\xa4\xe5\xd7\xd3\x4b\xf2\xef\x9e\x0e\xdf\xe9\xa2\x9b\x80\x8c\xe9\
\xb0\x27\x65\xdd\x64\x7a\x4d\xe8\x9d\x69\xf0\x9b\x1b\x06\x51\xcb\
\xcf\x60\x4b\xf1\x27\xf5\xe9\x49\x3f\x59\x9e\xe4\xb7\x04\x5c\x46\
\x0d\x13\x03\xb0\xe1\xc9\xf9\xa9\xf4\xac\xc9\x5f\xa5\xc0\x57\x86\
\x6a\x24\xe0\xd2\x4c\x58\x16\xa8\xbc\x89\xde\x56\xff\xce\x62\xf8\
\xc9\xf4\x4e\xb4\x69\x7c\x31\xac\x2a\x7c\x6e\x01\xbd\x2f\x75\x6b\
\xf9\x85\x04\x90\x05\x2f\xc1\xa2\x36\x6f\x74\xa4\x4f\x6c\x3e\x13\
\x86\x6f\x8c\x2b\xa3\x1d\x7f\x84\x35\x43\x66\xe4\xd0\x47\x5e\x5d\
\x9d\xe9\x9f\x04\xfc\x2e\x44\xab\x72\x8e\xc3\x92\x77\x97\xd2\x6f\
\x46\x7f\xdc\x17\x7e\x51\xb7\x17\xad\x69\xb2\x07\x16\xe4\x2d\x9b\
\x49\x3f\xca\x79\x7e\x11\x7c\x62\xfa\xfd\xb4\x62\x44\x17\xc4\xae\
\xf9\x0b\xe3\xe9\x5b\x3b\x5e\x0e\xc0\x17\x32\xb7\x32\x76\xef\x34\
\x40\xcc\x7a\x74\x6b\x4d\x5f\x9b\x7b\xbc\x03\x7c\x61\x7d\x36\x63\
\x93\x5d\x27\xf6\xd0\x1f\x5a\x1b\xa4\xef\x35\x79\x7d\x00\xfc\x60\
\xf6\xc4\x20\x63\x30\x67\x76\xec\xa5\x1e\xed\x99\x18\x72\x1f\xe8\
\x09\x3f\x28\x7d\x88\xd1\xfc\xb5\x3c\xf6\x52\x8f\x32\x26\x8e\x60\
\xf5\x42\xf8\xc1\x4b\x9d\x19\xc9\xf0\xf7\x11\xa3\xa1\x77\xe7\x33\
\xc1\x8c\x38\x98\x05\x1f\x58\xf9\xf3\xc9\xac\xdd\xe4\x0f\x63\x0f\
\x79\x52\x2e\x2d\x18\x75\xfa\x31\xba\xd2\xa7\xd6\x56\xb0\x9f\xc0\
\x1f\x7a\x1e\xdb\x9f\xc1\x9a\x42\x03\xbf\xe8\x0a\x0b\x7a\x32\x76\
\xd7\x8e\x0d\xe3\x03\xba\xd1\x98\x80\xb5\x3d\xec\x52\xf8\x46\xca\
\xbf\x9f\x7e\xa4\x62\xf7\xc0\xc5\xad\xca\xae\x5a\x5b\xf1\xab\xaa\
\xca\x01\xb0\xe6\x04\x63\x94\xfa\xf4\xe5\x00\x90\x92\x4a\x17\xba\
\xcf\xe2\x29\x56\x11\x14\x89\xea\x0d\xc6\xa4\xd5\x1d\xdd\x71\x16\
\x9a\xd1\x85\x8e\x5b\x3d\xc7\xee\x0d\x45\x82\xda\xc7\x18\x0c\x3a\
\x9a\x82\x1f\xd4\xa1\x0b\xdd\x80\x1f\x74\x9d\x51\xc0\x18\xac\x81\
\x22\x41\x0d\xb6\x5a\x5e\xb5\x87\xee\x33\xca\xf2\x5d\xf6\x5e\x9c\
\x75\x41\x4a\x90\x91\x85\xe6\x4c\x43\x0d\xe9\x57\xd0\x75\xba\xa1\
\x86\xc2\xaa\x05\x8c\xe2\x01\x9c\x75\xc1\x4b\xd6\x4b\xac\x37\xd0\
\x75\x5e\xc1\x39\x02\x1f\xdc\x44\x95\xfa\x62\xe2\x05\xdf\x33\x82\
\xb2\xed\x69\x9e\x98\x1b\x92\x93\x66\xb9\xa2\xf5\xba\xc0\xff\xb2\
\x77\x7f\xa1\x59\xd5\x71\x18\xc0\x1f\x78\xdf\x77\xbd\xe6\x36\x9d\
\xef\xd6\x9f\xb9\x9a\xce\x99\xbc\x95\x9b\xda\xbf\xe1\x62\x9b\x69\
\xb5\x49\x22\x85\x51\x22\xa6\x14\x26\x9a\xb2\x0b\x2f\x5c\x17\xc6\
\xcc\x30\x4a\x13\x6d\x85\x36\x68\x44\x0b\xa4\xd2\x22\x8c\xd4\xf0\
\x5f\x39\xc8\x94\x34\x8c\xa8\xa9\x0d\xb5\xc0\xac\xcc\xba\xea\x42\
\xe1\x89\x5d\x7f\x8f\x9e\xdf\xfb\x3b\x7f\x5f\xbe\xe7\x73\x7f\x2e\
\xce\xd5\x39\xe7\x77\x9e\xef\xf7\xc1\xb0\xc4\x59\x5e\xd3\xc4\x4b\
\x65\x70\x52\x9f\x61\xcc\xf4\xc3\xd1\xf4\xeb\x65\xda\xe7\x63\x58\
\x62\x35\x9d\x65\x76\xad\x33\xbd\x26\x7a\x2b\xe0\x0c\x55\x9b\x66\
\xd2\x19\x9f\xc6\xb0\x44\x13\x9d\xd4\x3c\xfb\x06\xae\xed\x62\x6c\
\x9f\x00\x52\xc9\xf1\x36\x3a\xfa\x13\x09\x00\x2d\x94\x38\xa9\x6f\
\x3c\xae\x67\x6e\xba\xa8\x96\x03\x1d\x5e\x39\x86\xd2\x69\x24\x00\
\x0c\x50\xea\xab\x80\x8b\x05\x8c\x95\x7d\x70\xd1\xf1\x90\xf3\xc2\
\xa4\x04\xbe\xa6\xd4\x0c\x37\x9f\x32\x4e\x72\x23\xe0\x66\x90\x42\
\xe6\x0e\x24\x1c\x5b\x80\x1a\xe1\xaa\x25\xcd\x18\xd9\x08\x57\xbb\
\x9d\x1b\x86\x12\xd8\x6a\xb7\x4e\x74\x2a\x63\xe4\x67\xb8\xea\xa4\
\xf4\x23\x12\xc8\xe6\x28\x6c\x80\xbb\x3d\x8c\x8f\x7c\x16\xae\x2a\
\x46\x52\xb8\x8c\x04\xee\xa1\x74\x12\xee\x2a\xf2\x8c\x8d\xe5\x30\
\x30\x87\xc2\x7b\x48\xe0\x6f\x4a\xe3\x60\x60\x90\x71\x91\x5e\x02\
\x03\xbd\x14\xf2\x48\xe0\x1c\x85\x74\x25\x0c\xcc\x4f\x33\x26\xd6\
\xc0\x44\x1f\xa5\xc9\x48\xf4\x5b\x77\x4b\x7e\xc4\x98\xf8\x0f\x26\
\x4e\x51\x7a\x09\x89\x59\x14\xbe\x28\xae\xf2\x90\x86\x32\x98\x98\
\x46\xe9\x3b\xa8\x37\x82\xd2\x76\x18\xc9\x4e\x62\x2c\x7c\x0f\x33\
\x75\x14\x96\x42\xbd\x4f\x28\x75\xc3\xcc\x09\xc6\x41\x4d\x2b\xcc\
\x3c\x48\x61\x22\xd4\xdb\xef\xa1\x83\xbd\xb2\x91\x31\xd0\xe3\xa1\
\xf4\x74\xc2\x0d\xd0\x6e\x27\x85\x9b\x4b\x8a\xa9\x45\x32\xd7\x02\
\x43\xeb\x29\xdd\x09\xed\x6e\xf7\xd2\xbc\x54\x5a\x57\x4c\x1d\x11\
\xc7\x28\x1d\x87\x76\xa3\x29\x7c\x09\x63\xbf\x32\x6a\x23\x97\xc0\
\x54\x29\xa5\x43\x50\xee\x3e\x4a\x27\x60\xac\xb5\x86\x11\xfb\x05\
\xe6\x1e\xa1\xb0\x0d\xca\x9d\xa2\xd4\x05\x73\x97\x19\xad\xcc\x0b\
\x30\x77\x94\xc2\x13\x50\xee\x5f\x4a\xf5\x30\x57\x3b\x9a\x91\xfa\
\x10\x05\x58\xee\x78\xea\xad\xdb\x5b\x14\x9e\x44\x21\xde\x65\x94\
\xaa\x9b\x51\x80\x23\x94\xda\xa1\xdb\x1c\xaf\xe5\xb2\xb7\xce\x64\
\x84\xbe\x42\x21\x3a\x29\x9d\x84\x12\xe6\x29\x89\x0d\x16\xcb\x05\
\x22\xb2\x76\x14\x0a\x51\x51\xee\x74\xb7\xaa\x75\x52\x7a\x19\x85\
\x69\x8b\x6b\x16\x58\x5a\x46\x61\x2a\x54\xdb\x4d\xa9\xbd\x68\xfa\
\xc4\xef\x4a\x79\x7f\xe3\x69\x84\x6a\x83\x7e\xbc\x17\x2f\x62\x34\
\xd2\xef\xf8\xf1\xcd\xd3\x0c\xcd\x3e\xf3\xe3\xcb\xb8\x76\x2c\x23\
\xb1\xd3\x97\x53\x8f\x7f\xa0\xd9\x5a\x8b\x34\x88\xd4\xcd\x28\xcc\
\xa8\x82\x60\x91\x09\xb9\x02\xc5\x6e\xf2\xe9\x74\xfc\x34\x43\x65\
\x1f\xe7\xaa\xa3\x70\x10\x8a\xad\xf3\xe9\xff\xd8\xe4\x46\x86\xee\
\x2f\x9f\xfe\x7d\xbe\x0e\xc5\x2e\x52\xba\xdf\x2e\x5b\x1e\xb6\x29\
\xb7\xc0\xc2\xbd\x14\x6e\x2b\x83\x5e\x4f\x59\xa6\x41\xa4\x6d\x0c\
\xd9\x4f\xb0\xb1\x9e\xd2\x03\xd0\xeb\x79\xcb\x34\x88\x34\x7e\x06\
\x43\xf5\x1a\xac\x1c\xa3\xb4\x07\x6a\xa5\xaa\x29\xfc\x00\x3b\xed\
\x39\x86\x68\x41\x19\xac\x94\x52\xba\x0a\xb5\x3a\x28\x6d\x86\xa5\
\x0b\x0c\x4f\x7e\x2e\x2c\x4d\xa1\xb0\x0b\x6a\xfd\x41\xa9\x0b\xb6\
\x7a\x19\x96\xcc\x10\x6c\x1d\xa5\xb0\x18\x6a\xed\xa0\x54\x0f\x5b\
\x55\xf3\x18\x92\x1d\x10\xbc\x64\x42\x6e\x84\x56\x5b\x28\x8c\x85\
\xbd\xf7\x1f\x63\x28\xfa\x53\xb0\x76\x89\xd2\x61\x68\x35\x8f\xc2\
\x5e\x78\xd0\x55\xce\x10\x2c\xac\x84\xbd\x47\x29\xfd\x0e\xa5\xee\
\x1e\x43\xa1\x07\x5e\x1c\x49\x33\x70\x1f\xb4\xc2\x83\x6c\x39\x85\
\x8f\xa1\xd4\x6c\x8b\x34\x48\xe4\x09\xc1\x86\x0e\x78\xb2\x8c\xc2\
\x6a\x28\xb5\x22\x80\x8c\x64\x0f\x83\x55\xf7\xb0\xff\x29\xd8\x26\
\x28\xf5\x0a\x85\xcc\x28\x78\x93\xda\xc2\x20\xe5\x86\xe0\xd1\x19\
\x4a\xd3\xa1\xd3\x1a\x0a\xdf\xc2\xab\xec\x33\x0c\xce\x84\x01\x78\
\xf5\x36\xa5\x01\xe8\xd4\x10\xc8\xa4\x54\xc9\x52\x06\xa5\xfa\x40\
\x30\xb3\x70\xe7\xa1\x52\x6d\x40\xb3\x92\xa9\x1e\x06\xa3\x69\x5c\
\x40\xd3\xb0\x8b\xa0\xd2\xab\x81\x4d\x4b\x6f\x67\x10\xf2\x2f\x06\
\x35\x0f\xbf\x15\x2a\x6d\x0a\x6e\x5f\xc2\x95\x34\x7d\xf7\x7f\x7b\
\x57\xf7\xd2\x64\x1c\x46\x37\xd4\x6a\x7e\xf4\x41\x17\x12\x66\xb0\
\x22\x09\xc3\x12\x1d\xb3\xa6\x37\xba\x17\x03\xcd\x54\x88\x39\xf4\
\x62\x68\x5f\x0a\x13\x26\x5d\x18\x86\x13\x8d\x49\xf3\xc2\xa9\x15\
\x54\x84\x95\x1f\xa5\x84\x06\xea\x8d\xcc\x10\x07\x2d\x82\xba\x90\
\xc0\x70\x88\x03\x03\x03\xc1\x21\x0c\xba\xad\x1b\x85\xb6\xf7\x61\
\xb5\xf9\x7b\xb6\x8b\xf3\xbe\xcf\x1f\x70\x38\xcf\x73\xde\x9b\x1f\
\x87\x73\xf2\xf5\x6c\x89\x18\xc7\x0f\x41\x0a\x60\x92\x31\x31\x65\
\x4b\xf8\xab\x70\xbb\x8d\x31\x13\xa7\x10\x52\x00\x95\x9c\x99\x49\
\x45\x77\xc5\xde\xff\x65\x06\x67\x2a\x56\x00\xf1\xfe\x07\x4f\x92\
\x6e\x10\x51\xa3\x11\xd9\x2c\x97\xb9\xc0\x9b\x8b\x67\x42\x14\xc0\
\x43\xda\x0d\x22\x6c\x92\xde\x0a\x8b\x92\x95\x66\x44\x12\xcf\x27\
\x2b\xc7\xf0\xc6\xcc\x9e\x9c\x7a\x4f\x12\x73\x7f\x8b\x96\x3b\x1b\
\x57\x42\x14\xc0\x17\x62\xd5\x36\xb1\x10\xa9\x75\x22\x82\x60\xb7\
\x04\x13\x37\x11\x20\x1a\x40\x01\xb4\x92\x6e\x10\xc1\x63\xdf\xb7\
\x59\xd8\xab\xa5\x01\xc4\x7a\x42\xa6\x01\x05\x30\x46\xba\x41\x44\
\x4f\xaa\x6e\x5f\x1d\xa3\xdd\x7e\xd1\xb4\x69\x4f\x48\x2f\xde\xfd\
\x73\x88\xc3\xb8\x39\x80\x46\xde\xc4\xfe\xf6\xeb\x61\xa8\xf5\x22\
\x3d\x21\x3a\x3c\x01\xd4\x12\x0b\xef\xe2\x81\xba\xf1\x3a\xb6\x10\
\x68\x53\x3a\x0f\xf5\x46\x39\xd6\x29\x3c\x01\x74\x11\x2b\x77\x71\
\x81\x7d\x3f\x1b\x7d\x04\xb0\x8e\x2d\xb9\xa1\x8d\xf0\x99\xe0\x09\
\xc0\x4d\xbb\x41\xb8\xe6\xe6\x62\x66\x34\xe7\xef\xd8\x60\x2b\x73\
\xa1\x3d\x21\x05\x70\x02\x78\x45\xba\x41\x18\x47\x1b\xbc\xfe\xbf\
\xf1\xbf\x4f\x77\x18\x9b\xfd\x69\x4f\x88\x0f\x4e\x00\x44\xac\xcb\
\x0a\x37\xe6\xb3\xe5\x7f\x6b\x20\xbb\x33\x58\xcd\x4c\xfd\x1c\x01\
\x3b\x8c\x76\x7f\x1b\xb1\x04\x75\x1c\x70\x9b\x3f\x0c\x45\xc8\x97\
\x36\x36\xf9\x8e\x24\x26\x21\xfd\x39\x9a\x00\xec\xc4\xfa\x3f\xc5\
\x07\xfa\xc4\x8c\xd5\x79\xbf\x3c\xfc\xc7\xbf\x66\x59\xf3\x37\x73\
\xc2\x46\x6e\x3e\xaf\x42\x13\xc0\x40\xa2\xdb\x33\x34\x13\xeb\x66\
\xeb\xfb\x25\xe7\x0f\xcf\x78\xe0\xa3\x23\xf7\x40\x82\x5b\x52\xd2\
\x8e\xa1\xb9\x41\x50\xfa\x73\x14\x4f\x08\x3d\x8b\xf2\x15\xd4\xc0\
\x90\x77\x10\x02\x98\x00\x13\xc0\x1c\x72\x87\x9e\x46\x71\x85\xa9\
\x1e\x41\xb7\x68\x4a\x4a\x7d\x18\xd1\xa4\x3c\x82\xc3\xfe\x85\xfc\
\x15\x14\xad\x46\xda\x20\x2b\x0a\x28\x01\x62\xef\x92\x07\x05\x29\
\x5d\x31\x56\x24\xf6\x7d\xe1\xec\x1f\x10\x00\x58\x3f\x41\x79\x0e\
\x12\xfb\x5f\x44\x3a\x22\xda\xfc\x0c\x31\xed\xd6\xeb\xb1\xd8\xab\
\x43\x4d\xa1\xb7\x54\x80\x5f\xcf\x5f\x9e\xa0\x8b\x85\xc8\x55\x19\
\xc6\x02\x15\xe4\x67\x2e\xdb\x5d\xc0\x61\xaf\x1e\x8f\xfd\xe6\x5e\
\x58\x4c\xf6\xa0\x41\x85\xfa\x55\xbc\xab\xea\xab\x6c\x30\x55\x63\
\xb2\xb7\xb7\x96\xd4\x34\xde\xee\xbf\x8c\xc6\xfb\x0f\xca\x8f\xcb\
\x0d\xe2\x5d\x1e\x58\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x00\xd1\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x80\x02\x03\x00\x00\x00\xbe\x50\x89\x58\
\x00\x00\x00\x09\x50\x4c\x54\x45\xff\xff\xff\x00\x00\x00\x00\x00\
\x00\x7e\x51\xa9\x5d\x00\x00\x00\x02\x74\x52\x4e\x53\x00\x80\x9b\
\x2b\x4e\x18\x00\x00\x00\x75\x49\x44\x41\x54\x78\x5e\xed\xd6\xa1\
\x0d\xc0\x30\x0c\x44\xd1\x8c\xe6\x11\x3d\x84\x89\x6f\xca\x4a\x56\
\x51\xd5\xf4\xc3\x82\xdc\x07\x0e\x79\xf0\x40\x96\xee\xd6\x23\x29\
\xe6\xfd\x00\x49\xa0\x08\x34\x01\x05\x81\x24\x50\x04\x7a\x03\x86\
\xcf\x0d\x02\x49\xa0\x08\x34\x01\x05\x81\x24\x50\x04\x9a\x80\x82\
\x40\x12\x28\x02\x4d\x40\x41\x20\x09\x14\x81\x26\xa0\xdf\x81\x81\
\xc1\x74\x20\xd8\xf4\x0f\x30\x58\x9b\xce\x03\x7a\xe9\x2c\xe0\x3d\
\x78\x0f\xde\x83\xf7\xe0\x3d\x30\xf0\xff\x41\x17\x36\x28\x6e\xa1\
\x4b\x8a\x16\x57\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x03\xeb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x03\x00\x00\x00\x9d\xb7\x81\xec\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x11\xb7\x00\x00\x11\xb7\x01\
\x9a\x63\x53\x2b\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\
\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x35\x50\x4c\x54\
\x45\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\xca\xfd\x3f\x34\x00\x00\x00\x66\x74\x52\
\x4e\x53\x00\x01\x02\x03\x04\x05\x06\x07\x0d\x10\x11\x12\x13\x16\
\x18\x19\x1a\x1b\x1f\x23\x25\x28\x29\x2e\x2f\x30\x31\x32\x34\x38\
\x3d\x3e\x43\x47\x48\x49\x4a\x4b\x4d\x54\x55\x5a\x5f\x60\x63\x66\
\x67\x68\x6d\x73\x79\x7f\x83\x84\x86\x8c\x90\x93\x98\x99\x9a\x9b\
\x9f\xa0\xa4\xa5\xa9\xab\xb2\xb5\xb8\xbc\xbd\xbe\xbf\xc0\xc4\xc9\
\xcb\xce\xd0\xd1\xd7\xd8\xd9\xdd\xe1\xe2\xe3\xe8\xea\xeb\xee\xef\
\xf2\xf3\xf6\xf7\xfa\xfc\xfd\xfe\x2c\x4b\x04\x5d\x00\x00\x01\xb6\
\x49\x44\x41\x54\x58\xc3\xed\xd7\x6b\x37\x02\x51\x14\x06\xe0\x48\
\xee\x94\x5c\x32\x72\x49\xc8\xb5\x46\x91\x4b\x22\x85\xe4\x3e\x31\
\xe4\x52\x42\xda\xff\xff\x27\x28\xd5\x4c\x3a\xbb\x99\xb3\xcf\xea\
\x8b\xb5\x7a\xbf\xce\xda\xcf\x3a\x73\xe6\x3d\x39\x2c\x96\x76\x5a\
\x97\x9e\x15\xd9\x38\x6b\xfd\xc6\xc0\x19\x98\xe5\xc9\x6a\x08\x14\
\x4f\x24\xe3\x84\xc1\x65\x08\x40\xc4\xe4\x1d\x65\x90\xfe\x05\x30\
\x91\x7a\x61\x73\x6a\xe7\x07\x0e\xd0\xfd\x0f\x99\x02\x33\x1b\x72\
\x05\x58\xfd\x46\xe6\xbf\xe6\x2a\x40\x38\xe0\xb1\xa1\xe3\xdd\xbf\
\x1d\xa8\xec\x81\xd3\xcb\x66\xb8\xba\x07\xa5\xa4\x1d\x18\x10\x02\
\x1d\x30\xda\xc4\x72\x12\xd8\xa3\x4b\x80\x67\x85\x03\x50\x55\x80\
\x1c\xf6\x48\x01\x90\x6b\x9f\x71\x16\x39\x05\xee\xda\x67\x94\x4a\
\x4b\x30\x01\x36\xd1\xaf\xe0\xe3\x07\xf6\x51\x20\xc8\x0f\x38\xe3\
\x0a\x9b\x68\xe9\x20\x4b\x57\x1f\x9a\x97\x4b\x38\x9a\xef\x41\x93\
\xd8\xb3\x7f\x96\x94\xb6\x51\x81\x60\xc3\x4b\x79\x0c\x00\xf7\xde\
\x1d\xfb\x16\x6f\x0d\x40\x46\x51\x6e\x0f\x47\x51\x60\xaa\x00\xbc\
\xc9\x8e\x61\xc0\x0d\xf0\x27\x8a\x00\x23\x84\x79\x48\x22\xc0\x32\
\x05\x88\x21\xc0\x2e\x05\x08\x20\xc0\x35\x05\x98\x64\x01\xdb\x27\
\x61\x3e\x6f\x65\x81\x69\xca\x02\x52\xdc\xa7\xb1\x49\xc2\x08\x10\
\xa7\x00\x4b\x08\xf0\x48\x01\x06\x59\x60\x88\x32\xaf\x22\x87\xc9\
\x27\x52\xa3\x7a\x60\x47\xa4\x46\xf5\xc0\x05\x05\x70\xb1\x80\x35\
\x4f\x98\x7f\xef\x64\x01\x17\x65\x01\xe7\xc8\x2f\x92\x9f\x02\x6c\
\x23\xc0\x91\x50\x8d\xea\x80\x7b\xa1\x1a\xe9\x40\x5f\x51\xa8\x46\
\x3a\x30\x2f\x56\x23\x1d\xd8\x12\xab\x91\x0e\x24\xc5\x6a\xa4\x01\
\x1d\x59\xb1\x1a\x69\xc0\xb8\x60\x8d\x34\x60\x5d\xb0\x46\x1a\x10\
\xa1\x00\x8b\x08\xa0\x50\x80\x01\x16\x38\x2e\x10\xe6\x1f\x90\x3f\
\xef\xaa\x68\x8d\x6a\xd7\x3c\x52\xfc\xc8\x45\x93\x92\x4c\x2f\x72\
\xd5\x25\xe4\xd5\x8b\x5c\xb6\xcb\x09\x98\xfc\xdf\x55\x7d\xbe\xd0\
\x65\x69\xa7\xa5\xf9\x01\x88\x34\x42\xd9\x0a\x21\xed\x1d\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0d\x8f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x03\x00\x00\x00\x6b\xac\x58\x54\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x06\xec\x00\x00\x06\xec\x01\
\x1e\x75\x38\x35\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\
\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x02\xf4\x50\x4c\x54\
\x45\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xc5\x17\xe8\xfd\x00\x00\x00\xfb\x74\x52\x4e\
\x53\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\
\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\
\x1f\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\
\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\
\x3f\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\
\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\
\x5f\x60\x61\x62\x63\x64\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\
\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\
\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\
\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\
\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\
\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\
\xc0\xc1\xc3\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\
\xd2\xd3\xd4\xd5\xd6\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\
\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\
\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\x0c\xb6\xe8\x0b\
\x00\x00\x09\x06\x49\x44\x41\x54\x18\x19\xed\xc1\x7b\x7c\x53\xe5\
\x01\x06\xe0\x37\x49\x93\x16\x0a\xbd\x60\xd1\xca\xa5\x54\x14\xac\
\x4e\xb1\x15\x10\x1d\x83\x21\x15\x11\x15\x9d\x0c\xa8\x0a\x0a\x82\
\x80\x9b\x02\x05\x45\x65\x3a\x99\x0a\x3a\x54\xc0\x51\xe5\x56\x36\
\x44\xc1\x0e\x54\x1c\x78\x81\x75\x54\x26\xcc\xa1\xa0\x72\x11\x6c\
\x9d\x5c\x0a\xd2\x0a\xad\x5c\x7a\x49\xd3\xe6\xfd\x67\xfa\xa3\x4a\
\xdb\x7c\x27\xf9\x9a\x1c\x93\xc3\x39\xdf\xf3\x00\x8a\xa2\x28\x8a\
\xa2\x28\x8a\xa2\x28\x8a\xa2\x28\x8a\xa2\x28\x8a\xa2\x28\x8a\xa2\
\x28\x8a\xa2\x28\x8a\xa2\x28\x8a\xa2\x28\x8a\x62\x5c\x09\x69\xd7\
\x74\x8d\x83\x35\xd9\xfb\xce\xdd\x53\xc5\x1f\x54\xec\x7c\xe6\x2a\
\x1b\x2c\x26\xfe\x99\x52\x36\x54\x3c\xbd\x05\x2c\xc4\x35\xe9\x28\
\x9b\x2a\x1e\x63\x87\x55\x74\xde\x49\x91\x2d\xc9\xb0\x86\xcc\x63\
\x14\x2b\xee\x01\x2b\x18\xe5\xa1\x96\xaa\x9b\x61\x7e\x7d\x6a\xa8\
\xed\x64\x37\x98\x5d\x4a\x29\xfd\xd9\xd7\x16\xe6\x66\xdb\x4a\xff\
\xf2\x11\x21\xae\xc9\x6f\x6d\xd2\xcf\xaa\x71\x0e\x08\xdd\xc5\x40\
\x6e\x44\x44\xb8\x3e\xa1\xbe\x36\xd8\x20\x10\xb3\x9f\x81\xec\xb0\
\x23\x12\x26\x53\x6f\xb7\x43\x60\x0a\x03\x1b\x81\x48\x78\x93\x7a\
\x7b\x09\x02\x9f\x33\xb0\x0d\x88\x84\x5c\xea\xed\x69\xf8\xea\x4c\
\x09\x35\x09\x88\x80\x01\xd4\x99\x27\x03\xbe\xa6\x52\xc6\x08\x44\
\xc2\xb4\x1a\xea\xa9\x62\x0c\x04\xd6\x53\xc6\x32\x44\x44\xc7\xe1\
\x13\xf4\x33\xe4\x3c\x88\xec\xa4\x8c\x7c\x98\xd6\x51\xca\xd8\x05\
\xb3\x72\x7a\x29\xe3\x28\xcc\x2a\x91\x52\xdc\x30\x2b\x9b\x9b\x32\
\x0e\xc2\xb4\x0e\x50\xc6\x56\x98\xd6\x56\xca\x58\x0b\xd3\x7a\x85\
\x32\xfe\x0c\xd3\x1a\x42\x19\xbf\x84\x69\xc5\x56\x32\xb0\x6f\xec\
\x30\xaf\xb7\x19\xd8\x42\x98\x58\x1f\x06\x54\x7b\x09\xce\x6a\x49\
\x19\xfd\x2e\x6b\x6b\x87\x96\xb7\x19\x48\x2e\xce\x4e\xf1\x83\xc6\
\x3d\xb5\x6c\xe3\x97\x95\xfc\x41\xed\x37\x9f\x6d\x58\xfe\xdc\x94\
\x9e\x36\x34\x95\x56\x4b\xff\x2a\xdb\xc3\x47\xf4\x90\xd5\x95\x95\
\x65\x87\x3e\x18\xd5\x12\x06\x15\x7f\xf7\x3a\x37\x05\x4a\x96\x0d\
\x8f\x47\x63\x4f\xd1\xbf\x29\xf0\x91\xfd\x1d\xeb\x9d\xc8\x89\x85\
\xf1\x24\x8c\x7a\xc7\x4d\x4d\x9e\x4d\xd3\x7e\x81\x06\x6c\x6b\xe9\
\xcf\x32\xf8\x98\xc5\x06\x3e\x4f\x85\xb1\xd8\xef\x7c\xd7\xcd\x40\
\xf6\x3d\x7f\x3e\x7e\xd2\x7a\x37\xb5\x6d\x76\xa1\x09\xfb\x02\x36\
\xf2\xed\x35\x30\x92\x7e\x9f\x51\x4a\xd5\x5f\xda\xe3\x47\x1d\xb6\
\x51\x4b\x41\x1b\x34\xb5\x98\x4d\x14\x39\x61\x18\x17\xbc\x41\x69\
\xd5\x2f\x75\x40\xbd\x16\xaf\x53\x2c\x27\x0a\x4d\x65\xd3\xc7\x44\
\x18\x44\xab\x59\xd5\x6c\x0e\xf7\x82\x14\xd4\xcb\x3e\x41\x5f\xdf\
\xde\x03\x1f\x83\x6a\xe9\xe3\x68\x6b\x18\x81\x6d\xf4\x37\x6c\xae\
\x9a\xc5\xa9\x38\xad\xed\xfc\x1a\x36\x56\x39\x2b\x0e\x3e\x2e\x3d\
\x4e\x81\x2b\x61\x00\xa9\x5b\x19\x8c\x9a\x97\xda\xe2\xb4\x8b\x66\
\x7e\xc1\x33\xb6\x3f\xde\x01\xbe\xce\xf9\x8a\x22\xfd\x11\x79\xbd\
\x4a\x18\xa4\xe3\x0f\x47\xa3\xde\xc5\x93\xe7\xe5\x6d\xfa\xbc\x60\
\xe5\x9c\xdf\x77\x82\x48\xfc\x87\x14\x1a\x82\x88\x1b\x5a\xc9\xe0\
\xed\xbb\xdd\x06\x19\x17\x7e\x41\xb1\xeb\x11\x69\x0f\x7b\x19\x92\
\x8f\x7a\x23\xb0\xbe\x47\x29\x56\x68\x47\x64\x45\x2d\x61\xc8\xd6\
\x74\x43\x00\xa3\xdd\xd4\xf0\x00\x22\x2b\x3e\x9f\x3a\xf0\xfe\xfd\
\x12\xf8\x91\x38\x9f\x5a\xbe\x6b\x85\x88\x4a\xdd\x4d\x7d\xd4\xbd\
\xd6\x05\x1a\x5c\x53\x8e\x51\xd3\x03\x10\x68\x37\x7b\xfd\x26\xdd\
\x14\x2c\xee\x03\x4d\x6d\x0a\xa9\x9b\xda\x37\xb3\x62\xe1\xcb\x76\
\xfb\xff\xa8\x6d\x29\x04\x2e\x2d\xa3\xae\xbc\xe3\xa1\x21\xaa\x80\
\xba\xaa\x5c\x3d\xbc\x25\x1a\x72\xfc\x6a\xe6\x2e\xfa\xb1\xc5\x05\
\x81\x0f\xa8\xb3\x8a\x44\x88\x2d\xa0\xee\x2a\x3e\x58\x34\x75\xf0\
\xc5\xce\xa8\xa4\x8b\x7a\x5c\x37\x76\x55\x39\xfd\x2a\x4e\x86\x40\
\x54\x35\xf5\x36\x00\x42\xf7\xf3\xe7\x52\x47\x19\x55\x3d\x20\x74\
\x8c\x7a\xeb\x09\x91\x01\x1e\xfa\xf7\x65\x5e\xce\x8c\xfb\xef\x9a\
\x38\x33\x77\xdd\x57\xfc\x39\x8c\x80\x58\x0e\x75\xb6\xd7\x09\x81\
\xae\xe5\xf4\xa3\x7c\xc5\x3d\x29\x38\xa3\xe3\xc8\xdc\xfd\xd4\xd9\
\x6c\x68\x68\xf5\x2e\x75\x55\x94\x01\x81\x84\xbd\xd4\xb6\x3f\xbb\
\x15\x9a\x5a\x4e\x7d\xfd\xd5\x0e\x4d\x57\x8e\x9e\xa0\x9b\x71\xfd\
\x63\x20\x10\x95\x4f\x4d\x7b\x47\x44\xc1\x47\xba\x97\xba\x9a\x6b\
\x43\x44\x4d\xa3\x16\xef\x8b\x2d\x20\xf0\x4f\xea\x6a\x06\x22\xcb\
\x75\x88\x1a\x0e\x64\x42\x64\x00\xf5\xe4\x9d\x8c\x08\x1b\x45\x0d\
\xef\xc6\x43\xc4\xf6\x29\x75\x54\x3b\x06\x91\xb6\x83\x62\x79\x4e\
\x08\x8d\xa4\x8e\xdc\x43\x11\x69\x31\x14\x5b\x62\x87\x50\xf4\x3e\
\xea\x67\xe7\x15\x88\xbc\x6a\x8a\xbc\x00\x0d\x0f\x52\x37\xde\x39\
\xd1\x30\x80\x12\x0a\xbc\x0e\x0d\x89\x65\xd4\xcb\xc1\xfe\x30\x84\
\x4f\xe9\x6b\x7b\x0b\x68\x78\x8e\x7a\x59\x99\x00\x63\x18\x4e\x1f\
\x25\x29\xd0\x90\x52\x4d\x7d\xec\x19\x06\xc3\xf8\x0f\x9b\xa8\xe9\
\x03\x2d\xcb\xa9\x8b\xed\xc3\xec\x30\x8e\x5e\x95\x6c\xc4\x93\x05\
\x2d\xe9\x75\xd4\xc1\x87\x83\x60\x2c\xdd\x0f\xb2\x81\xea\xc1\xd0\
\xb4\x81\x21\x73\xaf\xeb\x0b\xc3\x49\xde\xc2\x9f\x9c\xca\x84\xa6\
\x01\x94\x51\xfe\x64\xfe\x09\x8a\x1d\x58\x74\x6b\x2b\x18\x91\x2d\
\x73\x65\x35\x7f\xb0\xe7\x89\xce\xd0\x64\xdb\x4e\x19\x53\x00\xfb\
\x15\x13\x96\x17\xb1\xb1\xd2\x4d\xd3\x2e\x87\x81\x25\xde\xf8\xdb\
\x91\xf7\x5e\x09\x7f\x46\x52\x46\xa1\x0b\xa7\x9d\x7b\xf5\x0d\x59\
\xe3\xa7\xcd\x7a\xf9\xd5\x79\x0f\xdd\xd1\xa7\x73\x34\xce\x7a\xce\
\xaf\x29\xe3\x36\x98\xd5\x78\xca\xf8\x37\xcc\x2a\xfa\x00\x25\x78\
\xbb\xc3\xac\xee\xa7\x8c\x57\x60\x56\x31\x87\x29\xa1\xa2\x3d\xcc\
\x2a\x9b\x32\x66\xc0\xac\x5a\x96\x50\xc2\xa1\x58\x98\xd5\x34\xca\
\x18\x0d\xb3\x6a\xfd\x2d\x25\x6c\xb3\xc3\xac\xfe\x40\x19\xbf\x86\
\x59\xc5\x97\x51\xc2\x1a\x98\xd6\x0c\x4a\xa8\xe9\x02\xb3\x4a\x3c\
\x4e\x09\x73\x60\x5a\x33\x29\xe1\x58\x22\xcc\x2a\xe9\x24\x25\x4c\
\x82\x69\xcd\xa6\x84\xaf\x5d\x30\xab\xf3\x2a\x28\xe1\x2e\x84\x95\
\xad\xeb\xe0\xdf\xe8\xe6\xc6\x14\xf8\x33\x97\x12\x76\xda\x11\x4e\
\xed\x36\x51\x57\xab\xe2\xa0\xa9\x5d\x15\x25\xdc\x82\xb0\xda\x48\
\x9d\x2d\x84\xa6\x1c\x4a\xd8\x8c\xb0\x4a\xa5\xde\x2a\x1c\xd0\x90\
\xe2\xa6\x84\xbe\x08\xab\x5e\xd4\x5d\x02\x34\xe4\x51\xc2\x7b\x08\
\xaf\xd8\x0a\xea\x6c\x0f\x34\x5c\x4b\x09\xde\x74\x84\xd9\x38\xea\
\xcb\x33\x10\x62\x51\xbb\x29\xe1\x75\x84\xdd\x0d\x6f\xed\xde\xa3\
\x9b\x1d\x2b\xbb\x43\xc3\x14\x4a\xf0\x5c\x04\xb3\x4a\x3e\x4e\x09\
\x0b\x60\x5a\x79\x94\x50\x79\x3e\xcc\x6a\x22\x65\x3c\x0b\xb3\xea\
\xe7\xa1\x84\xf2\x44\x98\x54\x4a\x29\x65\x3c\x02\x93\x8a\xd9\x46\
\x19\x87\x5b\xc2\x9c\x6c\xaf\x51\xca\x7d\x30\x27\xe7\x0a\xca\x59\
\x92\x04\x33\x6a\xf9\x3e\x65\x95\x67\x3b\x61\x3a\x6d\xb6\xb0\x19\
\xf6\xdc\x04\x93\x69\xbf\x8b\xcd\xb3\xfe\x12\x98\xc9\x4d\x87\xd8\
\x5c\x9e\xa7\x5d\x38\x2b\x38\x92\xd2\x7a\x77\x81\x5f\x49\x2b\x18\
\x8c\x1d\x19\x30\xbe\x6e\xff\xf0\xf2\x7b\x9f\x64\xb7\x83\xa6\x3b\
\x4a\x19\x9c\x9a\x27\x9c\x30\xb6\xce\x79\x5e\xd6\xab\xcb\xbf\xcd\
\x01\x91\xcb\xd7\x32\x78\xdb\x2f\x87\x91\xa5\x1e\x66\x43\xfb\x1f\
\x3e\x07\x4d\xc4\x8e\xfd\x88\x21\x71\x4f\x77\xc0\xb0\x92\xf6\xb2\
\x89\xaa\xa5\x19\x38\xc3\xd1\x73\xd1\x09\x86\xac\x20\x19\x06\xe5\
\xfc\x2f\x05\x4e\xee\x7a\x27\xe7\xc1\xa1\x83\x27\xe7\xbc\x5f\x58\
\x43\x5d\x1c\xc9\x84\x31\x0d\x63\x98\xd4\xfd\xc9\x0e\x23\x2a\x60\
\xc8\x4a\x72\x2b\x28\xa3\x20\x19\xc6\x93\xc6\x90\x95\x5f\x81\xc4\
\x47\x8a\x29\xe1\x48\x26\x0c\x67\x12\x43\x75\xea\x1a\x7c\xcf\x39\
\x72\x1b\x03\xab\x9b\x61\x87\xc1\x3c\xc4\x10\x55\x67\xa2\x5e\xbf\
\xb5\x5e\x06\xb4\x31\x19\xc6\xf2\x28\x43\xe3\xb9\x15\x67\x74\x79\
\xb9\x82\x81\x1c\xe9\x0f\x43\x79\x9c\x21\xa9\x1b\x81\x46\xce\xcd\
\xad\x63\x00\x75\x77\xc3\x48\xae\x63\x28\xdc\xf7\xa0\xa9\xee\x9b\
\x19\x40\x6d\x16\x8c\x64\x27\x83\xf7\x55\x77\x08\xdc\x59\x4c\xff\
\x3c\xb7\xc0\x40\xc6\x32\x68\xab\xe3\x21\x14\xfb\x74\x15\xfd\x3a\
\x79\x21\x8c\x23\xa6\x90\xc1\xa9\xfe\x1d\x34\x5d\xf0\x26\xfd\xda\
\xe2\x80\x71\x9c\xbb\x95\xc1\x28\xcc\x80\x3f\x03\x0f\xd0\x9f\x3f\
\xc2\x40\x62\xdf\x61\xb3\xd5\xe6\xb6\x86\x7f\x71\x4b\xe8\x87\xa7\
\x27\x0c\xc4\xf1\xa2\x87\xcd\xe2\x5d\x95\x86\xc0\x06\x1e\xa0\xb6\
\x8f\x60\x28\x9d\xe6\x9d\xa2\xbc\x75\xe9\x90\x12\x97\x4b\x6d\xc3\
\x61\x2c\x6d\x1e\x2b\xa1\x9c\x8d\x57\x43\xda\x0d\x07\xa9\xa5\xc8\
\x05\x83\x89\x99\x50\xc8\x80\x4e\xae\xba\x16\xcd\x11\x9f\x47\x2d\
\x93\x60\x38\xf6\xa1\x05\x55\xf4\xa3\x74\xe9\xcd\x31\x68\xae\xfb\
\xaa\x29\x76\x34\x1e\x06\x14\xdd\x67\xfa\xfa\x13\x14\xf9\x7a\x6e\
\x5f\x07\x82\x91\x51\x44\xb1\x67\x11\xb4\xa8\x4e\xa9\x21\x4b\x89\
\x82\x06\x47\x8f\xec\x35\xfb\xca\xdc\xac\x77\x6a\xc7\x9a\xe7\xef\
\xbb\xbe\x33\x82\x16\xb7\x9a\x42\x55\x1d\x11\x9c\x73\x56\x54\x51\
\x07\x95\xcb\x12\xe0\x8f\x33\xb1\x63\x5a\x8f\x7e\xbd\x93\x11\xba\
\xf9\x14\x7a\x05\x41\xb1\x6d\xa4\x4e\xde\x43\x98\xd8\xdf\xa0\x88\
\x37\x1d\xc1\xb8\x8a\xba\xe9\x86\x30\x89\xf9\x92\x22\xf9\x08\xc6\
\x9d\xd4\xcd\x10\x84\x4b\x3f\x0a\xf5\x46\x10\xd2\xa9\x9b\x34\x84\
\x4d\x2e\x45\xd6\x21\x18\xab\xa9\x93\x57\x11\x3e\x89\x65\x14\xf0\
\x5e\x86\x20\x44\xcf\x2e\xa6\x0e\x0e\xce\x72\x21\x8c\x1e\xa5\xc8\
\xab\x08\x4e\x74\x4c\xc8\x5c\x08\xaf\xd8\x23\x14\xf0\x74\x82\x65\
\x4c\xa4\xc8\x7c\x58\x46\xf4\x7e\x0a\x54\xb6\x85\x65\x8c\xa1\xc8\
\x53\xb0\x0c\xc7\x5e\x0a\x94\xb5\x82\x65\x64\x51\x64\x2a\x2c\xc3\
\xf6\x19\x05\x0e\xb9\x60\x19\xc3\x28\x32\x16\x96\x61\x2f\xa2\xc0\
\x5e\x3b\x2c\x63\x02\x45\x86\xc2\x32\x62\x4a\x28\xf0\x31\xac\x63\
\x3a\x45\xae\x83\x65\x24\x9c\xa4\xc0\xbf\x60\x1d\x2f\x50\xa4\x07\
\x2c\xa3\x43\x0d\x05\xde\x80\x75\xfc\x8d\x02\x75\x5d\x61\x19\x97\
\x7a\x29\xb0\x10\xd6\xb1\x96\x02\x07\x61\x1d\xbd\x29\xd2\x05\xd6\
\xb1\x99\x02\xe3\x61\x1d\xb7\x50\xe0\x35\x58\x87\xed\x0b\xfa\x9a\
\x07\x0b\x19\x4d\x5f\x77\xc3\x42\x9c\xc5\xf4\x71\x19\xac\x64\x2a\
\x9b\x2a\x72\xc0\x4a\xa2\xb7\xb1\x31\x4f\x2f\x58\xcb\x05\xe5\x6c\
\xe4\x31\x58\xcd\x60\x2f\x1b\x58\x60\x87\xe5\xdc\x5b\xca\x1f\x1d\
\x1e\x04\x2b\x6a\xfd\xc4\x09\x7e\xcf\xfb\xf1\x93\x6d\x60\x51\x71\
\xe9\x99\x59\x59\x6d\xa1\x28\x8a\xa2\x28\x8a\xa2\x28\x8a\xa2\x28\
\x8a\xa2\x28\x8a\xa2\x28\x8a\xa2\x28\x8a\xa2\x28\x8a\xa2\x28\x8a\
\xa2\x28\x8a\xa2\x28\x8a\x62\x71\xff\x07\xec\xc8\x9a\xe0\x08\x32\
\x3b\xff\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\xcc\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x80\x08\x03\x00\x00\x00\xf4\xe0\x91\xf9\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x03\x76\x00\x00\x03\x76\x01\
\x7d\xd5\x82\xcc\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\
\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\xf0\x50\x4c\x54\
\x45\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\xe5\xf8\x92\xe5\x00\x00\x00\x4f\x74\x52\x4e\x53\x00\x01\x03\
\x04\x05\x06\x07\x08\x09\x0b\x0c\x0d\x0f\x11\x15\x18\x19\x1c\x1d\
\x25\x2d\x33\x39\x3b\x3e\x46\x49\x4a\x4c\x4d\x58\x5a\x5f\x62\x63\
\x67\x69\x70\x77\x7f\x80\x82\x84\x8a\x8c\x8f\x97\x98\xa4\xa7\xab\
\xb0\xb2\xb3\xb6\xbb\xbe\xc1\xc3\xc8\xce\xd2\xd8\xd9\xda\xde\xe0\
\xe3\xe4\xe7\xe8\xf2\xf3\xf5\xf7\xf8\xf9\xfd\xfe\xad\xb0\xf5\x75\
\x00\x00\x02\xf3\x49\x44\x41\x54\x78\xda\xed\x9a\x69\x57\xe2\x30\
\x14\x86\x41\x51\xa0\xb8\x14\x50\x40\x64\x51\x29\x2a\x3b\x8a\xb2\
\x0b\x88\x42\x59\x84\xfb\xff\xff\x8d\x33\xc7\x33\x03\x4d\x43\xdb\
\xd0\x2c\x73\xe6\xe4\xfd\xfc\xde\xdc\x87\x34\xb9\x09\x49\x3c\x9e\
\x3d\x15\x4c\xe7\x4b\xf5\xf6\x40\xd7\x07\xed\x7a\x29\x9f\x0e\x7a\
\xb8\xea\xec\xf6\x65\x01\x06\x2d\x5e\x6e\xcf\x78\x65\x3f\xd2\xba\
\x2b\xc0\x68\xd5\xd5\x8e\x38\xa4\xf7\x66\xdf\x61\xa7\xde\xb3\x5e\
\xd6\xf9\xaf\xfa\x60\xa9\xfe\x15\xd3\xf4\xe1\x16\xd8\xaa\x15\x66\
\x97\x3f\x36\x01\x07\x9a\xc4\x58\xe5\xbf\x59\x82\x23\x2d\x6f\x98\
\xa4\x3f\x28\x83\x63\x95\x0f\x18\xe4\x6f\x00\x81\x1a\xf4\x09\xca\
\x40\xa4\x32\xf5\xef\x0f\x84\xa2\x3c\x0e\x62\x4b\x52\x80\x25\xd5\
\xb9\x10\x9e\x00\xb1\x26\x34\xeb\x41\x0b\xf6\x50\x8b\x62\xfd\x85\
\xbd\x44\xad\x2a\x7b\xf1\xf5\x7f\xd9\xcc\x25\xa2\x8a\x12\x4d\xe4\
\x9a\xf8\x11\xd2\xa7\xb5\x32\x65\x71\xad\x4f\xb5\xd0\xc6\x11\xd2\
\xa6\x38\x4f\x96\xd2\xfa\x8f\x59\x7f\xbf\x0a\x8a\xd1\xa4\x14\xbe\
\x30\xab\x33\x9d\xfd\x81\x86\xf9\xf9\x71\xb3\x2d\x8e\xe9\x04\x8d\
\x0a\x40\xd7\xd4\xee\x5b\x04\xe7\x8b\xbc\x99\x8c\x5d\x2a\xfb\x3f\
\xd3\xfe\x6b\x1a\xc1\x3b\x23\xa6\x3e\x58\xd1\xd8\x27\xde\x99\xbe\
\x7f\x7c\x97\x35\x6e\x1a\x07\x77\x14\x00\x5e\xd1\x46\x0b\xbb\xbd\
\x05\xd4\xfb\xea\x3e\x7f\x70\x81\x7e\x00\x65\xb7\x59\x41\x3f\xc2\
\xc2\xfd\xff\x85\x34\xd1\xc8\x36\xcd\x98\xb4\x6b\x80\x3c\x5a\xff\
\x42\x56\xee\x10\x5a\x13\xf3\xae\x01\x4a\x48\x8b\x4d\x6b\x7b\x13\
\xb1\x97\x5c\x03\xd4\x91\x16\x73\xd6\xf6\x1c\x62\xaf\xbb\x06\x68\
\x23\x2d\x26\xac\xed\x09\xc4\xde\x76\x0d\x30\x40\x5a\x8c\x5a\xdb\
\xa3\x88\x7d\xe0\x1a\x40\x47\x5a\x54\xac\xed\x0a\x62\xd7\x5d\x03\
\xa0\xf3\x8a\xb6\x5f\x02\x48\x00\x09\x20\x01\x24\x80\x04\x90\x00\
\x12\x40\x02\x48\x00\x09\xf0\x2f\x03\x1c\x27\x6b\x9d\xd1\x0c\x5c\
\x6a\x36\xea\xd4\x92\xc7\x7b\xa4\x57\xab\x3a\x50\x93\x5e\x55\x09\
\xd3\xfb\x8b\x73\xa0\xaa\x79\xd1\x4f\x92\xff\xbc\x07\xd4\xd5\x3b\
\x77\x9e\xff\xe2\x03\x18\xe8\xe3\xc2\xf1\xef\x67\x92\xff\x17\x81\
\xc3\x3e\xf0\xf7\x80\x91\x7a\xce\xc6\x41\x11\x98\xa9\xe8\x68\xfe\
\xcd\xd9\x01\xcc\x9d\xcc\xc6\x2a\x30\x54\xd5\x41\xfd\xd3\x59\x02\
\xe8\xf6\x35\x31\x09\x4c\x95\xb4\x05\xa8\xb1\x05\xa8\xd9\x02\x74\
\xd8\x02\x74\x6c\x01\x46\x6c\x01\x46\xb6\x00\x33\xb6\x00\x33\xe2\
\x0d\x08\x6d\x49\x00\x09\x20\x01\xfe\x2f\x80\xe1\x63\x4a\x55\x53\
\x0f\x43\x41\x00\xeb\x4a\xe0\x27\x24\x50\x59\x8b\x00\x58\x5f\x6f\
\x82\xae\xd7\x02\x00\x2a\xdb\x51\x15\xfe\x00\xc3\xc0\x76\x54\x60\
\xc8\x1d\xe0\xd1\x18\xf6\xc0\x1d\x20\x65\x0c\x4b\x71\x07\x40\x36\
\xd8\x2a\x77\x80\x53\x63\xd8\x29\x77\x80\x4b\x63\xd8\x25\x77\x80\
\x7b\x63\xd8\x3d\x77\x80\x4f\xdf\x76\x94\xef\x93\x7f\x21\x7a\x3a\
\xdc\x04\x1d\x3e\x89\x58\x0b\x9e\xff\xf6\x81\xef\x59\xcc\x6a\xd8\
\xf8\x13\x43\xf2\xde\x95\xea\x72\xec\x26\x46\x02\x48\x00\x09\x20\
\x01\x24\x80\x04\xa0\x02\xa0\x12\x6e\x48\xa9\x03\x64\x7e\x42\x32\
\xc2\x00\xc6\x27\xbf\x23\x4e\xc6\xe2\xce\x07\xc6\x19\x55\xcd\x10\
\xe5\xb7\x07\x10\x7e\x50\x29\xfc\xa8\x56\xf8\x61\xb5\xf0\xe3\x7a\
\xe1\x17\x16\xc2\xaf\x6c\x84\x5f\x5a\x89\xbf\xb6\x13\x7e\x71\x29\
\xfe\xea\x56\xf8\xe5\xb5\xf8\xeb\x7b\xf1\x0f\x18\xc4\x3f\xe1\x10\
\xff\x88\x85\xdf\x33\x9e\x6f\x19\xba\x0e\x2b\x6c\xc7\xd6\x01\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0e\x27\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\
\x00\x00\x0d\xee\x49\x44\x41\x54\x78\x9c\xed\x9d\x7f\x70\x54\xd5\
\x15\xc7\xbf\xe7\xee\x26\x81\x40\x44\xa7\x06\xdb\x69\x19\x9d\x5a\
\xaa\x56\x11\x45\x42\x07\x34\xec\xbd\x9b\xa4\x19\x10\xad\x62\x0d\
\x56\x4b\xed\x0f\xad\x38\x62\xc7\xa2\x23\xad\xad\x62\xa7\x6a\xeb\
\x74\xb4\x3f\xac\x3a\xda\xfa\x63\x68\x3b\x55\xc0\x5f\x20\x41\x48\
\x76\xdf\xdd\xc4\x88\x12\x05\x07\x74\x14\xb5\x6a\xb5\xb6\x4a\x50\
\x71\x48\x14\xb2\xbb\xf7\xf4\x8f\x6c\x28\x09\x81\xec\xdb\xf7\xde\
\xee\xdb\xdd\xf7\x99\x61\x98\x85\x77\xcf\x3d\xc9\xfd\xee\xfd\x71\
\xee\xbd\xe7\x91\xd6\x1a\x01\xe5\x8b\x28\xb4\x03\x01\x85\x25\x10\
\x40\x99\x13\x08\xa0\xcc\x09\x04\x50\xe6\x04\x02\x28\x73\x02\x01\
\x94\x39\x81\x00\xca\x9c\x40\x00\x65\x4e\x20\x80\x32\x27\x5c\x68\
\x07\x4a\x05\xa5\xd4\x31\x00\xce\x60\xa2\x69\x60\x3e\x15\x44\x06\
\xcc\x9b\xc1\xbc\x99\x88\x3a\x2d\xcb\xfa\x77\x81\x5d\x1c\x11\x0a\
\x42\xc1\xce\x50\x4a\x85\x99\x68\x09\x98\x97\xe0\xe0\x5f\xa8\x3d\
\x60\xbe\x25\x91\x48\xdc\xcd\xcc\x9c\x4f\xff\x46\x23\x10\x80\x03\
\x1a\x1a\x1a\x8e\x4d\x33\xdf\x0f\xe6\x13\xb3\x2c\xb2\x89\x80\x1f\
\x58\x96\xf5\xbe\xa7\x8e\xd9\x20\x98\x03\xe4\x88\x52\x2a\x9c\x36\
\xe6\xcf\x36\x1a\x1f\x00\x66\x30\x70\x17\x11\x91\x67\x8e\xd9\x24\
\x10\x40\xee\x5c\x05\xe0\xe4\x1c\xca\xcd\x8e\x44\x22\xdf\x77\xdb\
\x99\x5c\x09\x04\x90\x03\x52\xca\x49\x0c\x5c\x9d\xb3\x01\xa2\x1b\
\x1b\x1b\x1b\x3f\xe7\xa2\x4b\x39\x13\x08\x20\x37\x66\x02\xa8\x70\
\x50\xbe\x3a\x95\x4a\x4d\x73\xcb\x19\x27\x04\x02\xc8\x05\x21\x4e\
\x73\xc1\xc6\x29\x2e\x78\xe2\x18\x5f\xc6\x01\x94\x52\x5f\x82\x10\
\xdf\x63\x63\x66\x83\xe8\x7d\x18\xf3\x0a\x55\x56\xae\xb3\x36\x6c\
\x78\xb1\xd0\xbe\x01\x00\x98\x4f\x75\xc1\x4a\x20\x80\x91\x50\x4a\
\x7d\x85\x89\x9e\x80\x31\x47\x01\x00\x98\x01\xa2\xb9\x9c\x4c\x5e\
\x2d\xa3\xd1\x7f\x02\x58\x05\x63\x56\x69\xad\xdf\x2a\xa0\x9b\x7b\
\x1d\x5b\x30\xc6\xb9\x0d\x17\xf0\x9d\x00\x18\xb8\x1b\xcc\x47\x8d\
\xfc\x9f\x7c\x2c\x80\xa5\x20\x5a\x2a\xa5\xdc\x0c\x21\x56\x71\x3a\
\xfd\x58\x22\x91\xe8\xc9\xab\x93\x44\x5b\xc0\x3c\xd3\xa1\x15\x5f\
\xf4\x66\xbe\x12\x40\x24\x12\x99\x01\x20\xbb\xee\x75\x20\xe4\x3a\
\x8d\x84\xb8\x09\x40\x02\xcc\x8f\x85\xc3\xe1\x58\x7b\x7b\xfb\x07\
\x9e\x3a\x09\x80\x98\x37\xbb\x10\xce\x0b\x04\x30\x1c\x0a\x85\x72\
\x19\x17\x05\x00\x05\x22\x95\x4a\xa7\x59\x4a\xb9\x95\x88\xda\x8c\
\x31\x1b\x3a\x3a\x3a\xb6\x78\x11\x7a\x35\xc6\x74\x91\x10\xbd\x00\
\xc6\xe7\x68\xe2\xa3\x74\x3a\xbd\xa5\xb3\xb3\xd3\x4d\xb7\x72\xc2\
\x57\x02\x00\x30\x72\xd7\x9f\x3d\x04\xa2\xa9\x0c\x4c\x25\x21\xae\
\x01\xb0\x13\xcc\x31\x84\x42\xed\x15\x42\xc4\xdb\xda\xda\x3e\x71\
\xc3\xc9\x44\x22\xd1\x03\xa2\x1b\xc0\x7c\x7b\x6e\x4e\x62\x49\x67\
\x67\xe7\x6e\x37\x7c\x71\x8a\xaf\xf6\x02\x94\x52\x17\x30\xf0\x27\
\x8f\xcc\xa7\x00\x6c\x05\xf3\xb3\x00\xba\xc3\xe1\xf0\x26\xa7\xc3\
\x85\x94\x72\x05\x88\xa2\x36\x8b\xad\xd4\x96\x75\xb9\x93\x7a\xdd\
\xc4\x57\x3d\x00\x33\x77\x81\x28\x05\x6f\xfc\x0a\x03\x98\x06\xa2\
\x69\x00\x90\x4a\xa7\x21\x95\x7a\x07\xcc\xcf\x01\xd8\x44\x44\xdd\
\x5a\xeb\x97\xed\x0c\x19\xe1\x70\xf8\xf2\x94\x31\xbf\x03\xf3\xdc\
\xac\x0a\x10\x3d\x5c\x55\x51\x71\x6d\x2e\xce\xcf\x9d\x3b\xb7\x6a\
\xdd\xba\x75\xfd\x6e\x0f\x69\xbe\xea\x01\x00\x40\x35\x34\xfc\x82\
\x8d\xb9\xaa\x40\xd5\xf7\x82\x79\x1b\x80\xd7\x00\xbc\x0a\x60\x7b\
\x38\x1c\xde\x3e\x5a\x4f\x21\xa5\x6c\x01\xd1\x6f\x00\x1c\x76\x90\
\x47\x76\x82\x79\x89\xd6\xba\xd5\xae\x43\x91\x48\x64\x1a\x85\x42\
\x8b\xc1\x3c\x0f\x00\x40\xb4\x8d\x98\x97\x59\x96\xf5\xb4\x5d\x5b\
\x23\xe1\x3b\x01\xd4\xd5\xd5\x55\x8c\xab\xa9\xb9\x15\xcc\x0b\x01\
\xf8\x65\xd7\x6c\x17\x88\xb6\x03\xd8\x0e\xe0\x55\x18\xb3\xad\xaf\
\xaf\xef\xf9\xee\xee\xee\xe4\xe0\x03\x4a\xa9\xf1\x18\x08\xee\x9c\
\xc2\xc0\x54\x30\xa7\x10\x0a\x6d\x45\x3a\xfd\x62\x55\x55\xd5\xd6\
\xf5\xeb\xd7\xf7\xd9\xa9\x30\x12\x89\x7c\x81\x42\xa1\x5f\x82\x79\
\xfe\x88\x0f\x30\xff\x48\x6b\xfd\xa8\x83\x9f\x09\x80\x0f\x05\x30\
\x88\x94\x72\x26\x80\xc5\x99\x31\xd6\x49\xdc\xdd\x2b\x3e\x05\xd1\
\xd3\x10\xe2\x01\xdd\xde\xde\xe6\x96\xd1\xba\xba\xba\x8a\xf1\x87\
\x1d\x76\x05\x1b\xb3\x04\x40\xf5\x21\x1e\xdd\x43\xc0\x19\x96\x65\
\xbd\xed\xa4\x3e\xdf\x0a\x60\x90\xfa\xfa\xfa\x9a\x50\x28\x34\x07\
\x42\x9c\x03\x66\x05\x3f\x8a\x81\xf9\x3e\x22\x5a\x66\x59\xd6\x9e\
\x5c\x4d\x28\xa5\xc2\xcc\x3c\x07\x44\xd7\x03\xf8\x72\x96\xf5\xfe\
\x41\x6b\xfd\xab\x5c\xeb\x04\x8a\x40\x00\xfb\x53\x5f\x5f\x5f\x13\
\xae\xaa\x3a\x93\xd3\xe9\x6f\x02\x90\xf0\x93\x18\x98\x37\xf7\xf5\
\xf5\x9d\xb9\xff\xb0\x90\x0d\x52\xca\xc9\x44\x74\x11\x03\x0b\x00\
\xd4\xda\xac\x73\x9b\xd6\x5a\xd9\x2a\x33\x8c\xa2\x12\xc0\xfe\x34\
\x35\x35\x4d\x48\xa5\x52\x73\x18\xf0\x8f\x18\x88\xee\xd2\xf1\xf8\
\x0d\x87\x7a\x64\xf6\xec\xd9\x47\x08\x21\x4e\x80\x10\x27\x81\xf9\
\x5c\x00\x75\x0e\x6a\x7c\x53\x5b\xd6\x0c\x07\xe5\x8b\x57\x00\xfb\
\xb3\x4f\x0c\xcc\x67\x81\x68\x16\x80\x9a\x02\xb9\xc2\x00\xba\x01\
\xf4\x81\xf9\x53\x00\x9f\x02\xf8\x0c\x40\x1a\x44\xc7\x02\x38\x01\
\x76\xbf\xe5\x87\x82\xe8\x39\x1d\x8f\x9f\xe9\xc4\x84\xaf\xe2\x00\
\xb9\x92\x89\xf0\x3d\x04\xe0\xa1\x96\x96\x96\x50\xcf\xae\x5d\x53\
\x38\x95\x9a\x05\xe0\x74\x18\x33\x03\x44\x47\xe4\xc9\x15\x02\x30\
\xf0\x8d\xcc\xc7\xb1\x3f\x63\x56\x39\x35\x51\x12\x3d\xc0\xa1\x20\
\x22\x52\x4a\x7d\xcd\x18\x33\x2b\xd3\x3b\xcc\x04\x70\x64\xa1\xfd\
\x72\x0c\xd1\x0b\x55\x15\x15\xf3\xed\x2e\x2f\x0f\x30\x53\xea\x02\
\x18\x89\xe8\x9c\x39\xc7\x99\xfe\xfe\x59\x30\x66\x16\x80\x06\x1c\
\x3c\x80\xe3\x4f\x88\x5e\x48\x27\x93\xdf\x72\x63\x3f\xa1\x2c\x05\
\x30\x88\x52\xea\x18\x06\xe2\x28\x1e\x01\xec\x84\x10\x37\x27\xe2\
\xf1\xbf\xb9\x15\x12\x2e\x89\x39\x40\x2e\x28\xa5\xc6\x30\xf0\x00\
\x8a\xa3\xf1\x93\x24\xc4\x3d\xa9\xfe\xfe\xdb\x3a\x3b\x3b\x77\xbb\
\xb9\x1d\x50\xb6\x02\x60\xe6\x5b\x40\x34\xa5\xd0\x7e\x8c\x0a\xf3\
\x5a\x22\x5a\x66\xc5\x62\x6f\x7b\x71\x7e\xa0\x2c\x05\xa0\x1a\x1b\
\x2f\x00\xd1\x77\x0b\xed\xc7\x21\xf8\x0c\xcc\xad\x00\x1e\xd4\x5a\
\x6f\xf4\xb2\xa2\xb2\x13\x80\x94\xf2\x04\x10\xfd\xb6\xd0\x7e\x8c\
\x00\x83\xa8\x83\x84\x58\x59\x19\x0a\xad\x71\x3a\xbb\xcf\x96\xb2\
\x12\x40\x66\xc7\xee\x41\x00\x63\x0b\xec\xca\x20\x3b\x01\xbc\x06\
\xa2\xa7\xc2\x42\x3c\x92\x8f\xf3\x8c\xc3\x29\x2b\x01\x30\xf3\xad\
\x99\x88\x9c\x17\xc6\x37\x42\x88\x4e\x00\xe3\xc0\x5c\x0d\xe6\x6a\
\x08\x31\x1e\xc6\x8c\x85\x10\xe3\x00\x7c\x08\x63\x5e\x07\xf0\x06\
\x11\xbd\x0e\xe0\x0d\xcb\xb2\x76\x79\xe2\x8b\x0d\xca\x46\x00\x4a\
\xa9\xe9\x20\x6a\xf1\xc8\x7c\x67\x32\x99\xbc\xb0\xab\xab\xeb\x33\
\x8f\xec\x7b\x46\xd9\x5c\x0d\x63\xa2\x9b\xe1\xcd\x01\x93\xce\x64\
\x7f\x7f\x51\x36\x3e\x50\x26\x02\x90\x52\xce\x07\xb3\xf3\xfb\x7c\
\x07\x52\xd4\x8d\x0f\x94\x81\x00\x94\x52\x63\x20\xc4\x32\x0f\x4c\
\x17\x7d\xe3\x03\x65\x20\x00\x08\xb1\x18\xcc\x5f\x74\xd9\x6a\x49\
\x34\x3e\x50\xe2\x02\x68\x6c\x6c\x3c\x8a\x8d\xf9\xb1\xcb\x66\x5f\
\x29\x95\xc6\x07\x4a\x5c\x00\xa9\x54\x6a\x29\x0e\x7d\xb0\xd2\x2e\
\x49\x41\xb4\xa8\x54\x1a\x1f\x28\x61\x01\xd4\xd7\xd7\xd7\x80\xe8\
\x7c\x57\x8d\x0a\x71\x4b\x3c\x1e\x7f\xd9\x55\x9b\x05\xa6\x64\x05\
\x10\xae\xac\xfc\x21\xdc\x8c\xf8\x31\x77\x25\xe2\x71\xaf\xae\xad\
\x15\x8c\x92\x0c\x04\x49\x29\x27\x83\xe8\x1a\x17\x4d\xee\x26\xa2\
\x2b\xfc\x96\xe4\xd1\x0d\x4a\xae\x07\x68\x69\x69\x09\x81\xe8\x4e\
\x00\x63\x5c\x33\x2a\xc4\xb5\x7e\x4d\xf5\xea\x94\x92\x13\xc0\x8e\
\x1d\x3b\xae\x00\xe0\x66\x06\xae\x37\xc3\x44\x1d\x2e\xda\xf3\x15\
\x25\x75\x24\x2c\x1a\x8d\x1e\x67\x98\x2d\x00\x95\x2e\x9b\x66\x10\
\x3d\x03\xe0\x11\x53\x5d\xbd\xa6\xe3\xc9\x27\x3f\x76\xd9\x7e\xc1\
\x28\x19\x01\xb4\xb4\xb4\x84\x76\xf4\xf4\x3c\x85\x6c\x53\xcc\xe4\
\x4e\x0a\x80\x05\x21\x1e\xad\x0a\x87\x5b\xf3\xb5\x6f\xef\x15\x25\
\x33\x09\xec\xe9\xe9\xb9\x12\xde\x37\x3e\x30\xf0\x3b\x6b\x82\x31\
\x4d\x7b\xfb\xfb\xf7\xc8\x68\x74\x3d\x8c\x79\x64\xe2\xc4\x89\xeb\
\x57\xac\x58\x91\xce\x43\xfd\xae\x52\x12\x3d\x80\x52\xea\x78\x06\
\x2c\x14\xf6\x7a\xd8\x9b\x04\xdc\x0e\x60\x95\x65\x59\xa9\x02\xfa\
\x61\x8b\xa2\x17\xc0\xec\x79\xf3\x8e\x10\xbd\xbd\x6b\x41\xf4\xd5\
\x42\xfb\x02\x00\x20\xfa\x17\x8c\xb9\x8d\x88\x56\x14\x83\x10\x8a\
\x7a\x15\xd0\xdc\xdc\x3c\x4e\xf4\xf5\x3d\xec\x9b\xc6\x07\x00\xe6\
\xa3\x41\xf4\x47\x26\x8a\x49\x29\xf3\x31\x24\x39\xa2\x68\x05\x50\
\x57\x57\x57\xb1\x37\x99\x5c\x0e\x77\x97\x7c\xee\xc1\x7c\x22\x88\
\x36\xc8\x68\xf4\xe6\xe6\xe6\xe6\x71\x85\x76\xe7\x60\x14\xa5\x00\
\x88\x88\xc6\xd5\xd4\xdc\x03\xe6\x88\xad\x82\xcc\x9b\x41\x74\x2f\
\x98\x37\x02\xe8\xf5\xc6\xbb\x21\x10\x98\x2f\xdb\xdb\xdf\xff\xb4\
\x52\x6a\x7a\x1e\xea\xb3\x4d\x51\xce\x01\xa4\x94\xb7\x81\xe8\x62\
\x9b\xc5\xfe\x1b\x0e\x85\x1a\x07\x4f\xde\x12\x11\xc9\xe6\xe6\x63\
\x39\x99\x9c\x42\xc0\x54\x36\xe6\x64\x00\x53\x00\x78\x75\x93\x78\
\x0f\x01\x8b\x2d\xcb\x7a\xdc\x23\xfb\x39\x51\x74\x02\x90\x52\x5e\
\x07\xa2\x25\x36\x8b\xf5\xb3\x10\xf3\x12\xb1\xd8\xe6\x2c\xec\x4f\
\x22\xa2\xd3\x19\x68\x00\xb3\x74\xf9\x6a\x39\x13\x70\xb3\x65\x59\
\xbf\x77\xd1\xa6\x23\x8a\x4a\x00\x32\x1a\x5d\x02\xe6\xeb\x6c\x17\
\x64\xbe\x52\x6b\xfd\x0f\xbb\xc5\x88\x88\xa4\x94\xa7\x31\x73\x53\
\xe6\x44\xf1\x24\xdb\x75\x8f\xec\xcf\x7d\x5a\xeb\xa5\xae\xd8\x72\
\x48\x51\x08\x40\x29\x75\x38\x03\x77\x00\x98\x63\xbb\x30\xf3\xbd\
\x5a\x6b\xfb\xa2\x19\x06\x11\x51\x24\x12\xa9\x07\x70\x11\x88\xce\
\x84\xd3\xcd\x26\xa2\xeb\x74\x3c\x7e\xaf\x53\xbf\x9c\xe2\xfb\x49\
\x60\xa4\xa1\x61\x5a\x26\xc8\x63\xbf\xf1\x89\xba\x88\xe8\x90\x39\
\x7b\xb2\x85\x99\x59\x6b\xdd\xa1\xb5\xbe\xac\x22\x1c\x3e\x31\x93\
\xa5\x3c\xf7\xfb\xf9\xcc\x37\x49\x29\x67\xbb\xe1\x9b\x13\x7c\xdd\
\x03\x28\xa5\x16\x31\x70\x23\x72\x0b\x59\xbf\x9b\x99\xf4\x7d\xe8\
\xb2\x5b\xfb\xc8\x24\x7c\x5a\x02\xa2\x4b\x90\x5b\x14\x72\x17\x98\
\x9b\x0a\xf9\xf2\x0b\x5f\x0a\xa0\xa9\xa9\x69\x42\x32\x9d\xbe\x23\
\xeb\x1c\xbc\x07\xb2\x8b\x80\x73\x2c\xcb\x7a\xc9\x55\xc7\x0e\x42\
\x43\x43\xc3\xd1\x69\xe6\x5f\x83\xf9\x1b\xb6\x0b\x33\xc7\xb5\xd6\
\x5e\xdd\x58\x1a\x15\xdf\x09\x20\x12\x89\xcc\x20\x21\xee\x41\xee\
\x13\xae\x1d\x60\x3e\x4f\x6b\xfd\x8a\x9b\x7e\x8d\x06\x11\x91\x8c\
\x46\x7f\x9e\x53\x9e\x63\xe6\xb3\xb5\xd6\xcf\x78\xe0\xd6\xa8\xf8\
\x66\x0e\xa0\x94\x3a\x5e\x46\xa3\xcb\x49\x88\x56\xe4\xde\xf8\xef\
\x10\x30\x37\xdf\x8d\x0f\x0c\xcc\x11\xac\x58\xec\x26\x0a\x85\x16\
\x01\xb0\x97\x31\x74\x20\x3b\x68\x41\x28\x78\x0f\x20\xa5\x9c\x0c\
\x21\xae\x02\x73\x0b\x9c\xdc\xdd\x23\xda\x4e\xcc\xe7\xf9\xe1\xbd\
\xbc\x4a\xa9\xe9\x0c\x3c\x0e\x3b\x2b\x05\xe6\x0b\xb5\xd6\x1b\xbc\
\xf3\x6a\x64\x0a\x72\x1e\x40\x29\x75\x38\x0b\x71\x2e\x8c\xf9\x76\
\xe6\xdd\x3f\x4e\x4d\x6e\x36\xd5\xd5\x0b\xfc\x72\x52\xc7\xb2\xac\
\xe7\x65\x43\xc3\xf5\x30\x26\xfb\x44\x14\x42\x7c\x07\x40\xde\x05\
\x90\x75\x0f\xa0\x94\x0a\x03\x98\xc7\xcc\xcd\x00\x56\x27\x12\x89\
\xa7\xec\x9e\x92\xcd\xa4\x40\xff\x29\x98\xcf\x87\x7b\xc7\xb6\x3a\
\xaa\x2a\x2b\x17\xfa\xf1\x64\x8e\x94\xf2\x7e\x10\x9d\x9d\xe5\xe3\
\xbb\x09\x98\x9c\xef\x2d\x64\x3b\x73\x80\x4b\x18\xf8\x0b\x88\xce\
\x07\xd1\x5f\x23\x91\xc8\x05\x76\x2a\x92\x52\xce\x21\x21\xba\xc1\
\x7c\x11\xdc\x6a\x7c\xe6\xc7\xfa\x7a\x7b\x17\xf8\xb1\xf1\x01\xa0\
\xa2\xa2\xe2\x27\x00\xde\xcd\xf2\xf1\x1a\x63\x4c\xde\x77\x36\xb3\
\x16\x00\x0f\xbf\x65\x63\xe3\xd6\x8d\x94\x72\x92\xab\x47\xb5\x99\
\x3f\x86\x10\x97\x6b\xad\x2f\xb5\x9b\x9d\x3b\x9f\xb4\xb5\xb5\x7d\
\x42\x42\xdc\x99\xed\xf3\x42\x08\xbb\xef\x1f\x72\x8c\x9d\x1e\xa0\
\x6b\xff\x0f\x24\x44\xd6\xcb\x16\x22\x5a\x00\xb7\xf2\xf1\x31\xaf\
\x65\xe6\x59\x3a\x16\x5b\xe9\x8a\x3d\x8f\x09\x11\xad\xc1\x40\x12\
\xe9\x51\x61\xe0\xeb\x1e\xbb\x73\x00\x59\x0b\x20\x2c\xc4\x9d\x60\
\x5e\x0e\xa2\x0f\xc0\x7c\x5f\x7a\xec\xd8\xfb\xb2\x2d\xcb\x44\x8e\
\x52\x9a\x67\xf8\x08\x42\x5c\xaa\xb5\xbe\x38\xef\x6f\x0a\x75\x40\
\x7b\x7b\xfb\x07\x20\xda\x54\x68\x3f\x0e\x46\xd6\xab\x80\xcc\x3e\
\xfa\xbe\x6d\x58\x5b\x37\x25\x98\x9d\x9d\x96\x25\x5a\x4d\xcc\xd7\
\x5a\xb1\xd8\x4e\x47\x76\x0a\x85\x31\x4f\x80\x68\xf4\x6f\x37\x51\
\xde\xdf\x25\x98\xaf\x65\x60\xae\xd7\xaa\xde\x05\xf3\x0d\xda\xb2\
\xd6\xb8\xea\x4d\x9e\x21\xa2\xd5\x0c\x8c\x9a\xa3\x88\x84\x58\x91\
\x27\x97\xf6\xe1\x6a\x24\x50\x29\x75\x78\x26\x17\xdf\x10\xa8\xa2\
\xe2\xef\x36\xcc\x30\x80\x0e\x30\x2f\x9c\x58\x5b\x3b\x5d\x6b\x5d\
\xd4\x8d\x0f\x00\x99\xe0\x94\x1e\xe5\xb1\x6e\x1d\x8b\xad\xcd\x83\
\x3b\x43\x70\xad\x07\x68\x6e\x6e\x1e\xc7\xc0\x16\x30\xef\x26\xa2\
\xa9\xfb\xc7\x08\xac\x0d\x1b\x5e\x94\x4a\xad\x04\x70\xa8\x95\xc3\
\x6e\x00\x0f\x81\xf9\x7e\xad\xf5\xeb\x6e\xf9\xe5\x17\x08\xb8\x95\
\x81\xe9\x18\xf9\x6d\x26\xdd\xe9\x54\xaa\xa5\xb3\xb3\x33\xef\xb7\
\x8f\x5d\xeb\x01\x26\x4c\x98\xb0\x07\xc0\x1b\x10\xe2\xd5\x91\x02\
\x44\x13\x6b\x6b\x17\x83\x68\x39\x06\xae\x56\xed\xcf\x6b\x60\xbe\
\xb6\xaa\xb2\xf2\x24\x6d\x59\x3f\x2b\xc5\xc6\x07\x06\xa2\x83\x60\
\x9e\x0b\xa2\x27\xc0\xfc\x31\x80\x1d\x00\x36\x52\x28\xb4\x88\x80\
\xb3\x0a\xf5\x2e\xe1\xbc\xef\x05\x44\x22\x91\x5a\x22\x92\x00\x7a\
\x99\xf9\xbd\x44\x22\xb1\x35\xaf\x0e\x04\x0c\x21\x2b\x01\x28\xa5\
\xc2\xc5\x70\xcb\x25\xc0\x3e\xa3\x0e\x01\x4a\xa9\xcf\x33\xf0\x92\
\x54\xca\xf1\x6b\x4a\x03\xfc\x47\x36\x73\x80\x23\x33\x7f\x26\x13\
\xe5\xe3\x55\x58\x01\xf9\x64\x54\x01\x58\x96\xf5\x92\x18\x33\xe6\
\xf4\x70\x28\x24\x4b\x31\x47\x4e\xb9\x53\xf0\x03\x21\x01\x85\xc5\
\x37\x47\xc2\x02\x0a\x43\xc9\x64\x08\x29\x05\x94\x52\x63\x98\x79\
\x35\x84\xa8\xf2\xac\x12\xe6\x0f\xb5\x65\xcd\x1f\xfc\x38\x44\x00\
\x32\x1a\x5d\x0e\x63\x26\xed\xf7\xda\x53\xce\x14\x1a\xf8\x5b\x88\
\x41\x23\x66\xc8\xbf\x67\x9e\xef\xeb\xed\xb5\xfd\xf6\xec\x80\xff\
\x63\x59\xd6\x1e\x19\x8d\xbe\xef\xe0\x38\x7c\x36\x0c\xd9\x49\x1d\
\xde\x03\x1c\x37\xe2\x2b\x55\x06\x05\x31\x7c\x0e\x38\x6c\x51\x50\
\x5b\x5b\x2b\x5a\x5b\x5b\x1d\x7b\x58\xe6\xac\x02\xe0\xa5\x00\x86\
\x30\x74\x0e\x60\x8c\x23\x63\x3d\x3d\x3d\xce\x0c\x04\xa0\x7a\xcc\
\x98\xf5\x70\x72\xe5\xcc\x26\x43\x05\xe0\x70\x9d\x3f\x7e\xfc\xf8\
\x60\x99\xe8\x90\xd6\xd6\xd6\xbd\x20\x5a\x9d\xaf\xfa\x82\x55\x80\
\x0f\xa1\x50\x28\x6f\xc7\xdd\x86\x0b\x20\x88\xf4\xf9\x00\xdd\xde\
\xde\x05\xe6\xff\xe4\xa3\xae\xa0\x07\xf0\x21\xcc\xcc\x14\x0a\xe5\
\x65\xef\x65\xf8\x1c\xc0\x91\x20\x6a\x6b\x6b\x83\x39\x80\x4b\x70\
\x3a\xfd\x70\x3e\xea\x71\xb5\x07\x58\xb9\x72\x65\xb0\x0a\x70\x89\
\xcc\x05\x57\xcf\x2f\xb9\x06\x43\x80\x9f\x61\xf6\xfc\x90\xe8\x50\
\x01\x30\x07\x93\x40\x1f\x11\xaa\xa9\x79\x14\x59\x5e\x2a\xc9\x15\
\x57\x57\x01\xc1\x76\xb1\xbb\xc4\xd6\xac\x79\x0f\xcc\x9e\x26\x8e\
\x08\x86\x00\x9f\x43\x44\x9e\xc6\x04\x02\x01\xf8\x9c\x54\x2a\xf5\
\x04\x80\x7e\xaf\xec\x07\x02\xf0\x39\x99\xe3\xe2\xeb\xbd\xb2\xef\
\x66\x1c\x20\x18\xff\xbd\x82\xd9\xb3\x61\x20\x58\x05\x14\x01\x7d\
\x7d\x7d\x6d\x00\x76\x79\x61\x3b\x18\x02\x8a\x80\xee\xee\xee\xa4\
\x57\x3b\x84\xc3\x7b\x00\x27\xb6\x82\x21\xc0\x4b\x84\xf0\x64\x18\
\x70\xf3\x3c\x40\x10\x06\xf6\x90\x44\x2c\xf6\x2c\xb2\xcf\x37\x94\
\x35\xc1\x10\x50\x24\x30\x33\x83\xc8\xf5\x1d\x42\x57\x77\x03\x03\
\xbc\x45\x00\xae\xef\x0d\x04\x0d\x5e\x44\xc4\xe3\xf1\xed\x00\xb6\
\xb9\x69\xd3\xcd\x65\x60\x30\x09\xcc\x07\x2e\xc7\x04\xdc\xbc\x18\
\x12\x08\x20\x0f\x10\xd1\xa3\x0c\x2c\x74\x30\x5c\x0f\x49\xb4\x35\
\x54\x00\x44\x6f\xc1\x98\xdd\x99\xd5\x00\xed\xab\x64\xa0\x67\xa0\
\xcc\x33\x02\xcc\x04\xe6\x81\x55\xc3\xe0\x67\x60\x6f\x8e\x0e\x05\
\xd8\x20\x93\x6f\x68\xa6\x5b\xf6\x82\xcb\xa1\x65\x4e\x30\x09\x2c\
\x73\x02\x01\x94\x39\x81\x00\xca\x9c\x40\x00\x65\x4e\x20\x80\x32\
\x27\x10\x40\x99\x13\x08\xa0\xcc\x09\x04\x50\xe6\x04\x02\x28\x73\
\xfe\x07\x7b\xaa\x60\xbe\x78\x1b\x95\x37\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x09\x38\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x88\x08\x03\x00\x00\x00\x18\xb3\x13\x94\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x3a\xe5\x00\x00\x3a\xe5\x01\
\xaa\x0a\x01\x66\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\
\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x02\xa0\x50\x4c\x54\
\x45\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x05\x2f\x4d\xa3\x00\x00\x00\xdf\x74\x52\x4e\x53\x00\x01\x02\
\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\
\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\
\x23\x24\x25\x27\x28\x2a\x2b\x2c\x2d\x2e\x31\x32\x33\x34\x35\x36\
\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40\x41\x42\x43\x44\x45\x46\x47\
\x48\x49\x4b\x4c\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x58\x59\
\x5a\x5b\x5c\x5d\x5e\x5f\x60\x62\x63\x64\x65\x66\x67\x68\x69\x6a\
\x6b\x6c\x6d\x6e\x6f\x70\x71\x72\x73\x76\x77\x78\x79\x7a\x7b\x7c\
\x7d\x7e\x7f\x80\x81\x85\x86\x87\x89\x8a\x8b\x8c\x8d\x8f\x90\x92\
\x93\x94\x95\x96\x97\x99\x9a\x9b\x9c\x9d\xa0\xa1\xa2\xa3\xa5\xa6\
\xa8\xa9\xaa\xab\xac\xad\xb0\xb1\xb3\xb4\xb5\xb6\xb7\xb9\xba\xbc\
\xbd\xbe\xc0\xc1\xc2\xc3\xc4\xc6\xc7\xc8\xca\xcb\xcc\xcd\xcf\xd0\
\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\
\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\
\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\x9c\xc2\x5a\xeb\
\x00\x00\x05\x1f\x49\x44\x41\x54\x18\x19\xed\xc1\x09\x57\x54\x65\
\x00\x06\xe0\x97\x65\xcc\x40\x84\x04\x43\x32\x34\xca\x50\x31\xb5\
\x28\xb5\xc5\x72\x8f\x40\x8d\x4a\x53\x71\x0b\x73\xc9\x72\x2b\x13\
\x2b\x33\x48\xb0\xb4\x94\x42\x44\x24\x35\x85\x28\xb7\x20\xc3\x10\
\xb7\x44\x45\x0c\x10\x09\x54\x2c\xd6\x79\xff\x4a\x6c\x73\xe7\xce\
\x65\xee\x75\x46\xee\xf7\xcd\xe9\x1c\x9e\x07\x3d\xf1\xd8\xea\x83\
\x45\x55\x0d\x65\x05\x7b\xe6\x04\xc2\x03\x66\x14\x52\xd1\x94\x3d\
\x1c\x92\x8d\x3a\x46\x07\x2d\xdf\x06\x41\xa6\xb8\x7a\x6a\x9d\x1f\
\x06\x79\x56\x59\xd9\x5d\x4d\x34\x64\x89\x6d\xa5\x33\x15\xe1\x90\
\x23\xea\x0e\x9d\x2b\xea\x0b\x29\x7e\xa6\x9e\xb5\x90\x61\x2a\x75\
\xfd\xfd\x10\x24\x38\x49\x7d\x9b\x20\xde\x60\x2b\xf5\x15\x43\xbc\
\xe5\x34\xf2\x04\x84\xdb\x4f\x23\x8b\x21\x5c\x01\x8d\x6c\x86\x70\
\x65\x34\xf2\x0d\x84\xab\xa3\x91\xbd\x10\xee\x02\x8d\xa4\x42\xb8\
\x5f\x68\x64\x3d\x84\xdb\x4d\x23\xf1\x10\x6e\x16\x0d\x34\x05\xc2\
\x24\xe9\x6b\x7d\xe1\x9c\xdf\x3f\xd4\x97\x0b\x93\xc4\x90\x45\x63\
\xe1\x5c\x06\xf5\xcd\x85\x33\x96\x05\x16\xb8\x27\xe0\x1a\xc9\x96\
\x64\x3f\x38\x13\xd1\x48\x3d\x7f\x78\xa3\x3b\xff\xd5\x65\x7c\x06\
\xee\xd9\xce\x0e\xa5\x53\xe0\xcc\x36\xea\x99\x86\x6e\x42\x3e\xa9\
\x21\xb9\x12\x6e\x19\x67\x65\x97\x8c\x10\x74\xe7\x5f\x44\xe7\xd2\
\xa0\x35\x34\xf5\x2e\xdb\x65\xc2\x1d\x7d\xce\x52\x51\x3d\x0f\xdd\
\x0d\xa9\xa2\x33\x3f\xf9\xc2\x51\x54\x46\x33\x3b\x95\xc2\x1d\x49\
\x54\xcb\x8d\x40\x37\xa3\xcb\xd9\x5d\x7e\x10\x1c\x4c\x38\x68\xa5\
\x22\x14\xae\x1b\xd1\x48\x07\x77\xd7\xf8\x40\xeb\x91\x42\x6a\xed\
\xf0\x85\x8a\x57\xcc\x09\xaa\xc5\xc2\x75\x3f\x52\xeb\xd4\x68\x68\
\x3d\xf0\xc1\x4d\xaa\x15\xcf\x80\x8a\xef\xfc\x12\x3a\xda\x02\xd7\
\x0d\xda\x47\xad\xe6\xad\x0f\x42\x2b\xf0\xc3\xd3\xec\xd2\x94\x3b\
\xd7\x1b\x76\x7e\xab\xae\x52\xa3\x7c\x3a\xdc\x11\x73\x8d\x5a\x97\
\x26\xa1\xbb\x88\x45\x49\x3b\xf7\xa6\xae\x8f\x0f\x84\x4a\x70\xd2\
\x4d\x6a\x34\x7f\xd1\x0f\xee\x09\x48\x6b\xa5\x56\x7a\x30\x5c\x10\
\xbe\xad\x9e\x5a\x47\x47\xc2\x7d\xcf\x9e\xa1\xd6\x8d\x39\xd0\xd5\
\xb7\x53\xd4\x77\x4d\xd4\xaa\x9c\x87\xfb\x62\xd9\xd8\x40\xad\x23\
\x43\xd1\x69\xd0\xb4\x0d\x59\x17\x8a\x03\x60\xf3\x29\xf5\xb4\x6e\
\x0f\xc2\xfd\x8a\x3c\x4a\xad\xfa\xd5\xbe\x4f\xc6\x7f\x76\xb8\x82\
\x1d\x0e\x79\xa3\xd3\x42\xea\x29\x18\x83\x1e\xf0\x5a\x52\x4b\xad\
\x46\xaa\x24\xa3\xc3\x2b\x4d\x74\xae\x66\xa9\x17\x7a\x26\x2c\x9b\
\x86\x16\xa2\xcd\x88\x3a\x3a\x65\xdd\x15\x82\x9e\x8b\x2d\xa7\x81\
\xc6\xe7\x81\xd0\x2b\x74\xaa\xe8\x39\x98\xa2\xff\x57\x56\xea\xab\
\x8e\xf0\x2b\xa4\x33\xb7\x56\xf9\xc0\x2c\xe3\xce\x52\x5f\xc9\x01\
\x3a\xb3\x67\x10\x4c\xd4\x67\x53\x03\xdd\x72\x6e\x22\x4c\x36\xfc\
\x18\x5d\x57\xbf\xce\x02\xd3\x79\xbd\x53\x47\x17\xe5\x84\x43\x88\
\xb0\x1c\xba\x64\x09\x84\x89\x2b\xa7\x0b\x46\x42\x9c\xfe\x5f\x5b\
\x79\x4f\xa1\x10\x69\xfc\x39\xde\x8b\x05\x42\xf5\x49\x6a\xa4\xa1\
\x5b\x10\x2d\x9a\x86\x2e\x41\x34\x9f\x16\x1a\x29\x84\x70\x65\x34\
\x72\x18\xc2\x9d\xa4\x91\x0c\x08\x97\x45\x23\x5f\x42\xb8\x64\x1a\
\xd9\x08\xe1\xde\xa3\x91\x65\x10\x6e\x36\x8d\xc4\x43\xb8\x68\x1a\
\x79\x19\xc2\x85\xd1\xc8\x53\x10\xce\xbb\x89\x06\x06\x43\xbc\x2b\
\x34\xd0\x17\xe2\x1d\xa7\xbe\x7a\x48\x90\x49\x7d\x57\x21\xc1\x56\
\xea\xfb\x1d\x12\xac\xa3\xbe\x3c\x48\xb0\x9c\xfa\x32\x21\xc1\x7c\
\xea\x4b\x83\x04\x33\xa9\x6f\x33\x24\x98\x4c\x7d\x2b\x20\x41\x44\
\x3e\xf5\xe4\x3d\x0d\x19\x12\xa9\x27\x01\x52\x24\x52\x4f\x02\xa4\
\x48\xa4\x9e\x04\x48\x91\x48\x3d\x09\x90\x22\x91\x7a\x12\x20\xc5\
\xfb\x54\x9c\x4a\x4f\x4f\x2f\xa1\x62\x31\x64\xb0\x5c\xa4\x4d\xc3\
\x10\x00\x09\x54\xec\x80\x0c\xef\x52\x91\x82\x36\x11\x54\x9c\x83\
\x04\xfd\x2a\x69\x53\x17\x8c\x76\x65\x54\x3c\x0c\xf1\x36\x51\xb1\
\x01\x1d\xbe\xa7\xe2\x75\x98\xce\x17\x8e\x06\xde\xa6\xcd\x75\x3f\
\x74\x58\x44\x45\x1a\xcc\xe6\x5f\xba\x25\x08\x6a\xa9\x54\x2c\x41\
\xa7\x61\x54\x14\xc3\x6c\x1f\x93\xb5\xeb\xfd\xa0\x78\xbc\x89\x36\
\xe7\x7d\xd0\xe5\x3a\x6d\xac\xc1\x30\x57\xf0\x2d\xb6\xf9\x6b\x99\
\x05\x5d\x32\xa9\x98\x09\x9b\x3d\x54\xcc\x84\xb9\x52\xd8\xe9\xcf\
\x39\x5e\x68\x37\xc6\x4a\x9b\x5f\xa1\x58\x4a\xc5\x36\x98\x2a\xfc\
\x5f\xda\x9c\x9e\x81\x36\x79\x54\xbc\x00\x45\x24\x15\x45\x30\xd5\
\x6e\xaa\x1c\x9f\x80\x49\x54\x1c\x82\x4a\x05\x6d\x5a\x83\x60\xa2\
\xe1\x2d\x74\x70\xa8\x98\x36\xad\x51\x50\xc9\xa2\x22\x06\x26\xca\
\xa1\xae\x74\xa8\x2d\xa3\x22\x19\xe6\x89\xa6\xae\x86\x21\x50\x1b\
\x41\xc5\x6f\x30\x4f\x3e\x75\x25\xc3\xd1\x0d\xda\xb4\xf4\x87\x59\
\x26\x53\x57\xed\x00\x38\xda\x4f\xc5\x74\x98\xa5\x80\x64\x4a\x4a\
\x03\x9d\xd8\x00\x8d\x15\x54\x7c\x0e\xb3\x44\xee\xb3\x56\x07\x20\
\x7c\x57\x0b\xb5\x2a\xfc\xa0\x31\x8a\x8a\x02\x98\x67\xcc\x2c\xb4\
\x89\xcc\xb6\xd2\xd1\x72\x68\xc5\x52\xd1\xec\x0f\xd3\x8d\xcd\xa3\
\x5a\xa9\x05\x1a\xc1\x95\xb4\x9b\x02\x01\x26\x16\xd0\xee\x6d\x68\
\xed\xa5\x5d\x4d\x18\x84\x88\x2b\x61\x97\x33\xde\xd0\x88\xa7\xca\
\x5b\x10\xc4\x7b\xfe\x65\x76\x78\x0d\x1a\xa1\xd5\xb4\xcb\x81\x38\
\x7d\x56\x56\x91\x3c\x09\xad\x1f\x68\x77\x63\x20\x44\xf2\xdf\x58\
\xc7\x97\xa0\x31\x8f\x2a\xb3\x21\xd8\x80\xa5\xd0\x18\x5c\x4b\xbb\
\x4c\xc8\x77\x84\x76\x15\x03\x20\xdd\x62\xaa\xc4\x40\xba\xa1\xb7\
\x69\x97\x0e\xe9\xbc\xf2\x69\x77\x2d\x10\xd2\xad\xa0\xca\x54\x48\
\x37\xec\x2e\xed\x76\x42\x3a\xef\x13\xb4\xbb\x1c\x00\xe9\xd6\xd0\
\xce\x3a\x11\xf2\xbd\x5a\x49\x45\x1a\x3c\x21\x38\x8b\x5d\x2e\xfa\
\xc1\x33\xde\xb8\xc9\x0e\xa5\xe3\xe1\x21\xa1\x07\xd8\xa1\xf5\x23\
\x78\xca\x82\x3a\xb6\x7b\x13\x1e\xf3\x68\x2e\xc9\x6c\x78\x52\xe2\
\x9d\xaa\x10\x78\x54\xc4\x8b\xe8\xd5\xab\x57\xaf\x5e\xff\x57\x71\
\x1e\x06\x7a\x18\xe8\x61\xa0\x87\x81\x1e\x06\x7a\x18\xe8\x61\xa0\
\x87\xfd\x07\x99\x92\xb0\xe2\x47\x64\x21\xce\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x08\
\x0b\x63\x58\x07\
\x00\x73\
\x00\x74\x00\x6f\x00\x70\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x06\
\x07\xc3\x57\x47\
\x00\x75\
\x00\x70\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\xe1\x5a\x27\
\x00\x64\
\x00\x6f\x00\x77\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x08\x38\x8c\x47\
\x00\x66\
\x00\x72\x00\x61\x00\x6d\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x00\x8d\x23\x87\
\x00\x64\
\x00\x69\x00\x72\x00\x65\x00\x63\x00\x74\x00\x6f\x00\x72\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x00\xea\x9d\x27\
\x00\x73\
\x00\x74\x00\x65\x00\x70\x00\x5f\x00\x69\x00\x6e\x00\x74\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x09\x64\x4b\xe7\
\x00\x66\
\x00\x69\x00\x78\x00\x5f\x00\x62\x00\x75\x00\x67\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x08\xc8\x58\x67\
\x00\x73\
\x00\x61\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x0b\x21\x03\x87\
\x00\x66\
\x00\x69\x00\x6c\x00\x65\x00\x4f\x00\x70\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x07\
\x09\xc1\x57\xa7\
\x00\x72\
\x00\x75\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x01\xb9\x3c\x07\
\x00\x6c\
\x00\x6f\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x09\x9d\x9a\x67\
\x00\x73\
\x00\x74\x00\x65\x00\x70\x00\x5f\x00\x6f\x00\x76\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x09\x92\xdb\x87\
\x00\x73\
\x00\x74\x00\x65\x00\x70\x00\x5f\x00\x6f\x00\x75\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0d\x00\x00\x00\x03\
\x00\x00\x00\x66\x00\x00\x00\x00\x00\x01\x00\x00\x1b\xeb\
\x00\x00\x00\x86\x00\x00\x00\x00\x00\x01\x00\x00\x1f\x07\
\x00\x00\x01\x0a\x00\x00\x00\x00\x00\x01\x00\x00\x64\x0f\
\x00\x00\x00\x38\x00\x00\x00\x00\x00\x01\x00\x00\x11\xf0\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x0f\x0e\
\x00\x00\x00\x4e\x00\x00\x00\x00\x00\x01\x00\x00\x17\x1a\
\x00\x00\x00\xc2\x00\x00\x00\x00\x00\x01\x00\x00\x51\xb8\
\x00\x00\x00\xa6\x00\x00\x00\x00\x00\x01\x00\x00\x2d\x32\
\x00\x00\x01\x44\x00\x00\x00\x00\x00\x01\x00\x00\x77\x0a\
\x00\x00\x01\x24\x00\x00\x00\x00\x00\x01\x00\x00\x68\xdf\
\x00\x00\x00\xf6\x00\x00\x00\x00\x00\x01\x00\x00\x56\x7c\
\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x01\x00\x00\x52\x8d\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
    # Register the embedded resource data (icons) with Qt's resource system.
    # NOTE(review): QtCore is presumably imported at the top of this generated
    # file (not visible in this chunk) -- confirm before editing further.
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Unregister the same resource data; counterpart of qInitResources().
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
# Resources are registered as a side effect of importing this module.
qInitResources()
| c0deforfun/LLL | ui/resources_rc.py | Python | mit | 138,528 | 0.000036 |
# Here we solve the assignment's exercises (Python 2 script).
import time
import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as const
from astropy import units as un
from scipy import integrate
# PART ONE
# Load the data and define arrays for wavelength and flux.
# NOTE(review): "sun_AM0.dat" must exist in the working directory -- the
# script has no error handling for a missing file.
Datos = np.loadtxt("sun_AM0.dat")
Longitud = Datos[:,0]
Flujo = Datos[:,1]
# Attach physical units and convert them.
UniLongitud = Longitud*un.nm #wavelength in nanometers
UniFlujo = Flujo*un.W*(un.m**-2)*(un.nm**-1) #flux in watts per square meter per nanometer
Longitud_um= UniLongitud.to('um') #convert to [um]
Flujo_cgs= UniFlujo.to('erg/(s cm2 um)') #convert to [cgs]
plt.clf()
plt.plot(Longitud_um, Flujo_cgs)
plt.xlim(0,8)
plt.xlabel('Longitud de onda [$ \mu m $]')
plt.ylabel('Flujo de Energia [$ erg / s * cm^2 * \mu m$]')
plt.title('Grafico de Flujo de Energia en relacion con la Longitud de onda incidente')
plt.savefig('Grafico1.png', bbox_inches='tight')
#plt.show()
# Part two: integrate the spectrum above.
# We use the trapezoid rule as seen in class.
m = len(Longitud_um)
n = len(Flujo_cgs)
CSolar=0
Ttrapecio=time.time() #time the hand-written method to compare it later
for i in range(n-1):
    paso = (Longitud_um[i+1] - Longitud_um[i])
    trapecio = ((Flujo_cgs[i+1] + Flujo_cgs[i]) * paso /2)
    CSolar += trapecio # the accumulated area is the solar constant
Ttrapecio = time.time()-Ttrapecio #elapsed time of our own trapezoid loop
# In parallel, use numpy's built-in routine to compute the same integral.
TcompTrapecio = time.time() #start the timer for numpy's method
ConstanteComparacionT1 = np.trapz(Flujo_cgs , Longitud_um)
TcompTrapecio = time.time()-TcompTrapecio #stop the timer
print 'constantes solares con el metodo del trapecio propio y el de python, respectivamente'
print (CSolar)
print(ConstanteComparacionT1)
# PART THREE
# Compute the energy flux of the Sun through a unit surface of the solar
# atmosphere per unit time (Stefan-Boltzmann via the Planck integral).
CantiInter = input('Indique la cantidad de intervalos para la integracion (maximo 100)')
Salto = (np.pi/2-0.01)/CantiInter #spacing between integration sample points
Intervalo = np.arange(0.01, np.pi/2, Salto) #discrete grid; start at 0.01 because 0 is excluded
Paso = Intervalo[1] - Intervalo[0] #grid spacing is uniform, so any pair works
AreaS = 0
T = 5778*un.K
Constantes = ((2*np.pi*const.h)/((const.c)**2)) * ((const.k_B*T)/(const.h))**4 #prefactor of the integral
Tamano = len(Intervalo)
TSimpson=time.time() #start the timer for the Simpson method
def Integral(y):
    """Planck integrand after the substitution x = tan(y).

    Writing x = tan(y) maps the infinite domain [0, inf) of
    x**3 / (exp(x) - 1) onto the finite interval [0, pi/2); the Jacobian
    dx = (1 + tan(y)**2) dy turns the integrand into
    (tan(y)**3 + tan(y)**5) / (exp(tan(y)) - 1).
    """
    x = np.tan(y)
    return (x**3 + x**5) / (np.exp(x) - 1.0)
# Iterate over the grid points, evaluating the integrand with Simpson panels.
# NOTE(review): consecutive panels here overlap (k, k+1, k+2 then k+1, k+2,
# k+3) and each panel is weighted Paso/6 instead of the composite-Simpson
# 2*Paso/6 over non-overlapping pairs -- this looks like a deviation from the
# standard rule; confirm against the Quad comparison below.
for k in range(0, (Tamano-2)):
    simpson = (Paso/6.0)*((Integral(Intervalo[k])) + 4*Integral(Intervalo[k+1]) + Integral(Intervalo[k+2]))
    AreaS += simpson
FlujoSolar = Constantes*AreaS # prefactor times the computed area (the integral)
TSimpson= time.time() - TSimpson #stop the Simpson timer
# Now use scipy's Quad routine as the comparison method.
TCompSimpson=time.time() #start the timer
FlujoCom = integrate.quad(Integral, 0, np.pi/2)
FlujoCom = FlujoCom * Constantes
TCompSimpson=time.time() - TCompSimpson #stop the timer
print 'flujos solares, calculados con el metodo de Simpson y Quad respectivamente'
print FlujoSolar
print FlujoCom
# Now compute the solar radius from the energy flux through a section of the
# terrestrial atmosphere (solar constant) and the constant a0 (1 AU).
a0= const.au
CSolar= CSolar.to('J /(m2 s)') # unit conversion of the solar constant so units match
# The radius is a0 times the square root of the ratio between the solar
# constant and the flux at the solar surface.
Radio = (np.sqrt((CSolar / FlujoSolar)))*a0
print 'Radio'
print Radio
| faridborbar/01Tarea | Codigo.py | Python | mit | 4,518 | 0.016383 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
lassplit.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from sextante.lidar.lastools.LasToolsUtils import LasToolsUtils
from sextante.lidar.lastools.LasToolsAlgorithm import LasToolsAlgorithm
from sextante.parameters.ParameterFile import ParameterFile
from sextante.outputs.OutputFile import OutputFile
from sextante.parameters.ParameterNumber import ParameterNumber
class lassplit(LasToolsAlgorithm):
    """SEXTANTE wrapper for the lastools ``lassplit`` executable, which
    splits an input LAS point cloud into several output files, each holding
    at most a fixed number of points."""

    INPUT = "INPUT"
    OUTPUT = "OUTPUT"
    NUM_POINTS = "NUM_POINTS"

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, inputs and outputs."""
        self.name = "lassplit"
        self.group = "Tools"
        self.addParameter(ParameterFile(lassplit.INPUT, "Input las layer"))
        self.addParameter(ParameterNumber(lassplit.NUM_POINTS, "Point in each output file", 1, None, 1000000))
        self.addOutput(OutputFile(lassplit.OUTPUT, "Output las file basename"))
        self.addCommonParameters()

    def processAlgorithm(self, progress):
        """Assemble the lassplit command line and execute it."""
        commands = [os.path.join(LasToolsUtils.LasToolsPath(), "bin", "lassplit.exe")]
        commands.append("-i")
        commands.append(self.getParameterValue(lassplit.INPUT))
        commands.append("-o")
        commands.append(self.getOutputValue(lassplit.OUTPUT))
        commands.append("-split")
        # ParameterNumber yields an int; convert it so the command list
        # contains only strings when it is joined/executed by runLasTools.
        commands.append(str(self.getParameterValue(lassplit.NUM_POINTS)))
        self.addCommonParameterValuesToCommand(commands)
        LasToolsUtils.runLasTools(commands, progress)
| slarosa/QGIS | python/plugins/sextante/lidar/lastools/lassplit.py | Python | gpl-2.0 | 2,455 | 0.001222 |
from staffjoy.resource import Resource
class Manager(Resource):
    """API resource for a manager assigned to a location within an organization."""

    # Endpoint template; the placeholders are filled in from route arguments.
    PATH = "organizations/{organization_id}/locations/{location_id}/managers/{user_id}"
    # Name of the path variable that uniquely identifies this resource.
    ID_NAME = "user_id"
| Staffjoy/client_python | staffjoy/resources/manager.py | Python | mit | 206 | 0.004854 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._healthbot import Healthbot
from ._version import VERSION
__version__ = VERSION
__all__ = ['Healthbot']
# Apply hand-written customizations on top of the generated code, if a
# _patch module is present; generated packages ship without one.
try:
    from ._patch import patch_sdk # type: ignore
    patch_sdk()
except ImportError:
    # No _patch module: nothing to customize.
    pass
| Azure/azure-sdk-for-python | sdk/healthbot/azure-mgmt-healthbot/azure/mgmt/healthbot/__init__.py | Python | mit | 680 | 0.002941 |
#-*- coding: utf-8 -*-
"""\
======
Grades
======
For managing student grades, most teachers use spreadsheet tools. With these
tools, it is hard to maintain grades in plain text files that are easily
readable by humans. The goal of **Grades** is to let teachers manage their
students' grades in a plain text file while providing tools to parse the file
and calculate student and group means.
The table format that **Grades** use is the one Emacs `org-mode
<http://orgmode.org/index.html>`_ uses. Using org-mode, grades tables can be
easily set up and then **Grades** will happily compute all the required values.
"""
from __future__ import print_function # For Python 2 compatibility.
__author__ = "Loïc Séguin-C. <loicseguin@gmail.com>"
__license__ = "BSD"
__version__ = "0.3dev"
from . import gradestable
from . import parsers
from . import ui
from . import writers
| loicseguin/grades | grades/__init__.py | Python | bsd-3-clause | 878 | 0.001142 |
from django.core.management.base import BaseCommand
from radioco.apps.radioco.utils import create_example_data
class Command(BaseCommand):
    """Management command that calls create_example_data() from radioco.apps.radioco.utils."""

    def handle(self, *args, **options):
        # Delegate to the shared helper so the same logic can be reused
        # outside of the management-command entry point.
        create_example_data()
| txenoo/django-radio | radioco/apps/radioco/management/commands/create_example_data.py | Python | gpl-3.0 | 212 | 0 |
import unittest
from rtruffle.node import Node
class NodeTest(unittest.TestCase):
    """Parent bookkeeping and in-place replacement for Node trees."""

    def test_adopt_child(self):
        node = ChildNode()
        root = RootNode()
        self.assertIsNone(node.parent)
        root.adopt_child(node)
        self.assertIs(root, node.parent)

    def test_adopt_children(self):
        nodes = [ChildNode() for _ in range(0, 10)]
        root = RootNode()
        self.assertIsNot(nodes[0], nodes[1])
        for node in nodes:
            self.assertIsNone(node.parent)
        root.adopt_children(nodes)
        for node in nodes:
            self.assertIs(root, node.parent)

    def test_replace_1(self):
        # Replacing the first child slot leaves the second untouched.
        old = ChildNode()
        root = RootNode(old, None)
        self.assertIs(old, root.child_node1)
        self.assertIsNone(root.child_node2)
        new = ChildNode()
        old.replace(new)
        self.assertIs(new, root.child_node1)
        self.assertIsNone(root.child_node2)

    def test_replace_2(self):
        # Symmetric case: replace the second child slot.
        old = ChildNode()
        root = RootNode(None, old)
        self.assertIsNone(root.child_node1)
        self.assertIs(old, root.child_node2)
        new = ChildNode()
        old.replace(new)
        self.assertIsNone(root.child_node1)
        self.assertIs(new, root.child_node2)

    def test_replace_in_children(self):
        # All occurrences inside a child list are replaced at once.
        old = ChildNode()
        new = ChildNode()
        root = RootNodeWithChildList([old, old, old])
        for child in root.child_nodes:
            self.assertIs(child, old)
        old.replace(new)
        for child in root.child_nodes:
            self.assertIs(child, new)
class RootNode(Node):
    # Child slots that participate in adoption/replacement bookkeeping.
    _child_nodes_ = ["child_node1", "child_node2"]

    def __init__(self, child_node1=None, child_node2=None):
        Node.__init__(self)
        # adopt_child() records self as the child's parent and returns the child.
        self.child_node1 = self.adopt_child(child_node1)
        self.child_node2 = self.adopt_child(child_node2)
class RootNodeWithChildList(Node):
    # The "[*]" suffix marks a child slot holding a list of nodes.
    _child_nodes_ = ["child_nodes[*]"]

    def __init__(self, child_nodes=None):
        Node.__init__(self)
        assert isinstance(child_nodes, list)
        # adopt_children() records self as parent of every element.
        self.child_nodes = self.adopt_children(child_nodes)
class ChildNode(Node):
    # Minimal concrete leaf node used by the tests.
    pass
| SOM-st/PySOM | tests/rtruffle_tests/test_node.py | Python | mit | 2,292 | 0 |
from django.http import HttpRequest
import mock
import pytest
from nose.tools import assert_false
from olympia import amo
from olympia.amo.tests import TestCase, req_factory_factory
from olympia.amo.urlresolvers import reverse
from olympia.addons.models import Addon, AddonUser
from olympia.users.models import UserProfile
from .acl import (action_allowed, check_addon_ownership, check_ownership,
check_addons_reviewer, check_personas_reviewer,
check_unlisted_addons_reviewer, is_editor, match_rules)
# Every test in this module needs database access.
pytestmark = pytest.mark.django_db
def test_match_rules():
    """
    Unit tests for the match_rules method.
    """
    # Each of these rule strings should grant Admin:% access.
    matching = (
        '*:*',
        'Editors:*,Admin:EditAnyAddon,Admin:flagged,Admin:addons,'
        'Admin:EditAnyCollection',
        'Tests:*,Admin:serverstatus,Admin:users',
        'Admin:EditAnyAddon,Admin:EditAnyLocale,Editors:*,'
        'Admin:lists,Admin:applications,Admin:addons,Localizers:*',
        'Admin:EditAnyAddon',
        'Admin:ViewAnyStats,Admin:ViewAnyCollectionStats',
        'Admin:ViewAnyStats',
        'Editors:*,Admin:features',
        'Admin:Statistics',
        'Admin:Features,Editors:*',
        'Admin:%',
        'Admin:*',
        'Admin:Foo',
        'Admin:Bar',
    )
    for rule in matching:
        assert match_rules(rule, 'Admin', '%'), "%s != Admin:%%" % rule

    # None of these rule strings should grant Admin:% access.
    non_matching = (
        'Doctors:*',
        'Stats:View',
        'CollectionStats:View',
        'Addons:Review',
        'Personas:Review',
        'Locales:Edit',
        'Locale.de:Edit',
        'Reviews:Edit',
        'None:None',
    )
    for rule in non_matching:
        assert not match_rules(rule, 'Admin', '%'), \
            "%s == Admin:%% and shouldn't" % rule
def test_anonymous_user():
    """An anonymous request is never granted Admin access."""
    # Fake request must not have .groups, just like an anonymous user.
    fake_request = HttpRequest()
    # Plain assert, consistent with the rest of this module; the previous
    # nose.tools.assert_false dependency is deprecated under pytest.
    assert not action_allowed(fake_request, amo.FIREFOX, 'Admin:%')
class ACLTestCase(TestCase):
    """Test some basic ACLs by going to various locked pages on AMO."""
    fixtures = ['access/login.json']

    def test_admin_login_anon(self):
        # Anonymous visitors hitting the admin must be redirected to login.
        admin_url = '/en-US/admin/models/'
        response = self.client.get(admin_url)
        expected = '%s?to=%s' % (reverse('users.login'), admin_url)
        self.assert3xx(response, expected)
class TestHasPerm(TestCase):
    """check_addon_ownership() / check_ownership() for every author role,
    addon status, and permission flag combination."""
    fixtures = ['base/users', 'base/addon_3615']

    def setUp(self):
        super(TestHasPerm, self).setUp()
        assert self.client.login(username='del@icio.us', password='password')
        self.user = UserProfile.objects.get(email='del@icio.us')
        self.addon = Addon.objects.get(id=3615)
        self.au = AddonUser.objects.get(addon=self.addon, user=self.user)
        # The fixture user starts as the addon's owner.
        assert self.au.role == amo.AUTHOR_ROLE_OWNER
        self.request = self.fake_request_with_user(self.user)

    def fake_request_with_user(self, user):
        # Minimal authenticated-request stand-in: the ACL helpers only look
        # at .user, .groups and user.is_authenticated().
        request = mock.Mock()
        request.groups = user.groups.all()
        request.user = user
        request.user.is_authenticated = mock.Mock(return_value=True)
        return request

    def login_admin(self):
        assert self.client.login(username='admin@mozilla.com',
                                 password='password')
        return UserProfile.objects.get(email='admin@mozilla.com')

    def test_anonymous(self):
        self.request.user.is_authenticated.return_value = False
        self.client.logout()
        assert not check_addon_ownership(self.request, self.addon)

    def test_admin(self):
        self.request = self.fake_request_with_user(self.login_admin())
        assert check_addon_ownership(self.request, self.addon)
        assert check_addon_ownership(self.request, self.addon, admin=True)
        assert not check_addon_ownership(self.request, self.addon, admin=False)

    def test_require_author(self):
        assert check_ownership(self.request, self.addon, require_author=True)

    def test_require_author_when_admin(self):
        self.request = self.fake_request_with_user(self.login_admin())
        self.request.groups = self.request.user.groups.all()
        assert check_ownership(self.request, self.addon, require_author=False)
        assert not check_ownership(self.request, self.addon,
                                   require_author=True)

    def test_disabled(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        assert not check_addon_ownership(self.request, self.addon)
        self.test_admin()

    def test_deleted(self):
        # Deleted addons are off-limits even for admins.
        self.addon.update(status=amo.STATUS_DELETED)
        assert not check_addon_ownership(self.request, self.addon)
        self.request.user = self.login_admin()
        self.request.groups = self.request.user.groups.all()
        assert not check_addon_ownership(self.request, self.addon)

    def test_ignore_disabled(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        assert check_addon_ownership(self.request, self.addon,
                                     ignore_disabled=True)

    def test_owner(self):
        assert check_addon_ownership(self.request, self.addon)
        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon)
        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon)
        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon)

    def test_dev(self):
        assert check_addon_ownership(self.request, self.addon, dev=True)
        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, dev=True)
        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon, dev=True)
        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon, dev=True)

    def test_viewer(self):
        assert check_addon_ownership(self.request, self.addon, viewer=True)
        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, viewer=True)
        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, viewer=True)
        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, viewer=True)

    def test_support(self):
        # Bug fix: this first assertion used viewer=True (a copy-paste from
        # test_viewer), leaving the support flag untested for the owner role.
        assert check_addon_ownership(self.request, self.addon, support=True)
        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon,
                                         support=True)
        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon,
                                         support=True)
        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, support=True)
class TestCheckReviewer(TestCase):
    """Permission checks for addon / persona / unlisted-addon reviewers."""
    fixtures = ['base/addon_3615', 'addons/persona']

    def setUp(self):
        super(TestCheckReviewer, self).setUp()
        self.user = UserProfile.objects.get()
        self.persona = Addon.objects.get(pk=15663)
        self.addon = Addon.objects.get(pk=3615)

    def _request(self):
        # Build a fresh request carrying the user's current permissions.
        return req_factory_factory('noop', user=self.user)

    def test_no_perm(self):
        request = self._request()
        assert not check_addons_reviewer(request)
        assert not check_unlisted_addons_reviewer(request)
        assert not check_personas_reviewer(request)

    def test_perm_addons(self):
        self.grant_permission(self.user, 'Addons:Review')
        request = self._request()
        assert check_addons_reviewer(request)
        assert not check_unlisted_addons_reviewer(request)
        assert not check_personas_reviewer(request)

    def test_perm_themes(self):
        self.grant_permission(self.user, 'Personas:Review')
        request = self._request()
        assert not check_addons_reviewer(request)
        assert not check_unlisted_addons_reviewer(request)
        assert check_personas_reviewer(request)

    def test_perm_unlisted_addons(self):
        self.grant_permission(self.user, 'Addons:ReviewUnlisted')
        request = self._request()
        assert not check_addons_reviewer(request)
        assert check_unlisted_addons_reviewer(request)
        assert not check_personas_reviewer(request)

    def test_is_editor_for_addon_reviewer(self):
        """An addon editor is also a persona editor."""
        self.grant_permission(self.user, 'Addons:Review')
        request = self._request()
        assert is_editor(request, self.persona)
        assert is_editor(request, self.addon)

    def test_is_editor_for_persona_reviewer(self):
        # A persona editor is NOT an addon editor.
        self.grant_permission(self.user, 'Personas:Review')
        request = self._request()
        assert is_editor(request, self.persona)
        assert not is_editor(request, self.addon)
| jpetto/olympia | src/olympia/access/tests.py | Python | bsd-3-clause | 9,223 | 0 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008-2013,2015-2016,2019 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq compile`."""
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.host import hostname_to_host
from aquilon.worker.templates import Plenary, TemplateDomain
class CommandCompileHostname(BrokerCommand):
    """Contains the logic for `aq compile` applied to a single host."""

    required_parameters = ["hostname"]
    requires_readonly = True

    def render_old(self, session, logger, hostname, pancinclude, pancexclude,
                   pancdebug, cleandeps, **_):
        # NOTE(review): older single-method variant of render() below;
        # presumably retained during a refactor -- confirm it is still called.
        dbhost = hostname_to_host(session, hostname)
        if pancdebug:
            # Debug mode traces everything except the noisy spma functions.
            pancinclude = r'.*'
            pancexclude = r'components/spma/functions.*'
        domain = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                                logger=logger)
        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        with host_plenary.get_key():
            domain.compile(session, only=host_plenary.object_templates,
                           panc_debug_include=pancinclude,
                           panc_debug_exclude=pancexclude,
                           cleandeps=cleandeps)

    def render(self, session, logger, hostname,
               pancinclude, pancexclude, pancdebug, cleandeps, **_):
        domain, host_plenary = self._preprocess(session, logger, hostname)
        if pancdebug:
            # Debug mode traces everything except the noisy spma functions.
            pancinclude = r'.*'
            pancexclude = r'components/spma/functions.*'
        self._compile_template_domain(session, domain, host_plenary,
                                      pancinclude, pancexclude, cleandeps)

    @staticmethod
    def _preprocess(session, logger, hostname):
        # Resolve the host record and build the compile inputs for it.
        dbhost = hostname_to_host(session, hostname)
        domain = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                                logger=logger)
        return domain, Plenary.get_plenary(dbhost, logger=logger)

    @staticmethod
    def _compile_template_domain(session, template_domain, plenary,
                                 pancinclude, pancexclude, cleandeps):
        # Hold the plenary lock for the duration of the compile.
        with plenary.get_key():
            template_domain.compile(session, only=plenary.object_templates,
                                    panc_debug_include=pancinclude,
                                    panc_debug_exclude=pancexclude,
                                    cleandeps=cleandeps)
| quattor/aquilon | lib/aquilon/worker/commands/compile_hostname.py | Python | apache-2.0 | 3,029 | 0.00033 |
from numpy import NaN
from pandas import DataFrame
from asteroid_scraper.math.qfunction import Qfunction
from asteroid_scraper.math.tisserand import tisserand
from asteroid_scraper.utils.progress_bar import ProgressBarThread
class AsteroidFinder(object):
    """Filter asteroid orbits and rank them against cycler orbits.

    Borg pattern: every instance shares its __dict__ through
    ``__shared_state``, so the filter bounds are configured only once by
    the first instance created.
    """
    __shared_state = {}
    client = None

    def _borg_init(self, min_semiax, max_semiax, min_ecce, max_ecce, min_incl, max_incl, sort_key):
        # One-time initialization of the shared filter bounds and sort key.
        self.min_semiax = min_semiax
        self.max_semiax = max_semiax
        self.min_ecce = min_ecce
        self.max_ecce = max_ecce
        self.min_incl = min_incl
        self.max_incl = max_incl
        # 'delta_' + sort_key must name one of the result columns built in
        # find_asteroids() ('tisserand' or 'q_function').
        self.sort_key = sort_key

    def __init__(self, min_semiax, max_semiax, min_ecce, max_ecce, min_incl, max_incl, sort_key):
        self.__dict__ = self.__shared_state
        if not self.__shared_state:
            self._borg_init(min_semiax, max_semiax, min_ecce, max_ecce, min_incl, max_incl, sort_key)

    def find_asteroids(self, cycler_orbits, asteroids_orbits):
        """Pair every cycler orbit with every asteroid passing the filter.

        Both arguments are pandas DataFrames of orbital elements; both are
        mutated in place (tisserand/q_function columns are added).  Returns
        a DataFrame with the values for each pair and their absolute deltas,
        sorted by 'delta_' + self.sort_key.
        """
        pb = ProgressBarThread("Computing tisserand")
        pb.start()
        asteroids_orbits['tisserand'] = asteroids_orbits.apply(lambda row: tisserand(row), axis=1)
        cycler_orbits['tisserand'] = cycler_orbits.apply(lambda row: tisserand(row), axis=1)
        pb.stop()
        pb = ProgressBarThread("Computing Qfunction")
        pb.start()
        asteroids_orbits['q_function'] = asteroids_orbits.apply(lambda row: Qfunction(row), axis=1)
        cycler_orbits['q_function'] = cycler_orbits.apply(lambda row: Qfunction(row), axis=1)
        pb.stop()
        pb = ProgressBarThread("Scraping asteroids")
        pb.start()
        # from now, we treat data as dict data structure instead of pandas data frame,
        # need more expertise with pandas API ;)
        cycler_orbits = cycler_orbits.to_dict(orient="records")
        asteroids_orbits = asteroids_orbits.to_dict(orient="records")
        asteroids_orbits = self.filter_asteroids(asteroids_orbits)
        results = []
        # Cartesian product: every cycler orbit against every surviving asteroid.
        for i, cycler_orbit in enumerate(cycler_orbits):
            cycler_tisserand = cycler_orbit['tisserand']
            cycler_q_function = cycler_orbit['q_function']
            for orbit in asteroids_orbits:
                delta_tisserand = self._tisserand_delta(cycler_tisserand,
                                                        orbit['tisserand'])
                delta_q_function = self._q_function_delta(cycler_q_function,
                                                          orbit['q_function'])
                results.append({'asteroid_id': orbit['id'],
                                'asteroid_full_name': orbit['full_name'],
                                'asteroid_tisserand': orbit['tisserand'],
                                'asteroid_q_function': orbit['q_function'],
                                'cycler_orbit_index': i,
                                'cycler_orbit_tisserand': cycler_tisserand,
                                'cycler_orbit_q_function': cycler_q_function,
                                'delta_q_function': delta_q_function,
                                'delta_tisserand': delta_tisserand,
                                })
        # back to pandas data frame data structure
        results = DataFrame(results)
        results = results.sort_values('delta_' + self.sort_key)
        pb.stop()
        return results

    def filter_asteroids(self, asteroids_orbits):
        # Keep only orbits whose elements fall inside the configured bounds.
        results = []
        # should be filtered using pandas API in order to achieve efficency
        for orbit in asteroids_orbits:
            if self.min_semiax <= orbit['semiax'] <= self.max_semiax \
               and self.min_ecce <= orbit['ecce'] <= self.max_ecce \
               and self.min_incl <= orbit['incl'] <= self.max_incl:
                results.append(orbit)
        return results

    def _tisserand_delta(self, t1, t2):
        # Absolute difference; NaN when a value is non-numeric.
        # (Python 2 print statement -- this module targets Python 2.)
        try:
            return abs(t1 - t2)
        except TypeError:
            print 'error on tisserand_delta', t1, t2
            return NaN

    def _q_function_delta(self, t1, t2):
        # Same contract as _tisserand_delta, for the Q-function values.
        try:
            return abs(t1 - t2)
        except TypeError:
            print 'error on q_function_delta', t1, t2
            return NaN
| spacecoalmen/asteroid_scraper | asteroid_scraper/finder.py | Python | mit | 4,400 | 0.002273 |
#!/bin/env python
# -*- coding: utf-8 -*-
""" @package XBee Zigbee API Test Programme
Funtions include:
1) AT command;
2) Remote AT command
3) Send single TX request with response in const frequency
4) Send continuous TX requests with/without response in const frequency
5) Flow rate predict/measurement in Pps(Packet per Second)
and bps(Bit per Second)
6) Echo RX response for range test
"""
import os
import time
import wx
import string
import logging
import sys
import traceback
import threading
import struct
import socket
from ConfigParser import SafeConfigParser
from butter import Butter
from wx.lib.newevent import NewEvent
import XBeeIPServices
import PayloadPackage as pp
# Custom wx event classes (and their binder constants) used to hand data
# from worker threads back to the GUI thread.
RxEvent, EVT_RSLT1 = NewEvent()
Rx2Event, EVT_RSLT2 = NewEvent()
RxStaEvent, EVT_STAT = NewEvent()
LogEvent, EVT_LOG = NewEvent()
RxCmpEvent, EVT_RSLT1C = NewEvent()
Rx2CmpEvent, EVT_RSLT2C = NewEvent()
RxGndEvent, EVT_RSLT1G = NewEvent()
RxAirEvent, EVT_RSLT1AIR = NewEvent()

# Module-level logger.
log = logging.getLogger(__name__)
def Get14bit(val):
    """Decode a 14-bit two's-complement field into a signed Python int."""
    # Subtracting the sign bit (0x2000) from the low 13 bits is equivalent
    # to the invert-and-add-one decoding of a negative two's-complement value.
    return (val & 0x1FFF) - (val & 0x2000)
# Human-readable descriptions of AT command response status codes.
at_status = {
    0: 'OK',
    1: 'ERROR',
    2: 'Invalid Command',
    3: 'Invalid Parameter',
    4: 'Tx Failure',
}
# Modem status codes.
# NOTE(review): "moderm_status" looks like a typo for "modem_status";
# renaming would require updating every reference in this module.
moderm_status = {
    0: 'Hardware reset',
    1: 'Watchdog timer reset',
    2: 'Joined network (routers and end devices)',
    3: 'Disassociated',
    6: 'Coordinator started',
    7: 'Network security key was updated',
    0x0D: 'Voltage supply limit exceeded (PRO S2B only)',
    0x0E: 'Device Cloud connected',
    0x0F: 'Device Cloud disconnected',
    0x11: 'Modem configuration changed while join in progress',
    0x80: 'stack error',
}
# Route/address discovery status codes.
discovery_status = {
    0x00: 'No Discovery Overhead',
    0x01: 'Address Discovery',
    0x02: 'Route Discovery',
    0x03: 'Address and Route',
    0x40: 'Extended Timeout Discovery',
}
# Delivery status codes reported in transmit status frames.
delivery_status = {
    0x00: 'Success',
    0x01: 'MAC ACK Failure',
    0x02: 'CCA Failure',
    0x03: 'Transmission was purged because it was attempted before stack was completely up',
    0x15: 'Invalid destination endpoint',
    0x21: 'Network ACK Failure',
    0x22: 'Not Joined to Network',
    0x23: 'Self-addressed',
    0x24: 'Address Not Found',
    0x25: 'Route Not Found',
    0x26: 'Broadcast source failed to hear a neighbor relay the message',
    0x2B: 'Invalid binding table index',
    0x2C: 'Resource error lack of free buffers, timers, etc.',
    0x2D: 'Attempted broadcast with APS transmission',
    0x2E: 'Attempted unicast with APS transmission, but EE=0',
    0x32: 'Resource error lack of free buffers, timers, etc.',
    0x74: 'Data payload too large',
    0x76: 'Attempt to create a client socket fail',
    0x77: 'TCP connection to given IP address and port doesn\'t exist',
    0x78: 'Source port on a UDP transmission does not match a listening port on the transmitting module',
}
# TX status codes.
tx_status = {
    0x00: 'Success',
    0x01: 'No ACK received',
    0x02: 'CCA failure',
    0x03: 'Purged',
}
# Receive-options bitfield descriptions for RX frames.
recv_opts = {
    0x01: 'Packet Acknowledged',
    0x02: 'Packet was a broadcast packet',
    0x20: 'Packet encrypted with APS encryption',
    0x21: 'Packet encrypted with APS encryption',
    0x22: 'Broadcast packet encrypted with APS encryption',
    0x40: 'Packet was sent from an end device',
    0x41: 'Packet was sent from an end device',
    0x42: 'Broadcast packet was sent from an end device',
    0x61: 'APS-encrypted Packet was sent from an end device',
    0x62: 'APS-encrypted Broadcast packet was sent from an end device',
}

# Flags selecting which character class MyValidator accepts.
ALPHA_ONLY = 1
DIGIT_ONLY = 2
HEX_ONLY = 3
class MyValidator(wx.PyValidator):
    """Validator restricting a text control to letters, digits, or hex digits.

    flag is one of the module constants ALPHA_ONLY, DIGIT_ONLY or HEX_ONLY.
    """
    def __init__(self, flag=None, pyVar=None):
        wx.PyValidator.__init__(self)
        self.flag = flag
        self.Bind(wx.EVT_CHAR, self.OnChar)
        # Character set accepted when flag == HEX_ONLY.
        self.hexs = string.digits + 'abcdefABCDEF'

    def Clone(self):
        # Required by wx: each control gets its own copy of the validator.
        return MyValidator(self.flag)

    def Validate(self, win):
        # NOTE(review): unlike OnChar below, DIGIT_ONLY here accepts neither
        # '-' nor '.', so a field containing "-1" would fail validation --
        # confirm whether that is intended.
        tc = self.GetWindow()
        val = tc.GetValue()

        if self.flag == ALPHA_ONLY:
            return all([i in string.letters for i in val])

        elif self.flag == DIGIT_ONLY:
            return all([i in string.digits for i in val])

        elif self.flag == HEX_ONLY:
            return all([i in self.hexs for i in val])

        return True

    def OnChar(self, event):
        key = event.GetKeyCode()
        # Control keys and codes above 255 are always passed through.
        if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255:
            event.Skip()
            return

        if self.flag == HEX_ONLY and chr(key) in self.hexs:
            event.Skip()
            return

        if self.flag == ALPHA_ONLY and chr(key) in string.letters:
            event.Skip()
            return

        if self.flag == DIGIT_ONLY and chr(key) in string.digits:
            event.Skip()
            return

        # DIGIT_ONLY also admits the minus sign and decimal point while typing.
        if self.flag == DIGIT_ONLY and chr(key) in '-':
            event.Skip()
            return

        if self.flag == DIGIT_ONLY and chr(key) in '.':
            event.Skip()
            return

        if not wx.Validator_IsSilent():
            wx.Bell()

        # Returning without calling event.Skip eats the event before it
        # gets to the text control
        return
class RedirectError(object):
    """Write-only stream that funnels text into the module logger at ERROR."""

    def __init__(self):
        pass

    def write(self, string):
        # Drop surrounding newlines/tabs/spaces; skip empty writes entirely.
        message = string.strip('\r\n\t ')
        if not message:
            return
        log.error(message)
class RedirectInfo(object):
    """Write-only stream that funnels text into the module logger at INFO."""

    def __init__(self):
        pass

    def write(self, string):
        # Drop surrounding newlines/tabs/spaces; skip empty writes entirely.
        message = string.strip('\r\n\t ')
        if not message:
            return
        log.info(message)
class RedirectText(object):
    """Write-only stream that forwards text to a wx window as a LogEvent."""
    def __init__(self, parent):
        # parent: the wx event handler that will receive the posted events.
        self.parent = parent

    def write(self, string):
        # Deliver the text to the GUI via the event queue.
        wx.PostEvent(self.parent, LogEvent(log=string))
class MyFrame(wx.Frame):
def __init__(self, parent, ID, title,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE):
wx.Frame.__init__(self, parent, ID, title, pos, size, style)
parser = SafeConfigParser()
parser.read('config.ini')
self.parser = parser
panel = wx.Panel(self, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
box = wx.BoxSizer(wx.HORIZONTAL)
self.btnStart = wx.Button(panel, -1, "Start", size=(100, -1))
box.Add(self.btnStart, 0, wx.ALIGN_CENTER, 5)
box.Add(wx.StaticText(panel, wx.ID_ANY, "Host:"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 1)
self.txtHost = wx.TextCtrl(panel, -1, parser.get('host','AP'), size=(100, -1))
box.Add(self.txtHost, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
self.btnBaseTime = wx.Button(panel, -1, "Set Base Time", size=(100, -1))
self.btnBaseTime.Enable(False)
box.Add(self.btnBaseTime, 0, wx.ALIGN_CENTER, 5)
self.txtRecName = wx.TextCtrl(panel, -1, parser.get('rec','prefix'), )
box.Add(self.txtRecName, 1, wx.ALIGN_CENTER|wx.LEFT, 5)
self.btnALLrec = wx.ToggleButton(panel, -1, "REC")
self.btnALLrec.Enable(False)
box.Add(self.btnALLrec, 0, wx.ALIGN_CENTER, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
AT_CMD = ['MY', 'MK', 'GW', 'SH', 'SL', 'DL', 'C0', 'ID', 'AH', 'MA',
'PL', 'BD', 'AI', 'WR', 'FR',]
HOST_LIST = ["192.168.191.2", "192.168.191.3", "192.168.191.4"]
self.PORT_LIST = ["2616", "2267", "2677", "2000"]
box = wx.BoxSizer(wx.HORIZONTAL)
self.target = 'GND'
self.rbGND = wx.RadioButton(panel, wx.ID_ANY, "GND:",
style=wx.RB_GROUP)
box.Add(self.rbGND, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 1)
self.txtGNDhost = wx.ComboBox(panel, -1, parser.get('host','GND'),
choices=HOST_LIST)
box.Add(self.txtGNDhost, 0, wx.ALIGN_CENTER, 5)
self.txtGNDport = wx.ComboBox(panel, -1, "2616",
choices=self.PORT_LIST[:-1], validator=MyValidator(HEX_ONLY))
box.Add(self.txtGNDport, 0, wx.ALIGN_CENTER, 5)
self.chkGNDsynct = wx.CheckBox(panel, -1, "")
self.chkGNDsynct.SetValue(True)
box.Add(self.chkGNDsynct, 0, wx.ALIGN_CENTER, 5)
self.btnGNDsynct = wx.Button(panel, -1, "Sync Time")
self.btnGNDsynct.Enable(False)
box.Add(self.btnGNDsynct, 0, wx.ALIGN_CENTER, 5)
self.txtGNDinfo = wx.StaticText(panel, wx.ID_ANY, "", size=(32, 16))
self.txtGNDinfo.SetForegroundColour((255, 55, 0))
box.Add(self.txtGNDinfo, 1, wx.ALIGN_CENTER|wx.LEFT, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.rbACM = wx.RadioButton(panel, wx.ID_ANY, "ACM:")
box.Add(self.rbACM, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 1)
self.txtACMhost = wx.ComboBox(panel, -1, parser.get('host','ACM'),
choices=HOST_LIST)
box.Add(self.txtACMhost, 0, wx.ALIGN_CENTER, 5)
self.txtACMport = wx.ComboBox(panel, -1, "2267",
choices=self.PORT_LIST[:-1], validator=MyValidator(HEX_ONLY))
box.Add(self.txtACMport, 0, wx.ALIGN_CENTER, 5)
self.chkACMsynct = wx.CheckBox(panel, -1, "")
self.chkACMsynct.SetValue(True)
box.Add(self.chkACMsynct, 0, wx.ALIGN_CENTER, 5)
self.btnACMsynct = wx.Button(panel, -1, "Sync Time")
self.btnACMsynct.Enable(False)
box.Add(self.btnACMsynct, 0, wx.ALIGN_CENTER, 5)
self.txtACMbat = wx.StaticText(panel, wx.ID_ANY, "", size=(32, 16))
self.txtACMbat.SetForegroundColour((255, 55, 0))
box.Add(self.txtACMbat, 1, wx.ALIGN_CENTER|wx.LEFT, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.rbCMP = wx.RadioButton(panel, wx.ID_ANY, "CMP:")
box.Add(self.rbCMP, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 1)
self.txtCMPhost = wx.ComboBox(panel, -1, parser.get('host','CMP'),
choices=HOST_LIST)
box.Add(self.txtCMPhost, 0, wx.ALIGN_CENTER, 5)
self.txtCMPport = wx.ComboBox(panel, -1, "2677",
choices=self.PORT_LIST[:-1], validator=MyValidator(HEX_ONLY))
box.Add(self.txtCMPport, 0, wx.ALIGN_CENTER, 5)
self.chkCMPsynct = wx.CheckBox(panel, -1, "")
self.chkCMPsynct.SetValue(True)
box.Add(self.chkCMPsynct, 0, wx.ALIGN_CENTER, 5)
self.btnCMPsynct = wx.Button(panel, -1, "Sync Time")
self.btnCMPsynct.Enable(False)
box.Add(self.btnCMPsynct, 0, wx.ALIGN_CENTER, 5)
self.txtCMPbat = wx.StaticText(panel, wx.ID_ANY, "", size=(32, 16))
self.txtCMPbat.SetForegroundColour((255, 55, 0))
box.Add(self.txtCMPbat, 1, wx.ALIGN_CENTER|wx.LEFT, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.btnRmtAT = wx.Button(panel, -1, "Send RemoteAT", size=(100, -1))
self.btnRmtAT.Enable(False)
box.Add(self.btnRmtAT, 0, wx.ALIGN_CENTER, 5)
self.txtRmtATcmd = wx.ComboBox(panel, -1, "MY",
choices=AT_CMD,
size=(50, -1))
self.txtRmtATcmd.SetToolTip(wx.ToolTip('''AT Command in TWO characters :
MY - IP Network Address
MK - IP Address Mask
GW - Gateway IP address
SH - Serial Number High
SL - Serial Number Low
DL - Destination Address Low
C0 - source IP port
ID - SSID
AH - Network Type
MA - IP Addressing Mode. 0=DHCP;1=Static
PL - Power Level
BD - baudrate
AI - Association Indication
WR - write to flash
FR - Software Reset
'''))
box.Add(self.txtRmtATcmd, 0, wx.ALIGN_CENTER, 5)
self.txtRmtATpar = wx.TextCtrl(panel, -1, "",
size=(100, -1),
validator=MyValidator(HEX_ONLY))
self.txtRmtATpar.SetToolTip(wx.ToolTip(
'Hexadecimal Parameter for remote AT Command to set.\n'
'If blanked, just get the parameter.'))
box.Add(self.txtRmtATpar, 0, wx.ALIGN_CENTER, 5)
self.txtRmtATopt = wx.TextCtrl(panel, -1, "02",
size=(30, -1),
validator=MyValidator(HEX_ONLY))
self.txtRmtATopt.SetToolTip(
wx.ToolTip('''Bitfield of supported transmission options
Supported values include the following:
0x00 - Disable retries and route repair
0x02 - Apply changes. '''))
box.Add(self.txtRmtATopt, 0, wx.ALIGN_CENTER, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.btnTX = wx.Button(panel, -1, "Send Ping", size=(100, -1))
self.btnTX.Enable(False)
box.Add(self.btnTX, 0, wx.ALIGN_CENTER, 5)
self.txtTX = wx.TextCtrl(panel, -1, "", size=(130, -1))
self.txtTX.SetToolTip(wx.ToolTip(
'Text to be sent\nIf in continoous mode, the sent text will be prefixed with "P" and 5-digital index number.'))
box.Add(self.txtTX, 1, wx.ALIGN_CENTER, 5)
self.txtTXrad = wx.TextCtrl(panel, -1, "01",
size=(30, -1),
validator=MyValidator(HEX_ONLY))
self.txtTXrad.SetToolTip(wx.ToolTip(
'''Sets maximum number of hops a broadcast transmission can occur.
If set to 0, the broadcast radius will be set to the maximum hops value.'''))
box.Add(self.txtTXrad, 0, wx.ALIGN_CENTER, 5)
self.txtTXopt = wx.TextCtrl(panel, -1, "01",
size=(30, -1),
validator=MyValidator(HEX_ONLY))
self.txtTXopt.SetToolTip(wx.ToolTip(
'''Bitfield of supported transmission options. Supported values include the following:
0x01 - Disable retries and route repair
0x20 - Enable APS encryption (if EE=1)
0x40 - Use the extended transmission timeout
Enabling APS encryption presumes the source and destination have been authenticated. I also decreases the maximum number of RF payload bytes by 4 (below the value reported by NP).
The extended transmission timeout is needed when addressing sleeping end devices.It also increases the retry interval between retries to compensate for end device polling.See Chapter 4, Transmission Timeouts, Extended Timeout for a description.
Unused bits must be set to 0. '''))
box.Add(self.txtTXopt, 0, wx.ALIGN_CENTER, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.btnTM = wx.Button(panel, -1, "Servo Command", size=(100, -1))
self.btnTM.Enable(False)
box.Add(self.btnTM, 0, wx.ALIGN_CENTER, 5)
boxV = wx.BoxSizer(wx.VERTICAL)
boxH = wx.BoxSizer(wx.HORIZONTAL)
self.InputType = wx.Choice(panel, wx.ID_ANY,
choices=['Reset','Step','Doublet','3-2-1-1','Ramp',
'pitch rate','open loop','LinFreq Sweep','ExpFreq Sweep'])
self.InputType.SetSelection(0)
boxH.Add(self.InputType, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "StartTime"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.StartTime = wx.TextCtrl(panel, -1, "500",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
self.StartTime.SetToolTip(wx.ToolTip('milliseconds'))
boxH.Add(self.StartTime, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "TimeDelta"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.TimeDelta = wx.TextCtrl(panel, -1, "500",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
self.TimeDelta.SetToolTip(wx.ToolTip('milliseconds'))
boxH.Add(self.TimeDelta, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "NofCycles"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.NofCycles = wx.TextCtrl(panel, -1, "1",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.NofCycles, 0, wx.ALIGN_CENTER, 5)
boxV.Add(boxH, 0, wx.ALIGN_CENTER, 5)
boxH = wx.BoxSizer(wx.HORIZONTAL)
self.Srv2Move1 = wx.CheckBox(panel, -1, "CH1")
boxH.Add(self.Srv2Move1, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "ServoRef"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.ServoRef1 = wx.TextCtrl(panel, -1, "1967",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.ServoRef1, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MaxValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MaxValue1 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MaxValue1, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MinValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MinValue1 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MinValue1, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "Sign"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.Sign1 = wx.TextCtrl(panel, -1, "1",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.Sign1, 0, wx.ALIGN_CENTER, 5)
boxV.Add(boxH, 0, wx.ALIGN_CENTER, 5)
boxH = wx.BoxSizer(wx.HORIZONTAL)
self.Srv2Move2 = wx.CheckBox(panel, -1, "CH2")
boxH.Add(self.Srv2Move2, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "ServoRef"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.ServoRef2 = wx.TextCtrl(panel, -1, "2259",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.ServoRef2, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MaxValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MaxValue2 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MaxValue2, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MinValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MinValue2 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MinValue2, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "Sign"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.Sign2 = wx.TextCtrl(panel, -1, "1",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.Sign2, 0, wx.ALIGN_CENTER, 5)
boxV.Add(boxH, 0, wx.ALIGN_CENTER, 5)
boxH = wx.BoxSizer(wx.HORIZONTAL)
self.Srv2Move3 = wx.CheckBox(panel, -1, "CH3")
boxH.Add(self.Srv2Move3, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "ServoRef"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.ServoRef3 = wx.TextCtrl(panel, -1, "2000",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.ServoRef3, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MaxValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MaxValue3 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MaxValue3, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MinValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MinValue3 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MinValue3, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "Sign"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.Sign3 = wx.TextCtrl(panel, -1, "1",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.Sign3, 0, wx.ALIGN_CENTER, 5)
boxV.Add(boxH, 0, wx.ALIGN_CENTER, 5)
boxH = wx.BoxSizer(wx.HORIZONTAL)
self.Srv2Move4 = wx.CheckBox(panel, -1, "CH4")
boxH.Add(self.Srv2Move4, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "ServoRef"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.ServoRef4 = wx.TextCtrl(panel, -1, "1700",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.ServoRef4, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MaxValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MaxValue4 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MaxValue4, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MinValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MinValue4 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MinValue4, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "Sign"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.Sign4 = wx.TextCtrl(panel, -1, "1",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.Sign4, 0, wx.ALIGN_CENTER, 5)
boxV.Add(boxH, 0, wx.ALIGN_CENTER, 5)
boxH = wx.BoxSizer(wx.HORIZONTAL)
self.Srv2Move5 = wx.CheckBox(panel, -1, "CH5")
boxH.Add(self.Srv2Move5, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "ServoRef"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.ServoRef5 = wx.TextCtrl(panel, -1, "1820",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.ServoRef5, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MaxValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MaxValue5 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MaxValue5, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MinValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MinValue5 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MinValue5, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "Sign"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.Sign5 = wx.TextCtrl(panel, -1, "1",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.Sign5, 0, wx.ALIGN_CENTER, 5)
boxV.Add(boxH, 0, wx.ALIGN_CENTER, 5)
boxH = wx.BoxSizer(wx.HORIZONTAL)
self.Srv2Move6 = wx.CheckBox(panel, -1, "CH6")
boxH.Add(self.Srv2Move6, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "ServoRef"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.ServoRef6 = wx.TextCtrl(panel, -1, "2067",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.ServoRef6, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MaxValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MaxValue6 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MaxValue6, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "MinValue"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.MinValue6 = wx.TextCtrl(panel, -1, "100",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.MinValue6, 0, wx.ALIGN_CENTER, 5)
boxH.Add(wx.StaticText(panel, wx.ID_ANY, "Sign"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.Sign6 = wx.TextCtrl(panel, -1, "1",
size=(50, -1),
validator=MyValidator(DIGIT_ONLY))
boxH.Add(self.Sign6, 0, wx.ALIGN_CENTER, 5)
boxV.Add(boxH, 0, wx.ALIGN_CENTER, 5)
box.Add(boxV, 0, wx.ALIGN_CENTER, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
sub_panel = wx.Panel(panel, -1)
sub_panel.SetDoubleBuffered(True)
sub_sizer = wx.BoxSizer(wx.VERTICAL)
box = wx.BoxSizer(wx.HORIZONTAL)
self.txtRXSta = wx.StaticText(sub_panel, wx.ID_ANY, "")
box.Add(self.txtRXSta, 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 1)
sub_sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.txtRX = wx.StaticText(sub_panel, wx.ID_ANY, "", size=(32, 32))
self.txtRX.SetForegroundColour((0, 0, 255))
box.Add(self.txtRX, 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 1)
sub_sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.txtRX2 = wx.StaticText(sub_panel, wx.ID_ANY, "", size=(32, 16))
self.txtRX2.SetForegroundColour((255, 55, 0))
box.Add(self.txtRX2, 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 1)
sub_sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.txtRX_CMP = wx.StaticText(sub_panel, wx.ID_ANY, "", size=(32, 32))
self.txtRX_CMP.SetForegroundColour((0, 0, 255))
box.Add(self.txtRX_CMP, 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 1)
sub_sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.txtRX2_CMP = wx.StaticText(sub_panel, wx.ID_ANY, "", size=(32, 16))
self.txtRX2_CMP.SetForegroundColour((255, 55, 0))
box.Add(self.txtRX2_CMP, 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 1)
sub_sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.txtRX_GND = wx.StaticText(sub_panel, wx.ID_ANY, "", size=(32, 16))
self.txtRX_GND.SetForegroundColour((155, 55, 0))
box.Add(self.txtRX_GND, 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 1)
sub_sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.txtRX_AIR = wx.StaticText(sub_panel, wx.ID_ANY, "", size=(32, 16))
self.txtRX_AIR.SetForegroundColour((155, 55, 0))
box.Add(self.txtRX_AIR, 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 1)
sub_sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
sub_panel.SetSizer(sub_sizer)
#sub_sizer.Fit(sub_panel)
sizer.Add(sub_panel, 0, wx.ALL | wx.EXPAND, 1)
self.log_txt = wx.TextCtrl(
panel, -1, "",
size=(300, 300),
style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH2)
self.log_txt.SetFont(wx.Font(10, wx.FONTFAMILY_TELETYPE,
wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
self.log = logging.getLogger(__name__)
self.log.setLevel(logging.INFO)
self.log_handle = logging.StreamHandler(RedirectText(self))
self.log_handle.setFormatter(
logging.Formatter('%(asctime)s:%(message)s'))
self.log.addHandler(self.log_handle)
# redirect stdout to log
sys.stdout = RedirectInfo()
sys.stderr = RedirectError()
sizer.Add(self.log_txt, 1, wx.ALL | wx.EXPAND, 1)
box = wx.BoxSizer(wx.HORIZONTAL)
self.btnClr = wx.Button(panel, -1, "Clear")
box.Add(self.btnClr, 1, wx.ALIGN_CENTER, 5)
self.btnSaveLog = wx.Button(panel, -1, "Save Log")
box.Add(self.btnSaveLog, 1, wx.ALIGN_CENTER, 5)
sizer.Add(box, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 1)
panel.SetSizer(sizer)
sizer.Fit(panel)
self.Bind(wx.EVT_BUTTON, self.OnStart, self.btnStart)
self.Bind(wx.EVT_BUTTON, self.OnRmtAT, self.btnRmtAT)
self.Bind(wx.EVT_BUTTON, self.OnSyncACM, self.btnACMsynct)
self.Bind(wx.EVT_BUTTON, self.OnSyncCMP, self.btnCMPsynct)
self.Bind(wx.EVT_BUTTON, self.OnSyncGND, self.btnGNDsynct)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRecALL, self.btnALLrec)
self.Bind(wx.EVT_BUTTON, self.OnSetBaseTime, self.btnBaseTime)
self.Bind(wx.EVT_BUTTON, self.OnTX, self.btnTX)
self.Bind(wx.EVT_BUTTON, self.OnTestMotor, self.btnTM)
self.Bind(wx.EVT_BUTTON, self.OnClr, self.btnClr)
self.Bind(wx.EVT_BUTTON, self.OnSaveLog, self.btnSaveLog)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(EVT_RSLT1, self.OnRX)
self.Bind(EVT_RSLT2, self.OnRX2)
self.Bind(EVT_RSLT1C, self.OnRX_CMP)
self.Bind(EVT_RSLT2C, self.OnRX2_CMP)
self.Bind(EVT_RSLT1G, self.OnRX_GND)
self.Bind(EVT_RSLT1AIR, self.OnRX_AIR)
self.Bind(EVT_STAT, self.OnRXSta)
self.Bind(EVT_LOG, self.OnLog)
self.Bind(wx.EVT_RADIOBUTTON, self.OnChooseACM, self.rbACM)
self.Bind(wx.EVT_RADIOBUTTON, self.OnChooseCMP, self.rbCMP)
self.Bind(wx.EVT_RADIOBUTTON, self.OnChooseGND, self.rbGND)
self.fileALL = None
self.butt = [Butter()]*4
    def OnRecALL(self, event) :
        # Toggle handler for the record button: on check, open a fresh
        # timestamped .dat file (name prefix taken from the record-name
        # text field) for binary packet logging; on uncheck, close it and
        # drop the reference so process() stops writing.
        if event.IsChecked():
            self.filename = time.strftime('{}%Y%m%d%H%M%S.dat'.format(
                self.txtRecName.GetValue()))
            self.fileALL = open(self.filename, 'wb')
            self.log.info('Recording to {}.'.format(self.filename))
        else:
            self.fileALL.close()
            self.log.info('Stop Recording to {}.'.format(self.filename))
            self.fileALL = None
    def OnLog(self, event) :
        # Append a log line posted (possibly from a worker thread) to the
        # read-only log window.
        self.log_txt.AppendText(event.log)
    def OnSetBaseTime(self, event) :
        # Latch the local reference tick (T0) used by the NTP-style clock
        # synchronisation in the OnSync* handlers and in process().
        # NOTE(review): time.clock() is Python-2 era API; it was removed in
        # Python 3.8 (time.perf_counter() is the replacement there).
        self.ntp_tick0 = time.clock()
        self.log.info('Set Local T0')
    def OnSyncGND(self, event) :
        # Make GND the current target and start a clock-sync handshake by
        # sending 'S'<code><local elapsed us> to it.
        self.rbGND.SetValue(True)
        self.target = 'GND'
        # NOTE(review): rbGND was just forced True above, so code is always
        # 0 here; the else-value 5 looks unreachable -- confirm intent.
        code = 0 if self.rbGND.GetValue() else 5
        self.btnALLrec.Enable(True)
        if not hasattr(self, 'ntp_tick0') :
            self.OnSetBaseTime(None)
        # Local elapsed time since T0, in microseconds.
        self.ntp_T0 = int((time.clock() - self.ntp_tick0)*1e6)
        self.send(self.packNTP.pack(ord('S'),code,self.ntp_T0))
        self.log.info('Local T0={}us'.format(self.ntp_T0))
    def OnSyncACM(self, event) :
        # Same handshake as OnSyncGND, but targeting the ACM node.
        self.rbACM.SetValue(True)
        self.target = 'ACM'
        # NOTE(review): always 0 -- rbACM was just set True above.
        code = 0 if self.rbACM.GetValue() else 5
        self.btnALLrec.Enable(True)
        if not hasattr(self, 'ntp_tick0') :
            self.OnSetBaseTime(None)
        self.ntp_T0 = int((time.clock() - self.ntp_tick0)*1e6)
        self.send(self.packNTP.pack(ord('S'),code,self.ntp_T0))
        self.log.info('Local T0={}us'.format(self.ntp_T0))
    def OnSyncCMP(self, event) :
        # Same handshake as OnSyncGND, but targeting the CMP node.
        self.rbCMP.SetValue(True)
        self.target = 'CMP'
        # NOTE(review): always 0 -- rbCMP was just set True above.
        code = 0 if self.rbCMP.GetValue() else 5
        self.btnALLrec.Enable(True)
        if not hasattr(self, 'ntp_tick0') :
            self.OnSetBaseTime(None)
        self.ntp_T0 = int((time.clock() - self.ntp_tick0)*1e6)
        self.send(self.packNTP.pack(ord('S'),code,self.ntp_T0))
        self.log.info('Local T0={}us'.format(self.ntp_T0))
    def OnRX(self, event) :
        # Display the latest ACM sensor-packet summary line.
        self.txtRX.SetLabel(event.txt)
    def OnRX2(self, event) :
        # Display the latest ACM communication-status line.
        self.txtRX2.SetLabel(event.txt)
    def OnRX_AIR(self, event) :
        # Display the latest airspeed-meter line.
        self.txtRX_AIR.SetLabel(event.txt)
    def OnRX_GND(self, event) :
        # Display the latest ground-rig ADC line.
        self.txtRX_GND.SetLabel(event.txt)
    def OnRX_CMP(self, event) :
        # Display the latest CMP sensor-packet summary line.
        self.txtRX_CMP.SetLabel(event.txt)
    def OnRX2_CMP(self, event) :
        # Display the latest CMP communication-status line.
        self.txtRX2_CMP.SetLabel(event.txt)
    def OnRXSta(self, event) :
        # Display the packet/throughput statistics line from
        # updateStatistics().
        self.txtRXSta.SetLabel(event.txt)
    def OnChooseACM(self, event):
        # Radio-button handler: route subsequent send()/OnRmtAT traffic to
        # the ACM node.
        self.target = 'ACM'
        self.log.info('Target {}'.format(self.target))
    def OnChooseCMP(self, event):
        # Radio-button handler: route subsequent send()/OnRmtAT traffic to
        # the CMP node.
        self.target = 'CMP'
        self.log.info('Target {}'.format(self.target))
    def OnChooseGND(self, event):
        # Radio-button handler: route subsequent send()/OnRmtAT traffic to
        # the GND node.
        self.target = 'GND'
        self.log.info('Target {}'.format(self.target))
    def OnSaveLog(self, event):
        # Prompt for a path and dump the log window contents to it.
        # NOTE(review): wx.SAVE is the classic-wx (2.8) flag name; wxPython
        # Phoenix spells it wx.FD_SAVE -- confirm the wx version in use.
        dlg = wx.FileDialog(
            self, message="Save log as ...", defaultDir=os.getcwd(),
            defaultFile="log.txt", wildcard="Text file(*.txt)|*.txt",
            style=wx.SAVE)
        if dlg.ShowModal() == wx.ID_OK:
            self.log_txt.SaveFile(dlg.GetPath())
def OnClr(self, event):
self.log_txt.Clear()
self.txtRXSta.SetLabel('')
self.txtRX.SetLabel('')
self.txtRX2.SetLabel('')
self.txtRX_AIR.SetLabel('')
self.txtRX_GND.SetLabel('')
self.txtRX_CMP.SetLabel('')
self.txtRX2_CMP.SetLabel('')
self.txtACMbat.SetLabel('')
self.txtCMPbat.SetLabel('')
self.txtGNDinfo.SetLabel('')
self.first_cnt = True
self.arrv_cnt = 0
self.arrv_cnt_22 = 0
self.arrv_cnt_33 = 0
self.arrv_cnt_44 = 0
self.last_arrv_cnt = 0
self.arrv_bcnt = 0
self.periodic_count = 0
    def OnClose(self, event):
        # Window-close handler: stop the worker threads, persist host and
        # recording settings to config.ini, close any open recording file
        # and detach the log handler before the frame is destroyed.
        # NOTE(review): format_exc() here runs outside any except block, so
        # it only appends 'None...' -- probably meant as a plain marker.
        self.log.info("clean_up" + traceback.format_exc())
        try:
            # Signal the threads and give them a moment to notice.
            self.halting = True
            time.sleep(0.2)
        except:
            pass
        parser = self.parser
        parser.set('host','AP', self.txtHost.GetValue())
        parser.set('host','GND', self.txtGNDhost.GetValue())
        parser.set('host','ACM', self.txtACMhost.GetValue())
        parser.set('host','CMP', self.txtCMPhost.GetValue())
        parser.set('rec','prefix', self.txtRecName.GetValue())
        cfg = open('config.ini', 'w')
        parser.write(cfg)
        cfg.close()
        if self.fileALL:
            self.fileALL.close()
        self.log.removeHandler(self.log_handle)
        event.Skip()
    def OnRmtAT(self, event):
        # Send a remote AT command to the currently selected target node.
        # The option and parameter text fields hold hex strings; decoding
        # uses Python-2 str.decode('hex').
        try:
            if self.target == 'ACM' :
                remote_host = self.txtACMhost.GetValue().encode()
            elif self.target == 'CMP' :
                remote_host = self.txtCMPhost.GetValue().encode()
            else :
                remote_host = self.txtGNDhost.GetValue().encode()
            # First hex byte of the options field only.
            options = self.txtRmtATopt.GetValue().encode()[:2].decode('hex')[0]
            command = self.txtRmtATcmd.GetValue().encode()[:2]
            parameter = self.txtRmtATpar.GetValue().encode()
            if len(parameter) == 0:
                # Empty parameter means "query the register".
                parameter = None
                self.log.info('get AT ' + command + ' from ' + remote_host +
                              ' with option {:02x}'.format(ord(options)))
            else:
                # Pad odd-length hex strings with a leading zero.
                if len(parameter) % 2 == 1:
                    parameter = '0' + parameter
                parameter = parameter.decode('hex')
                self.log.debug('send AT ' + command + '=' + ':'.join(
                    '{:02x}'.format(ord(c)) for c in parameter) + ' to '
                    + remote_host + ' with option {:02x}'.format(ord(options)))
            self.frame_id = self.getFrameId()
            self.service.sendConfigCommand(remote_host, command, parameter,
                                           frame_id=self.frame_id, options=options)
        except:
            traceback.print_exc()
def OnTestMotor(self, event):
InputType = self.InputType.GetSelection()+1
if InputType == 1:
ServoRef = [int(self.ServoRef1.GetValue()),
int(self.ServoRef2.GetValue()),
int(self.ServoRef3.GetValue()),
int(self.ServoRef4.GetValue()),
int(self.ServoRef5.GetValue()),
int(self.ServoRef6.GetValue()) ]
data = struct.pack('>2B6H', 0xA5, InputType, *ServoRef)
else :
Srv2Move = (1 if self.Srv2Move1.GetValue() else 0) \
| (2 if self.Srv2Move2.GetValue() else 0) \
| (4 if self.Srv2Move3.GetValue() else 0) \
| (8 if self.Srv2Move4.GetValue() else 0) \
| (16 if self.Srv2Move5.GetValue() else 0) \
| (32 if self.Srv2Move6.GetValue() else 0)
others = [ int(self.MaxValue1.GetValue()),
int(self.MaxValue2.GetValue()),
int(self.MaxValue3.GetValue()),
int(self.MaxValue4.GetValue()),
int(self.MaxValue5.GetValue()),
int(self.MaxValue6.GetValue()),
int(self.MinValue1.GetValue()),
int(self.MinValue2.GetValue()),
int(self.MinValue3.GetValue()),
int(self.MinValue4.GetValue()),
int(self.MinValue5.GetValue()),
int(self.MinValue6.GetValue()),
int(self.Sign1.GetValue()),
int(self.Sign2.GetValue()),
int(self.Sign3.GetValue()),
int(self.Sign4.GetValue()),
int(self.Sign5.GetValue()),
int(self.Sign6.GetValue()),
]
starttime = int(self.StartTime.GetValue())
deltatime = int(self.TimeDelta.GetValue())
nofcyc = int(self.NofCycles.GetValue())
data = struct.pack('>3B2HB6B6B6B', 0xA5, InputType, Srv2Move,
starttime, deltatime, nofcyc, *others)
if InputType == 1 :
self.OutputCnt = starttime*nofcyc/20+3+10
elif InputType == 2 :
self.OutputCnt = (starttime+deltatime)*nofcyc/20+3+10
elif InputType == 7 :
self.OutputCnt = starttime*nofcyc/20+3+10
self.OutputSrv2Move = Srv2Move
txt = '#Time,'
if self.OutputSrv2Move & 1 :
txt += 'Servo1,Ctrl1,'
if self.OutputSrv2Move & 2 :
txt += 'Servo2,Ctrl2,'
if self.OutputSrv2Move & 4 :
txt += 'Servo3,Ctrl3,'
if self.OutputSrv2Move & 8 :
txt += 'Servo4,Ctrl4,'
if self.OutputSrv2Move & 16 :
txt += 'Servo5,Ctrl5,'
if self.OutputSrv2Move & 32 :
txt += 'Servo6,Ctrl6,'
self.log.info(txt)
self.send(data)
    def OnTX(self, event):
        # Send a 'P' (ping) packet carrying the TX text-field payload and
        # remember the send tick so process() can report round-trip time.
        data = self.txtTX.GetValue().encode()
        self.send('P\x00'+data)
        self.ping_tick = time.clock()
    def send(self, data):
        """Frame *data* with pp.pack and send it via UDP to the host/port
        of the currently selected target ('ACM', 'CMP', default GND).

        Returns True on success, False (after printing the traceback) on
        failure.  Port fields are hex strings decoded Python-2 style.
        """
        try:
            if data:
                if self.target == 'ACM' :
                    remote_host = self.txtACMhost.GetValue().encode()
                    remote_port = self.port_struct.unpack(
                        self.txtACMport.GetValue().decode('hex'))[0]
                elif self.target == 'CMP' :
                    remote_host = self.txtCMPhost.GetValue().encode()
                    remote_port = self.port_struct.unpack(
                        self.txtCMPport.GetValue().decode('hex'))[0]
                else :
                    remote_host = self.txtGNDhost.GetValue().encode()
                    remote_port = self.port_struct.unpack(
                        self.txtGNDport.GetValue().decode('hex'))[0]
                self.tx_socket.sendto(pp.pack(data), (remote_host, remote_port))
                # Remember when the last packet left, for diagnostics.
                self.tick = time.clock()
        except:
            traceback.print_exc()
            return False
        else:
            return True
def getFrameId(self):
fid = self.frame_id
self.frame_id += 1
if self.frame_id > 255:
self.frame_id = 1
return fid
    def OnStart(self, event):
        # One-shot initialisation: reset the RX statistics, pre-compile all
        # struct layouts used by process(), start the XBee application
        # service thread, then one UDP listener thread per known port.
        self.btnStart.Enable(False)
        self.txtHost.Enable(False)
        self.starting = True
        self.frame_id = 0
        self.first_cnt = True
        self.arrv_cnt = 0
        self.arrv_cnt_22 = 0
        self.arrv_cnt_44 = 0
        self.arrv_cnt_33 = 0
        self.last_arrv_cnt = 0
        self.arrv_bcnt = 0
        self.periodic_count = 0
        self.frame_id = 1
        # Pre-compiled binary layouts; names match the packet's first byte
        # as dispatched in process() (e.g. pack22 for 0x22 sensor packets).
        self.port_struct = struct.Struct("!H")
        self.pack22 = struct.Struct(">B6H3H6HI6h")
        self.pack77 = struct.Struct(">B3HI")
        self.pack78 = struct.Struct(">B3HI")
        self.packAA = struct.Struct(">BHI")
        self.pack88 = struct.Struct(">B3BI")
        self.pack33 = struct.Struct(">B4H4HI4h")
        self.pack44 = struct.Struct(">B4HI")
        self.packNTP = struct.Struct(">2BI")
        self.packNTP1 = struct.Struct(">2I")
        self.packNTP2 = struct.Struct(">2B2I")
        self.packNTP3 = struct.Struct(">IhiI")
        self.packNTP13 = struct.Struct(">bhi")
        self.packHdr = struct.Struct(">BH")
        self.packT = struct.Struct(">I")
        self.packMeter = struct.Struct(">B2f")
        self.ch = 0
        self.test_motor_ticks = 0
        self.starting = False
        self.OutputSrv2Move = 0
        self.OutputCnt = 0
        # Enable the controls that require a running service.
        self.btnRmtAT.Enable(True)
        self.btnACMsynct.Enable(True)
        self.btnCMPsynct.Enable(True)
        self.btnGNDsynct.Enable(True)
        self.btnBaseTime.Enable(True)
        self.btnTX.Enable(True)
        self.btnTM.Enable(True)
        self.halting = False
        try:
            host = self.txtHost.GetValue().encode()
            self.service = XBeeIPServices.XBeeApplicationService(host)
            # Daemon thread so it never blocks interpreter shutdown.
            self.thread = threading.Thread(target=self.run)
            self.thread.daemon = True
            self.thread.start()
            print '{} started on {}'.format(self.thread.name, host)
        except:
            self.log.error(traceback.format_exc())
        # Bind one UDP socket (0.1s timeout) per configured hex port and
        # monitor each on its own daemon thread.
        all_ports = [(self.port_struct.unpack(i.decode('hex'))[0],i)
                     for i in self.PORT_LIST]
        for i,port_name in all_ports :
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                # NOTE(review): tx_socket ends up being the last bound
                # socket after this loop -- confirm that is intended.
                self.tx_socket = sock
                sock.bind((host,i))
                sock.settimeout(0.1)
                thread = threading.Thread(target=self.monitor, args=(sock, port_name))
                thread.daemon = True
                thread.start()
                print '{} started, listening on {}'.format(thread.name,
                                                           sock.getsockname())
            except:
                self.log.error(traceback.format_exc())
    def monitor(self, sock, port_name):
        # Listener-thread body: pump datagrams from one UDP socket into
        # process() until the GUI sets self.halting.
        while not self.halting:
            try:
                (rf_data,address)=sock.recvfrom(1500)
                data = {'id':'rx', 'source_addr':address, 'rf_data':rf_data}
                self.process(data)
            except socket.timeout :
                # The 0.1s timeout set in OnStart lets the loop re-check
                # self.halting periodically.
                pass
    def run(self):
        # XBee service-thread body: poll for application packets and feed
        # them to process() until the GUI sets self.halting.
        while not self.halting:
            data = self.service.getPacket()
            if data :
                self.process(data)
    def updateStatistics(self, bcnt):
        # Track packet/byte arrival counters and publish a throughput line
        # every 100 packets.  The first packet only latches the start time.
        if self.first_cnt:
            self.first_cnt = False
            self.start_cnt = time.clock()
        else:
            self.arrv_cnt += 1
            self.arrv_bcnt += bcnt
            elapsed = time.clock() - self.start_cnt
            if (self.arrv_cnt % 100) == 0 :
                # NOTE(review): bytes*10/elapsed -- the factor 10 looks
                # like bits-per-byte incl. framing; confirm intended units.
                wx.PostEvent(self, RxStaEvent(txt=
                    'C{:0>5d}/T{:<.2f} {:03.0f}Pps/{:05.0f}bps'.format(
                    self.arrv_cnt, elapsed, self.arrv_cnt / elapsed,
                    self.arrv_bcnt * 10 / elapsed)))
    def process(self, data) :
        # Central dispatcher for everything received from the XBee service
        # thread and the UDP listener threads.  'rx' frames carry batched
        # telemetry packets (dispatched on their first byte); 'tx_status'
        # and 'remote_at_response' frames report command outcomes.
        if data['id'] == 'rx':
            try:
                addr = data['source_addr']
                data_group = data['rf_data']
                self.updateStatistics(len(data_group))
                '''
                self.log.info( 'rf_data:{}'.format(
                    ':'.join('{:02x}'.format(ord(c)) for c in data_group)))
                '''
                rf_data_group = pp.unpack(data_group)
                for rf_data in rf_data_group :
                    # 'S': NTP-style clock-synchronisation exchange.
                    if rf_data[0] == 'S':
                        if rf_data[1] == '\x01' :
                            # Step 1 reply: remote sent T0/T1; answer with
                            # our T2/T3 and log delay/offset.
                            T2 = int((time.clock() -
                                      self.ntp_tick0)*1e6)
                            rslt = self.packNTP1.unpack(rf_data[2:])
                            T0 = rslt[0]
                            T1 = rslt[1]
                            T3 = int((time.clock() -
                                      self.ntp_tick0)*1e6)
                            self.send(self.packNTP2.pack(ord('S'),2,T2,T3))
                            time.sleep(0.01)
                            delay = (T2-self.ntp_T0)-(T1-T0)
                            offset = ((T0-self.ntp_T0)+(T1-T2))/2
                            self.log.info(('Remote Time0={}us\n'
                                           'Remote Time1={}us\n'
                                           'Local Time2={}us\nLocal Time3={}us\n'
                                           'Delay={}us, Offset={}us'
                                           ).format(T0,T1,T2,T3,delay,offset))
                        elif rf_data[1] == '\x03' :
                            # Step 3 reply: remote reports its computed
                            # delay/offset and timestamps.
                            T6 = int((time.clock() -
                                      self.ntp_tick0)*1e6)
                            rslt = self.packNTP3.unpack(rf_data[2:])
                            T4 = rslt[0]
                            self.log.info('Remote Time4={}us'.format(T4))
                            delay = rslt[1]
                            offset = rslt[2]
                            self.log.info('Delay={}us, Offset={}us'.format(delay,offset))
                            T5 = rslt[3]
                            self.log.info('Remote Time={}us, Local Time={}us'.format(T5,T6))
                        elif rf_data[1] == '\x13' :
                            # Relayed sync result for another node (A/C).
                            rslt = self.packNTP13.unpack(rf_data[2:])
                            target = rslt[0]
                            delay = rslt[1]
                            offset = rslt[2]
                            self.log.info('{} Delay={}us, Offset={}us'.format(
                                {ord('A'):'ACM',ord('C'):'CMP'}[target],
                                delay,offset))
                    # 'P': ping echo -- report round-trip time per origin.
                    elif rf_data[0] == 'P':
                        deltaT = (time.clock() - self.ping_tick)*1000
                        if rf_data[1] == '\x01':
                            self.log.info('Ping back {} in {:.1f}ms, from GND {}'.format(
                                rf_data[2:], deltaT, addr))
                        elif rf_data[1] == '\x02':
                            self.log.info('Ping back {} in {:.1f}ms, from ACM {}'.format(
                                rf_data[2:], deltaT, addr))
                        elif rf_data[1] == '\x03':
                            self.log.info('Ping back {} in {:.1f}ms, from CMP {}'.format(
                                rf_data[2:], deltaT, addr))
                    # 0x22: ACM sensor packet (servos, encoders, IMU).
                    elif rf_data[0] == '\x22':
                        if self.fileALL:
                            # Record with a 0x7e+length header for replay.
                            self.fileALL.write(self.packHdr.pack(0x7e,
                                                                 len(rf_data)))
                            self.fileALL.write(rf_data)
                        if self.OutputCnt > 0 or \
                           self.arrv_cnt_22 > 10 :
                            rslt = self.pack22.unpack(rf_data)
                            T = rslt[16]*1e-6
                            # Scale raw 14-bit IMU words to deg/s and g.
                            GX = Get14bit(rslt[10])*0.05
                            GY = Get14bit(rslt[11])*-0.05
                            GZ = Get14bit(rslt[12])*-0.05
                            AX = Get14bit(rslt[13])*-0.003333
                            AY = Get14bit(rslt[14])*0.003333
                            AZ = Get14bit(rslt[15])*0.003333
                            if self.OutputCnt > 0 :
                                # Motor-test CSV dump (see OnTestMotor).
                                self.OutputCnt -= 1
                                txt = '{:.2f},'.format(T)
                                if self.OutputSrv2Move & 1 :
                                    txt += '{},{},'.format(rslt[1], rslt[17])
                                if self.OutputSrv2Move & 2 :
                                    txt += '{},{},'.format(rslt[2], rslt[18])
                                if self.OutputSrv2Move & 4 :
                                    txt += '{},{},'.format(rslt[3], rslt[19])
                                if self.OutputSrv2Move & 8 :
                                    txt += '{},{},'.format(rslt[4], rslt[20])
                                if self.OutputSrv2Move & 16 :
                                    txt += '{},{},'.format(rslt[5], rslt[21])
                                if self.OutputSrv2Move & 32 :
                                    txt += '{},{},'.format(rslt[6], rslt[22])
                                self.log.info(txt)
                            if self.arrv_cnt_22 > 10 :
                                # Refresh the GUI only every ~11 packets.
                                self.arrv_cnt_22 = 0
                                txt = ('T{0:08.3f} SenPack '
                                       '1S{1:04d}/{16:+04d} 2S{2:04d}/{17:+04d} '
                                       '3S{3:04d}/{18:+04d} 4S{4:04d}/{19:+04d} '
                                       '5S{5:04d}/{20:+04d} 6S{6:04d}/{21:+04d}\n'
                                       '1E{7:04d} 2E{8:04d} 3E{9:04d} '
                                       'GX{10:6.1f} GY{11:6.1f} GZ{12:6.1f} '
                                       'AX{13:6.2f} AY{14:6.2f} AZ{15:6.2f} ').format(T,
                                       rslt[1],rslt[2],rslt[3],
                                       rslt[4],rslt[5],rslt[6],
                                       rslt[7],rslt[8],rslt[9],
                                       GX,GY,GZ, AX,AY,AZ,
                                       rslt[17],rslt[18],rslt[19],rslt[20],
                                       rslt[21],rslt[22])
                                wx.PostEvent(self, RxEvent(txt=txt))
                                self.log.debug(txt)
                        else:
                            self.arrv_cnt_22 += 1
                    # 0x33: CMP sensor packet (4 servos/encoders).
                    elif rf_data[0] == '\x33':
                        if self.fileALL:
                            self.fileALL.write(self.packHdr.pack(0x7e,
                                                                 len(rf_data)))
                            self.fileALL.write(rf_data)
                        if self.OutputCnt > 0 or \
                           self.arrv_cnt_33 > 10:
                            rslt = self.pack33.unpack(rf_data)
                            T = rslt[9]*1e-6
                            if self.OutputCnt > 0 :
                                self.OutputCnt -= 1
                                txt = '{:.2f},'.format(T)
                                if self.OutputSrv2Move & 1 :
                                    txt += '{},{},'.format(rslt[1], rslt[10])
                                if self.OutputSrv2Move & 2 :
                                    txt += '{},{},'.format(rslt[2], rslt[11])
                                if self.OutputSrv2Move & 4 :
                                    txt += '{},{},'.format(rslt[3], rslt[12])
                                if self.OutputSrv2Move & 8 :
                                    txt += '{},{},'.format(rslt[4], rslt[13])
                                self.log.info(txt)
                            if self.arrv_cnt_33 > 10:
                                self.arrv_cnt_33 = 0
                                self.last_arrv_cnt = self.arrv_cnt
                                txt = ('T{0:08.2f} SenPack '
                                       '1S{1:04d}/{9:+04d} 2S{2:04d}/{10:+04d} '
                                       '3S{3:04d}/{11:+04d} 4S{4:04d}/{12:+04d}\n'
                                       '1E{5:04d} 2E{6:04d} 3E{7:04d} 4E{8:04d} '
                                       ).format(T, rslt[1],rslt[2],rslt[3], rslt[4],
                                                rslt[5],rslt[6], rslt[7],rslt[8],
                                                rslt[10],rslt[11],rslt[12],rslt[13])
                                wx.PostEvent(self, RxCmpEvent(txt=txt))
                                self.log.debug(txt)
                            else:
                                self.arrv_cnt_33 += 1
                    # 0xA5/0xA6: echoed command packets -- just record them.
                    elif rf_data[0] == '\xA5':
                        #rslt = self.packAA.unpack(rf_data)
                        if self.fileALL:
                            self.fileALL.write(self.packHdr.pack(0x7e,
                                                                 len(rf_data)))
                            self.fileALL.write(rf_data)
                        txt = ('A5 cmd')
                        self.txtGNDinfo.SetLabel(txt)
                    elif rf_data[0] == '\xA6':
                        #rslt = self.packAA.unpack(rf_data)
                        if self.fileALL:
                            self.fileALL.write(self.packHdr.pack(0x7e,
                                                                 len(rf_data)))
                            self.fileALL.write(rf_data)
                        txt = ('A6 cmd')
                        self.txtGNDinfo.SetLabel(txt)
                    # 0x77/0x78: ACM/CMP task-timing status packets.
                    elif rf_data[0] == '\x77':
                        rslt = self.pack77.unpack(rf_data)
                        T = rslt[4]*1e-6
                        txt = ('T{0:08.3f} ACM CommStat senTask{2:d}us svoTask{3:d}us '
                               'msgTask{4:d}us').format(T,*rslt)
                        self.log.debug(txt)
                        wx.PostEvent(self, Rx2Event(txt=txt))
                    elif rf_data[0] == '\x78' :
                        rslt = self.pack78.unpack(rf_data)
                        T = rslt[4]*1e-6
                        txt = ('T{0:08.3f} CMP CommStat senTask{2:d}us svoTask{3:d}us '
                               'msgTask{4:d}us').format(T,*rslt)
                        self.log.debug(txt)
                        wx.PostEvent(self, Rx2CmpEvent(txt=txt))
                    elif rf_data[0] == '\xAA':
                        rslt = self.packAA.unpack(rf_data)
                        txt = ('msgTask{1:d}').format(*rslt)
                        self.txtGNDinfo.SetLabel(txt)
                    # 0x88/0x99: ACM/CMP battery cell voltages.  Raw ADC
                    # counts are scaled per divider, then converted to
                    # per-cell voltages by subtracting the lower cells.
                    elif rf_data[0] == '\x88':
                        rslt = self.pack88.unpack(rf_data)
                        B1 = rslt[1]*1.294e-2*1.515
                        B2 = rslt[2]*1.294e-2*3.0606
                        B3 = rslt[3]*1.294e-2*4.6363
                        B2 -= B1
                        if B2 < 0 :
                            B2 = 0
                        B2 =0 #TODO: cell-2 reading forced to 0 -- sensor issue?
                        B3 -= B1+B2
                        if B3 < 0 :
                            B3 = 0
                        txt = '{:.2f}V {:.2f}V {:.2f}V'.format(B1,B2,B3)
                        self.txtACMbat.SetLabel(txt)
                    elif rf_data[0] == '\x99':
                        rslt = self.pack88.unpack(rf_data)
                        B1 = rslt[1]*1.294e-2*1.515
                        B2 = rslt[2]*1.294e-2*3.0606
                        B3 = rslt[3]*1.294e-2*4.6363
                        B2 -= B1
                        if B2 < 0 :
                            B2 = 0
                        B2 =0 #TODO: cell-2 reading forced to 0 -- sensor issue?
                        B3 -= B1+B2
                        if B3 < 0 :
                            B3 = 0
                        txt = '{:.2f}V {:.2f}V {:.2f}V'.format(B1,B2,B3)
                        self.txtCMPbat.SetLabel(txt)
                    # 0x44: ground-rig ADC packet.
                    elif rf_data[0] == '\x44':
                        if self.fileALL:
                            self.fileALL.write(self.packHdr.pack(0x7e,
                                                                 len(rf_data)))
                            self.fileALL.write(rf_data)
                        if self.arrv_cnt_44 > 10:
                            self.arrv_cnt_44 = 0
                            rslt = self.pack44.unpack(rf_data)
                            T = rslt[5]*1e-6
                            #ADC = [self.butt[i].update(rslt[1+i]) for i in range(4)]
                            ADC = [rslt[1+i] for i in range(4)]
                            txt = ('RIG {:.3f}s ADC:{:04.0f}|{:04.0f}'
                                   '|{:04.0f}|{:04.0f}(unfilted)').format(T,*ADC)
                            wx.PostEvent(self, RxGndEvent(txt=txt))
                        else:
                            self.arrv_cnt_44 += 1
                    # 'T': airspeed meter text packet (CSV payload after a
                    # 4-byte timestamp).
                    elif rf_data[0] == 'T':
                        try:
                            txt = rf_data[5:-2]
                            items = txt.split(',')
                            vel = float(items[1])
                            dp = float(items[4])
                            T = self.packT.unpack(rf_data[1:5])[0]*1e-6
                            if self.fileALL:
                                # Re-encode as a compact 'A' meter record.
                                data = self.packMeter.pack(ord('A'),vel,dp)
                                self.fileALL.write(self.packHdr.pack(0x7e,
                                                                     len(data)))
                                self.fileALL.write(data)
                            txt = 'Get Vel{:.2f}m/s {:.1f}pa from {} at {:.3f}'.format(
                                vel, dp, addr, T)
                            wx.PostEvent(self, RxAirEvent(txt=txt))
                        except :
                            traceback.print_exc()
                    else:
                        self.log.debug('Get {} from {}'.format(
                            rf_data.__repr__(), addr))
            except:
                traceback.print_exc()
                self.log.error(repr(data))
        elif data['id'] == 'tx_status':
            # Transmit status report; field set differs between ZigBee
            # (ZB) and plain 802.15.4 firmwares.
            try:
                if self.use_ZB :
                    del_sta = ord(data['deliver_status'])
                    dis_sta = ord(data['discover_status'])
                    retries = ord(data['retries'])
                    if self.frame_id != ord(data['frame_id']):
                        self.log.error("TXResponse frame_id mismatch"
                                       "{}!={}".format(self.frame_id,
                                                       ord(data['frame_id'])))
                    addr = data['dest_addr']
                    del_sta = delivery_status[del_sta]
                    dis_sta = discovery_status[dis_sta]
                    self.log.info(
                        'TXResponse:{} to {} ({:d} retries,{})'.format(
                            del_sta, ':'.join('{:02x}'.format(ord(c))
                                              for c in addr), retries,
                            dis_sta))
                else :
                    tx_sta = ord(data['status'])
                    if self.frame_id != ord(data['frame_id']):
                        self.log.error("TXResponse frame_id mismatch"
                                       "{}!={}".format(self.frame_id,
                                                       ord(data['frame_id'])))
                    tx_sta = tx_status[tx_sta]
                    self.log.info( 'TXResponse:{}'.format(tx_sta))
            except:
                traceback.print_exc()
                self.log.error(repr(data))
        elif data['id'] == 'remote_at_response':
            # Reply to an OnRmtAT query/set command.
            try:
                s = data['status']
                addr = data['source_addr']
                parameter = data['parameter']
                if self.frame_id != data['frame_id']:
                    self.log.error("Remote ATResponse frame_id mismatch")
                self.log.info('ATResponse:{} {}={} from {}'.format(
                    at_status[s], data['command'],
                    ':'.join('{:02x}'.format(ord(c)) for c in parameter),
                    addr))
            except:
                traceback.print_exc()
                self.log.error(repr(data))
        else:
            # Unknown frame type -- log it verbatim.
            self.log.info(repr(data))
if __name__ == '__main__':
    # Stand-alone entry point: create the wx application and show the
    # monitor window, then hand control to the wx event loop.
    app = wx.App(False)
    frame = MyFrame(None, wx.ID_ANY, 'Monitor Station', size=(650, 800))
    frame.Show(True)
    app.MainLoop()
| matthewzhenggong/fiwt | XbeeZBS2Test/CommandWiFi.py | Python | lgpl-3.0 | 60,244 | 0.005577 |
"""
External serialization for testing remote module loading.
"""
from tiddlyweb.serializations import SerializationInterface
class Serialization(SerializationInterface):
    """Debug serialization that prints whatever it is asked to handle.

    Each hook of the SerializationInterface simply echoes its input to
    stdout (Python 2 print statements), which is enough to confirm that
    a remotely-loaded serialization module is actually invoked.
    """
    def list_recipes(self, recipes):
        # Echo the recipe collection.
        print recipes
    def list_bags(self, bags):
        # Echo the bag collection.
        print bags
    def recipe_as(self, recipe):
        # Echo a recipe being serialized.
        print "r_as: %s" % recipe
    def as_recipe(self, recipe, input):
        # Echo raw input being deserialized into a recipe.
        print "as_r: %s" % input
    def bag_as(self, bag):
        # Echo a bag being serialized.
        print "b_as: %s" % bag
    def as_bag(self, bag, input):
        # Echo raw input being deserialized into a bag.
        print "as_b: %s" % input
    def tiddler_as(self, tiddler):
        # Echo a tiddler being serialized.
        print "t_as: %s" % tiddler
    def as_tiddler(self, tiddler, input):
        # Echo raw input being deserialized into a tiddler.
        print "as_t: %s" % input
| funkyeah/tiddlyweb | test/other/tiddlyweb/serializations/debug.py | Python | bsd-3-clause | 701 | 0.001427 |
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslotest import base as oslo_test
from sqlalchemy import Column
from sqlalchemy import Integer, String
from sqlalchemy.ext.declarative import declarative_base
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import test_base
BASE = declarative_base()
class ModelBaseTest(test_base.DbTestCase):
def setUp(self):
super(ModelBaseTest, self).setUp()
self.mb = models.ModelBase()
self.ekm = ExtraKeysModel()
def test_modelbase_has_dict_methods(self):
dict_methods = ('__getitem__',
'__setitem__',
'__contains__',
'get',
'update',
'save',
'items',
'iteritems',
'keys')
for method in dict_methods:
self.assertTrue(hasattr(models.ModelBase, method),
"Method %s() is not found" % method)
def test_modelbase_is_iterable(self):
self.assertTrue(issubclass(models.ModelBase, collections.Iterable))
def test_modelbase_set(self):
self.mb['world'] = 'hello'
self.assertEqual(self.mb['world'], 'hello')
def test_modelbase_update(self):
h = {'a': '1', 'b': '2'}
self.mb.update(h)
for key in h.keys():
self.assertEqual(self.mb[key], h[key])
def test_modelbase_contains(self):
mb = models.ModelBase()
h = {'a': '1', 'b': '2'}
mb.update(h)
for key in h.keys():
# Test 'in' syntax (instead of using .assertIn)
self.assertTrue(key in mb)
self.assertFalse('non-existent-key' in mb)
def test_modelbase_contains_exc(self):
class ErrorModel(models.ModelBase):
@property
def bug(self):
raise ValueError
model = ErrorModel()
model.update({'attr': 5})
self.assertTrue('attr' in model)
self.assertRaises(ValueError, lambda: 'bug' in model)
def test_modelbase_items_iteritems(self):
h = {'a': '1', 'b': '2'}
expected = {
'id': None,
'smth': None,
'name': 'NAME',
'a': '1',
'b': '2',
}
self.ekm.update(h)
self.assertEqual(dict(self.ekm.items()), expected)
self.assertEqual(dict(self.ekm.iteritems()), expected)
def test_modelbase_dict(self):
h = {'a': '1', 'b': '2'}
expected = {
'id': None,
'smth': None,
'name': 'NAME',
'a': '1',
'b': '2',
}
self.ekm.update(h)
self.assertEqual(dict(self.ekm), expected)
def test_modelbase_iter(self):
expected = {
'id': None,
'smth': None,
'name': 'NAME',
}
i = iter(self.ekm)
found_items = 0
while True:
r = next(i, None)
if r is None:
break
self.assertEqual(expected[r[0]], r[1])
found_items += 1
self.assertEqual(len(expected), found_items)
def test_modelbase_keys(self):
self.assertEqual(set(self.ekm.keys()),
set(('id', 'smth', 'name')))
self.ekm.update({'a': '1', 'b': '2'})
self.assertEqual(set(self.ekm.keys()),
set(('a', 'b', 'id', 'smth', 'name')))
def test_modelbase_several_iters(self):
mb = ExtraKeysModel()
it1 = iter(mb)
it2 = iter(mb)
self.assertFalse(it1 is it2)
self.assertEqual(dict(it1), dict(mb))
self.assertEqual(dict(it2), dict(mb))
def test_extra_keys_empty(self):
"""Test verifies that by default extra_keys return empty list."""
self.assertEqual(self.mb._extra_keys, [])
def test_extra_keys_defined(self):
    """Property _extra_keys will return list with attributes names."""
    self.assertEqual(self.ekm._extra_keys, ['name'])
def test_model_with_extra_keys(self):
    """dict(model) includes the properties listed in _extra_keys."""
    data = dict(self.ekm)
    self.assertEqual(data, {'smth': None,
                            'id': None,
                            'name': 'NAME'})
class ExtraKeysModel(BASE, models.ModelBase):
    """Test model exposing a non-column attribute through _extra_keys."""
    __tablename__ = 'test_model'

    id = Column(Integer, primary_key=True)
    smth = Column(String(255))

    @property
    def name(self):
        """Computed attribute exported via _extra_keys."""
        return 'NAME'

    @property
    def _extra_keys(self):
        """Names of non-column attributes to include in iteration."""
        return ['name']
class TimestampMixinTest(oslo_test.BaseTestCase):
    """Checks for models.TimestampMixin."""

    def test_timestampmixin_attr(self):
        """TimestampMixin must expose created_at and updated_at."""
        methods = ('created_at',
                   'updated_at')
        for method in methods:
            self.assertTrue(hasattr(models.TimestampMixin, method),
                            "Method %s() is not found" % method)
# --- end of oslo_db/tests/sqlalchemy/test_models.py (magic0704/oslo.db, Apache-2.0) ---
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework.test_framework import NavCoinTestFramework
from test_framework.util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
class WalletBackupTest(NavCoinTestFramework):
    """Exercise backupwallet/dumpwallet and both restore paths.

    See the module docstring for the full scenario description.
    """

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4

    # This mirrors how the network was setup in the bash test
    def setup_network(self, split=False):
        """Start four nodes: 0-2 are spenders, 3 is the miner/hub."""
        # nodes 1, 2, 3 are spenders, let's give them a keypool=100
        extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
        self.sync_all()

    def one_send(self, from_node, to_address):
        """With 50% probability, send a random 0.1-1.0 coin amount."""
        if (randint(1, 2) == 1):
            amount = Decimal(randint(1, 10)) / Decimal(10)
            self.nodes[from_node].sendtoaddress(to_address, amount)

    def do_one_round(self):
        """One round of random sends among nodes 0-2, then mine a block."""
        a0 = self.nodes[0].getnewaddress()
        a1 = self.nodes[1].getnewaddress()
        a2 = self.nodes[2].getnewaddress()
        self.one_send(0, a1)
        self.one_send(0, a2)
        self.one_send(1, a0)
        self.one_send(1, a2)
        self.one_send(2, a0)
        self.one_send(2, a1)
        # Have the miner (node3) mine a block.
        # Must sync mempools before mining.
        sync_mempools(self.nodes)
        self.nodes[3].generate(1)

    # As above, this mirrors the original bash test.
    def start_three(self):
        """Restart the three spender nodes and reconnect the topology."""
        self.nodes[0] = start_node(0, self.options.tmpdir)
        self.nodes[1] = start_node(1, self.options.tmpdir)
        self.nodes[2] = start_node(2, self.options.tmpdir)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)

    def stop_three(self):
        """Shut down the three spender nodes."""
        stop_node(self.nodes[0], 0)
        stop_node(self.nodes[1], 1)
        stop_node(self.nodes[2], 2)

    def erase_three(self):
        """Delete the spender nodes' wallet.dat files."""
        os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")

    def run_test(self):
        """Drive the full backup/restore scenario."""
        logging.info("Generating initial blockchain")
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        self.nodes[2].generate(1)
        sync_blocks(self.nodes)
        self.nodes[3].generate(100)
        sync_blocks(self.nodes)

        # Each spender mined one block (50 coins); the miner's 100 are immature.
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)

        logging.info("Creating transactions")
        # Five rounds of sending each other transactions.
        for i in range(5):
            self.do_one_round()

        logging.info("Backing up")
        tmpdir = self.options.tmpdir
        self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
        self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
        self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")

        logging.info("More transactions")
        for i in range(5):
            self.do_one_round()

        # Generate 101 more blocks, so any fees paid mature
        self.nodes[3].generate(101)
        self.sync_all()

        balance0 = self.nodes[0].getbalance()
        balance1 = self.nodes[1].getbalance()
        balance2 = self.nodes[2].getbalance()
        balance3 = self.nodes[3].getbalance()
        total = balance0 + balance1 + balance2 + balance3

        # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
        # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
        assert_equal(total, 5700)

        ##
        # Test restoring spender wallets from backups
        ##
        logging.info("Restoring using wallet.dat")
        self.stop_three()
        self.erase_three()

        # Start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        # Restore wallets from backup
        shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")

        logging.info("Re-starting nodes")
        self.start_three()
        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)

        logging.info("Restoring using dumped wallet")
        self.stop_three()
        self.erase_three()

        # start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        self.start_three()

        # Freshly created wallets know nothing about the old coins yet.
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)

        self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")

        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
    # Run the framework's main() driver when executed as a script.
    WalletBackupTest().main()
# --- end of qa/rpc-tests/walletbackup.py (navcoindev/navcoin-core, MIT) ---
import tensorflow as tf
import numpy as np
import baselines.common.tf_util as U
from tensorflow.python.ops import math_ops
class Pd(object):
    """
    A particular probability distribution
    """

    def flatparam(self):
        """Return the flat tensor of distribution parameters."""
        raise NotImplementedError

    def mode(self):
        """Return the most likely value under the distribution."""
        raise NotImplementedError

    def neglogp(self, x):
        # Usually it's easier to define the negative logprob
        raise NotImplementedError

    def kl(self, other):
        """KL divergence KL(self || other)."""
        raise NotImplementedError

    def entropy(self):
        raise NotImplementedError

    def sample(self):
        """Draw a stochastic sample tensor."""
        raise NotImplementedError

    def logp(self, x):
        """Log-probability of x, derived from neglogp."""
        return - self.neglogp(x)
class PdType(object):
    """
    Parametrized family of probability distributions
    """

    def pdclass(self):
        """Return the Pd subclass this family instantiates."""
        raise NotImplementedError

    def pdfromflat(self, flat):
        """Build a distribution instance from a flat parameter tensor."""
        return self.pdclass()(flat)

    def param_shape(self):
        """Shape of the flat parameter vector (per batch element)."""
        raise NotImplementedError

    def sample_shape(self):
        """Shape of a single sample (per batch element)."""
        raise NotImplementedError

    def sample_dtype(self):
        raise NotImplementedError

    def param_placeholder(self, prepend_shape, name=None):
        """Float32 placeholder for parameters, with leading batch dims."""
        return tf.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name)

    def sample_placeholder(self, prepend_shape, name=None):
        """Placeholder for samples, with leading batch dims."""
        return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name)
class CategoricalPdType(PdType):
    """Family of categorical distributions over `ncat` discrete outcomes."""

    def __init__(self, ncat):
        self.ncat = ncat  # number of categories

    def pdclass(self):
        return CategoricalPd

    def param_shape(self):
        # One logit per category.
        return [self.ncat]

    def sample_shape(self):
        # A sample is a scalar category index.
        return []

    def sample_dtype(self):
        return tf.int32
class MultiCategoricalPdType(PdType):
    """Vector of independent categoricals; component i spans [low[i], high[i]]."""

    def __init__(self, low, high):
        self.low = low
        self.high = high
        # Number of categories per component (both bounds inclusive).
        self.ncats = high - low + 1

    def pdclass(self):
        return MultiCategoricalPd

    def pdfromflat(self, flat):
        # Needs low/high as well, so override the default single-arg build.
        return MultiCategoricalPd(self.low, self.high, flat)

    def param_shape(self):
        return [sum(self.ncats)]

    def sample_shape(self):
        return [len(self.ncats)]

    def sample_dtype(self):
        return tf.int32
class DiagGaussianPdType(PdType):
    """Family of diagonal-covariance Gaussians over R^size."""

    def __init__(self, size):
        self.size = size

    def pdclass(self):
        return DiagGaussianPd

    def param_shape(self):
        # Mean and log-std vectors, concatenated.
        return [2*self.size]

    def sample_shape(self):
        return [self.size]

    def sample_dtype(self):
        return tf.float32
class BernoulliPdType(PdType):
    """Family of vectors of `size` independent Bernoulli variables."""

    def __init__(self, size):
        self.size = size

    def pdclass(self):
        return BernoulliPd

    def param_shape(self):
        # One logit per binary component.
        return [self.size]

    def sample_shape(self):
        return [self.size]

    def sample_dtype(self):
        return tf.int32
# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
# def __init__(self, logits):
# self.logits = logits
# self.ps = tf.nn.softmax(logits)
# @classmethod
# def fromflat(cls, flat):
# return cls(flat)
# def flatparam(self):
# return self.logits
# def mode(self):
# return U.argmax(self.logits, axis=-1)
# def logp(self, x):
# return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
# def kl(self, other):
# return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
# - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def entropy(self):
# return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def sample(self):
# u = tf.random_uniform(tf.shape(self.logits))
# return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
class CategoricalPd(Pd):
    """Categorical distribution parametrized by unnormalized logits."""

    def __init__(self, logits):
        self.logits = logits

    def flatparam(self):
        return self.logits

    def mode(self):
        return U.argmax(self.logits, axis=-1)

    def neglogp(self, x):
        # return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
        # Note: we can't use sparse_softmax_cross_entropy_with_logits because
        # the implementation does not allow second-order derivatives...
        one_hot_actions = tf.one_hot(x, self.logits.get_shape().as_list()[-1])
        return tf.nn.softmax_cross_entropy_with_logits(
            logits=self.logits,
            labels=one_hot_actions)

    def kl(self, other):
        # Shift both sets of logits by their max for numerical stability
        # (log-sum-exp trick), then compute sum p0 * log(p0/p1).
        a0 = self.logits - U.max(self.logits, axis=-1, keepdims=True)
        a1 = other.logits - U.max(other.logits, axis=-1, keepdims=True)
        ea0 = tf.exp(a0)
        ea1 = tf.exp(a1)
        z0 = U.sum(ea0, axis=-1, keepdims=True)
        z1 = U.sum(ea1, axis=-1, keepdims=True)
        p0 = ea0 / z0
        return U.sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=-1)

    def entropy(self):
        # Same stabilized softmax computation as kl().
        a0 = self.logits - U.max(self.logits, axis=-1, keepdims=True)
        ea0 = tf.exp(a0)
        z0 = U.sum(ea0, axis=-1, keepdims=True)
        p0 = ea0 / z0
        return U.sum(p0 * (tf.log(z0) - a0), axis=-1)

    def sample(self):
        # Gumbel-max trick: argmax(logits + Gumbel noise) ~ Categorical(logits).
        u = tf.random_uniform(tf.shape(self.logits))
        return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)

    @classmethod
    def fromflat(cls, flat):
        return cls(flat)
class MultiCategoricalPd(Pd):
    """Product of independent categoricals, each offset by `low`."""

    def __init__(self, low, high, flat):
        self.flat = flat
        self.low = tf.constant(low, dtype=tf.int32)
        # Split the flat logits into one CategoricalPd per component.
        self.categoricals = list(map(CategoricalPd, tf.split(flat, high - low + 1, axis=len(flat.get_shape()) - 1)))

    def flatparam(self):
        return self.flat

    def mode(self):
        return self.low + tf.cast(tf.stack([p.mode() for p in self.categoricals], axis=-1), tf.int32)

    def neglogp(self, x):
        # Components are independent, so the joint neglogp is a sum.
        return tf.add_n([p.neglogp(px) for p, px in zip(self.categoricals, tf.unstack(x - self.low, axis=len(x.get_shape()) - 1))])

    def kl(self, other):
        return tf.add_n([
            p.kl(q) for p, q in zip(self.categoricals, other.categoricals)
        ])

    def entropy(self):
        return tf.add_n([p.entropy() for p in self.categoricals])

    def sample(self):
        return self.low + tf.cast(tf.stack([p.sample() for p in self.categoricals], axis=-1), tf.int32)

    @classmethod
    def fromflat(cls, flat):
        # low/high cannot be recovered from `flat` alone.
        raise NotImplementedError
class DiagGaussianPd(Pd):
    """Gaussian with diagonal covariance; flat = [mean, log_std]."""

    def __init__(self, flat):
        self.flat = flat
        mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat)
        self.mean = mean
        self.logstd = logstd
        self.std = tf.exp(logstd)

    def flatparam(self):
        return self.flat

    def mode(self):
        # For a Gaussian the mode coincides with the mean.
        return self.mean

    def neglogp(self, x):
        return 0.5 * U.sum(tf.square((x - self.mean) / self.std), axis=-1) \
               + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
               + U.sum(self.logstd, axis=-1)

    def kl(self, other):
        # Closed-form KL between two diagonal Gaussians.
        assert isinstance(other, DiagGaussianPd)
        return U.sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=-1)

    def entropy(self):
        return U.sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)

    def sample(self):
        # Reparametrized sampling: mean + std * N(0, I).
        return self.mean + self.std * tf.random_normal(tf.shape(self.mean))

    @classmethod
    def fromflat(cls, flat):
        return cls(flat)
class BernoulliPd(Pd):
    """Vector of independent Bernoulli variables parametrized by logits."""

    def __init__(self, logits):
        self.logits = logits
        self.ps = tf.sigmoid(logits)  # per-component success probabilities

    def flatparam(self):
        return self.logits

    def mode(self):
        return tf.round(self.ps)

    def neglogp(self, x):
        return U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=tf.to_float(x)), axis=-1)

    def kl(self, other):
        # KL(p, q) = H(p, q) - H(p), both via the cross-entropy helper.
        return U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits, labels=self.ps), axis=-1) - U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1)

    def entropy(self):
        return U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1)

    def sample(self):
        # Inverse-CDF sampling: component i is 1 iff u_i < p_i.
        u = tf.random_uniform(tf.shape(self.ps))
        return tf.to_float(math_ops.less(u, self.ps))

    @classmethod
    def fromflat(cls, flat):
        return cls(flat)
def make_pdtype(ac_space):
    """Map a gym action space to the matching PdType family."""
    from gym import spaces
    if isinstance(ac_space, spaces.Box):
        assert len(ac_space.shape) == 1  # only flat continuous spaces supported
        return DiagGaussianPdType(ac_space.shape[0])
    elif isinstance(ac_space, spaces.Discrete):
        return CategoricalPdType(ac_space.n)
    elif isinstance(ac_space, spaces.MultiDiscrete):
        return MultiCategoricalPdType(ac_space.low, ac_space.high)
    elif isinstance(ac_space, spaces.MultiBinary):
        return BernoulliPdType(ac_space.n)
    else:
        raise NotImplementedError
def shape_el(v, i):
    """Return dimension i of tensor v: static if known, dynamic otherwise."""
    maybe = v.get_shape()[i]
    if maybe is not None:
        return maybe
    else:
        # Fall back to a runtime shape op when the dimension is unknown.
        return tf.shape(v)[i]
@U.in_session
def test_probtypes():
    """Smoke-test each PdType family through validate_probtype."""
    np.random.seed(0)

    pdparam_diag_gauss = np.array([-.2, .3, .4, -.5, .1, -.5, .1, 0.8])
    diag_gauss = DiagGaussianPdType(pdparam_diag_gauss.size // 2) #pylint: disable=E1101
    validate_probtype(diag_gauss, pdparam_diag_gauss)

    pdparam_categorical = np.array([-.2, .3, .5])
    categorical = CategoricalPdType(pdparam_categorical.size) #pylint: disable=E1101
    validate_probtype(categorical, pdparam_categorical)

    pdparam_bernoulli = np.array([-.2, .3, .5])
    bernoulli = BernoulliPdType(pdparam_bernoulli.size) #pylint: disable=E1101
    validate_probtype(bernoulli, pdparam_bernoulli)
def validate_probtype(probtype, pdparam):
    """Monte-Carlo consistency checks for one distribution family.

    Verifies that (1) the mean negative log-likelihood of samples matches
    the analytic entropy and (2) the analytic KL matches -H(p) - E_p[log q],
    both within three standard errors.
    """
    N = 100000
    # Check to see if mean negative log likelihood == differential entropy
    Mval = np.repeat(pdparam[None, :], N, axis=0)
    M = probtype.param_placeholder([N])
    X = probtype.sample_placeholder([N])
    pd = probtype.pdclass()(M)
    calcloglik = U.function([X, M], pd.logp(X))
    calcent = U.function([M], pd.entropy())
    Xval = U.eval(pd.sample(), feed_dict={M:Mval})
    logliks = calcloglik(Xval, Mval)
    entval_ll = - logliks.mean() #pylint: disable=E1101
    entval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101
    entval = calcent(Mval).mean() #pylint: disable=E1101
    assert np.abs(entval - entval_ll) < 3 * entval_ll_stderr # within 3 sigmas

    # Check to see if kldiv[p,q] = - ent[p] - E_p[log q]
    M2 = probtype.param_placeholder([N])
    pd2 = probtype.pdclass()(M2)
    # Perturb the parameters slightly to obtain a nearby distribution q.
    q = pdparam + np.random.randn(pdparam.size) * 0.1
    Mval2 = np.repeat(q[None, :], N, axis=0)
    calckl = U.function([M, M2], pd.kl(pd2))
    klval = calckl(Mval, Mval2).mean() #pylint: disable=E1101
    logliks = calcloglik(Xval, Mval2)
    klval_ll = - entval - logliks.mean() #pylint: disable=E1101
    klval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101
    assert np.abs(klval - klval_ll) < 3 * klval_ll_stderr # within 3 sigmas
# --- end of baselines/common/distributions.py (brain-research/mirage-rl-bpttv, MIT) ---
#!/usr/bin/env python3
# This file is part of CFDNSUpdater.
#
# CFDNSUpdater is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CFDNSUpdater is distributed in the hope that it will be useful,
# but WITHbuild ANY WARRANTY; withbuild even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CFDNSUpdater. If not, see <http://www.gnu.org/licenses/>.
import json
import argparse
import subprocess
import sys
import os
import uuid
import time
import shutil
from subprocess import Popen, PIPE
from errno import ENAMETOOLONG
def parseArguments(raw_args):
    """Parse the CLI for the two sub-commands: 'build' and 'deploydesc'."""
    parser = argparse.ArgumentParser(prog="build",
                                     description='Project Builder')
    subparsers = parser.add_subparsers(dest="function")

    # "build": compile the project and create packages.
    build_cmd = subparsers.add_parser('build', help='Build packages')
    build_cmd.add_argument('-project', '-p', required=True,
                           help="Github project", type=str)
    build_cmd.add_argument('-arch', '-a', required=True,
                           help='Architecture to build', type=str)
    build_cmd.add_argument('--branch', '-b', help='Git branch to build',
                           default="master", type=str)
    build_cmd.add_argument('-binname', '-bn', required=True,
                           help='binname', type=str)
    build_cmd.add_argument('--outputdirpath', '-o', help='Output directory',
                           required=True, type=str)

    # "deploydesc": only produce the Bintray deployment descriptor.
    desc_cmd = subparsers.add_parser('deploydesc',
                                     help='Create deployement descriptor')
    desc_cmd.add_argument('--branch', '-b', help='Git branch to build',
                          required=True, type=str)
    desc_cmd.add_argument('-binname', '-bn', required=True,
                          help='binname', type=str)
    desc_cmd.add_argument('-user', '-u', required=True,
                          help='User', type=str)
    desc_cmd.add_argument('-description', '-dc', required=True,
                          help='Package description', type=str)
    desc_cmd.add_argument('--outputdirpath', '-o', help='Output directory',
                          required=True, type=str)
    desc_cmd.add_argument('--licenses', '-li', help='Software licences',
                          default=[], type=str, action='append')
    desc_cmd.add_argument('--labels', '-la', help='Package labels',
                          action='append', default=[], type=str)

    return parser.parse_args(raw_args)
def generateTmpDir():
    """Create and return a fresh UUID-named directory under /tmp.

    Tries up to five random names and raises Exception if all collide.

    Returns:
        str: absolute path of the newly created directory.
    """
    tmp_root = os.path.join(os.path.abspath(os.sep), "tmp")
    for _ in range(5):
        candidate = os.path.join(tmp_root, str(uuid.uuid4()))
        try:
            # BUG FIX: the original checked os.path.exists() and then created
            # the directory with exist_ok=True, which is racy (TOCTOU) and
            # could silently reuse a directory created concurrently.
            # exist_ok=False makes creation atomic.
            os.makedirs(candidate, exist_ok=False)
            return candidate
        except FileExistsError:
            # Astronomically unlikely UUID collision; try another name.
            continue
    raise Exception("Unable to generate a tmp direcctory")
def generatePackage(build_dir_path,
                    package_type, package_name, version, arch, project):
    """Run fpm to package the staged tree in <build_dir_path>/packaging.

    Args:
        build_dir_path: directory holding the "packaging" tree; the package
            file is also written there.
        package_type: fpm output type, e.g. "deb" or "tar".
        package_name: name of the generated package.
        version: version string; slashes (from branch names) become "_".
        arch: target architecture label.
        project: GitHub "<user>/<repo>" used for the package URL.
    """
    process = subprocess.Popen(["fpm", "-t", package_type,
                                "-n", package_name,
                                "-p", build_dir_path,
                                "-a", arch,
                                "-f",
                                "--url", "https://www.github.com/{}".format(project),
                                "-v", version.replace("/", "_"),
                                "-C", os.path.join(build_dir_path, "packaging"),
                                "-s", "dir", "."], shell=False)
    process.communicate()
    if process.returncode != 0:
        # BUG FIX: the original called os.exit(), which does not exist and
        # would raise AttributeError; also the message wrongly said
        # "cloning" (copy-paste from build()).
        sys.exit("Error while packaging the project")
def build(build_dir_path, project, branch, arch, bin_name):
    """Clone <project>@<branch>, compile it with Go, and stage the result.

    Binaries land in <build_dir_path>/packaging/usr/local/bin and any
    "resources" subtrees of the checkout are copied next to them.

    Args:
        build_dir_path: empty staging directory (raises Exception otherwise).
        project: GitHub "<user>/<repo>" to clone.
        branch: git branch to check out.
        arch: GOARCH value to cross-compile for.
        bin_name: unused; kept for interface compatibility with callers.
    """
    if len(os.listdir(build_dir_path)) != 0:
        raise Exception("Build error: {} is not empty.".format(build_dir_path))

    go_dir_path = os.path.join(generateTmpDir(), "go")
    print("Go path is : {}".format(go_dir_path))
    src_dir_path = os.path.join(go_dir_path, 'src', "github.com", project)

    process = subprocess.Popen(["git", "clone", "-b", branch,
                                "https://github.com/{}".format(project),
                                src_dir_path], shell=False)
    process.communicate()
    if process.returncode != 0:
        # BUG FIX: os.exit() does not exist; use sys.exit() to abort cleanly.
        sys.exit("Error while cloning project")

    # Shared cross-compilation environment (CGO disabled for static builds).
    build_env = dict(os.environ,
                     GOARCH=arch,
                     GOPATH=go_dir_path,
                     CGO_ENABLED="0")

    process = subprocess.Popen(["go", "get", "-d", "./..."],
                               cwd=src_dir_path, shell=False, env=build_env)
    process.communicate()
    if process.returncode != 0:
        sys.exit("Error while getting dependencies project")

    process = subprocess.Popen(["go", "install", "./..."],
                               cwd=src_dir_path, shell=False, env=build_env)
    process.communicate()
    if process.returncode != 0:
        # BUG FIX: os.exit() does not exist; use sys.exit().
        sys.exit("Error while build the project")

    bin_dir_path = os.path.join(build_dir_path, "packaging",
                                "usr", "local", "bin")
    os.makedirs(bin_dir_path)
    # Copy every produced binary into the packaging tree.
    for dir_name, _, file_list in os.walk(os.path.join(go_dir_path, "bin")):
        for fname in file_list:
            shutil.copy2(os.path.join(dir_name, fname),
                         os.path.join(bin_dir_path, fname))

    # Ship any static resources alongside the binaries.
    if os.path.exists(os.path.join(src_dir_path, "resources")):
        for name in os.listdir(os.path.join(src_dir_path, "resources")):
            shutil.copytree(os.path.join(src_dir_path, "resources", name),
                            os.path.join(build_dir_path, "packaging", name))
def generateBintrayDescriptor(build_dir,
                              bin_name,
                              user,
                              desc,
                              version,
                              licenses=[],
                              labels=[]):
    """Write a Bintray deployment descriptor (bintray.desc) into build_dir.

    One upload rule is emitted per <distribution>/<architecture> directory
    pair found under build_dir.

    Note: the mutable default arguments are kept for interface
    compatibility; both lists are only read, never mutated.
    """
    github_addr = "https://github.com/{}/{}".format(user, bin_name)
    descriptor = {
        "package": {
            "name": bin_name,
            "repo": bin_name,
            "subject": user,
            "desc": desc,
            "website_url": github_addr,
            "issue_tracker_url": github_addr,
            "vcs_url": github_addr,
            "github_use_tag_release_notes": True,
            "licenses": licenses,
            "labels": labels,
            # BUG FIX: this key was misspelled "public_download_numebrs",
            # so Bintray silently ignored the setting.
            "public_download_numbers": False,
            "public_stats": False
        },
        "version": {
            "name": version,
            "desc": desc,
            "released": time.strftime("%Y-%m-%d"),
            "vcs_tag": version,
            "gpgSign": False
        },
        "files": [],
        "publish": True
    }
    for distrib in os.listdir(build_dir):
        if os.path.isdir(os.path.join(build_dir, distrib)):
            for arch in os.listdir(os.path.join(build_dir, distrib)):
                if os.path.isdir(os.path.join(build_dir, distrib, arch)):
                    descriptor["files"].append({
                        "includePattern": os.path.join(build_dir,
                                                       distrib,
                                                       arch,
                                                       r"(.*\.deb)"),
                        "uploadPattern": os.path.join(distrib, "$1"),
                        "matrixParams": {
                            "deb_distribution": distrib,
                            "deb_component": "main",
                            "deb_architecture": arch
                        }
                    })
    # BUG FIX: use a context manager so the descriptor file is always
    # closed, even if serialization raises.
    with open(os.path.join(build_dir, "bintray.desc"), 'w') as out:
        json.dump(descriptor, out, ensure_ascii=False, indent=2)
if __name__ == "__main__":
    args = parseArguments(sys.argv[1:])
    # Ensure the destination directory exists before any command runs.
    if not os.path.exists(args.outputdirpath):
        os.makedirs(args.outputdirpath, exist_ok=True)
    if args.function == "build":
        # Build the project, then package the result as .deb and .tar.
        build(args.outputdirpath,
              args.project, args.branch,
              args.arch, args.binname)
        generatePackage(args.outputdirpath, "deb", args.binname,
                        args.branch, args.arch, args.project)
        generatePackage(args.outputdirpath, "tar", args.binname,
                        args.branch, args.arch, args.project)
    else:
        # "deploydesc": only generate the Bintray upload descriptor.
        generateBintrayDescriptor(args.outputdirpath,
                                  args.binname,
                                  args.user,
                                  args.description,
                                  args.branch,
                                  args.licenses,
                                  args.labels)
# --- end of environments/build/build.py (aacebedo/cloudflarednsupdater, LGPL-3.0) ---
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import partner
from . import company
from . import res_config
from . import account
| acsone/account-invoicing | account_invoice_rounding/__init__.py | Python | agpl-3.0 | 1,027 | 0 |
"""Model configuration for pascal dataset"""
import numpy as np
from config import base_model_config
def voc_squeezeDet_config():
    """Specify the parameters to tune below."""
    mc = base_model_config('PASCAL_VOC')

    # Input geometry and batch size.
    mc.IMAGE_WIDTH = 320
    mc.IMAGE_HEIGHT = 240
    mc.BATCH_SIZE = 5

    # Optimizer hyper-parameters.
    mc.WEIGHT_DECAY = 0.0001
    mc.LEARNING_RATE = 0.001
    mc.DECAY_STEPS = 10000
    mc.MAX_GRAD_NORM = 1.0
    mc.MOMENTUM = 0.9
    mc.LR_DECAY_FACTOR = 0.5

    # Relative weights of the loss components.
    mc.LOSS_COEF_BBOX = 5.0
    mc.LOSS_COEF_CONF_POS = 75.0
    mc.LOSS_COEF_CONF_NEG = 100.0
    mc.LOSS_COEF_CLASS = 2.0

    # Detection / NMS thresholds (previous values kept in inline comments).
    mc.PLOT_PROB_THRESH = 0.7 #0.4
    mc.NMS_THRESH = 0.4
    mc.PROB_THRESH = 0.01 #0.005
    mc.TOP_N_DETECTION = 12 #64

    # Data augmentation: random image drift up to (DRIFT_X, DRIFT_Y) pixels.
    mc.DATA_AUGMENTATION = True
    mc.DRIFT_X = 150
    mc.DRIFT_Y = 100
    mc.EXCLUDE_HARD_EXAMPLES = False

    # Anchor grid derived from the image size (see set_anchors below).
    mc.ANCHOR_BOX = set_anchors(mc)
    mc.ANCHORS = len(mc.ANCHOR_BOX)
    mc.ANCHOR_PER_GRID = 9

    return mc
def set_anchors(mc):
    """Build the (H*W*B, 4) anchor grid of [cx, cy, w, h] boxes.

    The feature map is 14x19 cells with 9 anchor shapes per cell; centres
    are evenly spaced inside the image, excluding the border.
    """
    H, W, B = 14, 19, 9
    # Prototype anchor sizes (width, height) in pixels, B per grid cell.
    anchor_shapes = np.array(
        [[36., 37.], [366., 174.], [115., 59.],
         [162., 87.], [38., 90.], [258., 173.],
         [224., 108.], [78., 170.], [72., 43.]])

    # Cell centre coordinates along each axis.
    center_x = np.arange(1, W + 1) * float(mc.IMAGE_WIDTH) / (W + 1)
    center_y = np.arange(1, H + 1) * float(mc.IMAGE_HEIGHT) / (H + 1)

    # Broadcast the three ingredients into an (H, W, B, 4) grid.
    anchors = np.empty((H, W, B, 4))
    anchors[:, :, :, 0] = center_x[np.newaxis, :, np.newaxis]
    anchors[:, :, :, 1] = center_y[:, np.newaxis, np.newaxis]
    anchors[:, :, :, 2:] = anchor_shapes[np.newaxis, np.newaxis, :, :]
    return anchors.reshape(-1, 4)
# --- end of src/config/voc_squeezeDet_config.py (fyhtea/squeezeDet-hand, BSD-2-Clause) ---
import sys
sys.path.append('../..')
import web
from web.contrib.template import render_jinja
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from social.utils import setting_name
from social.apps.webpy_app.utils import psa, backends
from social.apps.webpy_app import app as social_app
import local_settings
# web.py debug middleware interferes with the auth redirects; keep it off.
web.config.debug = False

# python-social-auth settings: the user model and every enabled backend.
web.config[setting_name('USER_MODEL')] = 'models.User'
web.config[setting_name('AUTHENTICATION_BACKENDS')] = (
    'social.backends.open_id.OpenIdAuth',
    'social.backends.google.GoogleOpenId',
    'social.backends.google.GoogleOAuth2',
    'social.backends.google.GoogleOAuth',
    'social.backends.twitter.TwitterOAuth',
    'social.backends.yahoo.YahooOpenId',
    'social.backends.stripe.StripeOAuth2',
    'social.backends.persona.PersonaAuth',
    'social.backends.facebook.FacebookOAuth2',
    'social.backends.facebook.FacebookAppOAuth2',
    'social.backends.yahoo.YahooOAuth',
    'social.backends.angel.AngelOAuth2',
    'social.backends.behance.BehanceOAuth2',
    'social.backends.bitbucket.BitbucketOAuth',
    'social.backends.box.BoxOAuth2',
    'social.backends.linkedin.LinkedinOAuth',
    'social.backends.github.GithubOAuth2',
    'social.backends.foursquare.FoursquareOAuth2',
    'social.backends.instagram.InstagramOAuth2',
    'social.backends.live.LiveOAuth2',
    'social.backends.vk.VKOAuth2',
    'social.backends.dailymotion.DailymotionOAuth2',
    'social.backends.disqus.DisqusOAuth2',
    'social.backends.dropbox.DropboxOAuth',
    'social.backends.eveonline.EVEOnlineOAuth2',
    'social.backends.evernote.EvernoteSandboxOAuth',
    'social.backends.fitbit.FitbitOAuth2',
    'social.backends.flickr.FlickrOAuth',
    'social.backends.livejournal.LiveJournalOpenId',
    'social.backends.soundcloud.SoundcloudOAuth2',
    'social.backends.thisismyjam.ThisIsMyJamOAuth1',
    'social.backends.stocktwits.StocktwitsOAuth2',
    'social.backends.tripit.TripItOAuth',
    'social.backends.clef.ClefOAuth2',
    'social.backends.twilio.TwilioAuth',
    'social.backends.xing.XingOAuth',
    'social.backends.yandex.YandexOAuth2',
    'social.backends.podio.PodioOAuth2',
    'social.backends.mineid.MineIDOAuth2',
    'social.backends.wunderlist.WunderlistOAuth2',
    'social.backends.upwork.UpworkOAuth',
)
# Where to send the browser after a successful login.
web.config[setting_name('LOGIN_REDIRECT_URL')] = '/done/'
# URL routing: home page, post-login page, then the social-auth sub-app.
urls = (
    '^/$', 'main',
    '^/done/$', 'done',
    '', social_app.app_social
)

render = render_jinja('templates/')
class main(object):
    """Landing page listing the available login backends."""

    def GET(self):
        return render.home()
class done(social_app.BaseViewClass):
    """Post-login page showing the current user and linked backends."""

    def GET(self):
        user = self.get_current_user()
        return render.done(user=user, backends=backends(user))
engine = create_engine('sqlite:///test.db', echo=True)


def load_sqla(handler):
    """web.py processor wrapping each request in a SQLAlchemy session.

    Commits on web.HTTPError (web.py signals redirects via exceptions, so
    the work done should be kept), rolls back on any other exception, and
    always commits in the finally block to leave the scoped session clean.
    """
    web.ctx.orm = scoped_session(sessionmaker(bind=engine))
    try:
        return handler()
    except web.HTTPError:
        web.ctx.orm.commit()
        raise
    except:
        web.ctx.orm.rollback()
        raise
    finally:
        # NOTE(review): this commit also runs after the rollback branch
        # above — presumably intentional to reset the session; confirm.
        web.ctx.orm.commit()
        # web.ctx.orm.expunge_all()
Session = sessionmaker(bind=engine)
Session.configure(bind=engine)

app = web.application(urls, locals())
app.add_processor(load_sqla)

# Disk-backed web.py session, shared with the social-auth app module.
session = web.session.Session(app, web.session.DiskStore('sessions'))
web.db_session = Session()
web.web_session = session

if __name__ == "__main__":
    app.run()
# --- end of examples/webpy_example/app.py (fearlessspider/python-social-auth, BSD-3-Clause) ---
import os
import pkgutil
import re
import six
import yaml as ser
from collections import defaultdict
import insights
from insights.core import dr
# TODO: consider case insensitive and regex
# Registry mapping a component (or its alias) to the set of regex filter
# patterns that select which of its output lines are kept.
FILTERS = defaultdict(set)
def add_filter(name, patterns):
    """Register one pattern (str) or several (list/set) for *name*."""
    if isinstance(patterns, six.string_types):
        FILTERS[name].add(patterns)
    elif isinstance(patterns, (list, set)):
        FILTERS[name].update(patterns)
    else:
        raise TypeError("patterns must be string, list, or set.")
def get_filters(component):
    """Return the union of filters registered for *component* and its alias."""
    keys = [component]
    alias = dr.get_alias(component)
    if alias:
        keys.append(alias)
    result = set()
    for key in keys:
        # Membership test first: indexing the defaultdict directly would
        # create empty entries as a side effect.
        if key in FILTERS:
            result |= FILTERS[key]
    return result
def apply_filters(target, lines):
    """Return the lines matching at least one filter registered for *target*.

    A line is emitted once per matching pattern, preserving the original
    duplicate-on-multiple-matches behaviour.
    """
    return [line
            for line in lines
            for pattern in FILTERS[target]
            if re.search(pattern, line)]
# Serialization backend: filters persist as YAML next to the package.
_filename = ".".join(["filters", ser.__name__])
_dumps = ser.dump
_loads = ser.safe_load
def loads(string):
    """Loads the filters dictionary given a string."""
    for name, patterns in _loads(string).items():
        # Resolve component names back to objects where possible.
        FILTERS[dr.get_component(name) or name] = set(patterns)
def load(stream=None):
    """
    Loads filters from a stream, normally an open file. If one is
    not passed, filters are loaded from a default location within
    the project.
    """
    if not stream:
        data = pkgutil.get_data(insights.__name__, _filename)
        return loads(data) if data else None
    loads(stream.read())
def dumps():
    """Returns a string representation of the FILTERS dictionary."""
    serializable = {dr.get_name(component): list(patterns)
                    for component, patterns in FILTERS.items()}
    return _dumps(serializable)
def dump(stream=None):
    """
    Dumps a string representation of `FILTERS` to a stream, normally an
    open file. If none is passed, `FILTERS` is dumped to a default location
    within the project.
    """
    if stream:
        stream.write(dumps())
    else:
        path = os.path.join(os.path.dirname(insights.__file__), _filename)
        # BUG FIX: the original opened the file with mode "wu", which is
        # not a valid mode and raises ValueError. Open for plain text
        # writing, and use a context manager so the handle is closed
        # deterministically.
        with open(path, "w") as f:
            f.write(dumps())
# --- end of insights/core/filters.py (wcmitchell/insights-core, Apache-2.0) ---
from django.utils import simplejson
from django import template
from django.conf import settings
from ..models import DataLayer, TileLayer
from ..views import _urls_for_js
register = template.Library()
@register.inclusion_tag('leaflet_storage/css2.html')
def leaflet_storage_css():
    """Render the stylesheet includes needed by Leaflet.Storage."""
    return {
        "STATIC_URL": settings.STATIC_URL
    }
@register.inclusion_tag('leaflet_storage/js.html')
def leaflet_storage_js(locale=None):
    """Render the script includes for Leaflet.Storage.

    locale: optional locale code forwarded to the template so the
    matching translation bundle is loaded.
    """
    return {
        "STATIC_URL": settings.STATIC_URL,
        "locale": locale
    }
@register.inclusion_tag('leaflet_storage/map_fragment.html')
def map_fragment(map_instance, **kwargs):
    """Render an embedded, display-only map fragment for *map_instance*.

    Any keyword argument overrides the default fragment properties below.
    """
    layers = DataLayer.objects.filter(map=map_instance)
    datalayer_data = [c.metadata for c in layers]
    tilelayers = TileLayer.get_list()  # TODO: no need to all
    map_settings = map_instance.settings
    if not "properties" in map_settings:
        map_settings['properties'] = {}
    # Every interactive control is forced off: the fragment is read-only.
    map_settings['properties'].update({
        'tilelayers': tilelayers,
        'datalayers': datalayer_data,
        'urls': _urls_for_js(),
        'STATIC_URL': settings.STATIC_URL,
        "allowEdit": False,
        'hash': False,
        'attributionControl': False,
        'scrollWheelZoom': False,
        'datalayersControl': False,
        'zoomControl': False,
        'storageAttributionControl': False,
        'moreControl': False,
        'scaleControl': False,
        'miniMap': False,
        'storage_id': map_instance.pk,
        'onLoadPanel': "none",
        'captionBar': False,
        'default_iconUrl': "%sstorage/src/img/marker.png" % settings.STATIC_URL,
        'slideshow': {}
    })
    # Caller-supplied overrides win over the defaults above.
    map_settings['properties'].update(kwargs)
    return {
        "map_settings": simplejson.dumps(map_settings),
        "map": map_instance
    }
@register.simple_tag
def tilelayer_preview(tilelayer):
    """
    Return an <img> tag with a tile of the tilelayer.
    """
    # A fixed tile (z=9, x=265, y=181) on subdomain "a" serves as preview.
    src = tilelayer.url_template.format(s="a", z=9, x=265, y=181)
    return '<img src="{src}" alt="{alt}" title="{title}" />'.format(
        src=src, alt=tilelayer.name, title=tilelayer.name)
@register.filter
def notag(s):
    """Escape '<' so embedded markup renders as text instead of HTML tags.

    The previous body replaced '<' with itself (a no-op); the intended
    behavior of this filter is to neutralize tags via the '&lt;' entity.
    """
    return s.replace('<', '&lt;')
| daumann/chronas-application | umap/templates/leaflet_storage/leaflet_storage_tags.py | Python | mit | 2,204 | 0.000907 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
	"""Drop the obsolete mapper DocTypes and their backing tables."""
	mappers = ("DocType Mapper", "Table Mapper Detail", "Field Mapper Detail")
	for doctype in mappers:
		webnotes.conn.sql("""drop table if exists `tab%s`""" % doctype)
	for doctype in mappers:
		webnotes.delete_doc("DocType", doctype)
#!/usr/bin/python
import subprocess
import os
import time
import sys
import threading
import signal
from upload import ftp_open, upload
import gpio
from gpio import setup,mode,read,set,cleanup
# GPIO line numbers for the status LEDs and the record push-button.
# NOTE(review): assumed to match the board's wiring -- confirm on hardware.
led_red=91
led_amber=90
led_green=65
button_switch=95
def updateIndicators(stop_event):
    # Background-thread loop: poll the record button every 100 ms and drive
    # the red LED (LED on while the button reads low) until *stop_event*
    # is set by the main thread.
    blinker=0
    while not stop_event.wait(0.1):
        v=read(button_switch)
        #print "button-switch", v
        if v: ##disabled the blinker## and blinker<15:
            set(led_red,0)
        else:
            set(led_red,1)
        # blinker counts polls; the blink behavior itself is disabled above.
        blinker=blinker+1
        if blinker >= 20:
            blinker=0
    print 'updateIndicators thread has terminated.'
# Video capture device, taken from the first CLI argument (e.g. "video0").
csi1_video='/dev/' + str(sys.argv[1])
print 'ipu0_csi1 @', csi1_video
# count the restarts due to errors, this value affect the filename see sinkfile definition for details
restarts = 0
# index numbers in filename
idx_nums = 3
def touch(fname):
    """Bump *fname*'s access/modification times, creating it when absent."""
    try:
        os.utime(fname, None)
    except OSError:
        # File does not exist (or cannot be stat'ed): create it empty.
        with open(fname, 'a'):
            pass
def delold():
    """Delete stale recordings under the current directory.

    A file is stale when it is more than 1000 seconds older than the
    'trilobite' marker file, which josh() touches periodically while a
    recording is in progress.
    """
    try:
        index_fossil = os.path.getmtime("trilobite")
    except OSError:
        # Marker missing or unreadable: nothing to compare against, so do
        # nothing. (Previously a bare except with a redundant `pass`.)
        return
    for root, dirs, files in os.walk("."):
        for name in files:
            try:
                # NOTE(review): getmtime() is given the bare file name, so
                # only files in the current directory resolve; entries in
                # sub-directories raise and are skipped -- confirm intended.
                if index_fossil > (os.path.getmtime(name) + 1000):
                    os.remove(os.path.join(root, name))
            except OSError:
                pass
def button_status():
    """Yield (changed, value) pairs for the record push-button.

    *changed* is True when the sampled level differs from the previous
    poll (always True on the first poll). The generator never terminates
    on its own.
    """
    old_v = -1
    while True:
        # Use the named pin constant (95) instead of a magic number, for
        # consistency with the rest of the file; the dead initial
        # `changed=True` assignment was removed.
        v = read(button_switch)
        yield (old_v != v, v)
        old_v = v
# Single shared generator instance consumed by button() below.
b = button_status()
# v = next(b)
def button():
    # Poll the shared generator once; returns a (changed, value) tuple.
    v = next(b)
    #print 'button', v
    return v
# Recordings and the 'trilobite' marker live on the removable media mount.
os.chdir("/media/")
def getGstCmd():
    # Build the gst-launch-1.0 command line for the recorder.
    # The sink filename embeds a letter that cycles A..Z with the module
    # global `restarts`, plus a multifilesink index of `idx_nums` digits,
    # so restarted sessions never overwrite earlier chunks.
    myfile = "usbcam{0}.mkv"
    sinkfile = myfile.format( "{1}%0{0}d".format(idx_nums, chr(ord('A')+(restarts%26)) ) )
    print "This is my file format:", sinkfile
    # Roll over to the next file every 4 MiB.
    maxsize=4*1024*1024
    gstcmd_csi = (
        "gst-launch-1.0 -v -e "
        "imxv4l2videosrc capture-mode=1 device={2} ! "
        "imxvpuenc_h264 quant-param=20 ! "
        "h264parse ! matroskamux ! "
        "multifilesink location={0} next-file=4 "
        "max-file-size={1}".format(sinkfile,maxsize,csi1_video)
    )
    # Alternative USB-webcam pipeline; currently unused (CSI one is returned).
    gstcmd = (
        "gst-launch-1.0 -v -e "
        "v4l2src device={2} num-buffers=-1 ! "
        "videoconvert ! "
        "video/x-raw,format=I420,width=640,height=360,framerate=10/1 ! "
        "imxvpuenc_h264 quant-param=20 ! "
        "multifilesink post-messages=1 location={0} next-file=4 "
        "max-file-size={1}".format(sinkfile,maxsize,"/dev/video1")
    )
    print "cmd:", gstcmd_csi
    return gstcmd_csi
def main():
    # Blocking one-shot runner: execute the GStreamer pipeline and report
    # its exit status. Unused by the __main__ path (josh() is used instead).
    try:
        retval = subprocess.call(getGstCmd(), shell=True)
        if retval < 0:
            # Negative return codes mean the child died from a signal.
            print >>sys.stderr, "Child was terminated by signal", -retval
        else:
            print >>sys.stderr, "Child returned", retval
    except ValueError as e:
        print "execution failed:", e
    except OSError as e:
        print "OS error:", e
    except subprocess.CalledProcessError as e:
        print "Called process error:", e
    except KeyboardInterrupt:
        print "user interrupted with ctrl-C"
    except:
        print "error."
    finally:
        print "adios!"
def josh():
    # One full record session: wait for the button, spawn the GStreamer
    # pipeline, babysit it (pruning old files periodically), then stop it
    # cleanly with SIGINT so the muxer can finalize the file (the -e flag).
    event_counter=0
    # Wait until the button reports a steady pressed state (changed=False,
    # value=1).
    while button() != (False,1):
        time.sleep(0.5)
    touch("trilobite")
    try:
        gstproc = subprocess.Popen(getGstCmd(), shell=True)
    except ValueError as e:
        print "value error:", e
    except OSError as e:
        print "OS error:", e
    except subprocess.CalledProcessError as e:
        print "called process error:", e
    finally:
        print "Popen finished."
    # Poll once a second while the pipeline runs; a button release edge
    # (changed=True, value=0) stops the session.
    while gstproc.poll() is None:
        time.sleep(1)
        if button()==(True,0):
            break
        #print ".",
        event_counter = event_counter + 1
        if event_counter > 10:
            # Roughly every 11 s: prune stale files and refresh the marker.
            event_counter=0
            delold()
            touch("trilobite")
            #print "T"
    time.sleep(2)
    #gstproc.wait(5)
    ### when gstproc fails with returncode == 255, it has indicated the video source
    ### may be incorrect; instead of /dev/video0 (default) it could be /dev/video1, etc.
    print "gst process finished, rc=", gstproc.returncode
    #gstproc.kill() #terminate()
    # SIGINT (not kill) lets gst-launch -e emit EOS and close the file.
    os.kill(gstproc.pid, signal.SIGINT)
    print 'signal.SIGINT:', signal.SIGINT
if __name__ == "__main__":
    print "starting josh.py..."
    # LED/button handling runs on a daemon-style worker signalled via Event.
    pill2kill = threading.Event()
    ioThread = threading.Thread(target=updateIndicators, args=(pill2kill,))
    ioThread.start()
    while True:
        try:
            josh()
        except KeyboardInterrupt:
            pass
        # NOTE(review): this unconditional break makes the loop run exactly
        # once; the two "restart" lines below are unreachable dead code --
        # confirm whether auto-restart was intentionally disabled.
        break
        restarts = restarts + 1
        print "...restarting({0}) gst recorder...".format( restarts )
    # Stop the indicator thread and release the GPIO lines.
    pill2kill.set()
    cleanup(led_red)
    cleanup(button_switch)
    print "Gst Recording script has terminated."
| sobi-wan/helmet | recipes-example/helmet-rootfs/helmet-rootfs-1.0.0/home/root/josh.py | Python | mit | 4,911 | 0.038689 |
"""Remove rule_description from RuleSql table
Revision ID: d10d998b796b
Revises: 5f1470603fa0
Create Date: 2018-03-20 13:46:14.180715
"""
# revision identifiers, used by Alembic.
# This migration follows 5f1470603fa0 in the data_broker history.
revision = 'd10d998b796b'
down_revision = '5f1470603fa0'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade_<engine_name>() routine."""
    handler = globals()["upgrade_{0}".format(engine_name)]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade_<engine_name>() routine."""
    handler = globals()["downgrade_{0}".format(engine_name)]
    handler()
def upgrade_data_broker():
    # Forward migration: drop the rule_description column from rule_sql.
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('rule_sql', 'rule_description')
    ### end Alembic commands ###
def downgrade_data_broker():
    # Reverse migration: restore the column. Existing rows get the 'N/A'
    # server default because the original descriptions are not recoverable.
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('rule_sql', sa.Column('rule_description', sa.TEXT(), autoincrement=False, server_default='N/A', nullable=False))
    ### end Alembic commands ###
| fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/d10d998b796b_remove_rule_description_from_rulesql_.py | Python | cc0-1.0 | 914 | 0.009847 |
# -*- coding: UTF-8 -*-
import urwid
from urwid.util import (move_prev_char, move_next_char, calc_width,
calc_text_pos, is_wide_char)
from urwid.text_layout import CanNotDisplayText, TextLayout
from urwid.compat import bytes, PYTHON3, B
# Newline forms recognized in both unicode and byte strings.
ONECHAR_NEWLINES = (u'\n', b'\n', u'\r', b'\r')
TWOCHAR_NEWLINES = (u'\n\r', b'\n\r', u'\r\n', b'\r\n')

def find_newline(text, pos):
    """Return the index of the first newline character at or after *pos*.

    Works on unicode and byte strings alike; returns len(text) when no
    newline is found.
    """
    end = len(text)
    for idx in range(pos, end):
        # One-character slice so byte strings yield bytes, not ints (py3).
        if text[idx:idx + 1] in ONECHAR_NEWLINES:
            return idx
    return end
class CodeLayout(TextLayout):
    """A layout for Urwid that can deal with tabs.

    Only left alignment and space wrapping are supported. Tabs are
    rendered as padding segments that round the column up to the next
    multiple of ``tab_width``.
    """

    # Number of screen columns a tab stop occupies.
    tab_width = 8

    def supports_align_mode(self, align):
        """Return True if align is a supported align mode."""
        return align == urwid.LEFT

    def supports_wrap_mode(self, wrap):
        """Return True if wrap is a supported wrap mode."""
        return wrap == urwid.SPACE

    def layout(self, text, width, align, wrap):
        """Return a layout structure for text."""
        try:
            segs = self.calculate_text_segments(text, width, wrap)
            return self.align_layout(text, width, segs, wrap, align)
        except CanNotDisplayText:
            # A wide character that cannot fit: render nothing.
            return [[]]

    def calculate_text_segments(self, text, width, wrap):
        """
        Calculate the segments of text to display given width screen
        columns to display them.

        text - unicode text or byte string to display
        width - number of available screen columns
        wrap - wrapping mode used

        Returns a layout structure without alignment applied.
        """
        # TODO: This function is a horror and a mess, and really hard to
        # understand. It's based on urwids StandardLayout, which by itself
        # is overly complex, and I added tab handling, which made it worse.
        # It's a prime candidate for refacturing, making easier to understand
        # and as it is heavily used, profiling would be nice too.
        nl, nl_o, sp_o, tab_o = "\n", "\n", " ", "\t"
        if PYTHON3 and isinstance(text, bytes):
            nl = B(nl) # can only find bytes in python3 bytestrings
            nl_o = ord(nl_o) # + an item of a bytestring is the ordinal value
            sp_o = ord(sp_o)
            tab_o = ord(tab_o)
        b = []  # the resulting layout: one list of segments per screen row
        p = 0   # absolute offset of the current physical line in `text`
        if wrap == 'clip':
            # no wrapping to calculate, so it's easy.
            l = []
            while p <= len(text):
                n_cr = find_newline(text, p)
                if p != n_cr:
                    line = text[p:n_cr]
                    pt = 0
                    while pt < len(line):
                        n_tab = line.find(tab_o, pt)
                        if n_tab == -1:
                            end = len(line)
                        else:
                            end = n_tab
                        sc = calc_width(line, pt, end)
                        if sc != 0:
                            l.append((sc, p + pt, p + end))
                        if end == n_tab: # A tab was found
                            # Pad to the next multiple of tab_width.
                            extra_space = (self.tab_width - (
                                sc % self.tab_width))
                            l.append((extra_space, p + n_tab))
                        pt = end + 1
                l.append((0, n_cr))
                b.append(l)
                l = []
                if text[n_cr:n_cr+2] in TWOCHAR_NEWLINES:
                    # Two char newline:
                    p = n_cr + 2
                else:
                    p = n_cr + 1
            return b
        while p <= len(text):
            # look for next eligible line break
            n_cr = find_newline(text, p)
            line = text[p:n_cr]
            l = []   # segments accumulated for the current screen row
            pt = 0   # position within `line`
            lc = 0   # screen columns consumed so far on this row
            while pt < len(line):
                n_tab = line.find(tab_o, pt)
                if n_tab == -1:
                    end = len(line)
                else:
                    end = n_tab
                sc = calc_width(line, pt, end)
                if lc + sc <= width:
                    # this segment fits
                    if sc:
                        l.append((sc, p + pt, p + end))
                    if end == n_tab: # A tab was found
                        extra_space = self.tab_width - (sc % self.tab_width)
                        l.append((extra_space, p + n_tab))
                        lc += extra_space
                    else:
                        # removed character hint
                        l.append((0, p + end))
                    pt = end + 1
                    lc += sc
                    if lc >= width:
                        # The tab can sometimes push line length to width, and
                        # then we adjust the line length and make a new line.
                        overshoot = lc - width
                        spaces, pos = l[-1]
                        l[-1] = (spaces - overshoot, pos)
                        b.append(l)
                        l = []
                        lc = 0
                    continue
                # This segment does not fit. Let's fit it.
                pos, sc = calc_text_pos(line, pt, end, width - lc)
                if pos == pt: # pathological width=1 double-byte case
                    raise CanNotDisplayText(
                        "Wide character will not fit in 1-column width")
                if wrap == 'any':
                    l.append((sc, p + pt, p + pos))
                    l.append((0, p + pos))
                    b.append(l)
                    l = []
                    lc = 0
                    pt = pos
                    continue
                assert wrap == 'space'
                if line[pos] == sp_o:
                    # perfect space wrap
                    l.append((sc, p + pt, p + pos))
                    # removed character hint
                    l.append((0, p + pos))
                    b.append(l)
                    l = []
                    lc = 0
                    pt = pos + 1
                    continue
                if is_wide_char(line, pos):
                    # perfect next wide
                    l.append((sc, p + pt, p + pos))
                    b.append(l)
                    l = []
                    lc = 0
                    pt = pos
                    continue
                # Scan backwards for a space or wide char to break on.
                prev = pos
                while prev > pt:
                    prev = move_prev_char(line, pt, prev)
                    if line[prev] == sp_o:
                        sc = calc_width(line, pt, prev)
                        if prev != pt:
                            l.append((sc, p + pt, p + prev))
                        l.append((0, p + prev))
                        b.append(l)
                        l = []
                        lc = 0
                        pt = prev + 1
                        break
                    if is_wide_char(line, prev):
                        # wrap after wide char
                        nextc = move_next_char(line, prev, pos)
                        sc = calc_width(line, pt, nextc)
                        l.append((sc, p + pt, p + nextc))
                        b.append(l)
                        l = []
                        lc = 0
                        pt = nextc
                        break
                else:
                    if lc == 0:
                        # unwrap previous line space if possible to
                        # fit more text (we're breaking a word anyway)
                        if b and (len(b[-1]) == 2 or (len(b[-1]) == 1 and
                                                      len(b[-1][0]) == 2)):
                            # look for removed space above
                            if len(b[-1]) == 1:
                                [(h_sc, h_off)] = b[-1]
                                p_sc = 0
                                p_off = p_end = h_off
                            else:
                                [(p_sc, p_off, p_end),
                                 (h_sc, h_off)] = b[-1][-2:]
                            if (p_sc < width and h_sc == 0 and
                                    text[h_off] == sp_o):
                                # combine with previous line
                                old_line = b[-1][:-2]
                                del b[-1]
                                pt = p_off - p
                                pos, sc = calc_text_pos(
                                    line, pt, end, width)
                                old_line.append((sc, p + pt, p + pos))
                                b.append(old_line)
                                # check for trailing " " or "\n"
                                pt = pos
                                if pt < len(text) and (
                                        text[pt] in (sp_o, nl_o)):
                                    # removed character hint
                                    b[-1].append((0, p + pt))
                                    pt += 1
                                continue
                    # Break on previous tab, and try again.
                    if l:
                        b.append(l)
                        l = []
                        lc = 0
                        continue
                    # There is no space to break the line on, unwrapping the
                    # previous line doesn't help, I guess we just break on a
                    # character.
                    b.append([(sc, p + pt, p + pos)])
                    l = []
                    lc = 0
                    pt = pos
            # force any char wrap
            if l:
                b.append(l)
            elif not line:
                # An empty line.
                b.append([(0, n_cr)])
                pt = 1
            if text[pt-1:pt+1] in TWOCHAR_NEWLINES:
                # Two char newline:
                pt += 1
            p += pt
        return b

    def align_layout(self, text, width, segs, wrap, align):
        """Convert the layout segs to an aligned layout."""
        # Only LEFT is supported, so the segments are already aligned.
        assert align == urwid.LEFT
        return segs
| regebro/doctrine.urwid | doctrine/urwid/layout.py | Python | mit | 10,162 | 0.001181 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-23 21:51
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds raster/GDAL metadata columns to the
    # Capa (layer) model.

    dependencies = [
        ('layers', '0006_auto_20180811_1412'),
    ]

    operations = [
        migrations.AddField(
            model_name='capa',
            name='cantidad_de_bandas',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='capa',
            name='gdal_driver_longname',
            field=models.CharField(blank=True, default='', max_length=100, verbose_name='Driver - Long Name'),
        ),
        migrations.AddField(
            model_name='capa',
            name='gdal_driver_shortname',
            field=models.CharField(blank=True, default='', max_length=100, verbose_name='Driver - Short Name'),
        ),
        migrations.AddField(
            model_name='capa',
            name='gdal_metadata',
            # NOTE(review): default={} is a shared mutable default; Django
            # recommends a callable (default=dict) on the model field. The
            # migration mirrors the model as generated -- fix at the model.
            field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
        ),
        migrations.AddField(
            model_name='capa',
            name='proyeccion_proj4',
            field=models.CharField(blank=True, default='', max_length=255),
        ),
        migrations.AddField(
            model_name='capa',
            name='size_height',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='capa',
            name='size_width',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| pcecconi/mapground | layers/migrations/0007_auto_20180923_2151.py | Python | mit | 1,658 | 0.001206 |
# proxy module
from __future__ import absolute_import
from chaco.overlays.simple_inspector_overlay import *
| enthought/etsproxy | enthought/chaco/overlays/simple_inspector_overlay.py | Python | bsd-3-clause | 108 | 0 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import pipes
import random
import re
import stat
import tempfile
import time
from abc import ABCMeta, abstractmethod
from ansible.compat.six import binary_type, text_type, iteritems, with_metaclass
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.module_common import modify_module
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes, to_unicode
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionBase(with_metaclass(ABCMeta, object)):
'''
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
'''
    def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
        # Store the collaborators every action plugin needs; no I/O happens here.
        self._task = task  # the Task currently being executed
        self._connection = connection  # transport to the managed host
        self._play_context = play_context  # become/check-mode/connection settings
        self._loader = loader  # DataLoader for file access
        self._templar = templar  # Templar for variable templating
        self._shared_loader_obj = shared_loader_obj  # plugin loaders (modules, etc.)
        # Backwards compat: self._display isn't really needed, just import the global display and use that.
        self._display = display
        # Subclasses set this to False when they cannot honor --check.
        self._supports_check_mode = True
@abstractmethod
def run(self, tmp=None, task_vars=None):
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
:kwarg tmp: Temporary directory. Sometimes an action plugin sets up
a temporary directory and then calls another module. This parameter
allows us to reuse the same directory for both.
:kwarg task_vars: The variables (host vars, group vars, config vars,
etc) associated with this task.
:returns: dictionary of results from the module
Implementors of action modules may find the following variables especially useful:
* Module parameters. These are stored in self._task.args
"""
# store the module invocation details into the results
results = {}
if self._task.async == 0:
results['invocation'] = dict(
module_name = self._task.action,
module_args = self._task.args,
)
return results
    def _configure_module(self, module_name, module_args, task_vars=None):
        '''
        Handles the loading and templating of the module code through the
        modify_module() function.

        Returns a (module_style, module_shebang, module_data) tuple.
        Raises AnsibleError when the module cannot be located.
        '''
        if task_vars is None:
            task_vars = dict()

        # Search module path(s) for named module.
        for mod_type in self._connection.module_implementation_preferences:
            # Check to determine if PowerShell modules are supported, and apply
            # some fixes (hacks) to module name + args.
            if mod_type == '.ps1':
                # win_stat, win_file, and win_copy are not just like their
                # python counterparts but they are compatible enough for our
                # internal usage
                if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:
                    module_name = 'win_%s' % module_name

                # Remove extra quotes surrounding path parameters before sending to module.
                if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):
                    for key in ('src', 'dest', 'path'):
                        if key in module_args:
                            module_args[key] = self._connection._shell._unquote(module_args[key])

            module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)
            if module_path:
                break
        else:  # This is a for-else: http://bit.ly/1ElPkyg
            # Module was not found for any preferred suffix. Probe for the
            # ping module to distinguish "this module missing" from "core
            # modules missing entirely" (e.g. an uninitialized checkout).
            # Use Windows version of ping module to check module paths when
            # using a connection that supports .ps1 suffixes. We check specifically
            # for win_ping here, otherwise the code would look for ping.ps1
            if '.ps1' in self._connection.module_implementation_preferences:
                ping_module = 'win_ping'
            else:
                ping_module = 'ping'
            module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, self._connection.module_implementation_preferences)
            if module_path2 is not None:
                raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
            else:
                raise AnsibleError("The module %s was not found in configured module paths. "
                                   "Additionally, core modules are missing. If this is a checkout, "
                                   "run 'git submodule update --init --recursive' to correct this problem." % (module_name))

        # insert shared code and arguments into the module
        (module_data, module_style, module_shebang) = modify_module(module_path, module_args, task_vars=task_vars)

        return (module_style, module_shebang, module_data)
def _compute_environment_string(self):
'''
Builds the environment string to be used when executing the remote task.
'''
final_environment = dict()
if self._task.environment is not None:
environments = self._task.environment
if not isinstance(environments, list):
environments = [ environments ]
# the environments as inherited need to be reversed, to make
# sure we merge in the parent's values first so those in the
# block then task 'win' in precedence
environments.reverse()
for environment in environments:
if environment is None:
continue
temp_environment = self._templar.template(environment)
if not isinstance(temp_environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(temp_environment)
final_environment = self._templar.template(final_environment)
return self._connection._shell.env_prefix(**final_environment)
    def _early_needs_tmp_path(self):
        '''
        Determines if a temp path should be created before the action is executed.
        '''
        # Action plugins opt in by defining a TRANSFERS_FILES = True attribute.
        return getattr(self, 'TRANSFERS_FILES', False)
def _late_needs_tmp_path(self, tmp, module_style):
'''
Determines if a temp path is required after some early actions have already taken place.
'''
if tmp and "tmp" in tmp:
# tmp has already been created
return False
if not self._connection.has_pipelining or not self._play_context.pipelining or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become_method == 'su':
# tmp is necessary to store the module source code
# or we want to keep the files on the target system
return True
if module_style != "new":
# even when conn has pipelining, old style modules need tmp to store arguments
return True
return False
    def _make_tmp_path(self, remote_user):
        '''
        Create and return a temporary path on a remote box.

        Raises AnsibleConnectionFailure when the mkdtemp command fails and
        AnsibleError when the created path cannot be resolved.
        '''
        # Unique basename: wall-clock time plus a 48-bit random suffix.
        basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
        use_system_tmp = False

        # Becoming an unprivileged third user: the dir must live somewhere
        # that user can reach, so fall back to the system temp location.
        if self._play_context.become and self._play_context.become_user not in ('root', remote_user):
            use_system_tmp = True

        tmp_mode = 0o700

        cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
        result = self._low_level_execute_command(cmd, sudoable=False)

        # error handling on this seems a little aggressive?
        if result['rc'] != 0:
            if result['rc'] == 5:
                output = 'Authentication failure.'
            elif result['rc'] == 255 and self._connection.transport in ('ssh',):

                if self._play_context.verbosity > 3:
                    output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr'])
                else:
                    output = (u'SSH encountered an unknown error during the connection.'
                            ' We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')

            elif u'No space left on device' in result['stderr']:
                output = result['stderr']
            else:
                output = ('Authentication or permission failure.'
                        ' In some cases, you may have been able to authenticate and did not have permissions on the remote directory.'
                        ' Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp".'
                        ' Failed command was: %s, exited with result %d' % (cmd, result['rc']))
            if 'stdout' in result and result['stdout'] != u'':
                output = output + u": %s" % result['stdout']
            raise AnsibleConnectionFailure(output)

        try:
            # The last stdout line is the created directory path.
            rc = self._connection._shell.join_path(result['stdout'].strip(), u'').splitlines()[-1]
        except IndexError:
            # stdout was empty or just space, set to / to trigger error in next if
            rc = '/'

        # Catch failure conditions, files should never be
        # written to locations in /.
        if rc == '/':
            raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))

        return rc
def _remove_tmp_path(self, tmp_path):
'''Remove a temporary path we created. '''
if tmp_path and "-tmp-" in tmp_path:
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
self._low_level_execute_command(cmd, sudoable=False)
    def _transfer_file(self, local_path, remote_path):
        # Thin wrapper over the connection's put_file; returns the remote
        # path so callers can chain it.
        self._connection.put_file(local_path, remote_path)
        return remote_path
def _transfer_data(self, remote_path, data):
'''
Copies the module data out to the temporary module path.
'''
if isinstance(data, dict):
data = jsonify(data)
afd, afile = tempfile.mkstemp()
afo = os.fdopen(afd, 'w')
try:
data = to_bytes(data, errors='strict')
afo.write(data)
except Exception as e:
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e))
afo.flush()
afo.close()
try:
self._transfer_file(afile, remote_path)
finally:
os.unlink(afile)
return remote_path
    def _fixup_perms(self, remote_path, remote_user, execute=False, recursive=True):
        """
        If the become_user is unprivileged and different from the
        remote_user then we need to make the files we've uploaded readable by them.

        :arg execute: also grant execute permission (for module scripts)
        :arg recursive: apply permissions to the whole tree under remote_path
        :returns: remote_path (possibly unchanged) for chaining
        """
        if remote_path is None:
            # Sometimes code calls us naively -- it has a var which could
            # contain a path to a tmp dir but doesn't know if it needs to
            # exist or not.  If there's no path, then there's no need for us
            # to do work
            self._display.debug('_fixup_perms called with remote_path==None.  Sure this is correct?')
            return remote_path

        if self._play_context.become and self._play_context.become_user not in ('root', remote_user):
            # Unprivileged user that's different than the ssh user.  Let's get
            # to work!
            if remote_user == 'root':
                # SSh'ing as root, therefore we can chown
                self._remote_chown(remote_path, self._play_context.become_user, recursive=recursive)
                if execute:
                    # root can read things that don't have read bit but can't
                    # execute them.
                    self._remote_chmod('u+x', remote_path, recursive=recursive)
            else:
                if execute:
                    mode = 'rx'
                else:
                    # Capital X: execute only where already executable/dir.
                    mode = 'rX'
                # Try to use fs acls to solve this problem
                res = self._remote_set_user_facl(remote_path, self._play_context.become_user, mode, recursive=recursive, sudoable=False)
                if res['rc'] != 0:
                    if C.ALLOW_WORLD_READABLE_TMPFILES:
                        # fs acls failed -- do things this insecure way only
                        # if the user opted in in the config file
                        self._display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user which may be insecure. For information on securing this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
                        self._remote_chmod('a+%s' % mode, remote_path, recursive=recursive)
                    else:
                        raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user. For information on working around this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
        elif execute:
            # Can't depend on the file being transferred with execute
            # permissions.  Only need user perms because no become was
            # used here
            self._remote_chmod('u+x', remote_path, recursive=recursive)

        return remote_path
def _remote_chmod(self, mode, path, recursive=True, sudoable=False):
'''
Issue a remote chmod command
'''
cmd = self._connection._shell.chmod(mode, path, recursive=recursive)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_chown(self, path, user, group=None, recursive=True, sudoable=False):
'''
Issue a remote chown command
'''
cmd = self._connection._shell.chown(path, user, group, recursive=recursive)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_set_user_facl(self, path, user, mode, recursive=True, sudoable=False):
'''
Issue a remote call to setfacl
'''
cmd = self._connection._shell.set_user_facl(path, user, mode, recursive=recursive)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
    def _execute_remote_stat(self, path, all_vars, follow, tmp=None):
        '''
        Get information from remote file.

        Runs the `stat` module on the target and returns its 'stat' dict,
        normalizing the checksum field (see comments below).
        Raises AnsibleError when the stat module itself fails.
        '''
        module_args=dict(
           path=path,
           follow=follow,
           get_md5=False,
           get_checksum=True,
           checksum_algo='sha1',
        )
        mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars, tmp=tmp, delete_remote_tmp=(tmp is None))

        if 'failed' in mystat and mystat['failed']:
            raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, mystat['msg']))

        if not mystat['stat']['exists']:
            # empty might be matched, 1 should never match, also backwards compatible
            mystat['stat']['checksum'] = '1'

        # happens sometimes when it is a dir and not on bsd
        if not 'checksum' in mystat['stat']:
            mystat['stat']['checksum'] = ''

        return mystat['stat']
    def _remote_checksum(self, path, all_vars):
        '''
        Produces a remote checksum given a path,
        Returns a number 0-4 for specific errors instead of checksum, also ensures it is different
        0 = unknown error
        1 = file does not exist, this might not be an error
        2 = permissions issue
        3 = its a directory, not a file
        4 = stat module failed, likely due to not finding python
        '''
        x = "0" # unknown error has occured
        try:
            remote_stat = self._execute_remote_stat(path, all_vars, follow=False)
            if remote_stat['exists'] and remote_stat['isdir']:
                x = "3" # its a directory not a file
            else:
                x = remote_stat['checksum'] # if 1, file is missing
        except AnsibleError as e:
            errormsg = to_unicode(e)
            if errormsg.endswith('Permission denied'):
                x = "2" # cannot read file
            elif errormsg.endswith('MODULE FAILURE'):
                x = "4" # python not found or module uncaught exception
        finally:
            # Returning from `finally` deliberately swallows any exception
            # not handled above, mapping it to "0" (unknown error).
            return x
    def _remote_expand_user(self, path):
        ''' takes a remote path and performs tilde expansion on the remote host '''
        if not path.startswith('~'): # FIXME: Windows paths may start with "~ instead of just ~
            return path

        # FIXME: Can't use os.path.sep for Windows paths.
        split_path = path.split(os.path.sep, 1)
        expand_path = split_path[0]
        if expand_path == '~':
            # Bare "~": expand for the become user when one is in effect.
            if self._play_context.become and self._play_context.become_user:
                expand_path = '~%s' % self._play_context.become_user

        cmd = self._connection._shell.expand_user(expand_path)
        data = self._low_level_execute_command(cmd, sudoable=False)
        #initial_fragment = utils.last_non_blank_line(data['stdout'])
        initial_fragment = data['stdout'].strip().splitlines()[-1]

        if not initial_fragment:
            # Something went wrong trying to expand the path remotely.  Return
            # the original string
            return path

        if len(split_path) > 1:
            # Re-attach the remainder of the original path to the expansion.
            return self._connection._shell.join_path(initial_fragment, *split_path[1:])
        else:
            return initial_fragment
def _filter_leading_non_json_lines(self, data):
'''
Used to avoid random output from SSH at the top of JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
filter only leading lines since multiline JSON is valid.
'''
idx = 0
for line in data.splitlines(True):
if line.startswith((u'{', u'[')):
break
idx = idx + len(line)
return data[idx:]
def _strip_success_message(self, data):
'''
Removes the BECOME-SUCCESS message from the data.
'''
if data.strip().startswith('BECOME-SUCCESS-'):
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
return data
    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.

        :arg module_name: module to run; defaults to the task's action.
        :arg module_args: arguments for the module; defaults to the task's args.
        :arg tmp: existing remote temp dir, or None to create one on demand.
        :arg task_vars: variables in scope for this task.
        :arg persist_files: if True, never schedule the remote tmp dir for removal.
        :arg delete_remote_tmp: if False, leave the remote tmp dir in place.
        :returns: dict parsed from the module's JSON stdout; on parse failure,
            a dict with failed=True, parsed=False and msg="MODULE FAILURE".
        '''
        if task_vars is None:
            task_vars = dict()
        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args
        # set check mode in the module arguments, if required
        if self._play_context.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False
        # Get the connection user for permission checks
        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
        # set no log in the module arguments, if required
        module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG
        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG
        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._play_context.diff
        # let module know our verbosity
        module_args['_ansible_verbosity'] = self._display.verbosity
        # Build the final module payload; module_style tells us how arguments
        # must be delivered ('new' embeds them, 'old'/'non_native_want_json'
        # need a separate args file on the remote side).
        (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        if not shebang:
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)
        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path(remote_user)
        if tmp:
            remote_module_filename = self._connection._shell.get_remote_filename(module_name)
            remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
            if module_style in ['old', 'non_native_want_json']:
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')
        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote")
            self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k,v in iteritems(module_args):
                    args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style == 'non_native_want_json':
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")
        environment_string = self._compute_environment_string()
        # Fix permissions of the tmp path and tmp files. This should be
        # called after all files have been transferred.
        self._fixup_perms(tmp, remote_user, recursive=True)
        cmd = ""
        in_data = None
        # Pipelining feeds the module over stdin and skips the temp file
        # entirely; only possible for 'new'-style modules.
        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path
            rm_tmp = None
            if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
                if not self._play_context.become or self._play_context.become_user == 'root':
                    # not sudoing or sudoing to root, so can cleanup files in the same step
                    rm_tmp = tmp
            cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)
            cmd = cmd.strip()
        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False
        res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                cmd2 = self._connection._shell.remove(tmp, recurse=True)
                self._low_level_execute_command(cmd2, sudoable=False)
        try:
            data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', u'')))
        except ValueError:
            # not valid json, lets try to capture error
            data = dict(failed=True, parsed=False)
            data['msg'] = "MODULE FAILURE"
            data['module_stdout'] = res.get('stdout', u'')
            if 'stderr' in res:
                data['module_stderr'] = res['stderr']
                if res['stderr'].startswith(u'Traceback'):
                    data['exception'] = res['stderr']
        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()
        display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data
    def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='replace'):
        '''
        This is the function which executes the low level shell command, which
        may be commands to create/remove directories for temporary files, or to
        run the module code or python directly when pipelining.

        :kwarg encoding_errors: If the value returned by the command isn't
            utf-8 then we have to figure out how to transform it to unicode.
            If the value is just going to be displayed to the user (or
            discarded) then the default of 'replace' is fine.  If the data is
            used as a key or is going to be written back out to a file
            verbatim, then this won't work.  May have to use some sort of
            replacement strategy (python3 could use surrogateescape)
        :returns: dict with keys rc, stdout, stdout_lines and stderr; stdout
            and stderr are always text type.
        '''
        display.debug("_low_level_execute_command(): starting")
        if not cmd:
            # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
            display.debug("_low_level_execute_command(): no command, exiting")
            return dict(stdout='', stderr='')
        allow_same_user = C.BECOME_ALLOW_SAME_USER
        same_user = self._play_context.become_user == self._play_context.remote_user
        if sudoable and self._play_context.become and (allow_same_user or not same_user):
            # Wrap the command in the become (sudo/su/...) invocation.
            display.debug("_low_level_execute_command(): using become for this command")
            cmd = self._play_context.make_become_cmd(cmd, executable=executable)
        if self._connection.allow_executable:
            if executable is None:
                executable = self._play_context.executable
            cmd = executable + ' -c ' + pipes.quote(cmd)
        display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
        rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
        # stdout and stderr may be either a file-like or a bytes object.
        # Convert either one to a text type
        if isinstance(stdout, binary_type):
            out = to_unicode(stdout, errors=encoding_errors)
        elif not isinstance(stdout, text_type):
            out = to_unicode(b''.join(stdout.readlines()), errors=encoding_errors)
        else:
            out = stdout
        if isinstance(stderr, binary_type):
            err = to_unicode(stderr, errors=encoding_errors)
        elif not isinstance(stderr, text_type):
            err = to_unicode(b''.join(stderr.readlines()), errors=encoding_errors)
        else:
            err = stderr
        # Some connection plugins return None for rc on success; normalize.
        if rc is None:
            rc = 0
        # be sure to remove the BECOME-SUCCESS message now
        out = self._strip_success_message(out)
        display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr))
        return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)
def _get_first_available_file(self, faf, of=None, searchdir='files'):
display.deprecated("first_available_file, use with_first_found or lookup('first_found',...) instead")
for fn in faf:
fnt = self._templar.template(fn)
if self._task._role is not None:
lead = self._task._role._role_path
else:
lead = fnt
fnd = self._loader.path_dwim_relative(lead, searchdir, fnt)
if not os.path.exists(fnd) and of is not None:
if self._task._role is not None:
lead = self._task._role._role_path
else:
lead = of
fnd = self._loader.path_dwim_relative(lead, searchdir, of)
if os.path.exists(fnd):
return fnd
return None
def _get_diff_data(self, destination, source, task_vars, source_file=True):
diff = {}
display.debug("Going to peek to see if file has changed permissions")
peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), task_vars=task_vars, persist_files=True)
if not('failed' in peek_result and peek_result['failed']) or peek_result.get('rc', 0) == 0:
if peek_result['state'] == 'absent':
diff['before'] = ''
elif peek_result['appears_binary']:
diff['dst_binary'] = 1
elif C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Slurping the file %s" % source)
dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True)
if 'content' in dest_result:
dest_contents = dest_result['content']
if dest_result['encoding'] == 'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise AnsibleError("unknown encoding in content option, failed: %s" % dest_result)
diff['before_header'] = destination
diff['before'] = dest_contents
if source_file:
st = os.stat(source)
if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Reading local copy of the file %s" % source)
try:
src = open(source)
src_contents = src.read()
except Exception as e:
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
if "\x00" in src_contents:
diff['src_binary'] = 1
else:
diff['after_header'] = source
diff['after'] = src_contents
else:
display.debug("source of file passed in")
diff['after_header'] = 'dynamically generated'
diff['after'] = source
if self._play_context.no_log:
if 'before' in diff:
diff["before"] = ""
if 'after' in diff:
diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]"
return diff
| hfinucane/ansible | lib/ansible/plugins/action/__init__.py | Python | gpl-3.0 | 32,910 | 0.003981 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class FrontendConfig(AppConfig):
    """Configuration for frontend app."""
    # Dotted Python path to the app package; Django's app registry key.
    name = 'ariane.apps.frontend'
    # Human-readable name (e.g. shown in the admin), lazily translated.
    verbose_name = _("Frontend")
| DebVortex/ariane-old- | ariane/apps/frontend/apps.py | Python | bsd-3-clause | 235 | 0 |
# -*- coding:utf-8 -*-
"""
Version: 1.0
Author: zhangjian
Site: http://iliangqunru.com
File: __init__.py.py
Time: 2017/7/22 2:19
"""
| Xarrow/pySimulatedDNS | dnsCat/__init__.py | Python | apache-2.0 | 139 | 0 |
#!/usr/bin/env python3
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QPixmap, QMouseEvent
from PyQt5.QtWidgets import QToolButton
from PyQt5.QtCore import Qt, pyqtSignal
from models.components import *
class DraggableComponentButton(QToolButton):
    """Palette button for a component type that reports mouse presses.

    Emits ``mousePress`` with the button's component type and the raw mouse
    event so the owning view can start a drag with it.
    """
    mousePress = pyqtSignal(ComponentType, QMouseEvent, name='mousePress')
    def __init__(self, parent=None):
        QToolButton.__init__(self, parent)
        # Which component this button represents; set by the owner after
        # construction. NOTE(review): emitted while still None if the owner
        # forgets to assign it -- confirm callers always set it.
        self.componentType = None
    def mousePressEvent(self, event):
        # NOTE(review): this assigns a plain attribute named 'checked' rather
        # than calling QAbstractButton.setChecked(False) -- presumably meant
        # to clear the checked state; verify. The base-class handler is also
        # not invoked, so default press behavior is suppressed.
        self.checked = False
        self.mousePress.emit(self.componentType, event)
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various base layers for the colorization transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import operator
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.compat.v2.keras import layers
from coltran.utils import att_utils
from coltran.utils import base_utils
# pylint: disable=duplicate-string-formatting-argument
def residual_dropout(inputs, output, dropout, training):
  """Residual connection with dropout: inputs + dropout(output)."""
  if dropout and training:
    output = tf.nn.dropout(output, dropout)
  return inputs + output
class Shift(layers.Layer):
  """Shifts an input tensor either down or right to preserve causal ordering."""
  def __init__(self, dimension, resolution, **kwargs):
    """Init.
    Args:
      dimension: int, 0 to shift down, 1 to shift right.
      resolution: list of 2 ints, [H, W].
      **kwargs:
    """
    super(Shift, self).__init__(**kwargs)
    self.dimension = dimension
    self.resolution = resolution
  def call(self, x):
    shape = x.shape
    rank = len(shape)
    dim = self.dimension + 1
    # Assume 1 batch_dim.
    index = [0] * len(self.resolution)
    y = x
    paddings = np.zeros((rank, 2), dtype=np.int32)
    paddings[dim, 0] = 1
    # Pad one zero slice in front of the shift axis, then slice back to the
    # original shape starting at 0: this drops the last element along that
    # axis, producing the causal shift.
    y = tf.pad(y, paddings)
    rem_dims = rank - 1 - len(index[:dim])
    slice_inds = [0] + index[:dim] + [0] * rem_dims
    return tf.slice(y, slice_inds, shape)
class Cache(layers.Layer):
  """Keras layer for cacheing.
  Values are cached in a tensor of shape (B, canvas_shape, D).
  B and D are inferred from the inputs to the call method.
  Every call to the cache instance is assumed to be a tuple of (index, values).
  It updates the cache such that cache[:, index:, :] = values
  """
  def __init__(self, canvas_shape,
               num_batch_axes=1,
               dtype=tf.float32,
               **kwargs):
    super(Cache, self).__init__(trainable=False, **kwargs)
    self.canvas_shape = canvas_shape
    self.num_batch_axes = num_batch_axes
    self._dtype = dtype
  def build(self, input_shapes):
    # Cache shape = batch axes + canvas axes + feature axes, all inferred
    # from the shape of the value input.
    num_canvas_dim = len(self.canvas_shape)
    value, _ = input_shapes
    features_shape = value[self.num_batch_axes + num_canvas_dim:]
    cache_shape = (value[:self.num_batch_axes] + self.canvas_shape +
                   features_shape)
    self.cache = tf.zeros(shape=cache_shape, dtype=self._dtype)
    super(Cache, self).build(input_shapes)
  def reset(self):
    # Zero out the cache while keeping its shape/dtype.
    self.cache = tf.zeros(shape=self.cache.shape, dtype=self._dtype)
  def call(self, inputs):
    value, index = inputs
    # Fast path: a full-canvas value simply replaces the whole cache.
    if self.cache.shape == inputs[0].shape:
      self.cache = value
      return value
    shape = self.cache.shape.as_list()
    num_index_axes = index.shape[0]
    num_batch_axes = self.num_batch_axes
    num_feature_axes = len(shape) - num_index_axes - num_batch_axes
    features_shape = shape[num_batch_axes + num_index_axes:]
    batch_shape = shape[:num_batch_axes]
    value_index_shape = tf.shape(value)[num_batch_axes:-num_feature_axes]
    if tf.reduce_max(value_index_shape) > 1:
      # This is a block update starting at index.
      value_ranges = []
      for i, s in enumerate(tf.unstack(value_index_shape)):
        curr_range = tf.range(index[i], index[i] + s)
        value_ranges.append(curr_range)
      batch_ranges = [tf.range(s) for s in batch_shape]
      mesh = tf.meshgrid(*(batch_ranges + value_ranges), indexing='ij')
      indices = tf.stack(mesh, axis=-1)
      indices = tf.reshape(indices, [-1, num_index_axes + num_batch_axes])
    else:
      # This is a single update at index position.
      batch_ranges = [tf.range(s) for s in batch_shape]
      mesh = tf.meshgrid(*batch_ranges, indexing='ij')
      batch_indices = tf.stack(mesh, axis=-1)
      batch_indices = tf.reshape(batch_indices, [-1, num_batch_axes])
      # Add leading axes to nd-index and tile to get batched indices.
      shape_indices = tf.reshape(index, [1] * num_batch_axes + [-1])
      shape_indices = tf.tile(shape_indices, batch_shape + [1])
      shape_indices = tf.reshape(shape_indices, [-1, num_index_axes])
      indices = tf.concat([batch_indices, shape_indices], axis=-1)
    # We need to squeeze nd-axes from value before updating.
    value = tf.reshape(value, [-1] + features_shape)
    self.cache = tf.tensor_scatter_nd_update(self.cache, indices, value)
    return self.cache
class Masking(object):
  """Masking options for self-attention.
  We can either mask the entire future, i.e. allow looking into the past and
  the current element, or we can mask in addition the present as well, i.e.,
  we can look only to the past.
  """
  # Mask future positions only (current position remains visible).
  FUTURE = 'future'
  # Mask both future positions and the current position.
  FUTURE_PRESENT = 'future_present'
class PositionEmbed(layers.Layer):
  """Adds factorized positional embeddings for specified axes."""
  def __init__(self, axes, max_lengths=None, **kwargs):
    """Init.
    Args:
      axes: list of ints, axis over which to apply the positional embeddings.
      max_lengths: list of ints, maximum length over each axis.
      **kwargs:
    """
    super(PositionEmbed, self).__init__(**kwargs)
    # Accept a scalar axis/length for convenience; normalize to lists.
    if not isinstance(axes, (list, tuple)):
      axes = [axes]
    self.axes = axes
    self.max_lengths = None
    if max_lengths:
      if not isinstance(max_lengths, (list, tuple)):
        max_lengths = [max_lengths]
      self.max_lengths = max_lengths
  def build(self, input_shape):
    rank = len(input_shape)
    # Resolve negative axis indices, then keep them sorted.
    self.axes = sorted([rank + a if a < 0 else a for a in self.axes])
    self.max_lengths = self.max_lengths or [input_shape[a] for a in self.axes]
    self.embeddings = []
    for i, axis in enumerate(self.axes):
      # Embedding shape broadcasts over all axes except `axis` and channels.
      shape = [self.max_lengths[i]] + [1] * (rank - axis - 2)
      shape.append(input_shape[-1])
      init = tf.keras.initializers.RandomNormal(stddev=shape[-1]**-0.5)
      self.embeddings.append(
          self.add_weight(
              name='position_embedding_%d' % i,
              shape=shape,
              initializer=init,
              trainable=True))
    super(PositionEmbed, self).build(input_shape)
  def call(self, inputs):
    # Sum of per-axis embeddings, broadcast onto the input.
    out = inputs
    for e in self.embeddings:
      out += e
    return out
class DenseND(layers.Layer):
  """Maps a rank-m tensor to a rank-n tensor through a dense contraction."""
  def __init__(self,
               filters,
               contract_axes=1,
               use_bias=False,
               activation=None,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               **kwargs):
    super(DenseND, self).__init__(**kwargs)
    if isinstance(filters, int):
      filters = [filters]
    self.filters = tuple(filters)
    self.contract_axes = contract_axes
    self.use_bias = use_bias
    self.activation = tf.keras.activations.get(activation)
    self.bias_initializer = bias_initializer
    self._kernel_initializer = kernel_initializer
    # Behaviours differ when shape(weights) > 2.
    # see: https://github.com/tensorflow/tensorflow/blob/r2.1/tensorflow/python/ops/init_ops_v2.py#L733 pylint: disable=line-too-long
    if self._kernel_initializer == 'glorot_uniform_nd':
      self._kernel_initializer = self._glorot_uniform
  def _num_batch_axes(self, input_shape):
    """Returns number of batch axes in inputs."""
    return len(input_shape) - len(self.contract_shape)
  def _glorot_uniform(self, shape, dtype=tf.float32):
    """Glorot uniform initializer."""
    # fan_in/fan_out computed over the contracted and output axes so the
    # variance scaling matches a 2-D dense layer.
    fan_out = functools.reduce(operator.mul, self.filters)
    fan_in = functools.reduce(operator.mul, shape[:self.contract_axes])
    scale = 1. / max(1., (fan_in + fan_out) / 2.)
    limit = math.sqrt(3.0 * scale)
    return tf.random.uniform(shape, -limit, limit, dtype)
  def build(self, input_shape):
    # Infer matrix multiplication if no contract shape specified.
    self.contract_shape = input_shape[-self.contract_axes:]
    w_shape = self.contract_shape + self.filters
    self.w = self.add_weight(
        name='kernel',
        shape=w_shape,
        initializer=self._kernel_initializer,
        trainable=True)
    if self.use_bias:
      self.b = self.add_weight(
          name='bias', shape=self.filters, initializer=self.bias_initializer,
          trainable=True)
    super(DenseND, self).build(input_shape)
  def call(self, inputs):
    # Workaround lack of ellipsis support.
    # pyformat: disable
    # Build an einsum string: lowercase = batch axes, uppercase = contracted
    # axes, trailing lowercase = output filter axes.
    num_batch_axes = self._num_batch_axes(inputs.shape)
    batch_str = 'abcdefghij'[:num_batch_axes]
    contract_str = 'ABCDEFGHIJKLM'[:len(self.contract_shape)]
    output_str = 'nopqrstuvwxyz'[:len(self.filters)]
    # pyformat: enable
    einsum_str = '{}{},{}{}->{}{}'.format(batch_str, contract_str, contract_str,
                                          output_str, batch_str, output_str)
    result = tf.einsum(einsum_str, inputs, self.w)
    if self.use_bias:
      result += self.b
    if self.activation is not None:
      result = self.activation(result)
    return result
class RelativeAttentionBiasND(layers.Layer):
  """Relative attention bias in nd factorizes over dimensions."""
  def __init__(self, lengths, num_heads, **kwargs):
    self.num_heads = num_heads
    self.lengths = lengths
    super(RelativeAttentionBiasND, self).__init__(**kwargs)
  def build(self, input_shapes):
    self.biases = []
    self.total_length = 1
    for i, l in enumerate(self.lengths):
      self.total_length *= l
      if l > 1:
        # 2*l relative offsets per head for this axis.
        weight = self.add_weight(
            name='relative_attention_bias_%d' % i,
            shape=[self.num_heads, 2 * l],
            initializer=tf.keras.initializers.Zeros(), trainable=True)
      else:
        # Length-1 axes contribute no relative bias.
        weight = None
      self.biases.append(weight)
    super(RelativeAttentionBiasND, self).build(input_shapes)
  def call(self, inputs=None):
    # Returns a [len_q, num_heads, total_length] bias, summed over the
    # per-axis relative biases; each axis bias is tiled/reshaped so it
    # addresses the flattened nd positions.
    # NOTE(review): length-1 axes are skipped entirely (weight is None),
    # so they appear in neither the tiling nor the sum -- confirm callers
    # never instantiate this with all lengths == 1.
    tile, index, biases = 1, None, []
    len_q = self.total_length
    for i, s in enumerate(self.lengths):
      # Relative attention in every dimension separately.
      if s > 1:
        new_bias = att_utils.relative_attn_bias(
            self.biases[i], self.num_heads, index)
        repeat = self.total_length // (tile * s)
        if repeat > 1:
          new_bias = tf.expand_dims(new_bias, -1)
          new_bias = tf.tile(new_bias, [tile, repeat, tile, repeat])
          new_bias = tf.reshape(new_bias,
                                [len_q, self.num_heads, self.total_length])
        elif tile > 1:
          new_bias = tf.tile(new_bias, [tile, 1, tile])
        tile *= s
        biases.append(new_bias)
    return tf.add_n(biases)
class ConditionalLayerNorm(layers.Layer):
  """Conditional Layer Norm.
  Normalization of the input with the scale and shift as a function of 3-D
  context. Transforms 3-D spatial context into 1-D shift and scale of the
  layer-norm parameters. This is done via two dense projections:
  1. Spatial averaging via spatial_average='mean' or 'learnable'.
  2. Pointwise dense projection across channels.
  """
  def __init__(self,
               spatial_average='learnable',
               sequence='sc',
               out_init='glorot_uniform',
               out_act='identity', **kwargs):
    super(ConditionalLayerNorm, self).__init__(**kwargs)
    self.spatial_average = spatial_average
    # 'sc' = spatial projection then channel dense; 'cs' = the reverse.
    self.sequence = sequence
    self.out_init = out_init
    self.out_act = out_act
    self.out_act_func = base_utils.act_to_func(out_act)
    if self.spatial_average not in ['mean', 'learnable']:
      raise ValueError('Expected spatial average to be "mean" or "learnable" ,'
                       'got %s' % self.spatial_average)
    if self.sequence not in ['sc', 'cs']:
      raise ValueError('Expected sequence to be "sc" or "cs" ,'
                       'got %s' % self.sequence)
  def build(self, input_shape):
    x_shape = input_shape[0]
    height, width, features = x_shape[-3:]
    # The underlying layer norm is not trainable; its scale/shift come from
    # the conditioning input instead.
    self.layer_norm = layers.LayerNormalization(
        trainable=False, name='normalize')
    if self.spatial_average == 'learnable':
      self.spatial_weights = self.add_weight(
          name='spatial_average', shape=(1, height, width, 1),
          initializer=tf.keras.initializers.Ones())
    # Projects the context to 2*features: one half scale, one half shift.
    self.channel_dense = layers.Dense(
        units=2*features, kernel_initializer=self.out_init)
    super(ConditionalLayerNorm, self).build(input_shape)
  def spatial_projection(self, cond_inputs):
    # Weighted (or plain) spatial mean, keeping H and W as size-1 axes.
    if self.spatial_average == 'learnable':
      cond_inputs = self.spatial_weights * cond_inputs
    return tf.reduce_mean(cond_inputs, axis=(1, 2), keepdims=True)
  def call(self, inputs):
    inputs, cond_inputs = inputs
    if self.sequence == 'sc':
      ops = [self.spatial_projection, self.channel_dense]
    elif self.sequence == 'cs':
      ops = [self.channel_dense, self.spatial_projection]
    for op in ops:
      cond_inputs = op(cond_inputs)
    scale, shift = tf.split(cond_inputs, num_or_size_splits=2, axis=-1)
    scale = self.out_act_func(scale)
    shift = self.out_act_func(shift)
    inputs_norm = self.layer_norm(inputs)
    inputs_norm *= scale
    inputs_norm += shift
    return inputs_norm
class SelfAttentionND(layers.Layer):
  """Transforms input through a N-D self-attention layer.
  Assume key, query and memory tensors are N-D tensors.
  1. Project key, query and value tensors into (N+2)-D tensors using
     dense layers where the outer two dimensions are
     [num_heads, num_channels_per_head].
     num_channels_per_head is set to num_channels // num_heads by default.
  2. Computes self-attention tensor using 2 dot products.
     The first computes similarity between the key and query tensors.
     The second uses this similarity to perform a weighted average over
     the value tensors. Done in _dot_product and _weighted_sum.
  3. The default behaviour, i.e if nd_block is not set, is to do global
     self attention. If nd_block_set is set, the above self-attention is limited
     to a block-size of nd_block_size.
     For instance, in case of 2D inputs (images), setting nd_block_size to
     [1, num_columns] or [num_rows, 1] to limit attention to column
     and rows respectively.
  4. If mask=='future', zero out the contribution of the values that
     violate raster ordering. Done in _apply_mask_and_bias
     for more details.
  5. Project the transformed tensor into hidden_size number of channels
     using a dense layer.
  Self-attention can be optionally conditioned with an tuple of two values
  where the second argument is the conditional input. Supports:
  1. Biasing: By setting cond_q, cond_k or cond_v to be True.
  2. Scaling: By setting cond_scale to be True.
  """
  def __init__(self,
               hidden_size,
               num_heads=1,
               num_channels_per_head=None,
               mask=None,
               kernel_initializer='glorot_uniform',
               nd_block_size=None,
               resolution=None,
               cond_init='glorot_uniform',
               cond_k=False,
               cond_q=False,
               cond_v=False,
               cond_scale=False,
               cond_act='identity',
               **kwargs):
    super(SelfAttentionND, self).__init__(**kwargs)
    if nd_block_size:
      nd_block_size = list(nd_block_size)
    num_channels_per_head = num_channels_per_head or hidden_size // num_heads
    self.num_filters = [num_heads, num_channels_per_head]
    self.kernel_initializer = kernel_initializer
    self.hidden_size = hidden_size
    self.cond_k = cond_k
    self.cond_q = cond_q
    self.cond_v = cond_v
    self.cond_scale = cond_scale
    self.cond_init = cond_init
    self.cond_act_func = base_utils.act_to_func(cond_act)
    self.project_cond_q, self.project_cond_k, self.project_cond_v = None, None, None
    # With cond_scale, cond projections emit both a scale and a shift, so
    # they need twice the channels.
    self.cond_filters = self.num_filters
    if cond_scale:
      self.cond_filters = [num_heads, 2*num_channels_per_head]
    self.nd_block_size = nd_block_size
    self.resolution = resolution
    self.mask = mask
    self.num_channels_per_head = num_channels_per_head
    self.num_heads = num_heads
    self.hidden_size = hidden_size
    # By default, apply attention in third last dimension.
    # Last 2 dimensions are heads, channels.
    self.attention_dim_q = self.attention_dim_k = -3
    # Self attention type.
    self.is_block_attention = True if self.nd_block_size else False
  def get_num_filters(self, is_cond):
    if not is_cond:
      return self.num_filters
    num_heads, num_channels_per_head = self.num_filters
    return [num_heads, 2*num_channels_per_head]
  def cond_shift_and_scale(self, inputs, cond_inputs, is_cond, layer):
    # Applies the conditional projection as either scale+shift or bias only.
    if not is_cond:
      return inputs
    cond_out = layer(cond_inputs)
    if self.cond_scale:
      scale, shift = tf.split(cond_out, num_or_size_splits=2, axis=-1)
      scale = self.cond_act_func(scale)
      shift = self.cond_act_func(shift)
      inputs *= scale
      inputs += shift
    else:
      inputs += cond_out
    return inputs
  def build(self, input_shape):
    if not isinstance(input_shape[-1], int):
      input_shape = input_shape[0]
    lengths = self.nd_block_size or self.resolution or input_shape[1:-1]
    self.project_q = DenseND(
        self.num_filters, kernel_initializer=self.kernel_initializer, name='q')
    self.project_k = DenseND(
        self.num_filters, kernel_initializer=self.kernel_initializer, name='k')
    self.project_v = DenseND(
        self.num_filters, kernel_initializer=self.kernel_initializer, name='v')
    self.project_final = DenseND(
        self.hidden_size, kernel_initializer=self.kernel_initializer,
        contract_axes=2, name='output')
    self.relative_attention = RelativeAttentionBiasND(
        lengths, self.num_heads)
    self.relative_attention.build([])
    if self.cond_k:
      self.project_cond_k = DenseND(
          self.cond_filters, kernel_initializer=self.cond_init, name='cond_k')
    if self.cond_q:
      self.project_cond_q = DenseND(
          self.cond_filters, kernel_initializer=self.cond_init, name='cond_q')
    if self.cond_v:
      self.project_cond_v = DenseND(
          self.cond_filters, kernel_initializer=self.cond_init, name='cond_v')
    # Single-axis blocks (e.g. [1, W] rows or [H, 1] columns) can attend
    # in-place along that axis without reshaping into blocks.
    self.is_one_dim_attention = (
        self.is_block_attention and
        sum(s != 1 for s in self.nd_block_size) == 1)
    if self.is_one_dim_attention:
      max_dim = self.nd_block_size.index(max(self.nd_block_size))
      if self.nd_block_size[max_dim] == lengths[max_dim]:
        self.is_block_attention = False
        self.attention_dim_q = max_dim - len(self.nd_block_size) - 2
        self.attention_dim_k = self.attention_dim_q
      else:
        self.is_one_dim_attention = False
    if self.mask:
      # Additive mask: -1e6 on disallowed (future) positions, 0 elsewhere.
      total_length = functools.reduce(operator.mul, lengths, 1)
      self._mask = np.triu(np.ones([total_length, total_length], np.float32))
      if self.mask != Masking.FUTURE_PRESENT:
        self._mask *= (1.0 - np.eye(total_length))
      self._mask *= -1e6
      self._mask = tf.constant(
          np.reshape(self._mask, [total_length, 1, total_length]))
    super(SelfAttentionND, self).build(input_shape)
  def _apply_mask_and_bias(self, alphas):
    # Adds relative-position bias and (optionally) the causal mask to the
    # pre-softmax attention logits.
    bias = self.relative_attention(None)
    if self.mask:
      bias += self._mask
    expand_bias_dims = -self.attention_dim_q - 3
    if expand_bias_dims:
      bias = tf.reshape(bias, [-1] + [1] * expand_bias_dims +
                        list(bias.shape[1:]))
    return alphas + bias
  def _dot_product(self, q, k, contract_dim_q=-3, contract_dim_k=-3):
    # Query-key similarity over the channel axis via a generated einsum.
    num_batch_axes = len(q.shape) + contract_dim_q
    pre_str = 'abcdefghij' [:num_batch_axes]
    in_dim_q = -contract_dim_q - 2
    in_dim_k = -contract_dim_k - 2
    in_str_q = 'zyxwv' [:in_dim_q]
    in_str_k = 'zyxwv' [:in_dim_k]
    einsum_str = '{}Q{}C,{}M{}C->{}Q{}M'.format(pre_str, in_str_q, pre_str,
                                                in_str_k, pre_str, in_str_q)
    return tf.einsum(einsum_str, q, k)
  def _weighted_sum(self, alphas, v, contract_dim_a=-3, contract_dim_v=-3):
    # Weighted average of values using the softmax-ed attention weights.
    num_batch_axes = len(alphas.shape) + contract_dim_a
    pre_str = 'abcdefghij' [:num_batch_axes]
    in_dim_a = -contract_dim_a - 2
    in_dim_v = -contract_dim_v - 2
    in_str_a = 'zyxwv' [:in_dim_a]
    in_str_v = 'zyxwv' [:in_dim_v]
    einsum_str = '{}Q{}M,{}M{}C->{}Q{}C'.format(pre_str, in_str_a, pre_str,
                                                in_str_v, pre_str, in_str_a)
    return tf.einsum(einsum_str, alphas, v)
  def _prepare_block_attention(self, x):
    # Collapses each nd block into its own attention sequence.
    return att_utils.divide_nd_blocks(x, self.nd_block_size, collapse=True)
  def _prepare_full_attention(self, x):
    # Flattens all spatial axes into one sequence axis for global attention.
    return tf.reshape(x, [x.shape[0], -1, x.shape[-1]])
  def call(self, inputs):
    cond_inputs = memory = None
    cond_qkv = self.cond_v or self.cond_q or self.cond_k
    if cond_qkv:
      if tf.is_tensor(inputs) or len(inputs) != 2:
        raise ValueError('Expected tuple of (inputs, cond_inputs)')
      inputs, cond_inputs = inputs
    x = inputs
    if not self.is_one_dim_attention:
      # We flatten the index axes here. [B, ..., D] --> [B, M, D].
      if self.is_block_attention:
        x = self._prepare_block_attention(x)
      else:
        x = self._prepare_full_attention(x)
    memory = x
    q, k, v = self.project_q(x), self.project_k(memory), self.project_v(memory)
    q = self.cond_shift_and_scale(
        q, cond_inputs, self.cond_q, self.project_cond_q)
    k = self.cond_shift_and_scale(
        k, cond_inputs, self.cond_k, self.project_cond_k)
    v = self.cond_shift_and_scale(
        v, cond_inputs, self.cond_v, self.project_cond_v)
    # Scaled dot-product attention: scale queries by 1/sqrt(d).
    q *= q.shape[-1]**-0.5
    alphas = self._dot_product(q, k, self.attention_dim_q, self.attention_dim_k)
    alphas = self._apply_mask_and_bias(alphas)
    weights = tf.nn.softmax(alphas)
    output = self._weighted_sum(weights, v, self.attention_dim_q,
                                self.attention_dim_k)
    output = self.project_final(output)
    return output
class FactorizedAttention(layers.Layer):
  """Encodes image into 2-D spatial context with factorized attention layers."""
  def __init__(self, config, **kwargs):
    super(FactorizedAttention, self).__init__(**kwargs)
    self.config = config
    self.dropout = self.config.get('dropout', 0.0)
  def build(self, input_shapes):
    ff_size, hidden_size = self.config.ff_size, self.config.hidden_size
    num_heads = self.config.num_heads
    height, width = input_shapes[1:3]
    self.pos_embed = PositionEmbed(axes=[1, 2], max_lengths=[height, width])
    self.residual_layers = []
    # Each encoder layer contributes 4 residual sublayers (row attention,
    # row FF, column attention, column FF), each followed by a layer norm.
    num_norms = 4 * self.config.num_encoder_layers
    self.layer_norms = [layers.LayerNormalization() for _ in range(num_norms)]
    for _ in range(self.config.num_encoder_layers):
      # unmasked row
      unmask_row = SelfAttentionND(
          hidden_size=hidden_size, num_heads=num_heads,
          nd_block_size=[1, width], resolution=[height, width])
      ff_row = tf.keras.Sequential([
          layers.Dense(units=ff_size, activation='relu'),
          layers.Dense(units=hidden_size)
      ])
      # unmasked column,
      unmask_col = SelfAttentionND(
          hidden_size=hidden_size, num_heads=num_heads,
          nd_block_size=[height, 1], resolution=[height, width])
      ff_col = tf.keras.Sequential([
          layers.Dense(units=ff_size, activation='relu'),
          layers.Dense(units=hidden_size)
      ])
      self.residual_layers.append(unmask_row)
      self.residual_layers.append(ff_row)
      self.residual_layers.append(unmask_col)
      self.residual_layers.append(ff_col)
  def call(self, inputs, training=True):
    inputs = self.pos_embed(inputs)
    # Apply a stack of unmaked row and column attention layers.
    for layer, norm in zip(self.residual_layers, self.layer_norms):
      output = layer(inputs)
      output = residual_dropout(inputs, output, self.dropout, training)
      inputs = norm(output)
    return inputs
| google-research/google-research | coltran/models/layers.py | Python | apache-2.0 | 24,446 | 0.006054 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.