text | repo_name | path | language | license | size | score
stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34
---|---|---|---|---|---|---
from hortee.settings import *
| bne/hortee | hortee/production.py | Python | apache-2.0 | 31 | 0 |
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
from ..forms.settings.details import DetailsForm
class NewOrganizationView(TemplateView):
template_name = 'organizations/new_organization.html'
def get_context_data(self, **kwargs):
return {
'form': self.form,
}
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not request.user.is_superuser:
raise PermissionDenied(_('You do not have permission to create a new organization.'))
self.form = DetailsForm(request.POST or None, request.FILES or None)
return super(NewOrganizationView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if self.form.is_valid():
company = self.form.save()
messages.success(request, _('The organization has been saved.'))
return redirect(reverse('organizations:organization', args=[company.pk]))
return self.render_to_response(self.get_context_data())
| bgroff/kala-app | django_kala/organizations/views/new_organization.py | Python | mit | 1,353 | 0.002217 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier définissant la classe BaseNoeud détaillée plus bas."""
class BaseNoeud:
"""Classe représentant la base d'un noeud.
Cette classe est héritée par tous les autres types de noeuds.
"""
importeur = None
def __init__(self):
"""Constructeur du noeud de base"""
self.nom = ""
self.suivant = None
def valider(self, personnage, dic_masques, commande, tester_fils=True):
"""Validation du noeud.
Cette méthode est à redéfinir dans chacune des classes-filles créée.
Chaque type de noeud a sa propre méthode de validation.
Dans tous les cas, une booléen doit être retourné :
- True si le noeud a pu être interprété ;
- False sinon.
Note : pour la plupart des noeuds, la validation est aussi fonction
des fils.
"""
raise NotImplementedError
def _get_fils(self):
"""Retourne les fils du noeud sous la forme d'une liste."""
return [self.suivant]
fils = property(_get_fils)
def afficher(self, personnage=None):
"""Retourne un affichage du masque pour les joueurs."""
return ""
| stormi/tsunami | src/primaires/interpreteur/masque/noeuds/base_noeud.py | Python | bsd-3-clause | 2,736 | 0.000368 |
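Editorial illustration of the contract documented in BaseNoeud.valider above: the subclass below is hypothetical (it is not part of the stormi/tsunami sources) and only shows a node that matches a fixed literal, returning True when it can interpret the command and delegating the rest to its child node.

class NoeudLitteral(BaseNoeud):
    """Hypothetical node matching a fixed literal at the start of the command."""

    def __init__(self, litteral):
        BaseNoeud.__init__(self)
        self.nom = litteral

    def valider(self, personnage, dic_masques, commande, tester_fils=True):
        """Return True if the command starts with the literal, False otherwise."""
        if not commande.startswith(self.nom):
            return False
        reste = commande[len(self.nom):].lstrip()
        if tester_fils and self.suivant is not None:
            return self.suivant.valider(personnage, dic_masques, reste)
        return True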
from web.backend.utils import loadPresetsList, transition2isolver, getAddressesToRead
from graph.graph_utils import vanillaTransitions, vanillaBossesTransitions, vanillaEscapeTransitions, GraphUtils
from logic.logic import Logic
from utils.version import displayedVersion
from gluon.html import OPTGROUP
class Plando(object):
def __init__(self, session, request, cache):
self.session = session
self.request = request
self.cache = cache
# required for GraphUtils access to access points
Logic.factory('vanilla')
def run(self):
# init session
if self.session.plando is None:
self.session.plando = {
"state": {},
"preset": "regular",
"seed": None,
"startLocation": "Landing Site",
# rando params
"rando": {},
# set to False in plando.html
"firstTime": True
}
# load presets list
(stdPresets, tourPresets, comPresets) = loadPresetsList(self.cache)
# access points
vanillaAPs = []
for (src, dest) in vanillaTransitions:
vanillaAPs += [transition2isolver(src), transition2isolver(dest)]
vanillaBossesAPs = []
for (src, dest) in vanillaBossesTransitions:
vanillaBossesAPs += [transition2isolver(src), transition2isolver(dest)]
escapeAPs = []
for (src, dest) in vanillaEscapeTransitions:
escapeAPs += [transition2isolver(src), transition2isolver(dest)]
# generate list of addresses to read in the ROM
addresses = getAddressesToRead(plando=True)
startAPs = GraphUtils.getStartAccessPointNamesCategory()
startAPs = [OPTGROUP(_label="Standard", *startAPs["regular"]),
OPTGROUP(_label="Custom", *startAPs["custom"]),
OPTGROUP(_label="Custom (Area rando only)", *startAPs["area"])]
return dict(stdPresets=stdPresets, tourPresets=tourPresets, comPresets=comPresets,
vanillaAPs=vanillaAPs, vanillaBossesAPs=vanillaBossesAPs, escapeAPs=escapeAPs,
curSession=self.session.plando, addresses=addresses, startAPs=startAPs,
version=displayedVersion)
| theonlydude/RandomMetroidSolver | web/backend/plando.py | Python | mit | 2,305 | 0.003471 |
import logging
logger = logging.getLogger('sci-wms')
__version__ = '1.0.0'
| ayan-usgs/sci-wms | sciwms/__init__.py | Python | gpl-3.0 | 76 | 0 |
#!/usr/bin/env python3
"""
Copyright (c) 2015 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import asyncio
from pymata_aio.pymata_core import PymataCore
from pymata_aio.constants import Constants
# noinspection PyPep8
class RedBotAccel:
"""
This library is a direct port of: https://github.com/sparkfun/SparkFun_MMA8452Q_Arduino_Library/tree/V_1.1.0
Special Note: All reads have the Constants.I2C_END_TX_MASK bit set. Most devices do not need to do this, but it
is required for this chip.
"""
MMA8452Q_Register = {
'STATUS': 0x00,
'OUT_X_MSB': 0x01,
'OUT_Y_MSB': 0x03,
'OUT_Y_LSB': 0x04,
'OUT_Z_MSB': 0x05,
'OUT_Z_LSB': 0x06,
'SYSMOD': 0x0B,
'INT_SOURCE': 0x0C,
'WHO_AM_I': 0x0D,
'XYZ_DATA_CFG': 0x0E,
'HP_FILTER_CUTOFF': 0x0F,
'PL_STATUS': 0x10,
'PL_CFG': 0x11,
'PL_COUNT': 0x12,
'PL_BF_ZCOMP': 0x13,
'P_L_THS_REG': 0x14,
'FF_MT_CFG': 0x15,
'FF_MT_SRC': 0x16,
'FF_MT_THS': 0x17,
'FF_MT_COUNT': 0x18,
'TRANSIENT_CFG': 0x1D,
'TRANSIENT_SRC': 0x1E,
'TRANSIENT_THS': 0x1F,
'TRANSIENT_COUNT': 0x20,
'PULSE_CFG': 0x21,
'PULSE_SRC': 0x22,
'PULSE_THSX': 0x23,
'PULSE_THSY': 0x24,
'PULSE_THSZ': 0x25,
'PULSE_TMLT': 0x26,
'PULSE_LTCY': 0x27,
'PULSE_WIND': 0x28,
'ASLP_COUNT': 0x29,
'CTRL_REG1': 0x2A,
'CTRL_REG2': 0x2B,
'CTRL_REG3': 0x2C,
'CTRL_REG4': 0x2D,
'CTRL_REG5': 0x2E,
'OFF_X': 0x2F,
'OFF_Y': 0x30,
'OFF_Z': 0x31
}
def __init__(self, board, address, scale, output_data_rate):
"""
@param address: Address of the device
@param scale: scale factor
@param output_data_rate: output data rate
@return: no return value
"""
# portrait landscape status values
self.PORTRAIT_U = 0
self.PORTRAIT_D = 1
self.LANDSCAPE_R = 2
self.LANDSCAPE_L = 3
self.LOCKOUT = 0x40
# device id
self.device_id = 42
# device address
self.address = address
# scale factor (fsr)
self.scale = scale
# output data rate (odr)
self.output_data_rate = output_data_rate
# call backs for axis, portrait/landscape and tap results
self.axis = None
self.p_l = None
self.tap = None
# When a read is performed, data is returned through a callback to this structure.
# It should be cleared after data is consumed
self.callback_data = []
# beginning of data returned is located at position 2
# (position 0 is the device address, position 1 the register)
self.data_start = 2
self.board = board
async def start(self):
# configure firmata for i2c
await self.board.i2c_config()
# reset the device
register = self.MMA8452Q_Register['CTRL_REG2']
await self.board.i2c_write_request(self.address, [register, 0x40])
# verify the device by sending a WHO AM I command and checking the results
id_board = await self.check_who_am_i()
if not id_board:
print("Who am I fails")
await self.board.shutdown()
else:
# Correct device, continue with init
# Must be in standby to change registers
await self.standby()
# set up the scale register
await self.set_scale(self.scale)
# set the output data rate
await self.set_output_data_rate(self.output_data_rate)
# Set up portrait/landscape detection
await self.setup_portrait_landscape()
# Disable x, y, set z to 0.5g
await self.setup_tap(0x80, 0x80, 0x08)
# set device to active state
# self.board.sleep(.3)
await self.set_active()
async def data_val(self, data):
"""
This is the callback method used to save read results
@param data: Data returned from the device
@return: No return value
"""
self.callback_data = data
async def check_who_am_i(self):
"""
This method verifies the device ID.
@return: True if valid, False if not
"""
register = self.MMA8452Q_Register['WHO_AM_I']
await self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_ASYNCIO)
# await asyncio.sleep(1)
reply = await self.wait_for_read_result()
if reply[self.data_start] == self.device_id:
rval = True
else:
rval = False
return rval
async def standby(self):
"""
Put the device into standby mode so that the registers can be set.
@return: No return value
"""
register = self.MMA8452Q_Register['CTRL_REG1']
await self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_ASYNCIO)
ctrl1 = await self.wait_for_read_result()
ctrl1 = (ctrl1[self.data_start]) & ~0x01
self.callback_data = []
await self.board.i2c_write_request(self.address, [register, ctrl1])
async def set_scale(self, scale):
"""
Set the device scale register.
Device must be in standby before calling this function
@param scale: scale factor
@return: No return value
"""
register = self.MMA8452Q_Register['XYZ_DATA_CFG']
await self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_ASYNCIO)
config_reg = await self.wait_for_read_result()
config_reg = config_reg[self.data_start]
config_reg &= 0xFC # Mask out scale bits
config_reg |= (scale >> 2)
await self.board.i2c_write_request(self.address, [register, config_reg])
async def set_output_data_rate(self, output_data_rate):
"""
Set the device output data rate.
Device must be in standby before calling this function
@param output_data_rate: Desired data rate
@return: No return value.
"""
# self.standby()
register = self.MMA8452Q_Register['CTRL_REG1']
await self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_ASYNCIO)
control_reg = await self.wait_for_read_result()
control_reg = control_reg[self.data_start]
control_reg &= 0xC7 # Mask out data rate bits
control_reg |= (output_data_rate << 3)
await self.board.i2c_write_request(self.address, [register, control_reg])
async def setup_portrait_landscape(self):
"""
Setup the portrait/landscape registers
Device must be in standby before calling this function
@return: No return value
"""
register = self.MMA8452Q_Register['PL_CFG']
await self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_ASYNCIO)
control_reg = await self.wait_for_read_result()
control_reg = control_reg[self.data_start] | 0x40
# 1. Enable P/L
await self.board.i2c_write_request(self.address, [register, control_reg])
register = self.MMA8452Q_Register['PL_COUNT']
# 2. Set the de-bounce rate
await self.board.i2c_write_request(self.address, [register, 0x50])
async def read_portrait_landscape(self, callback=None):
"""
This function reads the portrait/landscape status register of the MMA8452Q.
It will return either PORTRAIT_U, PORTRAIT_D, LANDSCAPE_R, LANDSCAPE_L,
or LOCKOUT. LOCKOUT indicates that the sensor is in neither portrait nor landscape orientation.
:param callback: Callback function
:returns: See above.
"""
register = self.MMA8452Q_Register['PL_STATUS']
await self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_ASYNCIO)
pl_status = await self.wait_for_read_result()
pl_status = pl_status[self.data_start]
if pl_status & 0x40: # Z-tilt lockout
pl_status = self.LOCKOUT
else: # Otherwise return LAPO status
pl_status = (pl_status & 0x6) >> 1
if callback:
await callback(pl_status)
await asyncio.sleep(.001)
return pl_status
async def setup_tap(self, x_ths, y_ths, z_ths):
"""
This method sets the tap thresholds.
Device must be in standby before calling this function.
Set up single and double tap - 5 steps:
for more info check out this app note:
http://cache.freescale.com/files/sensors/doc/app_note/AN4072.pdf
Set the threshold - minimum required acceleration to cause a tap.
@param x_ths: x tap threshold
@param y_ths: y tap threshold
@param z_ths: z tap threshold
@return: No return value.
"""
temp = 0
if not (x_ths & 0x80): # If top bit ISN'T set
temp |= 0x3 # Enable taps on x
register = self.MMA8452Q_Register["PULSE_THSX"]
await self.board.i2c_write_request(self.address, [register, x_ths])
if not (y_ths & 0x80): # If top bit ISN'T set
temp |= 0x0C # Enable taps on y
register = self.MMA8452Q_Register["PULSE_THSY"]
await self.board.i2c_write_request(self.address, [register, y_ths])
if not (z_ths & 0x80): # If top bit ISN'T set
temp |= 0x30 # Enable taps on z
register = self.MMA8452Q_Register["PULSE_THSZ"]
await self.board.i2c_write_request(self.address, [register, z_ths])
# self.board.sleep(2)
# Set up single and/or double tap detection on each axis individually.
register = self.MMA8452Q_Register['PULSE_CFG']
await self.board.i2c_write_request(self.address, [register, temp | 0x40])
# Set the time limit - the maximum time that a tap can be above the threshold
register = self.MMA8452Q_Register['PULSE_TMLT']
# 30ms time limit at 800Hz odr
await self.board.i2c_write_request(self.address, [register, 0x30])
# Set the pulse latency - the minimum required time between pulses
register = self.MMA8452Q_Register['PULSE_LTCY']
await self.board.i2c_write_request(self.address, [register, 0xA0])
# Set the second pulse window - maximum allowed time between end of
# latency and start of second pulse
register = self.MMA8452Q_Register['PULSE_WIND']
await self.board.i2c_write_request(self.address, [register, 0xFF]) # 5. 318ms (max value) between taps max
async def read_tap(self, callback=None):
"""
This function returns any taps read by the MMA8452Q. If the function
returns 0, no new taps were detected. Otherwise the function will return the
lower 7 bits of the PULSE_SRC register.
:param callback: Callback function
:returns: 0 or lower 7 bits of the PULSE_SRC register.
"""
# self.board.sleep(1)
register = self.MMA8452Q_Register['PULSE_SRC']
await self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_ASYNCIO)
tap_status = await self.wait_for_read_result()
tap_status = tap_status[self.data_start]
if tap_status & 0x80:
tap_status &= 0x7f
else:
tap_status = 0
if callback:
await callback(tap_status)
await asyncio.sleep(.001)
return tap_status
async def set_active(self):
"""
This method sets the device to the active state
@return: No return value.
"""
register = self.MMA8452Q_Register['CTRL_REG1']
await self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_ASYNCIO)
control_reg = await self.wait_for_read_result()
control_reg = control_reg[self.data_start] | 0x01
await self.board.i2c_write_request(self.address, [register, control_reg])
async def available(self):
"""
This method checks to see if new xyz data is available
@return: Returns 0 if not available. 1 if it is available
"""
register = self.MMA8452Q_Register['STATUS']
await self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_ASYNCIO)
avail = await self.wait_for_read_result()
avail = (avail[self.data_start] & 0x08) >> 3
return avail
async def read(self, callback=None):
"""
The device returns an MSB and LSB (in that order) for each axis.
These are 12-bit values - that is, only the upper 4 bits of the LSB are used.
To make things more confusing, firmata returns each axis as 4 bytes, and reverses the order because
it looks at the world as lsb, msb order.
:param callback: Callback function
:returns: callback data is set with x,y,z raw (integers) followed by x,y,z corrected ( floating point)
Call available() first to make sure new data is really available.
"""
register = self.MMA8452Q_Register['OUT_X_MSB']
await self.board.i2c_read_request(self.address, register, 6,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_ASYNCIO)
# get x y z data
xyz = await self.wait_for_read_result()
await asyncio.sleep(.1)
# strip off the address and register bytes
xyz = xyz[2:]
xmsb = xyz[0]
xlsb = xyz[1]
ymsb = xyz[2]
ylsb = xyz[3]
zmsb = xyz[4]
zlsb = xyz[5]
xa = int((xmsb << 8) | xlsb) >> 4
if xmsb > 127:
xa = 4095 - xa
xa = ~xa + 1
ya = int(((ymsb << 8) | ylsb)) >> 4
if ymsb > 127:
ya = 4095 - ya
ya = ~ya + 1
za = int((zmsb << 8) | zlsb) >> 4
if zmsb > 127:
za = 4095 - za
za = ~za + 1
cx = xa / 2048 * self.scale
cy = ya / 2048 * self.scale
cz = za / 2048 * self.scale
if callback:
await callback([xa, ya, za, cx, cy, cz])
await asyncio.sleep(.001)
return [xa, ya, za, cx, cy, cz]
async def wait_for_read_result(self):
"""
This is a utility function to wait for the read callback to return data
@return: Returns resultant data from callback
"""
while not self.callback_data:
await asyncio.sleep(.001)
rval = self.callback_data
self.callback_data = []
return rval
if __name__ == "__main__":
my_board = PymataCore(2)
loop = asyncio.get_event_loop()
loop.run_until_complete(my_board.start_aio())
accel = RedBotAccel(my_board, 0x1d, 2, 0)
loop.run_until_complete(accel.start())
while True:
availb = loop.run_until_complete(accel.available())
if availb:
axis = loop.run_until_complete(accel.read())
x = axis[3]
y = axis[4]
z = axis[5]
tap = loop.run_until_complete(accel.read_tap())
if tap:
tap = 'TAPPED'
else:
tap = 'NO TAP'
port_land = loop.run_until_complete(accel.read_portrait_landscape())
if port_land == accel.LOCKOUT:
port_land = 'Flat '
elif port_land == 0:
port_land = 'Tilt Lf'
elif port_land == 1:
port_land = 'Tilt Rt'
elif port_land == 2:
port_land = 'Tilt Up'
else:
port_land = 'Tilt Dn'
# noinspection PyPep8
print('{0:.2f} {1:.2f} {2:.2f} {3} {4}'.format(x, y, z, port_land, tap))
loop.run_forever()
| MrYsLab/rb4s | redbot_accel.py | Python | gpl-3.0 | 18,018 | 0.003219 |
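Editorial illustration of the 12-bit data layout handled by RedBotAccel.read() above. The helper name below is mine, not part of the library; it shows the textbook two's-complement decode of one MSB/LSB pair (note that the library's own negative-value arithmetic above differs by one count, so this is the conventional reading rather than a drop-in replacement).

def decode_axis(msb, lsb, scale=2):
    """Decode one accelerometer axis from its MSB/LSB pair into (raw counts, g value)."""
    raw = ((msb << 8) | lsb) >> 4      # 12-bit value, left-justified in the 16-bit pair
    if raw >= 2048:                    # sign bit set -> negative reading
        raw -= 4096
    return raw, raw / 2048 * scale     # at the +/-2 g scale, 2048 counts correspond to 2 g

# Example: decode_axis(0xFF, 0xF0) returns (-1, -0.0009765625), i.e. roughly -0.001 g.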
"""taskburster URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import logout
from django.conf.urls.i18n import i18n_patterns
from .views import home, home_files
urlpatterns = [
url(r'^(?P<filename>(robots.txt)|(humans.txt))$',
home_files, name='home-files'),
url(r'^accounts/logout/$', logout, {'next_page': '/'}),
url(r'^accounts/', include('allauth.urls')),
]
urlpatterns += i18n_patterns(
url(r'^$', home, name='home'),
url(r'^admin/', include(admin.site.urls)),
)
| danhuynhdev/taskbuster | taskbuster/urls.py | Python | mit | 1,187 | 0 |
# Copyright (C) 2011 Marie E. Rognes
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2011-05-22
# Last changed: 2011-05-22
import sys
from instant import get_status_output
tests = ["verify_demo_code_snippets.py"]
failed = []
for test in tests:
command = "%s %s" % (sys.executable, test)
fail, output = get_status_output(command)
if fail:
failed.append(fail)
print "*** %s failed" % test
print output
else:
print "OK"
sys.exit(len(failed))
| maciekswat/dolfin_1.3.0 | test/documentation/test.py | Python | gpl-3.0 | 1,132 | 0 |
import unittest
from katas.kyu_6.compare_versions import compare_versions
class CompareVersionsTestCase(unittest.TestCase):
def test_true(self):
self.assertTrue(compare_versions('11', '10'))
def test_true_2(self):
self.assertTrue(compare_versions('11', '11'))
def test_true_3(self):
self.assertTrue(compare_versions('10.4.6', '10.4'))
def test_false(self):
self.assertFalse(compare_versions('10.4', '10.4.8'))
def test_false_2(self):
self.assertFalse(compare_versions('10.4', '11'))
def test_false_3(self):
self.assertFalse(compare_versions('10.4', '10.10'))
def test_false_4(self):
self.assertFalse(compare_versions('10.4.9', '10.5'))
| the-zebulan/CodeWars | tests/kyu_6_tests/test_compare_versions.py | Python | mit | 729 | 0 |
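Editorial sketch of an implementation consistent with the test cases above (the kata's actual solution is not part of this file): compare_versions returns True when the first version is greater than or equal to the second, comparing dot-separated fields numerically.

def compare_versions(first, second):
    """Return True if version string `first` is >= version string `second`."""
    return [int(part) for part in first.split('.')] >= [int(part) for part in second.split('.')]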
from django.db.models import CharField, Value
from django.db.models.functions import Length, LPad, RPad
from django.test import TestCase
from .models import Author
class PadTests(TestCase):
def test_pad(self):
Author.objects.create(name='John', alias='j')
tests = (
(LPad('name', 7, Value('xy')), 'xyxJohn'),
(RPad('name', 7, Value('xy')), 'Johnxyx'),
(LPad('name', 6, Value('x')), 'xxJohn'),
(RPad('name', 6, Value('x')), 'Johnxx'),
# The default pad string is a space.
(LPad('name', 6), ' John'),
(RPad('name', 6), 'John '),
# If string is longer than length it is truncated.
(LPad('name', 2), 'Jo'),
(RPad('name', 2), 'Jo'),
(LPad('name', 0), ''),
(RPad('name', 0), ''),
)
for function, padded_name in tests:
with self.subTest(function=function):
authors = Author.objects.annotate(padded_name=function)
self.assertQuerysetEqual(authors, [padded_name], lambda a: a.padded_name, ordered=False)
def test_pad_negative_length(self):
for function in (LPad, RPad):
with self.subTest(function=function):
with self.assertRaisesMessage(ValueError, "'length' must be greater or equal to 0."):
function('name', -1)
def test_combined_with_length(self):
Author.objects.create(name='Rhonda', alias='john_smith')
Author.objects.create(name='♥♣♠', alias='bytes')
authors = Author.objects.annotate(filled=LPad('name', Length('alias'), output_field=CharField()))
self.assertQuerysetEqual(
authors.order_by('alias'),
[' ♥♣♠', ' Rhonda'],
lambda a: a.filled,
)
| nesdis/djongo | tests/django_tests/tests/v21/tests/db_functions/test_pad.py | Python | agpl-3.0 | 1,830 | 0.00165 |
from six import iteritems, itervalues
from six.moves import range
from ..interfaces import ISnapshotable
from ..errors import AccessViolationError, InvalidResourceError
from ..util import align, sizeof_fmt, Flags
from ..snapshot import SnapshotNode
import enum
DEFAULT_MEMORY_SIZE = 0x1000000
# Types
from ctypes import c_byte as i8_t # NOQA
from ctypes import c_short as i16_t # NOQA
from ctypes import c_int as i32_t # NOQA
from ctypes import c_int64 as i64_t # NOQA
from ctypes import c_ubyte as u8_t # NOQA
from ctypes import c_ushort as u16_t # NOQA
from ctypes import c_uint as u32_t # NOQA
from ctypes import c_uint64 as u64_t # NOQA
WORD_SIZE = 4
SHORT_SIZE = 2
PAGE_SHIFT = 8
#: Size of memory page, in bytes.
PAGE_SIZE = (1 << PAGE_SHIFT)
PAGE_MASK = (~(PAGE_SIZE - 1))
MINIMAL_SIZE = 16
class MMOperationList(enum.IntEnum):
ALLOC = 3
FREE = 4
UNUSED = 5
MMAP = 6
UNMMAP = 7
from ..util import UINT8_FMT, UINT16_FMT, UINT32_FMT, UINT64_FMT # noqa
def SIZE_FMT(size):
return str(size)
def OFFSET_FMT(offset):
s = '-' if offset < 0 else ''
return '{}0x{:04X}'.format(s, abs(offset))
class MalformedBinaryError(Exception):
pass
def addr_to_page(addr):
return (addr & PAGE_MASK) >> PAGE_SHIFT
def addr_to_offset(addr):
return addr & (PAGE_SIZE - 1)
def area_to_pages(addr, size):
return ((addr & PAGE_MASK) >> PAGE_SHIFT, align(PAGE_SIZE, size) // PAGE_SIZE)
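# Worked example (editorial note, not part of the original module): with PAGE_SHIFT = 8 a
# page is 256 bytes, so address 0x1234 falls into page 0x12 at offset 0x34. Assuming that
# align(boundary, size) rounds size up to the next multiple of boundary, as its usage here
# suggests, a 600-byte area is rounded up to 768 bytes, i.e. 3 pages:
#
#   addr_to_page(0x1234)       == 0x12
#   addr_to_offset(0x1234)     == 0x34
#   area_to_pages(0x1234, 600) == (0x12, 3)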
class PageTableEntry(Flags):
_flags = ['read', 'write', 'execute', 'dirty']
_labels = 'RWXD'
READ = 0x01
WRITE = 0x02
EXECUTE = 0x04
DIRTY = 0x08
class MemoryPageState(SnapshotNode):
def __init__(self, *args, **kwargs):
super(MemoryPageState, self).__init__('index', 'content')
class MemoryPage(object):
"""
Base class for all memory pages of any kinds.
Memory page has a set of boolean flags that determine access to and behavior
of the page.
+-------------+-----------------------------------------------------------------------------+-----------+
| Flag | Meaning | Default |
+-------------+-----------------------------------------------------------------------------+-----------+
| ``read`` | page is readable by executed instructions | ``False`` |
+-------------+-----------------------------------------------------------------------------+-----------+
| ``write`` | page is writable by executed instructions | ``False`` |
+-------------+-----------------------------------------------------------------------------+-----------+
| ``execute`` | content of the page can be used as executable instructions | ``False`` |
+-------------+-----------------------------------------------------------------------------+-----------+
| ``dirty`` | there have been write access to this page, its content has changed | ``False`` |
+-------------+-----------------------------------------------------------------------------+-----------+
:param ducky.mm.MemoryController controller: Controller that owns this page.
:param int index: Serial number of this page.
"""
def __init__(self, controller, index):
super(MemoryPage, self).__init__()
self.controller = controller
self.index = index
self.DEBUG = self.controller.DEBUG
self.INFO = self.controller.INFO
self.WARN = self.controller.WARN
self.ERROR = self.controller.ERROR
self.EXCEPTION = self.controller.EXCEPTION
self.base_address = self.index * PAGE_SIZE
def __repr__(self):
return '<%s index=%i, base=%s>' % (self.__class__.__name__, self.index, UINT32_FMT(self.base_address))
def save_state(self, parent):
"""
Create state of this page, and attach it to snapshot tree.
:param parent: Parent snapshot node.
:type parent: ducky.snapshot.SnapshotNode
"""
state = parent.add_child('page_{}'.format(self.index), MemoryPageState())
state.index = self.index
state.content = [ord(i) if isinstance(i, str) else i for i in self.data]
return state
def load_state(self, state):
"""
Restore page from a snapshot.
"""
for i in range(0, PAGE_SIZE):
self.data[i] = state.content[i]
def __len__(self):
"""
:return: length of this page. By default, all pages have the same length.
:rtype: int
"""
return PAGE_SIZE
def clear(self):
"""
Clear page.
This operation is implemented by child classes.
"""
raise NotImplementedError('Not allowed to clear memory on this address: page={}'.format(self.index))
def read_u8(self, offset):
"""
Read byte.
This operation is implemented by child classes.
:param int offset: offset of requested byte.
:rtype: int
"""
raise NotImplementedError('Not allowed to access memory on this address: page={}, offset={}'.format(self.index, offset))
def read_u16(self, offset):
"""
Read word.
This operation is implemented by child classes.
:param int offset: offset of requested word.
:rtype: int
"""
raise NotImplementedError('Not allowed to access memory on this address: page={}, offset={}'.format(self.index, offset))
def read_u32(self, offset):
"""
Read longword.
This operation is implemented by child classes.
:param int offset: offset of requested longword.
:rtype: int
"""
raise NotImplementedError('Not allowed to access memory on this address: page={}, offset={}'.format(self.index, offset))
def write_u8(self, offset, value):
"""
Write byte.
This operation is implemented by child classes.
:param int offset: offset of requested byte.
:param int value: value to write into memory.
"""
raise NotImplementedError('Not allowed to access memory on this address: page={}, offset={}'.format(self.index, offset))
def write_u16(self, offset, value):
"""
Write word.
This operation is implemented by child classes.
:param int offset: offset of requested word.
:param int value: value to write into memory.
"""
raise NotImplementedError('Not allowed to access memory on this address: page={}, offset={}'.format(self.index, offset))
def write_u32(self, offset, value):
"""
Write longword.
This operation is implemented by child classes.
:param int offset: offset of requested longword.
:param int value: value to write into memory.
"""
raise NotImplementedError('Not allowed to access memory on this address: page={}, offset={}'.format(self.index, offset))
class AnonymousMemoryPage(MemoryPage):
"""
"Anonymous" memory page - this page is just a plain array of bytes, and is
not backed by any storage. Its content lives only in the memory.
Page is created with all bytes set to zero.
"""
def __init__(self, controller, index):
super(AnonymousMemoryPage, self).__init__(controller, index)
self.data = bytearray([0 for _ in range(0, PAGE_SIZE)])
def clear(self):
self.DEBUG('%s.clear', self.__class__.__name__)
for i in range(0, PAGE_SIZE):
self.data[i] = 0
def read_u8(self, offset):
self.DEBUG('%s.read_u8: page=%s, offset=%s', self.__class__.__name__, self.index, offset)
return self.data[offset]
def read_u16(self, offset):
self.DEBUG('%s.read_u16: page=%s, offset=%s', self.__class__.__name__, self.index, offset)
return self.data[offset] | (self.data[offset + 1] << 8)
def read_u32(self, offset):
self.DEBUG('%s.do_read_u32: page=%s, offset=%s', self.__class__.__name__, self.index, offset)
return self.data[offset] | (self.data[offset + 1] << 8) | (self.data[offset + 2] << 16) | (self.data[offset + 3] << 24)
def write_u8(self, offset, value):
self.DEBUG('%s.do_write_u8: page=%s, offset=%s, value=%s', self.__class__.__name__, self.index, offset, value)
self.data[offset] = value
def write_u16(self, offset, value):
self.DEBUG('%s.write_u16: page=%s, offset=%s, value=%s', self.__class__.__name__, self.index, offset, value)
self.data[offset] = value & 0x00FF
self.data[offset + 1] = (value & 0xFF00) >> 8
def write_u32(self, offset, value):
self.DEBUG('%s.write_u32: page=%s, offset=%s, value=%s', self.__class__.__name__, self.index, offset, value)
self.data[offset] = value & 0xFF
self.data[offset + 1] = (value & 0xFF00) >> 8
self.data[offset + 2] = (value & 0xFF0000) >> 16
self.data[offset + 3] = (value & 0xFF000000) >> 24
class VirtualMemoryPage(MemoryPage):
"""
Memory page without any real storage backend.
"""
def __repr__(self):
return '<%s index=%i, base=%s>' % (self.__class__.__name__, self.index, UINT32_FMT(self.base_address))
def save_state(self, parent):
return
class ExternalMemoryPage(MemoryPage):
"""
Memory page backed by an external source. Source is an array of bytes,
and can be provided by device driver, mmaped file, or by any other mean.
"""
def __init__(self, controller, index, data, offset = 0):
super(ExternalMemoryPage, self).__init__(controller, index)
self.data = data
self.offset = offset
def __repr__(self):
return '<%s index=%i, base=%s, offset=%s>' % (self.__class__.__name__, self.index, UINT32_FMT(self.base_address), UINT32_FMT(self.offset))
def save_state(self, parent):
state = super(ExternalMemoryPage, self).save_state(parent)
if self.data:
state.content = [ord(i) if isinstance(i, str) else i for i in self.data[self.offset:self.offset + PAGE_SIZE]]
else:
state.content = []
def clear(self):
self.DEBUG('%s.clear', self.__class__.__name__)
for i in range(0, PAGE_SIZE):
self.data[i] = 0
def get(self, offset):
"""
Get one byte from page. Override this method in case you need a different
offset of requested byte.
:param int offset: offset of the requested byte.
:rtype: int
:returns: byte at position in page.
"""
return self.data[self.offset + offset]
def put(self, offset, b):
"""
Put one byte into page. Override this method in case you need a different
offset of requested byte.
:param int offset: offset of modified byte.
:param int b: new value.
"""
self.data[self.offset + offset] = b
def read_u8(self, offset):
self.DEBUG('%s.read_u8: page=%s, offset=%s', self.__class__.__name__, self.index, offset)
return self.get(offset)
def read_u16(self, offset):
self.DEBUG('%s.read_u16: page=%s, offset=%s', self.__class__.__name__, self.index, offset)
return self.get(offset) | (self.get(offset + 1) << 8)
def read_u32(self, offset):
self.DEBUG('%s.read_u32: page=%s, offset=%s', self.__class__.__name__, self.index, offset)
return self.get(offset) | (self.get(offset + 1) << 8) | (self.get(offset + 2) << 16) | (self.get(offset + 3) << 24)
def write_u8(self, offset, value):
self.DEBUG('%s.write_u8: page=%s, offset=%s, value=%s', self.__class__.__name__, self.index, offset, value)
self.put(offset, value)
def write_u16(self, offset, value):
self.DEBUG('%s.write_u16: page=%s, offset=%s, value=%s', self.__class__.__name__, self.index, offset, value)
self.put(offset, value & 0x00FF)
self.put(offset + 1, (value & 0xFF00) >> 8)
def write_u32(self, offset, value):
self.DEBUG('%s.write_u32: page=%s, offset=%s, value=%s', self.__class__.__name__, self.index, offset, value)
self.put(offset, value & 0x00FF)
self.put(offset + 1, (value & 0xFF00) >> 8)
self.put(offset + 2, (value & 0xFF0000) >> 16)
self.put(offset + 3, (value & 0xFF000000) >> 24)
class MemoryRegionState(SnapshotNode):
def __init__(self):
super(MemoryRegionState, self).__init__('name', 'address', 'size', 'flags', 'pages_start', 'pages_cnt')
class MemoryRegion(ISnapshotable, object):
region_id = 0
def __init__(self, mc, name, address, size, flags):
super(MemoryRegion, self).__init__()
self.memory = mc
self.id = MemoryRegion.region_id
MemoryRegion.region_id += 1
self.name = name
self.address = address
self.size = size
self.flags = flags
self.pages_start, self.pages_cnt = area_to_pages(self.address, self.size)
self.memory.machine.DEBUG('MemoryRegion: name=%s, address=%s, size=%s, flags=%s, pages_start=%s, pages_cnt=%s', name, address, size, self.flags.to_string(), self.pages_start, self.pages_cnt)
def __repr__(self):
return '<MemoryRegion: name=%s, address=%s, size=%s, flags=%s, pages_start=%s, pages_cnt=%s' % (self.name, self.address, self.size, self.flags.to_string(), self.pages_start, self.pages_cnt)
def save_state(self, parent):
state = parent.add_child('memory_region_{}'.format(self.id), MemoryRegionState())
state.name = self.name
state.address = self.address
state.size = self.size
state.flags = self.flags.to_int()
state.pages_start = self.pages_start
state.pages_cnt = self.pages_cnt
def load_state(self, state):
pass
class MemoryState(SnapshotNode):
def __init__(self):
super(MemoryState, self).__init__('size')
def get_page_states(self):
return [__state for __name, __state in iteritems(self.get_children()) if __name.startswith('page_')]
class MemoryController(object):
"""
Memory controller handles all operations regarding main memory.
:param ducky.machine.Machine machine: virtual machine that owns this controller.
:param int size: size of memory, in bytes.
:raises ducky.errors.InvalidResourceError: when memory size is not multiple of
:py:data:`ducky.mm.PAGE_SIZE`.
"""
def __init__(self, machine, size = DEFAULT_MEMORY_SIZE):
machine.DEBUG('%s: size=0x%X', self.__class__.__name__, size)
if size % PAGE_SIZE != 0:
raise InvalidResourceError('Memory size must be multiple of PAGE_SIZE')
if size < MINIMAL_SIZE * PAGE_SIZE:
raise InvalidResourceError('Memory size must be at least %d pages' % MINIMAL_SIZE)
self.machine = machine
# Setup logging - create our local shortcuts to machine' logger
self.DEBUG = self.machine.DEBUG
self.INFO = self.machine.INFO
self.WARN = self.machine.WARN
self.ERROR = self.machine.ERROR
self.EXCEPTION = self.machine.EXCEPTION
self.force_aligned_access = self.machine.config.getbool('memory', 'force-aligned-access', default = False)
self.size = size
self.pages_cnt = size // PAGE_SIZE
self.pages = {}
def save_state(self, parent):
self.DEBUG('mc.save_state')
state = parent.add_child('memory', MemoryState())
state.size = self.size
for page in itervalues(self.pages):
page.save_state(state)
def load_state(self, state):
self.size = state.size
for page_state in state.get_children():
page = self.get_page(page_state.index)
page.load_state(page_state)
def __set_page(self, pg):
"""
Install page object for a specific memory page.
:param ducky.mm.MemoryPage pg: page to be installed
:returns: installed page
:rtype: :py:class:`ducky.mm.MemoryPage`
"""
assert pg.index not in self.pages
if pg.index >= self.pages_cnt:
raise InvalidResourceError('Attempt to create page with index out of bounds: pg.index=%d' % pg.index)
self.pages[pg.index] = pg
return pg
def __remove_page(self, pg):
"""
Removes page object for a specific memory page.
:param ducky.mm.MemoryPage pg: page to be removed
"""
assert pg.index in self.pages
del self.pages[pg.index]
def __alloc_page(self, index):
"""
Allocate new anonymous page for usage. The first available index is used.
Be aware that this method does NOT check if page is already allocated. If
it is, it is just overwritten by new anonymous page.
:param int index: index of requested page.
:returns: newly reserved page.
:rtype: :py:class:`ducky.mm.AnonymousMemoryPage`
"""
return self.__set_page(AnonymousMemoryPage(self, index))
def alloc_specific_page(self, index):
"""
Allocate new anonymous page with specific index for usage.
:param int index: allocate page with this particular index.
:returns: newly reserved page.
:rtype: :py:class:`ducky.mm.AnonymousMemoryPage`
:raises ducky.errors.AccessViolationError: when page is already allocated.
"""
self.DEBUG('mc.alloc_specific_page: index=%s', index)
if index in self.pages:
raise AccessViolationError('Page {} is already allocated'.format(index))
return self.__alloc_page(index)
def alloc_pages(self, base = None, count = 1):
"""
Allocate a contiguous sequence of anonymous pages.
:param u24 base: if set, start searching pages from this address.
:param int count: number of requested pages.
:returns: list of newly allocated pages.
:rtype: ``list`` of :py:class:`ducky.mm.AnonymousMemoryPage`
:raises ducky.errors.InvalidResourceError: when there is no available sequence of
pages.
"""
self.DEBUG('mc.alloc_pages: base=%s, count=%s', UINT32_FMT(base) if base is not None else '<none>', count)
if base is not None:
pages_start = base // PAGE_SIZE
pages_cnt = self.pages_cnt - pages_start
else:
pages_start = 0
pages_cnt = self.pages_cnt
self.DEBUG('mc.alloc_pages: page=%s, cnt=%s', pages_start, pages_cnt)
for i in range(pages_start, pages_start + pages_cnt):
for j in range(i, i + count):
if j in self.pages:
break
else:
return [self.__alloc_page(j) for j in range(i, i + count)]
raise InvalidResourceError('No sequence of free pages available')
def alloc_page(self, base = None):
"""
Allocate new anonymous page for usage. The first available index is used.
:param int base: if set, start searching pages from this address.
:returns: newly reserved page.
:rtype: :py:class:`ducky.mm.AnonymousMemoryPage`
:raises ducky.errors.InvalidResourceError: when there is no available page.
"""
self.DEBUG('mc.alloc_page: base=%s', UINT32_FMT(base) if base is not None else '<none>')
if base is not None:
pages_start = base // PAGE_SIZE
pages_cnt = self.pages_cnt - pages_start
else:
pages_start = 0
pages_cnt = self.pages_cnt
self.DEBUG('mc.alloc_page: page=%s, cnt=%s', pages_start, pages_cnt)
for i in range(pages_start, pages_start + pages_cnt):
if i not in self.pages:
self.DEBUG('mc.alloc_page: page=%s', i)
return self.__alloc_page(i)
raise InvalidResourceError('No free page available')
def register_page(self, pg):
"""
Install page object for a specific memory page. This method is intended
for external objects, e.g. device drivers to install their memory page
objects to handle memory-mapped IO.
:param ducky.mm.MemoryPage pg: page to be installed
:returns: installed page
:rtype: :py:class:`ducky.mm.AnonymousMemoryPage`
:raises ducky.errors.AccessViolationError: when there is already allocated page
"""
self.DEBUG('mc.register_page: pg=%s', pg)
if pg.index in self.pages:
raise AccessViolationError('Page {} is already allocated'.format(pg.index))
return self.__set_page(pg)
def unregister_page(self, pg):
"""
Remove page object for a specific memory page. This method is intended
for external objects, e.g. device drivers to remove their memory page objects
handling memory-mapped IO.
:param ducky.mm.MemoryPage pg: page to be removed
:raises ducky.errors.AccessViolationError: when there is no allocated page
"""
self.DEBUG('mc.unregister_page: pg=%s', pg)
if pg.index not in self.pages:
raise AccessViolationError('Page {} is not allocated'.format(pg.index))
self.__remove_page(pg)
def free_page(self, page):
"""
Free memory page when it's no longer needed.
:param ducky.mm.MemoryPage page: page to be freed.
"""
self.DEBUG('mc.free_page: page=%i, base=%s', page.index, UINT32_FMT(page.base_address))
self.__remove_page(page)
def free_pages(self, page, count = 1):
"""
Free a contiguous sequence of pages when they are no longer needed.
:param ducky.mm.MemoryPage page: first page in series.
:param int count: number of pages.
"""
self.DEBUG('mc.free_pages: page=%i, base=%s, count=%s', page.index, UINT32_FMT(page.base_address), count)
for i in range(page.index, page.index + count):
self.free_page(self.pages[i])
def get_page(self, index):
"""
Return memory page, specified by its index from the beginning of memory.
:param int index: index of requested page.
:rtype: :py:class:`ducky.mm.MemoryPage`
:raises ducky.errors.AccessViolationError: when requested page is not allocated.
"""
if index not in self.pages:
return self.alloc_specific_page(index)
# raise AccessViolationError('Page {} not allocated yet'.format(index))
return self.pages[index]
def get_pages(self, pages_start = 0, pages_cnt = None, ignore_missing = False):
"""
Return list of memory pages.
:param int pages_start: index of the first page, 0 by default.
:param int pages_cnt: number of pages to get, number of all memory pages by default.
:param bool ignore_missing: if ``True``, ignore missing pages, ``False`` by default.
:raises ducky.errors.AccessViolationError: when ``ignore_missing == False`` and there's
a missing page in the requested range, this exception is raised.
:returns: list of pages in area
:rtype: `list` of :py:class:`ducky.mm.MemoryPage`
"""
self.DEBUG('mc.pages: pages_start=%s, pages_cnt=%s, ignore_missing=%s', pages_start, pages_cnt, ignore_missing)
pages_cnt = pages_cnt or self.pages_cnt
if ignore_missing is True:
return (self.pages[i] for i in range(pages_start, pages_start + pages_cnt) if i in self.pages)
else:
return (self.pages[i] for i in range(pages_start, pages_start + pages_cnt))
def pages_in_area(self, address = 0, size = None, ignore_missing = False):
"""
Return list of memory pages.
:param u24 address: beginning address of the area, by default 0.
:param u24 size: size of the area, by default the whole memory size.
:param bool ignore_missing: if ``True``, ignore missing pages, ``False`` by default.
:raises ducky.errors.AccessViolationError: when ``ignore_missing == False`` and there's
a missing page in the requested range, this exception is raised.
:returns: list of pages in area
:rtype: `list` of :py:class:`ducky.mm.MemoryPage`
"""
self.DEBUG('mc.pages_in_area: address=%s, size=%s', UINT32_FMT(address), size)
size = size or self.size
pages_start, pages_cnt = area_to_pages(address, size)
return self.get_pages(pages_start = pages_start, pages_cnt = pages_cnt, ignore_missing = ignore_missing)
def boot(self):
"""
Prepare memory controller for immediate usage by other components.
"""
self.machine.tenh('mm: %s, %s available', sizeof_fmt(self.size, max_unit = 'Ki'), sizeof_fmt(self.size - len(self.pages) * PAGE_SIZE, max_unit = 'Ki'))
def halt(self):
pass
def read_u8(self, addr):
self.DEBUG('mc.read_u8: addr=%s', UINT32_FMT(addr))
return self.get_page((addr & PAGE_MASK) >> PAGE_SHIFT).read_u8(addr & (PAGE_SIZE - 1))
def read_u16(self, addr):
self.DEBUG('mc.read_u16: addr=%s', UINT32_FMT(addr))
return self.get_page((addr & PAGE_MASK) >> PAGE_SHIFT).read_u16(addr & (PAGE_SIZE - 1))
def read_u32(self, addr):
self.DEBUG('mc.read_u32: addr=%s', UINT32_FMT(addr))
return self.get_page((addr & PAGE_MASK) >> PAGE_SHIFT).read_u32(addr & (PAGE_SIZE - 1))
def write_u8(self, addr, value):
self.DEBUG('mc.write_u8: addr=%s, value=%s', UINT32_FMT(addr), UINT8_FMT(value))
self.get_page((addr & PAGE_MASK) >> PAGE_SHIFT).write_u8(addr & (PAGE_SIZE - 1), value)
def write_u16(self, addr, value):
self.DEBUG('mc.write_u16: addr=%s, value=%s', UINT32_FMT(addr), UINT16_FMT(value))
self.get_page((addr & PAGE_MASK) >> PAGE_SHIFT).write_u16(addr & (PAGE_SIZE - 1), value)
def write_u32(self, addr, value):
self.DEBUG('mc.write_u32: addr=%s, value=%s', UINT32_FMT(addr), UINT32_FMT(value))
self.get_page((addr & PAGE_MASK) >> PAGE_SHIFT).write_u32(addr & (PAGE_SIZE - 1), value)
| happz/ducky | ducky/mm/__init__.py | Python | mit | 24,453 | 0.010714 |
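Editorial usage sketch for the MemoryController above (not part of the happz/ducky sources; it assumes the classes from the module above are in scope). The FakeMachine stub is mine and provides only the attributes the controller actually touches in this module - logging callables and a config object with getbool(); the asserts show the little-endian byte layout used by the write/read helpers.

class FakeConfig(object):
    def getbool(self, section, option, default=False):
        return default

class FakeMachine(object):
    def __init__(self):
        self.config = FakeConfig()

    def _log(self, fmt, *args):
        pass

    DEBUG = INFO = WARN = ERROR = EXCEPTION = tenh = _log

mem = MemoryController(FakeMachine(), size=0x10000)   # 256 pages of 256 bytes each
mem.write_u32(0x1234, 0xDEADBEEF)                     # lands in page 0x12 at offset 0x34
assert mem.read_u8(0x1234) == 0xEF                    # little-endian: least significant byte first
assert mem.read_u32(0x1234) == 0xDEADBEEF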
#
# Copyright (c) 2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
import struct
from logging import getLogger
from socket import socket as Socket
from socket import AF_INET, SOCK_STREAM, IPPROTO_TCP
from socket import TCP_NODELAY, SOL_SOCKET, SO_REUSEADDR, SO_LINGER
from gofer.common import Thread, utf8
from gofer.messaging import Document
from gofer.agent.plugin import Container
from gofer.agent.builtin import Admin
HOST = 'localhost'
PORT = 5650
log = getLogger(__name__)
class Handler(object):
"""
The request handler.
"""
def show(self):
container = Container()
admin = Admin(container)
return admin.help()
def cancel(self, sn=None, criteria=None):
container = Container()
admin = Admin(container)
return admin.cancel(sn=sn, criteria=criteria)
def load(self, path):
container = Container()
return container.load(path)
def reload(self, path):
container = Container()
return container.reload(path)
def unload(self, path):
container = Container()
return container.unload(path)
class Manager(Thread):
"""
The manager thread.
"""
def __init__(self, host=None, port=None, handler=None):
"""
:param host: The host (interface) to listen on.
:type: host: str
:param port: The port to listen on.
:type: port: int
:param handler: The request handler.
:type handler: Handler
"""
super(Manager, self).__init__(name='manager')
self.host = host or HOST
self.port = port or PORT
self.handler = handler or Handler()
self.setDaemon(True)
def listen(self):
"""
Bind and listen.
:return: The open socket.
:rtype: socket.socket
"""
address = (self.host, self.port)
socket = Socket(AF_INET, SOCK_STREAM)
socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
socket.bind(address)
socket.listen(5)
log.info('listening on: %d', self.port)
return socket
def accept(self, socket):
"""
Accept requests.
:param socket: An open socket.
:type socket: socket.socket
"""
while not Thread.aborted():
client, address = socket.accept()
try:
self.accepted(client)
finally:
client.close()
def accepted(self, client):
"""
Process the request on the accepted socket.
:param client: A client socket.
:type client: socket.socket
"""
try:
client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
client.setsockopt(SOL_SOCKET, SO_LINGER, struct.pack('ii', 1, 1))
message = client.recv(4096)
call = Document()
call.load(message)
reply = self.dispatch(call)
client.send(reply)
except Exception, e:
log.error(utf8(e))
def run(self):
"""
The thread main.
"""
try:
socket = self.listen()
self.accept(socket)
except Exception:
log.exception(self.host)
def dispatch(self, call):
"""
Dispatch the call to the handler.
:param call: A *call* document.
:type call: Document
"""
reply = Document()
try:
method = getattr(self.handler, call.name)
result = method(*call.args, **call.kwargs)
reply.code = 0
reply.result = result
except Exception, e:
reply.code = 1
reply.result = utf8(e)
return reply.dump()
class Method(object):
"""
Remote method.
"""
def __init__(self, host, port, name):
"""
:param host: The host used to connect to the manager.
:type host: str
:param port: The port used to connect to the manager.
:type: port: int
:param name: The method name.
:type name: str
"""
self.name = name
self.address = (host, port)
def call(self, *args, **kwargs):
"""
Remote call.
"""
socket = Socket(AF_INET, SOCK_STREAM)
socket.connect(self.address)
try:
method = Document()
method.name = self.name
method.args = args
method.kwargs = kwargs
socket.send(method.dump())
reply = socket.recv(4096)
result = Document()
result.load(reply)
return result
finally:
socket.close()
def __call__(self, *args, **kwargs):
try:
result = self.call(*args, **kwargs)
except Exception, e:
reply = Document()
reply.code = 1
reply.result = utf8(e)
result = reply
return result
class Client(object):
"""
The remote manager client.
"""
def __init__(self, host=None, port=None):
"""
:param port: The port used to connect to the manager.
:type: port: int
"""
self.host = host or HOST
self.port = port or PORT
def __getattr__(self, name):
return Method(self.host, self.port, name)
| credativ/gofer | src/gofer/agent/manager.py | Python | lgpl-2.1 | 5,843 | 0 |
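Editorial usage sketch for the socket-based manager above (not part of the credativ/gofer sources; it assumes the module imports as gofer.agent.manager and that a Manager thread is listening on the default localhost:5650). Each attribute access on Client builds a Method, which ships a Document over the socket and returns the reply Document produced by Manager.dispatch():

from gofer.agent.manager import Client

client = Client()
reply = client.show()                  # remote call dispatched to Handler.show()
if reply.code == 0:
    print reply.result                 # the module is Python 2, hence the print statement
else:
    print 'manager call failed:', reply.result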
import os
from django.conf.urls import url
from . import views
from .earthquakes import viewsEarthquakes
from .weather import viewsWeather
from .gtfs import viewsGTFS
from django.http import HttpResponseRedirect
from .utils.utils import *
from django.db import connection
import simplekml
import time
import subprocess
import logging
logger = logging.getLogger("django")
app_name = 'floybd'
urlpatterns = [
url(r'^$', views.index, name='index'),
url('clearKML', views.clearKML, name='clearKML'),
url('weatherStats', viewsWeather.weatherStats, name='weatherStats'),
url('dayWeather', viewsWeather.weatherConcreteIndex, name='dayWeather'),
url('predictWeatherStats', viewsWeather.weatherPredictionsStats, name='predictWeatherStats'),
url('predictWeather', viewsWeather.weatherPredictions, name='predictWeather'),
url('weatherDemos', views.weatherDemos, name='weatherDemos'),
url('weather', views.weatherIndex, name='weather'),
url('currentWeather', viewsWeather.currentWeather, name='currentWeather'),
url('dummyWeather', viewsWeather.dummyWeather, name='dummyWeather'),
url('stopTour', views.stopTourView, name='stopTour'),
url('demoEarthquakes', views.demoEarthquakes, name='demoEarthquakes'),
url('getConcreteDateValues', viewsWeather.getConcreteValues, name='getConcreteDateValues'),
url('sendConcreteValuesToLG', viewsWeather.sendConcreteValuesToLG, name='sendConcreteValuesToLG'),
url('getPredictionStats', viewsWeather.getPredictionStats, name='getPredictionStats'),
url('getPrediction', viewsWeather.getPrediction, name='getPrediction'),
url('sendPredictionsToLG', viewsWeather.sendPredictionsToLG, name='sendPredictionsToLG'),
url('earthquakes', views.eartquakesIndex, name='earthquakes'),
url('getApproxEarthquakes', viewsEarthquakes.getEarthquakesApprox, name='getApproxEarthquakes'),
url('getExactEarthquakes', viewsEarthquakes.getEarthquakesExact, name='getExactEarthquakes'),
url('sendConcreteEarthquakesValuesToLG', viewsEarthquakes.sendConcreteValuesToLG,
name='sendConcreteEarthquakesValuesToLG'),
url('demoLastWeekEarthquakesHeatmap', viewsEarthquakes.demoLastWeekEarthquakesHeatmap,
name='demoLastWeekEarthquakesHeatmap'),
url('demoLastWeekEarthquakes', viewsEarthquakes.demoLastWeekEarthquakes, name='demoLastWeekEarthquakes'),
url('heatMapEarthquakes', views.eartquakesHeatMapIndex, name='heatMapEarthquakes'),
url('getHeatMapEarthquakesKML', viewsEarthquakes.generateHeapMapKml, name='getHeatMapEarthquakesKML'),
url('getHeatMapEarthquakes', viewsEarthquakes.getHeatMap, name='getHeatMapEarthquakes'),
url('getStats', viewsWeather.getStats, name='getStats'),
url('sendStatsToLG', viewsWeather.sendStatsToLG, name='sendStatsToLG'),
url('getGraphDataForStats', viewsWeather.getGraphDataForStats, name='getGraphDataForStats'),
url('launchdemogtfs', viewsGTFS.launchdemogtfs, name='launchdemogtfs'),
url('demogtfsindex', views.demogtfs, name='demogtfsindex'),
url('uploadgtfs', viewsGTFS.uploadgtfs, name='uploadgtfs'),
url('viewgtfs', viewsGTFS.viewgtfs, name='viewgtfs'),
url('gtfs', views.gtfs, name='gtfs'),
url('uploadGTFS', viewsGTFS.uploadGTFS, name='uploadGTFS'),
url('sendGTFSToLG', viewsGTFS.sendGTFSToLG, name='sendGTFSToLG'),
url('getAgenciesAndGenerateKML', viewsGTFS.getAgenciesAndGenerateKML, name='getAgenciesAndGenerateKML'),
url('citydashboard', viewsWeather.citydashboard, name='citydashboard'),
url('viewDashboard', viewsWeather.viewDashboard, name='viewDashboard'),
url('openHelp', views.openHelp, name='openHelp'),
url('launchScreenSaver', views.launchScreenSaver, name='launchScreenSaver'),
url('stopScreenSaver', views.stopScreenSaver, name='stopScreenSaver'),
url('clearCache', views.clearLGCache, name='clearCache'),
url('relaunchLG', views.relaunchLG, name='relaunchLG'),
url('settings', lambda x: HttpResponseRedirect('/admin/floybd/setting/'), name='settings'),
url('webhook', views.webhook, name='webhook'),
url('getSlideImage', views.getSlideImage, name='getSlideImage'),
]
def sendLogos():
if checkPing(getLGIp()):
millis = int(round(time.time() * 1000))
kml = simplekml.Kml(name="Layout")
screen = kml.newscreenoverlay(name='FLOYBD')
screen.icon.href = "http://"+getDjangoIp()+":8000/static/img/ownLogos.png?a="+str(millis)
screen.overlayxy = simplekml.OverlayXY(x=0.0, y=1.0, xunits=simplekml.Units.fraction,
yunits=simplekml.Units.fraction)
screen.screenxy = simplekml.ScreenXY(x=0.0, y=1.0, xunits=simplekml.Units.fraction,
yunits=simplekml.Units.fraction)
screen.rotationxy = simplekml.RotationXY(x=0.0, y=0.0, xunits=simplekml.Units.fraction,
yunits=simplekml.Units.fraction)
screen.size.x = 0.20
screen.size.y = 0.15
screen.size.xunits = simplekml.Units.fraction
screen.size.yunits = simplekml.Units.fraction
screenName = kml.newscreenoverlay(name='App name')
screenName.icon.href = "http://" + getDjangoIp() + ":8000/static/img/FlOYBDLogo.png?a=" + str(millis)
screenName.overlayxy = simplekml.OverlayXY(x=0.0, y=1.0, xunits=simplekml.Units.fraction,
yunits=simplekml.Units.fraction)
screenName.screenxy = simplekml.ScreenXY(x=0.3, y=0.95, xunits=simplekml.Units.fraction,
yunits=simplekml.Units.fraction)
screenName.rotationxy = simplekml.RotationXY(x=0.0, y=0.0, xunits=simplekml.Units.fraction,
yunits=simplekml.Units.fraction)
screenName.size.x = 0.50
screenName.size.y = 0.07
screenName.size.xunits = simplekml.Units.fraction
screenName.size.yunits = simplekml.Units.fraction
screen1 = kml.newscreenoverlay(name='Logos')
screen1.icon.href = "http://" + getDjangoIp() + ":8000/static/img/sharedLogos.png?a="+str(millis)
screen1.overlayxy = simplekml.OverlayXY(x=0.0, y=0.0, xunits=simplekml.Units.fraction,
yunits=simplekml.Units.fraction)
screen1.screenxy = simplekml.ScreenXY(x=0.0, y=0.01, xunits=simplekml.Units.fraction,
yunits=simplekml.Units.fraction)
screen1.rotationxy = simplekml.RotationXY(x=0.0, y=0.0, xunits=simplekml.Units.fraction,
yunits=simplekml.Units.fraction)
screen1.size.x = 0.3
screen1.size.y = 0.25
screen1.size.xunits = simplekml.Units.fraction
screen1.size.yunits = simplekml.Units.fraction
currentDir = os.getcwd()
fileName = "Layout.kml"
dir1 = os.path.join(currentDir, "static/logos")
dirPath2 = os.path.join(dir1, fileName)
logger.info("\033[93m" + "Saving kml: " + str(dirPath2) + "\033[0m")
kml.save(dirPath2)
if db_table_exists("floybd_setting"):
logger.info("\033[93m" + "Sending Logos...from: " + getDjangoIp() + " to: " + getLGIp() + "\033[0m")
getLeftScreenCommand = "sshpass -p " + getLGPass() + " ssh lg@" + getLGIp() + \
" 'head -n 1 personavars.txt | cut -c17-19'"
leftScreenDirty = subprocess.check_output(getLeftScreenCommand, stderr=subprocess.STDOUT, shell=True)
leftScreenClean = leftScreenDirty.rstrip().decode("utf-8")
leftScreenNumber = leftScreenClean[-1:]
logger.debug("Left Screen: " + str(leftScreenClean))
logger.info("\033[93m" + "Left Screen Number: " + str(leftScreenNumber) + "\033[0m")
command = "echo 'http://" + getDjangoIp() + ":8000/static/logos/Layout.kml?a="+str(millis) +\
"' | sshpass -p " + getLGPass() + " ssh lg@" + getLGIp() + " 'cat - > /var/www/html/kmls_" + \
leftScreenNumber+".txt'"
os.system(command)
def createDefaultSettingsObjects():
if db_table_exists("floybd_setting"):
lgIp, created = Setting.objects.get_or_create(key="lgIp")
if created:
logger.info("\033[93m" + "Created lgIp setting object\n" + "\033[0m")
else:
logger.info("\033[93m" + "lgIp setting object existent\n" + "\033[0m")
sparkIp, created = Setting.objects.get_or_create(key="sparkIp", value="130.206.117.178")
if created:
logger.info("\033[93m" + "Created sparkIp setting object\n" + "\033[0m")
else:
logger.info("\033[93m" + "sparkIp setting object existent\n" + "\033[0m")
LGPassword, created = Setting.objects.get_or_create(key="LGPassword", value="lqgalaxy")
if created:
logger.info("\033[93m" + "Created LGPassword setting object\n" + "\033[0m")
else:
logger.info("\033[93m" + "LGPassword setting object existent\n" + "\033[0m")
def startup_clean():
if not os.path.exists("static/kmls"):
logger.info("\033[93m" + "Creating kmls folder" + "\033[0m")
os.makedirs("static/kmls")
def db_table_exists(table_name):
logger.info("\033[93m" + "Checking table existence..." + str(table_name) + "\033[0m")
return table_name in connection.introspection.table_names()
def checkPing(host):
response = os.system("ping -W 1 -c 1 " + host)
if response == 0:
logger.info("\033[93m" + str(host) + ' is up!' + "\033[0m")
return True
else:
logger.info("\033[91m" + str(host) + ' is down!' + "\033[0m")
return False
def printIp():
logger.info('\033[94m'+"###########\tServing Web Application on address: " + str(getDjangoIp()+":8000") +
str("\t\t###########") + '\033[0m')
startup_clean()
createDefaultSettingsObjects()
sendLogos()
printIp()
| navijo/FlOYBD | Django/mysite/floybd/urls.py | Python | mit | 10,036 | 0.005281 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zipfile
from lxml import etree
import os
import uploader
from iso639 import languages as isoLanguages
def extractCover(zipFile, coverFile, coverpath, tmp_file_name):
if coverFile is None:
return None
else:
        zipCoverPath = os.path.join(coverpath, coverFile).replace('\\', '/')
cf = zipFile.read(zipCoverPath)
prefix = os.path.splitext(tmp_file_name)[0]
tmp_cover_name = prefix + '.' + os.path.basename(zipCoverPath)
image = open(tmp_cover_name, 'wb')
image.write(cf)
image.close()
return tmp_cover_name
def get_epub_info(tmp_file_path, original_file_name, original_file_extension):
ns = {
'n': 'urn:oasis:names:tc:opendocument:xmlns:container',
'pkg': 'http://www.idpf.org/2007/opf',
'dc': 'http://purl.org/dc/elements/1.1/'
}
epubZip = zipfile.ZipFile(tmp_file_path)
txt = epubZip.read('META-INF/container.xml')
tree = etree.fromstring(txt)
cfname = tree.xpath('n:rootfiles/n:rootfile/@full-path', namespaces=ns)[0]
cf = epubZip.read(cfname)
tree = etree.fromstring(cf)
coverpath = os.path.dirname(cfname)
p = tree.xpath('/pkg:package/pkg:metadata', namespaces=ns)[0]
epub_metadata = {}
for s in ['title', 'description', 'creator', 'language']:
tmp = p.xpath('dc:%s/text()' % s, namespaces=ns)
if len(tmp) > 0:
epub_metadata[s] = p.xpath('dc:%s/text()' % s, namespaces=ns)[0]
else:
epub_metadata[s] = "Unknown"
if epub_metadata['description'] == "Unknown":
description = tree.xpath("//*[local-name() = 'description']/text()")
if len(description) > 0:
            epub_metadata['description'] = description[0]  # xpath returns a list; keep the first text node
else:
epub_metadata['description'] = ""
if epub_metadata['language'] == "Unknown":
epub_metadata['language'] = ""
else:
lang = epub_metadata['language'].split('-', 1)[0].lower()
if len(lang) == 2:
epub_metadata['language'] = isoLanguages.get(part1=lang).name
elif len(lang) == 3:
epub_metadata['language'] = isoLanguages.get(part3=lang).name
else:
epub_metadata['language'] = ""
coversection = tree.xpath("/pkg:package/pkg:manifest/pkg:item[@id='cover-image']/@href", namespaces=ns)
coverfile = None
if len(coversection) > 0:
coverfile = extractCover(epubZip, coversection[0], coverpath, tmp_file_path)
else:
meta_cover = tree.xpath("/pkg:package/pkg:metadata/pkg:meta[@name='cover']/@content", namespaces=ns)
if len(meta_cover) > 0:
coversection = tree.xpath("/pkg:package/pkg:manifest/pkg:item[@id='"+meta_cover[0]+"']/@href", namespaces=ns)
if len(coversection) > 0:
filetype = coversection[0].rsplit('.', 1)[-1]
if filetype == "xhtml" or filetype == "html": #if cover is (x)html format
markup = epubZip.read(os.path.join(coverpath, coversection[0]))
markupTree = etree.fromstring(markup)
# no matter xhtml or html with no namespace
imgsrc = markupTree.xpath("//*[local-name() = 'img']/@src")
# imgsrc maybe startwith "../"" so fullpath join then relpath to cwd
filename = os.path.relpath(os.path.join(os.path.dirname(os.path.join(coverpath, coversection[0])), imgsrc[0]))
coverfile = extractCover(epubZip, filename, "", tmp_file_path)
else:
coverfile = extractCover(epubZip, coversection[0], coverpath, tmp_file_path)
if epub_metadata['title'] is None:
title = original_file_name
else:
title = epub_metadata['title']
return uploader.BookMeta(
file_path=tmp_file_path,
extension=original_file_extension,
title=title.encode('utf-8').decode('utf-8'),
author=epub_metadata['creator'].encode('utf-8').decode('utf-8'),
cover=coverfile,
description=epub_metadata['description'],
tags="",
series="",
series_id="",
languages=epub_metadata['language']) | ajurcevic/calibre-web | cps/epub.py | Python | gpl-3.0 | 4,225 | 0.003314 |
# -*- coding: utf-8 -*-
"""
Created on 26/11/15
@author: Carlos Eduardo Barbosa
Convert table with SSP parameters of MILES II from the original to the
appropriate format for the MCMC run.
"""
import os
import numpy as np
from config import *
if __name__ == "__main__":
os.chdir(os.path.join(home, "tables"))
miles2 = "MILES_BaSTI_un_1.30.LICK.txt"
lick = "BANDS"
data = np.loadtxt(miles2, dtype=str)
header = data[0]
names = data[:,0]
cols = np.array([2,3,4,5,6,7,8,9,14,15,16,17,18,24,25,26,27,28,29,30,31,
32,33,34,35])
data = data[:,cols]
# lick = np.loadtxt("BANDS", dtype=str, usecols=(0,))
# for a in zip(header[cols], lick):
# print a
table = []
for name, d in zip(names, data):
Z = name[8:13].replace("m", "-").replace("p", "+")
age = name[14:21]
alpha = name[25:29]
scale = name[30:]
if scale not in ["Ep0.00", "Ep0.40"]:
continue
if float(age) < 1.:
continue
table.append(np.hstack((age, Z, alpha, d)))
table = np.array(table)
header = np.hstack(("# Age(Gyr)", "[Z/H]", "[alpha/Fe]", header[cols]))
header = ["{0:12}".format(x) for x in header]
with open("MILESII.txt", "w") as f:
f.write("".join(header))
np.savetxt(f, table, fmt="%12s")
| kadubarbosa/hydra1 | miles2_table.py | Python | gpl-2.0 | 1,347 | 0.019302 |
"""
This file unregisters the admin class for each model specified in
ALPHAFILTER_ADMIN_FIELDS and replaces it with a new admin class that
subclasses both the original admin and one with an alphabet_filter attribute
"""
from django.db.models import get_model
from django.contrib import admin
from django.conf import settings
MODEL_REGISTRY = getattr(settings, 'ALPHAFILTER_ADMIN_FIELDS', {})
FIELDS = {}
for key, val in MODEL_REGISTRY.items():
if isinstance(key, basestring):
FIELDS[get_model(*key.split('.'))] = val
for model, modeladmin in admin.site._registry.items():
if model in FIELDS:
admin.site.unregister(model)
admin.site.register(model, type('newadmin', (modeladmin.__class__,), {
'alphabet_filter': FIELDS[model],
})) | affan2/django-alphabetfilter | alphafilter/admin.py | Python | apache-2.0 | 788 | 0.003807 |
# ****************************************************************************
# Copyright 2018 The Apollo Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
# -*- coding: utf-8 -*-
"""Module for init environment."""
import sys
import os
import importlib
import time
import threading
import ctypes
from google.protobuf.descriptor_pb2 import FileDescriptorProto
PY_CALLBACK_TYPE = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p)
PY_CALLBACK_TYPE_T = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p)
# init vars
CYBER_PATH = os.environ['CYBER_PATH']
CYBER_DIR = os.path.split(CYBER_PATH)[0]
sys.path.append(CYBER_PATH + "/third_party/")
sys.path.append(CYBER_PATH + "/lib/")
sys.path.append(CYBER_PATH + "/python/cyber")
sys.path.append(CYBER_PATH + "/python/cyber_py")
sys.path.append(CYBER_PATH + "/lib/python/")
sys.path.append(CYBER_DIR + "/python/")
sys.path.append(CYBER_DIR + "/cyber/")
_CYBER_INIT = importlib.import_module('_cyber_init')
_CYBER_NODE = importlib.import_module('_cyber_node')
def init(module_name="cyber_py"):
"""
init cyber.
"""
return _CYBER_INIT.py_init(module_name)
def ok():
"""
    Check whether the cyber environment is OK.
"""
return _CYBER_INIT.py_ok()
def shutdown():
"""
    Shut down the cyber environment.
"""
return _CYBER_INIT.py_shutdown()
def is_shutdown():
"""
is cyber shutdown.
"""
return _CYBER_INIT.py_is_shutdown()
def waitforshutdown():
"""
waitforshutdown.
"""
return _CYBER_INIT.py_waitforshutdown()
# //////////////////////////////class//////////////////////////////
class Writer(object):
"""
Class for cyber writer wrapper.
"""
def __init__(self, name, writer, data_type):
self.name = name
self.writer = writer
self.data_type = data_type
def write(self, data):
"""
        Serialize the proto message and write it to the channel.
"""
return _CYBER_NODE.PyWriter_write(self.writer, data.SerializeToString())
class Reader(object):
"""
Class for cyber reader wrapper.
"""
def __init__(self, name, reader, data_type):
self.name = name
self.reader = reader
self.data_type = data_type
class Client(object):
"""
Class for cyber service client wrapper.
"""
def __init__(self, client, data_type):
self.client = client
self.data_type = data_type
def send_request(self, data):
"""
send request to service
@param self
@param data: proto message to send
@return : None or response
"""
response_str = _CYBER_NODE.PyClient_send_request(
self.client, data.SerializeToString())
if len(response_str) == 0:
return None
response = self.data_type()
response.ParseFromString(response_str)
return response
class Node(object):
"""
Class for cyber Node wrapper.
"""
def __init__(self, name):
self.node = _CYBER_NODE.new_PyNode(name)
self.list_writer = []
self.list_reader = []
self.subs = {}
self.pubs = {}
self.list_client = []
self.list_service = []
self.mutex = threading.Lock()
self.callbacks = {}
self.services = {}
def __del__(self):
# print("+++ node __del___")
for writer in self.list_writer:
_CYBER_NODE.delete_PyWriter(writer)
for reader in self.list_reader:
_CYBER_NODE.delete_PyReader(reader)
for c in self.list_client:
_CYBER_NODE.delete_PyClient(c)
for s in self.list_service:
_CYBER_NODE.delete_PyService(s)
_CYBER_NODE.delete_PyNode(self.node)
def register_message(self, file_desc):
"""
register proto message desc file.
"""
for dep in file_desc.dependencies:
self.register_message(dep)
proto = FileDescriptorProto()
file_desc.CopyToProto(proto)
proto.name = file_desc.name
desc_str = proto.SerializeToString()
_CYBER_NODE.PyNode_register_message(self.node, desc_str)
def create_writer(self, name, data_type, qos_depth=1):
"""
        Create a topic writer for sending messages to a topic.
@param self
@param name str: topic name
@param data_type proto: message class for serialization
"""
self.register_message(data_type.DESCRIPTOR.file)
datatype = data_type.DESCRIPTOR.full_name
writer = _CYBER_NODE.PyNode_create_writer(self.node, name,
datatype, qos_depth)
self.list_writer.append(writer)
return Writer(name, writer, datatype)
def reader_callback(self, name):
"""
reader callback
"""
sub = self.subs[name]
msg_str = _CYBER_NODE.PyReader_read(sub[0], False)
if len(msg_str) > 0:
if sub[3] != "RawData":
proto = sub[3]()
proto.ParseFromString(msg_str)
else:
# print "read rawdata-> ",sub[3]
proto = msg_str
if sub[2] is None:
sub[1](proto)
else:
sub[1](proto, sub[2])
return 0
def create_reader(self, name, data_type, callback, args=None):
"""
        Create a topic reader for receiving messages from a topic.
@param self
@param name str: topic name
@param data_type proto: message class for serialization
@callback fn: function to call (fn(data)) when data is
received. If args is set, the function must
accept the args as a second argument,
i.e. fn(data, args)
@args any: additional arguments to pass to the callback
"""
self.mutex.acquire()
if name in self.subs.keys():
self.mutex.release()
return None
self.mutex.release()
# datatype = data_type.DESCRIPTOR.full_name
reader = _CYBER_NODE.PyNode_create_reader(
self.node, name, str(data_type))
if reader is None:
return None
self.list_reader.append(reader)
sub = (reader, callback, args, data_type, False)
self.mutex.acquire()
self.subs[name] = sub
self.mutex.release()
fun_reader_cb = PY_CALLBACK_TYPE(self.reader_callback)
self.callbacks[name] = fun_reader_cb
f_ptr = ctypes.cast(self.callbacks[name], ctypes.c_void_p).value
_CYBER_NODE.PyReader_register_func(reader, f_ptr)
return Reader(name, reader, data_type)
def create_rawdata_reader(self, name, callback, args=None):
"""
        Create a RawData reader that receives raw message bytes.
"""
return self.create_reader(name, "RawData", callback, args)
def create_client(self, name, request_data_type, response_data_type):
datatype = request_data_type.DESCRIPTOR.full_name
c = _CYBER_NODE.PyNode_create_client(self.node, name,
str(datatype))
self.list_client.append(c)
return Client(c, response_data_type)
def service_callback(self, name):
v = self.services[name]
msg_str = _CYBER_NODE.PyService_read(v[0])
if (len(msg_str) > 0):
proto = v[3]()
proto.ParseFromString(msg_str)
response = None
if v[2] is None:
response = v[1](proto)
else:
response = v[1](proto, v[2])
_CYBER_NODE.PyService_write(v[0], response.SerializeToString())
return 0
def create_service(self, name, req_data_type, res_data_type, callback, args=None):
self.mutex.acquire()
if name in self.services.keys():
self.mutex.release()
return None
self.mutex.release()
datatype = req_data_type.DESCRIPTOR.full_name
s = _CYBER_NODE.PyNode_create_service(self.node, name, str(datatype))
self.list_service.append(s)
v = (s, callback, args, req_data_type, False)
self.mutex.acquire()
self.services[name] = v
self.mutex.release()
f = PY_CALLBACK_TYPE(self.service_callback)
self.callbacks[name] = f
f_ptr = ctypes.cast(f, ctypes.c_void_p).value
_CYBER_NODE.PyService_register_func(s, f_ptr)
return s
def spin(self):
"""
        Wait in a loop and process messages.
@param self
"""
while not _CYBER_INIT.py_is_shutdown():
time.sleep(0.002)
class ChannelUtils(object):
@staticmethod
def get_debugstring_rawmsgdata(msg_type, rawmsgdata):
"""
Parse rawmsg from rawmsg data
Input: message type; rawmsg data
        Output: a human readable form of this message, for debugging and other purposes.
"""
return _CYBER_NODE.PyChannelUtils_get_debugstring_by_msgtype_rawmsgdata(msg_type, rawmsgdata)
@staticmethod
def get_msgtype(channel_name, sleep_s=2):
"""
        Get the message type of a channel.
Input: channel name, wait for topo discovery
Output: the corresponding message type of this channel in topo.
"""
return _CYBER_NODE.PyChannelUtils_get_msg_type(channel_name, sleep_s)
@staticmethod
def get_channels(sleep_s=2):
"""
        Get the names of the active channels.
Input: wait for topo discovery
Output: all active channels
"""
return _CYBER_NODE.PyChannelUtils_get_active_channels(sleep_s)
@staticmethod
def get_channels_info(sleep_s=2):
"""
Get active channel info
Input: wait for topo discovery
Output: {'channel1':[], 'channel2':[]} .channels info
"""
return _CYBER_NODE.PyChannelUtils_get_channels_info(sleep_s)
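# A minimal usage sketch (not part of the original module). The channel name,
# proto type and import path below are assumptions; any generated protobuf
# message class can be used:
#
#     from cyber_py import cyber
#     from modules.common.proto.header_pb2 import Header   # hypothetical proto
#
#     def callback(msg):
#         print(msg)
#
#     cyber.init()
#     node = cyber.Node("demo_node")
#     writer = node.create_writer("/demo/channel", Header)
#     node.create_reader("/demo/channel", Header, callback)
#     writer.write(Header())
#     node.spin()
#     cyber.shutdown()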
| ycool/apollo | cyber/python/cyber_py/cyber.py | Python | apache-2.0 | 10,449 | 0.000383 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Fortran/F03FILESUFFIXES.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
from common import write_fake_link
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
write_fake_link(test)
test.write('myfortran.py', r"""
import getopt
import sys
comment = '#' + sys.argv[1]
opts, args = getopt.getopt(sys.argv[2:], 'co:')
for opt, arg in opts:
if opt == '-o': out = arg
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
for l in infile.readlines():
if l[:len(comment)] != comment:
outfile.write(l)
sys.exit(0)
""")
# Test default file suffixes: .f03/.F03 for F03
test.write('SConstruct', """
env = Environment(LINK = r'%(_python_)s mylink.py',
LINKFLAGS = [],
F03 = r'%(_python_)s myfortran.py f03',
FORTRAN = r'%(_python_)s myfortran.py fortran')
env.Program(target = 'test01', source = 'test01.f')
env.Program(target = 'test02', source = 'test02.F')
env.Program(target = 'test03', source = 'test03.for')
env.Program(target = 'test04', source = 'test04.FOR')
env.Program(target = 'test05', source = 'test05.ftn')
env.Program(target = 'test06', source = 'test06.FTN')
env.Program(target = 'test07', source = 'test07.fpp')
env.Program(target = 'test08', source = 'test08.FPP')
env.Program(target = 'test09', source = 'test09.f03')
env.Program(target = 'test10', source = 'test10.F03')
""" % locals())
test.write('test01.f', "This is a .f file.\n#link\n#fortran\n")
test.write('test02.F', "This is a .F file.\n#link\n#fortran\n")
test.write('test03.for', "This is a .for file.\n#link\n#fortran\n")
test.write('test04.FOR', "This is a .FOR file.\n#link\n#fortran\n")
test.write('test05.ftn', "This is a .ftn file.\n#link\n#fortran\n")
test.write('test06.FTN', "This is a .FTN file.\n#link\n#fortran\n")
test.write('test07.fpp', "This is a .fpp file.\n#link\n#fortran\n")
test.write('test08.FPP', "This is a .FPP file.\n#link\n#fortran\n")
test.write('test09.f03', "This is a .f03 file.\n#link\n#f03\n")
test.write('test10.F03', "This is a .F03 file.\n#link\n#f03\n")
test.run(arguments = '.', stderr = None)
test.must_match('test01' + _exe, "This is a .f file.\n")
test.must_match('test02' + _exe, "This is a .F file.\n")
test.must_match('test03' + _exe, "This is a .for file.\n")
test.must_match('test04' + _exe, "This is a .FOR file.\n")
test.must_match('test05' + _exe, "This is a .ftn file.\n")
test.must_match('test06' + _exe, "This is a .FTN file.\n")
test.must_match('test07' + _exe, "This is a .fpp file.\n")
test.must_match('test08' + _exe, "This is a .FPP file.\n")
test.must_match('test09' + _exe, "This is a .f03 file.\n")
test.must_match('test10' + _exe, "This is a .F03 file.\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| EmanueleCannizzaro/scons | test/Fortran/F03FILESUFFIXES.py | Python | mit | 4,014 | 0.001495 |
#!/usr/bin/python2.7
# Run this script as user: www-data
import os
import server_path
import squeakspace.server.db_sqlite3 as db
import config
try:
os.remove(config.db_path)
except OSError:
pass
conn = db.connect(config.db_path)
c = db.cursor(conn)
db.make_db(c, config.total_quota)
db.commit(conn)
db.close(conn)
| eek6/squeakspace | admin/init_server_db.py | Python | gpl-3.0 | 325 | 0 |
from pyramid.view import view_config
@view_config(route_name='url#index', request_method='GET', renderer='templates/url/index.pt')
def index(request):
return {}
@view_config(route_name='url#show', request_method='GET', renderer='templates/url/show.pt')
def show(request):
return {}
@view_config(route_name='url#new', request_method='GET', renderer='templates/url/new.pt')
def new(request):
return {}
@view_config(route_name='url#create', request_method='GET', renderer='templates/url/create.pt')
def create(request):
return {}
@view_config(route_name='url#edit', request_method='GET', renderer='templates/url/edit.pt')
def edit(request):
return {}
@view_config(route_name='url#update', request_method='GET', renderer='templates/url/update.pt')
def update(request):
return {}
@view_config(route_name='url#destroy', request_method='GET', renderer='templates/url/destroy.pt')
def destroy(request):
return {}
| antljones/saw | saw/views/url.py | Python | mit | 940 | 0.014894 |
# Copyright (c) The AcidSWF Project.
# See LICENSE.txt for details.
"""
Support for creating a service which runs an AMF server.
@since: 1.0
"""
import logging
from twisted.python import usage
from twisted.application import service
from acidswf.service import createAMFService
optParameters = [
['log-level', None, logging.INFO, 'Log level.'],
['amf-transport', None, 'http', 'Run the AMF server on HTTP or HTTPS transport.'],
['amf-host', None, 'localhost', 'The interface for the AMF gateway to listen on.'],
['service', None, 'acidswf', 'The remote service name.'],
['amf-port', None, 8000, 'The port number for the AMF gateway to listen on.'],
['crossdomain', None, 'crossdomain.xml', 'Path to a crossdomain.xml file.'],
]
class Options(usage.Options):
"""
Define the options accepted by the I{acidswf amf} plugin.
"""
synopsis = "[amf options]"
optParameters = optParameters
longdesc = """\
This starts an AMF server."""
def postOptions(self):
"""
Set up conditional defaults and check for dependencies.
If SSL is not available but an HTTPS server was configured, raise a
L{UsageError} indicating that this is not possible.
If no server port was supplied, select a default appropriate for the
other options supplied.
"""
pass
#if self['https']:
# try:
# from twisted.internet.ssl import DefaultOpenSSLContextFactory
# except ImportError:
# raise usage.UsageError("SSL support not installed")
def makeService(options):
top_service = service.MultiService()
createAMFService(top_service, options)
return top_service
| thijstriemstra/acidswf | python/acidswf/application/amf.py | Python | gpl-3.0 | 1,733 | 0.004616 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the `Google Monitoring API (V3)`_.
Example::
>>> from gcloud import monitoring
>>> client = monitoring.Client()
>>> query = client.query(minutes=5)
>>> print(query.as_dataframe()) # Requires pandas.
At present, the client supports querying of time series, metric descriptors,
and monitored resource descriptors.
.. _Google Monitoring API (V3): https://cloud.google.com/monitoring/api/
"""
from gcloud.client import JSONClient
from gcloud.monitoring.connection import Connection
from gcloud.monitoring.metric import MetricDescriptor
from gcloud.monitoring.query import Query
from gcloud.monitoring.resource import ResourceDescriptor
class Client(JSONClient):
"""Client to bundle configuration needed for API requests.
:type project: string
:param project: The target project. If not passed, falls back to the
default inferred from the environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
_connection_class = Connection
def query(self,
metric_type=Query.DEFAULT_METRIC_TYPE,
end_time=None,
days=0, hours=0, minutes=0):
"""Construct a query object for listing time series.
Example::
>>> query = client.query(minutes=5)
>>> print(query.as_dataframe()) # Requires pandas.
:type metric_type: string
:param metric_type: The metric type name. The default value is
:data:`Query.DEFAULT_METRIC_TYPE
<gcloud.monitoring.query.Query.DEFAULT_METRIC_TYPE>`,
but please note that this default value is provided only for
demonstration purposes and is subject to change. See the
`supported metrics`_.
:type end_time: :class:`datetime.datetime` or None
:param end_time: The end time (inclusive) of the time interval
for which results should be returned, as a datetime object.
The default is the start of the current minute.
The start time (exclusive) is determined by combining the
values of ``days``, ``hours``, and ``minutes``, and
subtracting the resulting duration from the end time.
It is also allowed to omit the end time and duration here,
in which case
:meth:`~gcloud.monitoring.query.Query.select_interval`
must be called before the query is executed.
:type days: integer
:param days: The number of days in the time interval.
:type hours: integer
:param hours: The number of hours in the time interval.
:type minutes: integer
:param minutes: The number of minutes in the time interval.
:rtype: :class:`~gcloud.monitoring.query.Query`
:returns: The query object.
:raises: :exc:`ValueError` if ``end_time`` is specified but
``days``, ``hours``, and ``minutes`` are all zero.
If you really want to specify a point in time, use
:meth:`~gcloud.monitoring.query.Query.select_interval`.
.. _supported metrics: https://cloud.google.com/monitoring/api/metrics
"""
return Query(self, metric_type,
end_time=end_time,
days=days, hours=hours, minutes=minutes)
def fetch_metric_descriptor(self, metric_type):
"""Look up a metric descriptor by type.
Example::
>>> METRIC = 'compute.googleapis.com/instance/cpu/utilization'
>>> print(client.fetch_metric_descriptor(METRIC))
:type metric_type: string
:param metric_type: The metric type name.
:rtype: :class:`~gcloud.monitoring.metric.MetricDescriptor`
:returns: The metric descriptor instance.
:raises: :class:`gcloud.exceptions.NotFound` if the metric descriptor
is not found.
"""
return MetricDescriptor._fetch(self, metric_type)
def list_metric_descriptors(self, filter_string=None):
"""List all metric descriptors for the project.
Example::
>>> for descriptor in client.list_metric_descriptors():
... print(descriptor.type)
:type filter_string: string or None
:param filter_string:
An optional filter expression describing the metric descriptors
to be returned. See the `filter documentation`_.
:rtype: list of :class:`~gcloud.monitoring.metric.MetricDescriptor`
:returns: A list of metric descriptor instances.
.. _filter documentation:
https://cloud.google.com/monitoring/api/v3/filters
"""
return MetricDescriptor._list(self, filter_string)
def fetch_resource_descriptor(self, resource_type):
"""Look up a resource descriptor by type.
Example::
>>> print(client.fetch_resource_descriptor('gce_instance'))
:type resource_type: string
:param resource_type: The resource type name.
:rtype: :class:`~gcloud.monitoring.resource.ResourceDescriptor`
:returns: The resource descriptor instance.
:raises: :class:`gcloud.exceptions.NotFound` if the resource descriptor
is not found.
"""
return ResourceDescriptor._fetch(self, resource_type)
def list_resource_descriptors(self, filter_string=None):
"""List all resource descriptors for the project.
Example::
>>> for descriptor in client.list_resource_descriptors():
... print(descriptor.type)
:type filter_string: string or None
:param filter_string:
An optional filter expression describing the resource descriptors
to be returned. See the `filter documentation`_.
:rtype: list of :class:`~gcloud.monitoring.resource.ResourceDescriptor`
:returns: A list of resource descriptor instances.
.. _filter documentation:
https://cloud.google.com/monitoring/api/v3/filters
"""
return ResourceDescriptor._list(self, filter_string)
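# A small combined usage sketch (assumes application default credentials and the
# optional pandas dependency; the filter string follows the Monitoring API filter
# syntax and is illustrative only):
#
#     from gcloud import monitoring
#
#     client = monitoring.Client()
#     for descriptor in client.list_metric_descriptors(
#             filter_string='metric.type = starts_with("compute.googleapis.com")'):
#         print(descriptor.type)
#     query = client.query('compute.googleapis.com/instance/cpu/utilization',
#                          hours=1)
#     print(query.as_dataframe())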
| huangkuan/hack | lib/gcloud/monitoring/client.py | Python | apache-2.0 | 7,310 | 0 |
"""Test that anonymous structs/unions are transparent to member access"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class AnonymousTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipIf(
compiler="icc",
bugnumber="llvm.org/pr15036: LLDB generates an incorrect AST layout for an anonymous struct when DWARF is generated by ICC")
def test_expr_nest(self):
self.build()
self.common_setup(self.line0)
# These should display correctly.
self.expect("expression n->foo.d", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 4"])
self.expect("expression n->b", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 2"])
def test_expr_child(self):
self.build()
self.common_setup(self.line1)
# These should display correctly.
self.expect("expression c->foo.d", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 4"])
self.expect(
"expression c->grandchild.b",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 2"])
@skipIf(
compiler="icc",
bugnumber="llvm.org/pr15036: This particular regression was introduced by r181498")
def test_expr_grandchild(self):
self.build()
self.common_setup(self.line2)
# These should display correctly.
self.expect("expression g.child.foo.d", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 4"])
self.expect("expression g.child.b", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 2"])
def test_expr_parent(self):
self.build()
if "clang" in self.getCompiler() and "3.4" in self.getCompilerVersion():
self.skipTest(
"llvm.org/pr16214 -- clang emits partial DWARF for structures referenced via typedef")
self.common_setup(self.line2)
# These should display correctly.
self.expect("expression pz", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["(type_z *) $", " = 0x0000"])
self.expect("expression z.y", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["(type_y) $", "dummy = 2"])
def test_expr_null(self):
self.build()
self.common_setup(self.line2)
# This should fail because pz is 0, but it succeeds on OS/X.
# This fails on Linux with an upstream error "Couldn't dematerialize struct", as does "p *n" with "int *n = 0".
# Note that this can also trigger llvm.org/pr15036 when run
# interactively at the lldb command prompt.
self.expect("expression *(type_z *)pz", error=True)
def test_child_by_name(self):
self.build()
# Set debugger into synchronous mode
self.dbg.SetAsync(False)
# Create a target
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
break_in_main = target.BreakpointCreateBySourceRegex(
'// Set breakpoint 2 here.', lldb.SBFileSpec(self.source))
self.assertTrue(break_in_main, VALID_BREAKPOINT)
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_main)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint in main.")
thread = threads[0]
frame = thread.frames[0]
if not frame.IsValid():
self.fail("Failed to get frame 0.")
var_n = frame.FindVariable("n")
if not var_n.IsValid():
self.fail("Failed to get the variable 'n'")
elem_a = var_n.GetChildMemberWithName("a")
if not elem_a.IsValid():
self.fail("Failed to get the element a in n")
error = lldb.SBError()
value = elem_a.GetValueAsSigned(error, 1000)
if not error.Success() or value != 0:
self.fail("failed to get the correct value for element a in n")
def test_nest_flat(self):
self.build()
self.common_setup(self.line2)
# These should display correctly.
self.expect('frame variable n --flat',
substrs=['n.a = 0',
'n.b = 2',
'n.foo.c = 0',
'n.foo.d = 4'])
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers to break in main.c.
self.source = 'main.c'
self.line0 = line_number(self.source, '// Set breakpoint 0 here.')
self.line1 = line_number(self.source, '// Set breakpoint 1 here.')
self.line2 = line_number(self.source, '// Set breakpoint 2 here.')
def common_setup(self, line):
# Set debugger into synchronous mode
self.dbg.SetAsync(False)
# Create a target
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Set breakpoints inside and outside methods that take pointers to the
# containing struct.
lldbutil.run_break_set_by_file_and_line(
self, self.source, line, num_expected_locations=1, loc_exact=True)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/lang/c/anonymous/TestAnonymous.py | Python | bsd-3-clause | 6,073 | 0.000988 |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import copy
import pickle
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
# The test framework used to provide these, but now it doesn't.  We'll add
# messages to them as we find bugs (if indeed we find bugs).
msgs = []
rawmsgs = []
class IrcMsgTestCase(SupyTestCase):
def testLen(self):
for msg in msgs:
if msg.prefix:
strmsg = str(msg)
self.failIf(len(msg) != len(strmsg) and \
strmsg.replace(':', '') == strmsg)
def testRepr(self):
IrcMsg = ircmsgs.IrcMsg
for msg in msgs:
self.assertEqual(msg, eval(repr(msg)))
def testStr(self):
for (rawmsg, msg) in zip(rawmsgs, msgs):
strmsg = str(msg).strip()
self.failIf(rawmsg != strmsg and \
strmsg.replace(':', '') == strmsg)
def testEq(self):
for msg in msgs:
self.assertEqual(msg, msg)
self.failIf(msgs and msgs[0] == []) # Comparison to unhashable type.
def testNe(self):
for msg in msgs:
self.failIf(msg != msg)
## def testImmutability(self):
## s = 'something else'
## t = ('foo', 'bar', 'baz')
## for msg in msgs:
## self.assertRaises(AttributeError, setattr, msg, 'prefix', s)
## self.assertRaises(AttributeError, setattr, msg, 'nick', s)
## self.assertRaises(AttributeError, setattr, msg, 'user', s)
## self.assertRaises(AttributeError, setattr, msg, 'host', s)
## self.assertRaises(AttributeError, setattr, msg, 'command', s)
## self.assertRaises(AttributeError, setattr, msg, 'args', t)
## if msg.args:
## def setArgs(msg):
## msg.args[0] = s
## self.assertRaises(TypeError, setArgs, msg)
def testInit(self):
for msg in msgs:
self.assertEqual(msg, ircmsgs.IrcMsg(prefix=msg.prefix,
command=msg.command,
args=msg.args))
self.assertEqual(msg, ircmsgs.IrcMsg(msg=msg))
self.assertRaises(ValueError,
ircmsgs.IrcMsg,
args=('foo', 'bar'),
prefix='foo!bar@baz')
def testPickleCopy(self):
for msg in msgs:
self.assertEqual(msg, pickle.loads(pickle.dumps(msg)))
self.assertEqual(msg, copy.copy(msg))
def testHashNotZero(self):
zeroes = 0
for msg in msgs:
if hash(msg) == 0:
zeroes += 1
self.failIf(zeroes > (len(msgs)/10), 'Too many zero hashes.')
def testMsgKeywordHandledProperly(self):
msg = ircmsgs.notice('foo', 'bar')
msg2 = ircmsgs.IrcMsg(msg=msg, command='PRIVMSG')
self.assertEqual(msg2.command, 'PRIVMSG')
self.assertEqual(msg2.args, msg.args)
def testMalformedIrcMsgRaised(self):
self.assertRaises(ircmsgs.MalformedIrcMsg, ircmsgs.IrcMsg, ':foo')
self.assertRaises(ircmsgs.MalformedIrcMsg, ircmsgs.IrcMsg,
args=('biff',), prefix='foo!bar@baz')
def testTags(self):
m = ircmsgs.privmsg('foo', 'bar')
self.failIf(m.repliedTo)
m.tag('repliedTo')
self.failUnless(m.repliedTo)
m.tag('repliedTo')
self.failUnless(m.repliedTo)
m.tag('repliedTo', 12)
self.assertEqual(m.repliedTo, 12)
class FunctionsTestCase(SupyTestCase):
def testIsAction(self):
L = [':jemfinch!~jfincher@ts26-2.homenet.ohio-state.edu PRIVMSG'
' #sourcereview :ACTION does something',
':supybot!~supybot@underthemain.net PRIVMSG #sourcereview '
':ACTION beats angryman senseless with a Unix manual (#2)',
':supybot!~supybot@underthemain.net PRIVMSG #sourcereview '
':ACTION beats ang senseless with a 50lb Unix manual (#2)',
':supybot!~supybot@underthemain.net PRIVMSG #sourcereview '
':ACTION resizes angryman\'s terminal to 40x24 (#16)']
msgs = map(ircmsgs.IrcMsg, L)
for msg in msgs:
self.failUnless(ircmsgs.isAction(msg))
def testIsActionIsntStupid(self):
m = ircmsgs.privmsg('#x', '\x01NOTANACTION foo\x01')
self.failIf(ircmsgs.isAction(m))
m = ircmsgs.privmsg('#x', '\x01ACTION foo bar\x01')
self.failUnless(ircmsgs.isAction(m))
def testIsCtcp(self):
self.failUnless(ircmsgs.isCtcp(ircmsgs.privmsg('foo',
'\x01VERSION\x01')))
self.failIf(ircmsgs.isCtcp(ircmsgs.privmsg('foo', '\x01')))
def testIsActionFalseWhenNoSpaces(self):
msg = ircmsgs.IrcMsg('PRIVMSG #foo :\x01ACTIONfoobar\x01')
self.failIf(ircmsgs.isAction(msg))
def testUnAction(self):
s = 'foo bar baz'
msg = ircmsgs.action('#foo', s)
self.assertEqual(ircmsgs.unAction(msg), s)
def testBan(self):
channel = '#osu'
ban = '*!*@*.edu'
exception = '*!*@*ohio-state.edu'
noException = ircmsgs.ban(channel, ban)
self.assertEqual(ircutils.separateModes(noException.args[1:]),
[('+b', ban)])
withException = ircmsgs.ban(channel, ban, exception)
self.assertEqual(ircutils.separateModes(withException.args[1:]),
[('+b', ban), ('+e', exception)])
def testBans(self):
channel = '#osu'
bans = ['*!*@*', 'jemfinch!*@*']
exceptions = ['*!*@*ohio-state.edu']
noException = ircmsgs.bans(channel, bans)
self.assertEqual(ircutils.separateModes(noException.args[1:]),
[('+b', bans[0]), ('+b', bans[1])])
withExceptions = ircmsgs.bans(channel, bans, exceptions)
self.assertEqual(ircutils.separateModes(withExceptions.args[1:]),
[('+b', bans[0]), ('+b', bans[1]),
('+e', exceptions[0])])
def testUnban(self):
channel = '#supybot'
ban = 'foo!bar@baz'
self.assertEqual(str(ircmsgs.unban(channel, ban)),
'MODE %s -b :%s\r\n' % (channel, ban))
def testJoin(self):
channel = '#osu'
key = 'michiganSucks'
self.assertEqual(ircmsgs.join(channel).args, ('#osu',))
self.assertEqual(ircmsgs.join(channel, key).args,
('#osu', 'michiganSucks'))
def testJoins(self):
channels = ['#osu', '#umich']
keys = ['michiganSucks', 'osuSucks']
self.assertEqual(ircmsgs.joins(channels).args, ('#osu,#umich',))
self.assertEqual(ircmsgs.joins(channels, keys).args,
('#osu,#umich', 'michiganSucks,osuSucks'))
keys.pop()
self.assertEqual(ircmsgs.joins(channels, keys).args,
('#osu,#umich', 'michiganSucks'))
def testQuit(self):
self.failUnless(ircmsgs.quit(prefix='foo!bar@baz'))
def testOps(self):
m = ircmsgs.ops('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo +ooo foo bar :baz\r\n')
def testDeops(self):
m = ircmsgs.deops('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo -ooo foo bar :baz\r\n')
def testVoices(self):
m = ircmsgs.voices('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo +vvv foo bar :baz\r\n')
def testDevoices(self):
m = ircmsgs.devoices('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo -vvv foo bar :baz\r\n')
def testHalfops(self):
m = ircmsgs.halfops('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo +hhh foo bar :baz\r\n')
def testDehalfops(self):
m = ircmsgs.dehalfops('#foo', ['foo', 'bar', 'baz'])
self.assertEqual(str(m), 'MODE #foo -hhh foo bar :baz\r\n')
def testMode(self):
m = ircmsgs.mode('#foo', ('-b', 'foo!bar@baz'))
s = str(m)
self.assertEqual(s, 'MODE #foo -b :foo!bar@baz\r\n')
def testIsSplit(self):
m = ircmsgs.IrcMsg(prefix="caker!~caker@ns.theshore.net",
command="QUIT",
args=('jupiter.oftc.net quasar.oftc.net',))
self.failUnless(ircmsgs.isSplit(m))
m = ircmsgs.IrcMsg(prefix="bzbot!Brad2901@ACC87473.ipt.aol.com",
command="QUIT",
args=('Read error: 110 (Connection timed out)',))
self.failIf(ircmsgs.isSplit(m))
m = ircmsgs.IrcMsg(prefix="JibberJim!~none@8212cl.b0nwbeoe.co.uk",
command="QUIT",
args=('"Bye!"',))
self.failIf(ircmsgs.isSplit(m))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| buildbot/supybot | test/test_ircmsgs.py | Python | bsd-3-clause | 10,535 | 0.001804 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow import AirflowException
from airflow.contrib.hooks.gcp_compute_hook import GceHook
from airflow.contrib.utils.gcp_field_validator import GcpBodyFieldValidator
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GceBaseOperator(BaseOperator):
"""
Abstract base operator for Google Compute Engine operators to inherit from.
"""
@apply_defaults
def __init__(self,
project_id,
zone,
resource_id,
gcp_conn_id='google_cloud_default',
api_version='v1',
*args, **kwargs):
self.project_id = project_id
self.zone = zone
self.full_location = 'projects/{}/zones/{}'.format(self.project_id,
self.zone)
self.resource_id = resource_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self._validate_inputs()
self._hook = GceHook(gcp_conn_id=self.gcp_conn_id, api_version=self.api_version)
super(GceBaseOperator, self).__init__(*args, **kwargs)
def _validate_inputs(self):
if not self.project_id:
raise AirflowException("The required parameter 'project_id' is missing")
if not self.zone:
raise AirflowException("The required parameter 'zone' is missing")
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing")
def execute(self, context):
pass
class GceInstanceStartOperator(GceBaseOperator):
"""
Start an instance in Google Compute Engine.
:param project_id: Google Cloud Platform project where the Compute Engine
instance exists.
:type project_id: str
:param zone: Google Cloud Platform zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource.
:type resource_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param api_version: API version used (e.g. v1).
:type api_version: str
"""
template_fields = ('project_id', 'zone', 'resource_id', 'gcp_conn_id', 'api_version')
@apply_defaults
def __init__(self,
project_id,
zone,
resource_id,
gcp_conn_id='google_cloud_default',
api_version='v1',
*args, **kwargs):
super(GceInstanceStartOperator, self).__init__(
project_id=project_id, zone=zone, resource_id=resource_id,
gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs)
def execute(self, context):
return self._hook.start_instance(self.project_id, self.zone, self.resource_id)
class GceInstanceStopOperator(GceBaseOperator):
"""
Stop an instance in Google Compute Engine.
:param project_id: Google Cloud Platform project where the Compute Engine
instance exists.
:type project_id: str
:param zone: Google Cloud Platform zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource.
:type resource_id: str
:param gcp_conn_id: The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param api_version: API version used (e.g. v1).
:type api_version: str
"""
template_fields = ('project_id', 'zone', 'resource_id', 'gcp_conn_id', 'api_version')
@apply_defaults
def __init__(self,
project_id,
zone,
resource_id,
gcp_conn_id='google_cloud_default',
api_version='v1',
*args, **kwargs):
super(GceInstanceStopOperator, self).__init__(
project_id=project_id, zone=zone, resource_id=resource_id,
gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs)
def execute(self, context):
return self._hook.stop_instance(self.project_id, self.zone, self.resource_id)
SET_MACHINE_TYPE_VALIDATION_SPECIFICATION = [
dict(name="machineType", regexp="^.+$"),
]
class GceSetMachineTypeOperator(GceBaseOperator):
"""
Changes the machine type for a stopped instance to the machine type specified in
the request.
:param project_id: Google Cloud Platform project where the Compute Engine
instance exists.
:type project_id: str
:param zone: Google Cloud Platform zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource.
:type resource_id: str
:param body: Body required by the Compute Engine setMachineType API, as described in
https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType#request-body
:type body: dict
:param gcp_conn_id: The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param api_version: API version used (e.g. v1).
:type api_version: str
"""
template_fields = ('project_id', 'zone', 'resource_id', 'gcp_conn_id', 'api_version')
@apply_defaults
def __init__(self,
project_id,
zone,
resource_id,
body,
gcp_conn_id='google_cloud_default',
api_version='v1',
validate_body=True,
*args, **kwargs):
self.body = body
self._field_validator = None
if validate_body:
self._field_validator = GcpBodyFieldValidator(
SET_MACHINE_TYPE_VALIDATION_SPECIFICATION, api_version=api_version)
super(GceSetMachineTypeOperator, self).__init__(
project_id=project_id, zone=zone, resource_id=resource_id,
gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs)
def _validate_all_body_fields(self):
if self._field_validator:
self._field_validator.validate(self.body)
def execute(self, context):
self._validate_all_body_fields()
return self._hook.set_machine_type(self.project_id, self.zone,
self.resource_id, self.body)
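# A minimal usage sketch (the project, zone and instance names are assumptions,
# not values from this repository): chaining a stop and a start of the same
# instance inside a DAG.
#
#     from datetime import datetime
#     from airflow import DAG
#     from airflow.contrib.operators.gcp_compute_operator import (
#         GceInstanceStartOperator, GceInstanceStopOperator)
#
#     with DAG('gce_example', start_date=datetime(2018, 1, 1),
#              schedule_interval=None) as dag:
#         stop = GceInstanceStopOperator(
#             task_id='gce_stop', project_id='my-project',
#             zone='europe-west1-b', resource_id='my-instance')
#         start = GceInstanceStartOperator(
#             task_id='gce_start', project_id='my-project',
#             zone='europe-west1-b', resource_id='my-instance')
#         stop >> start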
| sid88in/incubator-airflow | airflow/contrib/operators/gcp_compute_operator.py | Python | apache-2.0 | 7,129 | 0.001964 |
#!/usr/bin/python
# Copyright (c) 2014-2017 Ansible Project
# Copyright (c) 2017, 2018 Will Thames
# Copyright (c) 2017, 2018 Michael De La Rue
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: rds_snapshot_info
version_added: "2.6"
short_description: obtain information about one or more RDS snapshots
description:
- Obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora).
- Aurora snapshot information may be obtained if no identifier parameters are passed or if one of the cluster parameters are passed.
- This module was called C(rds_snapshot_facts) before Ansible 2.9. The usage did not change.
options:
db_snapshot_identifier:
description:
- Name of an RDS (unclustered) snapshot.
- Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
required: false
aliases:
- snapshot_name
type: str
db_instance_identifier:
description:
- RDS instance name for which to find snapshots.
- Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
required: false
type: str
db_cluster_identifier:
description:
- RDS cluster name for which to find snapshots.
- Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier), I(db_cluster_snapshot_identifier)
required: false
type: str
db_cluster_snapshot_identifier:
description:
- Name of an RDS cluster snapshot.
- Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier)
required: false
type: str
snapshot_type:
description:
- Type of snapshot to find.
- By default both automated and manual snapshots will be returned.
required: false
choices: ['automated', 'manual', 'shared', 'public']
type: str
requirements:
- "python >= 2.6"
- "boto3"
author:
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Get information about an snapshot
- rds_snapshot_info:
db_snapshot_identifier: snapshot_name
register: new_database_info
# Get all RDS snapshots for an RDS instance
- rds_snapshot_info:
db_instance_identifier: helloworld-rds-master
'''
RETURN = '''
snapshots:
description: List of non-clustered snapshots
returned: When cluster parameters are not passed
type: complex
contains:
allocated_storage:
description: How many gigabytes of storage are allocated
returned: always
type: int
sample: 10
availability_zone:
description: The availability zone of the database from which the snapshot was taken
returned: always
type: str
sample: us-west-2b
db_instance_identifier:
description: Database instance identifier
returned: always
type: str
sample: hello-world-rds
db_snapshot_arn:
description: Snapshot ARN
returned: always
type: str
sample: arn:aws:rds:us-west-2:111111111111:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03
db_snapshot_identifier:
description: Snapshot name
returned: always
type: str
sample: rds:hello-world-rds-us1-2018-05-16-04-03
encrypted:
description: Whether the snapshot was encrypted
returned: always
type: bool
sample: true
engine:
description: Database engine
returned: always
type: str
sample: postgres
engine_version:
description: Database engine version
returned: always
type: str
sample: 9.5.10
iam_database_authentication_enabled:
description: Whether database authentication through IAM is enabled
returned: always
type: bool
sample: false
instance_create_time:
description: Time the Instance was created
returned: always
type: str
sample: '2017-10-10T04:00:07.434000+00:00'
kms_key_id:
description: ID of the KMS Key encrypting the snapshot
returned: always
type: str
sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-1234-aaaa-0000-1234567890ab
license_model:
description: License model
returned: always
type: str
sample: postgresql-license
master_username:
description: Database master username
returned: always
type: str
sample: dbadmin
option_group_name:
description: Database option group name
returned: always
type: str
sample: default:postgres-9-5
percent_progress:
description: Percent progress of snapshot
returned: always
type: int
sample: 100
snapshot_create_time:
description: Time snapshot was created
returned: always
type: str
sample: '2018-05-16T04:03:33.871000+00:00'
snapshot_type:
description: Type of snapshot
returned: always
type: str
sample: automated
status:
description: Status of snapshot
returned: always
type: str
sample: available
storage_type:
description: Storage type of underlying DB
returned: always
type: str
sample: gp2
tags:
description: Snapshot tags
returned: always
type: complex
contains: {}
vpc_id:
description: ID of VPC containing the DB
returned: always
type: str
sample: vpc-abcd1234
cluster_snapshots:
description: List of cluster snapshots
returned: always
type: complex
contains:
allocated_storage:
description: How many gigabytes of storage are allocated
returned: always
type: int
sample: 1
availability_zones:
description: The availability zones of the database from which the snapshot was taken
returned: always
type: list
sample:
- ca-central-1a
- ca-central-1b
cluster_create_time:
description: Date and time the cluster was created
returned: always
type: str
sample: '2018-05-17T00:13:40.223000+00:00'
db_cluster_identifier:
description: Database cluster identifier
returned: always
type: str
sample: test-aurora-cluster
db_cluster_snapshot_arn:
description: ARN of the database snapshot
returned: always
type: str
sample: arn:aws:rds:ca-central-1:111111111111:cluster-snapshot:test-aurora-snapshot
db_cluster_snapshot_identifier:
description: Snapshot identifier
returned: always
type: str
sample: test-aurora-snapshot
engine:
description: Database engine
returned: always
type: str
sample: aurora
engine_version:
description: Database engine version
returned: always
type: str
sample: 5.6.10a
iam_database_authentication_enabled:
description: Whether database authentication through IAM is enabled
returned: always
type: bool
sample: false
kms_key_id:
description: ID of the KMS Key encrypting the snapshot
returned: always
type: str
sample: arn:aws:kms:ca-central-1:111111111111:key/abcd1234-abcd-1111-aaaa-0123456789ab
license_model:
description: License model
returned: always
type: str
sample: aurora
master_username:
description: Database master username
returned: always
type: str
sample: shertel
percent_progress:
description: Percent progress of snapshot
returned: always
type: int
sample: 0
port:
description: Database port
returned: always
type: int
sample: 0
snapshot_create_time:
description: Date and time when the snapshot was created
returned: always
type: str
sample: '2018-05-17T00:23:23.731000+00:00'
snapshot_type:
description: Type of snapshot
returned: always
type: str
sample: manual
status:
description: Status of snapshot
returned: always
type: str
sample: creating
storage_encrypted:
description: Whether the snapshot is encrypted
returned: always
type: bool
sample: true
tags:
description: Tags of the snapshot
returned: always
type: complex
contains: {}
vpc_id:
description: VPC of the database
returned: always
type: str
sample: vpc-abcd1234
'''
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
try:
import botocore
except Exception:
pass # caught by AnsibleAWSModule
def common_snapshot_info(module, conn, method, prefix, params):
paginator = conn.get_paginator(method)
try:
results = paginator.paginate(**params).build_full_result()['%ss' % prefix]
except is_boto3_error_code('%sNotFound' % prefix):
results = []
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, "trying to get snapshot information")
for snapshot in results:
try:
snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix],
aws_retry=True)['TagList'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix])
return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results]
def cluster_snapshot_info(module, conn):
snapshot_name = module.params.get('db_cluster_snapshot_identifier')
snapshot_type = module.params.get('snapshot_type')
instance_name = module.params.get('db_cluster_identifier')
params = dict()
if snapshot_name:
params['DBClusterSnapshotIdentifier'] = snapshot_name
if instance_name:
params['DBClusterIdentifier'] = instance_name
if snapshot_type:
params['SnapshotType'] = snapshot_type
if snapshot_type == 'public':
params['IsPublic'] = True
elif snapshot_type == 'shared':
params['IsShared'] = True
return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params)
def standalone_snapshot_info(module, conn):
snapshot_name = module.params.get('db_snapshot_identifier')
snapshot_type = module.params.get('snapshot_type')
instance_name = module.params.get('db_instance_identifier')
params = dict()
if snapshot_name:
params['DBSnapshotIdentifier'] = snapshot_name
if instance_name:
params['DBInstanceIdentifier'] = instance_name
if snapshot_type:
params['SnapshotType'] = snapshot_type
if snapshot_type == 'public':
params['IsPublic'] = True
elif snapshot_type == 'shared':
params['IsShared'] = True
return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params)
def main():
argument_spec = dict(
db_snapshot_identifier=dict(aliases=['snapshot_name']),
db_instance_identifier=dict(),
db_cluster_identifier=dict(),
db_cluster_snapshot_identifier=dict(),
snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public'])
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']]
)
if module._name == 'rds_snapshot_facts':
module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'", version='2.13')
conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
results = dict()
if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']:
results['snapshots'] = standalone_snapshot_info(module, conn)
if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']:
results['cluster_snapshots'] = cluster_snapshot_info(module, conn)
module.exit_json(changed=False, **results)
if __name__ == '__main__':
main()
| anryko/ansible | lib/ansible/modules/cloud/amazon/rds_snapshot_info.py | Python | gpl-3.0 | 12,831 | 0.002416 |
'''
Build a simple neural machine translation model
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import numpy
import copy
import os
import warnings
import sys
import time
from scipy import optimize, stats
from collections import OrderedDict
from sklearn.cross_validation import KFold
from data_iterator import TextIterator
profile = False
# push parameters to Theano shared variables
def zipp(params, tparams):
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
# pull parameters from Theano shared variables
def unzip(zipped):
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
return [vv for kk, vv in tparams.iteritems()]
# dropout
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
state_before * trng.binomial(state_before.shape, p=0.5, n=1, dtype=state_before.dtype),
state_before * 0.5)
return proj
# make prefix-appended name
def _p(pp, name):
return '%s_%s'%(pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
# load parameters
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
warnings.warn('%s is not in the archive'%kk)
continue
params[kk] = pp[kk]
return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'gru': ('param_init_gru', 'gru_layer'),
'gru_cond': ('param_init_gru_cond', 'gru_cond_layer'),
}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
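# Example (mirroring how init_params and build_model below use this registry):
#   init_fn, layer_fn = get_layer('gru')
#   params = init_fn(options, params, prefix='encoder', nin=options['dim_word'], dim=options['dim'])
#   proj = layer_fn(tparams, emb, options, prefix='encoder', mask=x_mask)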
# some utilities
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin,nout=None, scale=0.01, ortho=True):
if nout == None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
def tanh(x):
return tensor.tanh(x)
def linear(x):
return x
def concatenate(tensor_list, axis=0):
"""
Alternative implementation of `theano.tensor.concatenate`.
This function does exactly the same thing, but contrary to Theano's own
implementation, the gradient is implemented on the GPU.
Backpropagating through `theano.tensor.concatenate` yields slowdowns
because the inverse operation (splitting) needs to be done on the CPU.
This implementation does not have that problem.
:usage:
>>> x, y = theano.tensor.matrices('x', 'y')
>>> c = concatenate([x, y], axis=1)
:parameters:
- tensor_list : list
list of Theano tensor expressions that should be concatenated.
- axis : int
the tensors will be joined along this axis.
:returns:
- out : tensor
the concatenated tensor expression.
"""
concat_size = sum(tt.shape[axis] for tt in tensor_list)
output_shape = ()
for k in range(axis):
output_shape += (tensor_list[0].shape[k],)
output_shape += (concat_size,)
for k in range(axis + 1, tensor_list[0].ndim):
output_shape += (tensor_list[0].shape[k],)
out = tensor.zeros(output_shape)
offset = 0
for tt in tensor_list:
indices = ()
for k in range(axis):
indices += (slice(None),)
indices += (slice(offset, offset + tt.shape[axis]),)
for k in range(axis + 1, tensor_list[0].ndim):
indices += (slice(None),)
out = tensor.set_subtensor(out[indices], tt)
offset += tt.shape[axis]
return out
# batch preparation
def prepare_data(seqs_x, seqs_y, maxlen=None, n_words_src=30000, n_words=30000):
# x: a list of sentences
lengths_x = [len(s) for s in seqs_x]
lengths_y = [len(s) for s in seqs_y]
if maxlen != None:
new_seqs_x = []
new_seqs_y = []
new_lengths_x = []
new_lengths_y = []
for l_x, s_x, l_y, s_y in zip(lengths_x, seqs_x, lengths_y, seqs_y):
if l_x < maxlen and l_y < maxlen:
new_seqs_x.append(s_x)
new_lengths_x.append(l_x)
new_seqs_y.append(s_y)
new_lengths_y.append(l_y)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
lengths_y = new_lengths_y
seqs_y = new_seqs_y
if len(lengths_x) < 1 or len(lengths_y) < 1:
return None, None, None, None
n_samples = len(seqs_x)
maxlen_x = numpy.max(lengths_x) + 1
maxlen_y = numpy.max(lengths_y) + 1
x = numpy.zeros((maxlen_x, n_samples)).astype('int64')
y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
y_mask = numpy.zeros((maxlen_y, n_samples)).astype('float32')
for idx, [s_x, s_y] in enumerate(zip(seqs_x,seqs_y)):
x[:lengths_x[idx],idx] = s_x
x_mask[:lengths_x[idx]+1,idx] = 1.
y[:lengths_y[idx],idx] = s_y
y_mask[:lengths_y[idx]+1,idx] = 1.
return x, x_mask, y, y_mask
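# Illustrative call (matches the usage in train() below; batch_x/batch_y stand for
# one minibatch from the TextIterator): the returned x/y are (max_len, n_samples)
# int64 matrices and the masks flag the valid time steps.
#   x, x_mask, y, y_mask = prepare_data(batch_x, batch_y, maxlen=maxlen)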
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None, ortho=True):
if nin == None:
nin = options['dim_proj']
if nout == None:
nout = options['dim_proj']
params[_p(prefix,'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
params[_p(prefix,'b')] = numpy.zeros((nout,)).astype('float32')
return params
def fflayer(tparams, state_below, options, prefix='rconv', activ='lambda x: tensor.tanh(x)', **kwargs):
return eval(activ)(tensor.dot(state_below, tparams[_p(prefix,'W')])+tparams[_p(prefix,'b')])
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None, hiero=False):
if nin == None:
nin = options['dim_proj']
if dim == None:
dim = options['dim_proj']
if not hiero:
W = numpy.concatenate([norm_weight(nin,dim),
norm_weight(nin,dim)], axis=1)
params[_p(prefix,'W')] = W
params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix,'U')] = U
Wx = norm_weight(nin, dim)
params[_p(prefix,'Wx')] = Wx
Ux = ortho_weight(dim)
params[_p(prefix,'Ux')] = Ux
params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None, **kwargs):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix,'Ux')].shape[1]
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
U = tparams[_p(prefix, 'U')]
Ux = tparams[_p(prefix, 'Ux')]
def _step_slice(m_, x_, xx_, h_, U, Ux):
preact = tensor.dot(h_, U)
preact += x_
r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
preactx = tensor.dot(h_, Ux)
preactx = preactx * r
preactx = preactx + xx_
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h#, r, u, preact, preactx
seqs = [mask, state_below_, state_belowx]
_step = _step_slice
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [tensor.alloc(0., n_samples, dim)],
#None, None, None, None],
non_sequences = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Ux')]],
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
rval = [rval]
return rval
# Conditional GRU layer with Attention
def param_init_gru_cond(options, params, prefix='gru_cond',
nin=None, dim=None, dimctx=None):
if nin == None:
nin = options['dim']
if dim == None:
dim = options['dim']
if dimctx == None:
dimctx = options['dim']
params = param_init_gru(options, params, prefix, nin=nin, dim=dim)
# context to LSTM
Wc = norm_weight(dimctx,dim*2)
params[_p(prefix,'Wc')] = Wc
Wcx = norm_weight(dimctx,dim)
params[_p(prefix,'Wcx')] = Wcx
# attention: prev -> hidden
Wi_att = norm_weight(nin,dimctx)
params[_p(prefix,'Wi_att')] = Wi_att
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix,'Wc_att')] = Wc_att
# attention: LSTM -> hidden
Wd_att = norm_weight(dim,dimctx)
params[_p(prefix,'Wd_att')] = Wd_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix,'b_att')] = b_att
# attention:
U_att = norm_weight(dimctx,1)
params[_p(prefix,'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
return params
def gru_cond_layer(tparams, state_below, options, prefix='gru',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
context_mask=None,
**kwargs):
assert context, 'Context must be provided'
if one_step:
assert init_state, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
dim = tparams[_p(prefix, 'Wcx')].shape[1]
# initial/previous state
if init_state == None:
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix,'b_att')]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
# projected x
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
state_belowc = tensor.dot(state_below, tparams[_p(prefix, 'Wi_att')])
def _step_slice(m_, x_, xx_, xc_, h_, ctx_, alpha_, pctx_, cc_,
U, Wc, Wd_att, U_att, c_tt, Ux, Wcx):
# attention
pstate_ = tensor.dot(h_, Wd_att)
pctx__ = pctx_ + pstate_[None,:,:]
pctx__ += xc_
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, U_att)+c_tt
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
if context_mask:
alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx_ = (cc_ * alpha[:,:,None]).sum(0) # current context
preact = tensor.dot(h_, U)
preact += x_
preact += tensor.dot(ctx_, Wc)
preact = tensor.nnet.sigmoid(preact)
r = _slice(preact, 0, dim)
u = _slice(preact, 1, dim)
preactx = tensor.dot(h_, Ux)
preactx *= r
preactx += xx_
preactx += tensor.dot(ctx_, Wcx)
h = tensor.tanh(preactx)
h = u * h_ + (1. - u) * h
h = m_[:,None] * h + (1. - m_)[:,None] * h_
return h, ctx_, alpha.T #, pstate_, preact, preactx, r, u
seqs = [mask, state_below_, state_belowx, state_belowc]
_step = _step_slice
shared_vars = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Wc')],
tparams[_p(prefix,'Wd_att')],
tparams[_p(prefix,'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wcx')]]
if one_step:
rval = _step(*(seqs+[init_state, None, None, pctx_, context]+shared_vars))
else:
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info = [init_state,
tensor.alloc(0., n_samples, context.shape[2]),
tensor.alloc(0., n_samples, context.shape[0])],
non_sequences=[pctx_,
context]+shared_vars,
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
return rval
# initialize all parameters
def init_params(options):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
# encoder: bidirectional RNN
params = get_layer(options['encoder'])[0](options, params, prefix='encoder',
nin=options['dim_word'],
dim=options['dim'])
params = get_layer(options['encoder'])[0](options, params, prefix='encoder_r',
nin=options['dim_word'],
dim=options['dim'])
ctxdim = 2 * options['dim']
# init_state, init_cell
params = get_layer('ff')[0](options, params, prefix='ff_state',
nin=ctxdim, nout=options['dim'])
# decoder
params = get_layer(options['decoder'])[0](options, params, prefix='decoder',
nin=options['dim_word'],
dim=options['dim'],
dimctx=ctxdim)
# readout
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm',
nin=options['dim'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_prev',
nin=options['dim_word'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx',
nin=ctxdim, nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit',
nin=options['dim_word'], nout=options['n_words'])
return params
# build a training model
def build_model(tparams, options):
opt_ret = dict()
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples
x = tensor.matrix('x', dtype='int64')
x_mask = tensor.matrix('x_mask', dtype='float32')
y = tensor.matrix('y', dtype='int64')
y_mask = tensor.matrix('y_mask', dtype='float32')
xr = x[::-1]
xr_mask = x_mask[::-1]
n_timesteps = x.shape[0]
n_timesteps_trg = y.shape[0]
n_samples = x.shape[1]
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
proj = get_layer(options['encoder'])[1](tparams, emb, options,
prefix='encoder',
mask=x_mask)
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
projr = get_layer(options['encoder'])[1](tparams, embr, options,
prefix='encoder_r',
mask=xr_mask)
ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim-1)
ctx_mean = (ctx * x_mask[:,:,None]).sum(0) / x_mask.sum(0)[:,None]
#ctx_mean = concatenate([proj[0][-1], projr[0][-1]], axis=proj[0].ndim-2)
# initial decoder state
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
# word embedding (target)
emb = tparams['Wemb_dec'][y.flatten()]
emb = emb.reshape([n_timesteps_trg, n_samples, options['dim_word']])
emb_shifted = tensor.zeros_like(emb)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
emb = emb_shifted
# decoder
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=y_mask, context=ctx,
context_mask=x_mask,
one_step=False,
init_state=init_state)
proj_h = proj[0]
ctxs = proj[1]
opt_ret['dec_alphas'] = proj[2]
# compute word probabilities
logit_lstm = get_layer('ff')[1](tparams, proj_h, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
logit_shp = logit.shape
probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1],
logit_shp[2]]))
# cost
y_flat = y.flatten()
y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
cost = -tensor.log(probs.flatten()[y_flat_idx])
cost = cost.reshape([y.shape[0],y.shape[1]])
cost = (cost * y_mask).sum(0)
return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
# build a sampler
def build_sampler(tparams, options, trng):
x = tensor.matrix('x', dtype='int64')
xr = x[::-1]
n_timesteps = x.shape[0]
n_samples = x.shape[1]
# word embedding (source)
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
# encoder
proj = get_layer(options['encoder'])[1](tparams, emb, options, prefix='encoder')
projr = get_layer(options['encoder'])[1](tparams, embr, options, prefix='encoder_r')
ctx = concatenate([proj[0],projr[0][::-1]], axis=proj[0].ndim-1)
ctx_mean = ctx.mean(0)
#ctx_mean = concatenate([proj[0][-1],projr[0][-1]], axis=proj[0].ndim-2)
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
print 'Building f_init...',
outs = [init_state, ctx]
f_init = theano.function([x], outs, name='f_init', profile=profile)
print 'Done'
# x: 1 x 1
y = tensor.vector('y_sampler', dtype='int64')
init_state = tensor.matrix('init_state', dtype='float32')
# if it's the first word, emb should be all zero
emb = tensor.switch(y[:,None] < 0,
tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
tparams['Wemb_dec'][y])
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=None, context=ctx,
one_step=True,
init_state=init_state)
next_state = proj[0]
ctxs = proj[1]
logit_lstm = get_layer('ff')[1](tparams, next_state, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
next_probs = tensor.nnet.softmax(logit)
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# next word probability
print 'Building f_next..',
inps = [y, ctx, init_state]
outs = [next_probs, next_sample, next_state]
f_next = theano.function(inps, outs, name='f_next', profile=profile)
print 'Done'
return f_init, f_next
# generate sample
def gen_sample(tparams, f_init, f_next, x, options, trng=None, k=1, maxlen=30,
stochastic=True, argmax=False):
if k > 1:
assert not stochastic, 'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
ret = f_init(x)
next_state, ctx0 = ret[0], ret[1]
next_w = -1 * numpy.ones((1,)).astype('int64')
for ii in xrange(maxlen):
ctx = numpy.tile(ctx0, [live_k, 1])
inps = [next_w, ctx, next_state]
ret = f_next(*inps)
next_p, next_w, next_state = ret[0], ret[1], ret[2]
if stochastic:
if argmax:
nw = next_p[0].argmax()
else:
nw = next_w[0]
sample.append(nw)
sample_score += next_p[0,nw]
if nw == 0:
break
else:
cand_scores = hyp_scores[:,None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)]
voc_size = next_p.shape[1]
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_states = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[ti])
new_hyp_states.append(copy.copy(next_state[ti]))
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(new_hyp_states[idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = numpy.array(hyp_states)
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score
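# Illustrative use, mirroring the sampling loop in train() below:
#   sample, score = gen_sample(tparams, f_init, f_next, x[:, jj][:, None],
#                              model_options, trng=trng, k=1, maxlen=30,
#                              stochastic=True, argmax=False)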
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True):
probs = []
n_done = 0
for x, y in iterator:
n_done += len(x)
x, x_mask, y, y_mask = prepare_data(x, y,
n_words_src=options['n_words_src'],
n_words=options['n_words'])
pprobs = f_log_probs(x,x_mask,y,y_mask)
for pp in pprobs:
probs.append(pp)
if numpy.isnan(numpy.mean(probs)):
import ipdb; ipdb.set_trace()
if verbose:
print >>sys.stderr, '%d samples computed'%(n_done)
return numpy.array(probs)
# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
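# Illustrative use (this is how train() below drives whichever optimizer is chosen):
#   f_grad_shared, f_update = rmsprop(lr, tparams, grads, inps, cost)
#   cost = f_grad_shared(x, x_mask, y, y_mask)  # forward/backward pass, stash gradients
#   f_update(lrate)                             # apply the parameter update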
def adam(lr, tparams, grads, inp, cost):
gshared = [theano.shared(p.get_value() * 0.,
name='%s_grad'%k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function(inp, cost, updates=gsup, profile=profile)
lr0 = 0.0002
b1 = 0.1
b2 = 0.001
e = 1e-8
updates = []
i = theano.shared(numpy.float32(0.))
i_t = i + 1.
fix1 = 1. - b1**(i_t)
fix2 = 1. - b2**(i_t)
lr_t = lr0 * (tensor.sqrt(fix2) / fix1)
for p, g in zip(tparams.values(), gshared):
m = theano.shared(p.get_value() * 0.)
v = theano.shared(p.get_value() * 0.)
m_t = (b1 * g) + ((1. - b1) * m)
v_t = (b2 * tensor.sqr(g)) + ((1. - b2) * v)
g_t = m_t / (tensor.sqrt(v_t) + e)
p_t = p - (lr_t * g_t)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
updates.append((i, i_t))
f_update = theano.function([lr], [], updates=updates,
on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_grad'%k)
for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rup2'%k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad2'%k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rg2up, profile=profile)
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
f_update = theano.function([lr], [], updates=ru2up+param_up, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad'%k) for k, p in tparams.iteritems()]
running_grads = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad'%k) for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2'%k) for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup+rgup+rg2up, profile=profile)
updir = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_updir'%k) for k, p in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4)) for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads, running_grads2)]
param_up = [(p, p + udn[1]) for p, udn in zip(itemlist(tparams), updir_new)]
f_update = theano.function([lr], [], updates=updir_new+param_up, on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def sgd(lr, tparams, grads, x, mask, y, cost):
gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=gsup, profile=profile)
pup = [(p, p - lr * g) for p, g in zip(itemlist(tparams), gshared)]
f_update = theano.function([lr], [], updates=pup, profile=profile)
return f_grad_shared, f_update
def train(dim_word=100, # word vector dimensionality
dim=1000, # the number of LSTM units
encoder='gru',
decoder='gru_cond',
patience=10,
max_epochs=5000,
dispFreq=100,
decay_c=0.,
alpha_c=0.,
diag_c=0.,
clip_c=-1.,
lrate=0.01,
n_words_src=100000,
n_words=100000,
          maxlen=100, # maximum sentence length
optimizer='rmsprop',
batch_size = 16,
valid_batch_size = 16,
saveto='model.npz',
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
sampleFreq=100, # generate some samples after every sampleFreq updates
datasets=['/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok',
'/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok'],
valid_datasets=['../data/dev/newstest2011.en.tok', '../data/dev/newstest2011.fr.tok'],
dictionaries=['/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok.pkl',
'/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok.pkl'],
use_dropout=False,
reload_=False):
# Model options
model_options = locals().copy()
worddicts = [None] * len(dictionaries)
worddicts_r = [None] * len(dictionaries)
for ii, dd in enumerate(dictionaries):
with open(dd, 'rb') as f:
worddicts[ii] = pkl.load(f)
worddicts_r[ii] = dict()
for kk, vv in worddicts[ii].iteritems():
worddicts_r[ii][vv] = kk
# reload options
if reload_ and os.path.exists(saveto):
with open('%s.pkl'%saveto, 'rb') as f:
            model_options = pkl.load(f)
print 'Loading data'
train = TextIterator(datasets[0], datasets[1],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=batch_size,
maxlen=maxlen)
valid = TextIterator(valid_datasets[0], valid_datasets[1],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
maxlen=maxlen)
print 'Building model'
params = init_params(model_options)
# reload parameters
if reload_ and os.path.exists(saveto):
params = load_params(saveto, params)
tparams = init_tparams(params)
trng, use_noise, \
x, x_mask, y, y_mask, \
opt_ret, \
cost = \
build_model(tparams, model_options)
inps = [x, x_mask, y, y_mask]
    print 'Building sampler'
f_init, f_next = build_sampler(tparams, model_options, trng)
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, cost, profile=profile)
print 'Done'
cost = cost.mean()
if decay_c > 0.:
decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
weight_decay = 0.
for kk, vv in tparams.iteritems():
weight_decay += (vv ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
alpha_reg = alpha_c * ((tensor.cast(y_mask.sum(0)//x_mask.sum(0), 'float32')[:,None]-
opt_ret['dec_alphas'].sum(0))**2).sum(1).mean()
cost += alpha_reg
# after any regularizer
print 'Building f_cost...',
f_cost = theano.function(inps, cost, profile=profile)
print 'Done'
print 'Computing gradient...',
grads = tensor.grad(cost, wrt=itemlist(tparams))
print 'Done'
print 'Building f_grad...',
f_grad = theano.function(inps, grads, profile=profile)
print 'Done'
if clip_c > 0.:
g2 = 0.
for g in grads:
g2 += (g**2).sum()
new_grads = []
for g in grads:
new_grads.append(tensor.switch(g2 > (clip_c**2),
g / tensor.sqrt(g2) * clip_c,
g))
grads = new_grads
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
print 'Done'
print 'Optimization'
history_errs = []
# reload history
if reload_ and os.path.exists(saveto):
history_errs = list(numpy.load(saveto)['history_errs'])
best_p = None
    bad_counter = 0
if validFreq == -1:
validFreq = len(train[0])/batch_size
if saveFreq == -1:
saveFreq = len(train[0])/batch_size
if sampleFreq == -1:
sampleFreq = len(train[0])/batch_size
uidx = 0
estop = False
for eidx in xrange(max_epochs):
n_samples = 0
for x, y in train:
n_samples += len(x)
uidx += 1
use_noise.set_value(1.)
x, x_mask, y, y_mask = prepare_data(x, y, maxlen=maxlen,
n_words_src=n_words_src,
n_words=n_words)
            if x is None:
print 'Minibatch with zero sample under length ', maxlen
uidx -= 1
continue
ud_start = time.time()
cost = f_grad_shared(x, x_mask, y, y_mask)
f_update(lrate)
ud = time.time() - ud_start
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving...',
#import ipdb; ipdb.set_trace()
if best_p != None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pkl.dump(model_options, open('%s.pkl'%saveto, 'wb'))
print 'Done'
if numpy.mod(uidx, sampleFreq) == 0:
# FIXME: random selection?
for jj in xrange(numpy.minimum(5,x.shape[1])):
stochastic = True
sample, score = gen_sample(tparams, f_init, f_next, x[:,jj][:,None],
model_options, trng=trng, k=1, maxlen=30,
stochastic=stochastic, argmax=False)
print 'Source ',jj,': ',
for vv in x[:,jj]:
if vv == 0:
break
if vv in worddicts_r[0]:
print worddicts_r[0][vv],
else:
print 'UNK',
print
print 'Truth ',jj,' : ',
for vv in y[:,jj]:
if vv == 0:
break
if vv in worddicts_r[1]:
print worddicts_r[1][vv],
else:
print 'UNK',
print
print 'Sample ', jj, ': ',
if stochastic:
ss = sample
else:
score = score / numpy.array([len(s) for s in sample])
ss = sample[score.argmin()]
for vv in ss:
if vv == 0:
break
if vv in worddicts_r[1]:
print worddicts_r[1][vv],
else:
print 'UNK',
print
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
valid_errs = pred_probs(f_log_probs, prepare_data, model_options, valid)
valid_err = valid_errs.mean()
history_errs.append(valid_err)
if uidx == 0 or valid_err <= numpy.array(history_errs).min():
best_p = unzip(tparams)
bad_counter = 0
if len(history_errs) > patience and valid_err >= numpy.array(history_errs)[:-patience].min():
bad_counter += 1
if bad_counter > patience:
print 'Early Stop!'
estop = True
break
if numpy.isnan(valid_err):
import ipdb; ipdb.set_trace()
print 'Valid ', valid_err
print 'Seen %d samples'%n_samples
if estop:
break
if best_p is not None:
zipp(best_p, tparams)
use_noise.set_value(0.)
valid_err = pred_probs(f_log_probs, prepare_data, model_options, valid).mean()
print 'Valid ', valid_err
params = copy.copy(best_p)
numpy.savez(saveto, zipped_params=best_p,
history_errs=history_errs,
**params)
return valid_err
if __name__ == '__main__':
pass
| skaasj/dl4mt-material | session2/nmt.py | Python | bsd-3-clause | 39,416 | 0.007687 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hikaye', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='stories',
new_name='Story',
),
]
| oznurf/EKSI_HIKAYE | hikaye/migrations/0002_auto_20150819_1605.py | Python | mit | 342 | 0 |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file COPYING.txt, distributed with this software.
Created on Jun 11, 2015
"""
import sys
from atom.atom import set_default
from atom.api import (
Callable,
Int,
Tuple,
Instance,
Enum,
Float,
ContainerList,
Bool,
FloatRange,
Str,
Dict,
Typed,
ForwardTyped,
observe,
)
from enaml.core.declarative import d_
from enaml.widgets.api import Container
from enaml.widgets.control import Control, ProxyControl
from atom.instance import ForwardInstance
if sys.version_info.major < 3:
str = basestring
def numpy_ndarray():
import numpy
return numpy.ndarray
class ProxyPlotArea(ProxyControl):
declaration = ForwardTyped(lambda: PlotArea)
class PlotArea(Container):
hug_width = set_default("ignore")
hug_height = set_default("ignore")
proxy = Typed(ProxyPlotArea)
setup = d_(Callable(lambda graph: None))
PEN_ARGTYPES = (tuple, list, str, dict)
BRUSH_ARGTYPES = (tuple, list, str, dict, int, float)
class PlotItem(Control):
#: Title of data series
title = d_(Str())
#: Name
name = d_(Str())
#: Row in plot area
row = d_(Int(0))
#: Column in plot area
column = d_(Int(0))
#: Pen type to use for line
line_pen = d_(Instance(PEN_ARGTYPES))
#: Pen type to use for shadow
shadow_pen = d_(Instance(PEN_ARGTYPES))
#: Fill level
fill_level = d_(Float(strict=False))
# ‘c’ one of: r, g, b, c, m, y, k, w
# R, G, B, [A] integers 0-255
# (R, G, B, [A]) tuple of integers 0-255
# float greyscale, 0.0-1.0
# int see intColor()
# (int, hues) see intColor()
# “RGB” hexadecimal strings; may begin with ‘#’
# “RGBA”
# “RRGGBB”
# “RRGGBBAA”
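    # Illustrative values in the formats listed above (not exhaustive):
    #   line_pen = 'r'                  # single-letter color string
    #   fill_brush = (0, 0, 255, 50)    # (R, G, B, A) tuple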
#: Brush fill type
fill_brush = d_(Instance(BRUSH_ARGTYPES))
#: Symbol to use for points
symbol = d_(Enum(None, "o", "s", "t", "d", "+"))
#: Symbol sizes for points
symbol_size = d_(Float(10, strict=False))
#: Symbol pen to use
symbol_pen = d_(Instance(PEN_ARGTYPES))
#: Symbol brush
symbol_brush = d_(Instance(BRUSH_ARGTYPES))
#: Show legend
show_legend = d_(Bool(False))
label_left = d_(Str())
label_right = d_(Str())
label_top = d_(Str())
label_bottom = d_(Str())
# H, V
grid = d_(Tuple(bool, default=(False, False)))
grid_alpha = d_(FloatRange(low=0.0, high=1.0, value=0.5))
#: Display a separate axis for each nested plot
multi_axis = d_(Bool(True))
axis_left_ticks = d_(Callable())
axis_bottom_ticks = d_(Callable())
#: Display the axis on log scale
log_mode = d_(Tuple(bool, default=(False, False))) # x,y
#: Enable antialiasing
antialias = d_(Bool(False))
#: Set auto range for each axis
auto_range = d_(
Enum(True, False, (True, True), (True, False), (False, True), (False, False))
)
    #: x-range to use if auto_range is disabled
range_x = d_(ContainerList(default=[0, 100]))
#: y-range to use if auto_range is disabled
range_y = d_(ContainerList(default=[0, 100]))
    #: Automatically downsample
auto_downsample = d_(Bool(False))
#: Clip data points to view
clip_to_view = d_(Bool(False))
#: Step mode to use
step_mode = d_(Bool(False))
#: Keep aspect ratio locked when resizing
aspect_locked = d_(Bool(False))
#: Time between updates
refresh_time = d_(Int(100))
@observe(
"line_pen",
"symbol",
"symbol_size",
"symbol_pen",
"symbol_brush",
"fill_brush",
"fill_level",
"multi_axis",
"title",
"label_left",
"label_right",
"label_top",
"label_bottom",
"grid",
"grid_alpha",
"log_mode",
"antialias",
"auto_range",
"auto_downsample",
"clip_to_view",
"step_mode",
"aspect_locked",
"axis_left_ticks",
"axis_bottom_ticks",
"show_legend",
"row",
"column",
)
def _update_proxy(self, change):
"""An observer which sends state change to the proxy."""
# The superclass handler implementation is sufficient.
super(PlotItem, self)._update_proxy(change)
@observe("range_x", "range_y")
def _update_range(self, change):
"""Handle updates and changes"""
getattr(self.proxy, "set_%s" % change["name"])(change["value"])
class PlotItem2D(PlotItem):
#: x-axis values, as a list
x = d_(ContainerList())
#: y-axis values, as a list
y = d_(ContainerList())
@observe("x", "y")
def _update_proxy(self, change):
"""An observer which sends state change to the proxy."""
# The superclass handler implementation is sufficient.
super(PlotItem2D, self)._update_proxy(change)
class PlotItem3D(PlotItem2D):
#: z-axis values, as a list
z = d_(ContainerList())
@observe("z")
def _update_proxy(self, change):
"""An observer which sends state change to the proxy."""
# The superclass handler implementation is sufficient.
super(PlotItem3D, self)._update_proxy(change)
class PlotItemArray(PlotItem2D):
"""Numpy array item"""
#: x-axis values, as a numpy array
x = d_(ForwardInstance(numpy_ndarray))
#: y-axis values, as a numpy array
y = d_(ForwardInstance(numpy_ndarray))
class PlotItemArray3D(PlotItem3D):
"""Numpy array item"""
#: Plot type
type = Enum("line")
#: x-axis values, as a numpy array
x = d_(ForwardInstance(numpy_ndarray))
#: y-axis values, as a numpy array
y = d_(ForwardInstance(numpy_ndarray))
#: z-axis values, as a numpy array
z = d_(ForwardInstance(numpy_ndarray))
class AbstractDataPlotItem(PlotItem):
@observe("data")
def _update_proxy(self, change):
"""An observer which sends state change to the proxy."""
# The superclass handler implementation is sufficient.
super(AbstractDataPlotItem, self)._update_proxy(change)
class PlotItemList(AbstractDataPlotItem):
data = d_(ContainerList())
class PlotItemDict(AbstractDataPlotItem):
data = d_(Dict(default={"x": [], "y": []}))
| frmdstryr/enamlx | enamlx/widgets/plot_area.py | Python | mit | 6,354 | 0.000158 |
# -*- coding:utf-8 -*-
################################################################################
#
# Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved
#
################################################################################
'''
Created on March 14, 2015
@author: Administrator
'''
import os
import unittest
import urllib
from urlparse import urljoin
import urlparse
from crawler.minispider.SpiderConfigParser import SpiderConfig
from crawler.minispider.SpiderHtmlParser import SpiderHtmlParser
class TestSequenceFunctions(unittest.TestCase):
'''
    As a D-level project, this suite does not exhaust all possible exception attributes.
'''
def setUp(self):
self.parser = SpiderHtmlParser()
self.path = os.path.realpath(__file__)
def tearDown(self):
for filename in os.listdir(self.path[0:-23] + 'urls'):
os.remove(self.path[0:-23] + 'urls' + os.sep + filename)
def test_parse_url(self):
'''
test the logic of parse_url
'''
urls = self.parser.parse_url('http://pycm.baidu.com:8081/2/index.html',
'./urls', '.*\.(gif|png|jpg|bmp|html)$',
1, 1)
#http://pycm.baidu.com:8081/page2_1.html', 'http://pycm.baidu.com:8081/2/3/index.html'
self.assertIn("http://pycm.baidu.com:8081/page2_1.html", urls)
self.assertIn("http://pycm.baidu.com:8081/2/3/index.html", urls)
self.assertTrue(os.path.exists(self.path[0:-23] + 'urls' + os.sep
+ 'http%3A__pycm.baidu.com%3A8081_2_index.html'),
'http%3A__pycm.baidu.com%3A8081_2_index.html expecting to be created.')
def test_parse_url_B(self):
'''
        test the logic of parse_url when crawling from the site root
'''
urls = self.parser.parse_url('http://pycm.baidu.com:8081',
'./urls', '.*\.(gif|png|jpg|bmp|html)$',
1, 1)
#http://pycm.baidu.com:8081/page2_1.html', 'http://pycm.baidu.com:8081/2/3/index.html'
self.assertEqual(5, len(urls), 'there should be 5 urls.')
self.assertIn("http://pycm.baidu.com:8081/page1.html", urls)
self.assertIn("http://pycm.baidu.com:8081/page2.html", urls)
self.assertIn("http://pycm.baidu.com:8081/page3.html", urls)
self.assertIn("http://pycm.baidu.com:8081/mirror/index.html", urls)
self.assertIn("http://pycm.baidu.com:8081/page4.html", urls)
def test_parse_url_404(self):
'''
        test the logic of parse_url when the page returns 404:
        no URLs should be collected and no exception should propagate
'''
urls = self.parser.parse_url('http://pycm.baidu.com:8081/2/index333.html',
'./urls', '.*\.(gif|png|jpg|bmp|html)$',
1, 1)
#http://pycm.baidu.com:8081/page2_1.html', 'http://pycm.baidu.com:8081/2/3/index.html'
self.assertTrue(len(urls) == 0, 'should not contain any element.')
if __name__ == '__main__':
unittest.main() | onehao/opensource | pyml/crawler/minispider/test/SpiderHtmlParserTest.py | Python | apache-2.0 | 3,201 | 0.010016 |
"""
shapefile.py
Provides read and write support for ESRI Shapefiles.
author: jlawhead<at>geospatialpython.com
date: 20110927
version: 1.1.4
Compatible with Python versions 2.4-3.x
"""
from struct import pack, unpack, calcsize, error
import os
import sys
import time
import array
#
# Constants for shape types
NULL = 0
POINT = 1
POLYLINE = 3
POLYGON = 5
MULTIPOINT = 8
POINTZ = 11
POLYLINEZ = 13
POLYGONZ = 15
MULTIPOINTZ = 18
POINTM = 21
POLYLINEM = 23
POLYGONM = 25
MULTIPOINTM = 28
MULTIPATCH = 31
PYTHON3 = sys.version_info[0] == 3
def b(v):
if PYTHON3:
if isinstance(v, str):
# For python 3 encode str to bytes.
return v.encode('utf-8')
elif isinstance(v, bytes):
# Already bytes.
return v
else:
# Error.
raise Exception('Unknown input type')
else:
# For python 2 assume str passed in and return str.
return v
def u(v):
if PYTHON3:
if isinstance(v, bytes):
# For python 3 decode bytes to str.
return v.decode('utf-8')
elif isinstance(v, str):
# Already str.
return v
else:
# Error.
raise Exception('Unknown input type')
else:
# For python 2 assume str passed in and return str.
return v
def is_string(v):
if PYTHON3:
return isinstance(v, str)
else:
return isinstance(v, basestring)
class _Array(array.array):
"""Converts python tuples to lits of the appropritate type.
Used to unpack different shapefile header parts."""
def __repr__(self):
return str(self.tolist())
class _Shape:
def __init__(self, shapeType=None):
"""Stores the geometry of the different shape types
specified in the Shapefile spec. Shape types are
usually point, polyline, or polygons. Every shape type
except the "Null" type contains points at some level for
example verticies in a polygon. If a shape type has
multiple shapes containing points within a single
geometry record then those shapes are called parts. Parts
are designated by their starting index in geometry record's
list of shapes."""
self.shapeType = shapeType
self.points = []
class _ShapeRecord:
"""A shape object of any type."""
def __init__(self, shape=None, record=None):
self.shape = shape
self.record = record
class ShapefileException(Exception):
"""An exception to handle shapefile specific problems."""
pass
class Reader:
"""Reads the three files of a shapefile as a unit or
separately. If one of the three files (.shp, .shx,
.dbf) is missing no exception is thrown until you try
to call a method that depends on that particular file.
The .shx index file is used if available for efficiency
but is not required to read the geometry from the .shp
file. The "shapefile" argument in the constructor is the
name of the file you want to open.
You can instantiate a Reader without specifying a shapefile
and then specify one later with the load() method.
Only the shapefile headers are read upon loading. Content
within each file is only accessed when required and as
efficiently as possible. Shapefiles are usually not large
but they can be.
"""
def __init__(self, *args, **kwargs):
self.shp = None
self.shx = None
self.dbf = None
self.shapeName = "Not specified"
self._offsets = []
self.shpLength = None
self.numRecords = None
self.fields = []
self.__dbfHdrLength = 0
# See if a shapefile name was passed as an argument
if len(args) > 0:
if type(args[0]) is type("stringTest"):
self.load(args[0])
return
if "shp" in kwargs.keys():
if hasattr(kwargs["shp"], "read"):
self.shp = kwargs["shp"]
if hasattr(self.shp, "seek"):
self.shp.seek(0)
if "shx" in kwargs.keys():
if hasattr(kwargs["shx"], "read"):
self.shx = kwargs["shx"]
if hasattr(self.shx, "seek"):
self.shx.seek(0)
if "dbf" in kwargs.keys():
if hasattr(kwargs["dbf"], "read"):
self.dbf = kwargs["dbf"]
if hasattr(self.dbf, "seek"):
self.dbf.seek(0)
if self.shp or self.dbf:
self.load()
else:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
def load(self, shapefile=None):
"""Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file object or file name as an
argument."""
if shapefile:
(shapeName, ext) = os.path.splitext(shapefile)
self.shapeName = shapeName
try:
self.shp = open("%s.shp" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shp" % shapeName)
try:
self.shx = open("%s.shx" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shx" % shapeName)
try:
self.dbf = open("%s.dbf" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.dbf" % shapeName)
if self.shp:
self.__shpHeader()
if self.dbf:
self.__dbfHeader()
def __getFileObj(self, f):
"""Checks to see if the requested shapefile file object is
available. If not a ShapefileException is raised."""
if not f:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
if self.shp and self.shpLength is None:
self.load()
if self.dbf and len(self.fields) == 0:
self.load()
return f
def __restrictIndex(self, i):
"""Provides list-like handling of a record index with a clearer
error message if the index is out of bounds."""
if self.numRecords:
rmax = self.numRecords - 1
if abs(i) > rmax:
raise IndexError("Shape or Record index out of range.")
if i < 0: i = range(self.numRecords)[i]
return i
def __shpHeader(self):
"""Reads the header information from a .shp or .shx file."""
if not self.shp:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no shp file found")
shp = self.shp
# File length (16-bit word * 2 = bytes)
shp.seek(24)
self.shpLength = unpack(">i", shp.read(4))[0] * 2
# Shape type
shp.seek(32)
self.shapeType= unpack("<i", shp.read(4))[0]
# The shapefile's bounding box (lower left, upper right)
self.bbox = _Array('d', unpack("<4d", shp.read(32)))
# Elevation
self.elevation = _Array('d', unpack("<2d", shp.read(16)))
# Measure
self.measure = _Array('d', unpack("<2d", shp.read(16)))
def __shape(self):
"""Returns the header info and geometry for a single shape."""
f = self.__getFileObj(self.shp)
record = _Shape()
nParts = nPoints = zmin = zmax = mmin = mmax = None
(recNum, recLength) = unpack(">2i", f.read(8))
shapeType = unpack("<i", f.read(4))[0]
record.shapeType = shapeType
# For Null shapes create an empty points list for consistency
if shapeType == 0:
record.points = []
# All shape types capable of having a bounding box
elif shapeType in (3,5,8,13,15,18,23,25,28,31):
record.bbox = _Array('d', unpack("<4d", f.read(32)))
# Shape types with parts
if shapeType in (3,5,13,15,23,25,31):
nParts = unpack("<i", f.read(4))[0]
# Shape types with points
if shapeType in (3,5,8,13,15,23,25,31):
nPoints = unpack("<i", f.read(4))[0]
# Read parts
if nParts:
record.parts = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read part types for Multipatch - 31
if shapeType == 31:
record.partTypes = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read points - produces a list of [x,y] values
if nPoints:
record.points = [_Array('d', unpack("<2d", f.read(16))) for p in range(nPoints)]
# Read z extremes and values
if shapeType in (13,15,18,31):
(zmin, zmax) = unpack("<2d", f.read(16))
record.z = _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8)))
# Read m extremes and values
if shapeType in (13,15,18,23,25,28,31):
(mmin, mmax) = unpack("<2d", f.read(16))
# Measure values less than -10e38 are nodata values according to the spec
record.m = []
            for m in _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8))):
if m > -10e38:
record.m.append(m)
else:
record.m.append(None)
# Read a single point
if shapeType in (1,11,21):
record.points = [_Array('d', unpack("<2d", f.read(16)))]
# Read a single Z value
if shapeType == 11:
record.z = unpack("<d", f.read(8))
# Read a single M value
if shapeType in (11,21):
record.m = unpack("<d", f.read(8))
return record
def __shapeIndex(self, i=None):
"""Returns the offset in a .shp file for a shape based on information
in the .shx index file."""
shx = self.shx
if not shx:
return None
if not self._offsets:
# File length (16-bit word * 2 = bytes) - header length
shx.seek(24)
shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
numRecords = shxRecordLength // 8
# Jump to the first record.
shx.seek(100)
for r in range(numRecords):
# Offsets are 16-bit words just like the file length
self._offsets.append(unpack(">i", shx.read(4))[0] * 2)
shx.seek(shx.tell() + 4)
if not i == None:
return self._offsets[i]
def shape(self, i=0):
"""Returns a shape object for a shape in the the geometry
record file."""
shp = self.__getFileObj(self.shp)
i = self.__restrictIndex(i)
offset = self.__shapeIndex(i)
if not offset:
# Shx index not available so use the full list.
shapes = self.shapes()
return shapes[i]
shp.seek(offset)
return self.__shape()
def shapes(self):
"""Returns all shapes in a shapefile."""
shp = self.__getFileObj(self.shp)
shp.seek(100)
shapes = []
while shp.tell() < self.shpLength:
shapes.append(self.__shape())
return shapes
def __dbfHeaderLength(self):
"""Retrieves the header length of a dbf file header."""
if not self.__dbfHdrLength:
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
(self.numRecords, self.__dbfHdrLength) = \
unpack("<xxxxLH22x", dbf.read(32))
return self.__dbfHdrLength
def __dbfHeader(self):
"""Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger"""
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
headerLength = self.__dbfHeaderLength()
numFields = (headerLength - 33) // 32
for field in range(numFields):
fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32)))
name = 0
idx = 0
if b("\x00") in fieldDesc[name]:
idx = fieldDesc[name].index(b("\x00"))
else:
idx = len(fieldDesc[name]) - 1
fieldDesc[name] = fieldDesc[name][:idx]
fieldDesc[name] = u(fieldDesc[name])
fieldDesc[name] = fieldDesc[name].lstrip()
fieldDesc[1] = u(fieldDesc[1])
self.fields.append(fieldDesc)
terminator = dbf.read(1)
assert terminator == b("\r")
self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
def __recordFmt(self):
"""Calculates the size of a .shp geometry record."""
if not self.numRecords:
self.__dbfHeader()
fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
fmtSize = calcsize(fmt)
return (fmt, fmtSize)
def __record(self):
"""Reads and returns a dbf record row as a list of values."""
f = self.__getFileObj(self.dbf)
recFmt = self.__recordFmt()
recordContents = unpack(recFmt[0], f.read(recFmt[1]))
if recordContents[0] != b(' '):
# deleted record
return None
record = []
for (name, typ, size, deci), value in zip(self.fields,
recordContents):
if name == 'DeletionFlag':
continue
elif not value.strip():
record.append(value)
continue
elif typ == "N":
value = value.replace(b('\0'), b('')).strip()
if value == b(''):
value = 0
elif deci:
value = float(value)
else:
value = int(value)
elif typ == b('D'):
try:
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = [y, m, d]
except:
value = value.strip()
elif typ == b('L'):
value = (value in b('YyTt') and b('T')) or \
(value in b('NnFf') and b('F')) or b('?')
else:
value = u(value)
value = value.strip()
record.append(value)
return record
def record(self, i=0):
"""Returns a specific dbf record based on the supplied index."""
f = self.__getFileObj(self.dbf)
if not self.numRecords:
self.__dbfHeader()
i = self.__restrictIndex(i)
recSize = self.__recordFmt()[1]
f.seek(0)
f.seek(self.__dbfHeaderLength() + (i * recSize))
return self.__record()
def records(self):
"""Returns all records in a dbf file."""
if not self.numRecords:
self.__dbfHeader()
records = []
f = self.__getFileObj(self.dbf)
f.seek(self.__dbfHeaderLength())
for i in range(self.numRecords):
r = self.__record()
if r:
records.append(r)
return records
def shapeRecord(self, i=0):
"""Returns a combination geometry and attribute record for the
supplied record index."""
i = self.__restrictIndex(i)
return _ShapeRecord(shape=self.shape(i),
record=self.record(i))
def shapeRecords(self):
"""Returns a list of combination geometry/attribute records for
all records in a shapefile."""
shapeRecords = []
return [_ShapeRecord(shape=rec[0], record=rec[1]) \
for rec in zip(self.shapes(), self.records())]
class Writer:
"""Provides write support for ESRI Shapefiles."""
def __init__(self, shapeType=None):
self._shapes = []
self.fields = []
self.records = []
self.shapeType = shapeType
self.shp = None
self.shx = None
self.dbf = None
# Geometry record offsets and lengths for writing shx file.
self._offsets = []
self._lengths = []
# Use deletion flags in dbf? Default is false (0).
self.deletionFlag = 0
def __getFileObj(self, f):
"""Safety handler to verify file-like objects"""
if not f:
raise ShapefileException("No file-like object available.")
elif hasattr(f, "write"):
return f
else:
pth = os.path.split(f)[0]
if pth and not os.path.exists(pth):
os.makedirs(pth)
return open(f, "wb")
def __shpFileLength(self):
"""Calculates the file length of the shp file."""
# Start with header length
size = 100
# Calculate size of all shapes
for s in self._shapes:
# Add in record header and shape type fields
size += 12
# nParts and nPoints do not apply to all shapes
#if self.shapeType not in (0,1):
# nParts = len(s.parts)
# nPoints = len(s.points)
if hasattr(s,'parts'):
nParts = len(s.parts)
if hasattr(s,'points'):
nPoints = len(s.points)
# All shape types capable of having a bounding box
if self.shapeType in (3,5,8,13,15,18,23,25,28,31):
size += 32
# Shape types with parts
if self.shapeType in (3,5,13,15,23,25,31):
# Parts count
size += 4
# Parts index array
size += nParts * 4
# Shape types with points
if self.shapeType in (3,5,8,13,15,23,25,31):
# Points count
size += 4
# Points array
size += 16 * nPoints
# Calc size of part types for Multipatch (31)
if self.shapeType == 31:
size += nParts * 4
# Calc z extremes and values
if self.shapeType in (13,15,18,31):
# z extremes
size += 16
# z array
size += 8 * nPoints
# Calc m extremes and values
if self.shapeType in (23,25,31):
# m extremes
size += 16
# m array
size += 8 * nPoints
# Calc a single point
if self.shapeType in (1,11,21):
size += 16
# Calc a single Z value
if self.shapeType == 11:
size += 8
# Calc a single M value
if self.shapeType in (11,21):
size += 8
# Calculate size as 16-bit words
size //= 2
return size
def __bbox(self, shapes, shapeTypes=[]):
x = []
y = []
for s in shapes:
shapeType = self.shapeType
if shapeTypes:
shapeType = shapeTypes[shapes.index(s)]
px, py = list(zip(*s.points))[:2]
x.extend(px)
y.extend(py)
return [min(x), min(y), max(x), max(y)]
def __zbox(self, shapes, shapeTypes=[]):
z = []
for s in shapes:
try:
for p in s.points:
z.append(p[2])
except IndexError:
pass
if not z: z.append(0)
return [min(z), max(z)]
def __mbox(self, shapes, shapeTypes=[]):
m = [0]
for s in shapes:
try:
for p in s.points:
m.append(p[3])
except IndexError:
pass
return [min(m), max(m)]
    def bbox(self):
        """Returns the current bounding box for the shapefile, given as the
        lower-left and upper-right corners. It does not include the
        elevation or measure extremes."""
return self.__bbox(self._shapes)
def zbox(self):
"""Returns the current z extremes for the shapefile."""
return self.__zbox(self._shapes)
def mbox(self):
"""Returns the current m extremes for the shapefile."""
return self.__mbox(self._shapes)
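    # Illustrative note on the three methods above: bbox() returns
    # [xmin, ymin, xmax, ymax], while zbox() and mbox() each return a
    # [min, max] pair. For example, a writer holding a single point at
    # (122.0, 37.0) would report bbox() == [122.0, 37.0, 122.0, 37.0].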
def __shapefileHeader(self, fileObj, headerType='shp'):
"""Writes the specified header type to the specified file-like object.
Several of the shapefile formats are so similar that a single generic
method to read or write them is warranted."""
f = self.__getFileObj(fileObj)
f.seek(0)
# File code, Unused bytes
f.write(pack(">6i", 9994,0,0,0,0,0))
# File length (Bytes / 2 = 16-bit words)
if headerType == 'shp':
f.write(pack(">i", self.__shpFileLength()))
elif headerType == 'shx':
f.write(pack('>i', ((100 + (len(self._shapes) * 8)) // 2)))
# Version, Shape type
f.write(pack("<2i", 1000, self.shapeType))
# The shapefile's bounding box (lower left, upper right)
if self.shapeType != 0:
try:
f.write(pack("<4d", *self.bbox()))
except error:
raise ShapefileException("Failed to write shapefile bounding box. Floats required.")
else:
f.write(pack("<4d", 0,0,0,0))
# Elevation
z = self.zbox()
# Measure
m = self.mbox()
try:
f.write(pack("<4d", z[0], z[1], m[0], m[1]))
except error:
raise ShapefileException("Failed to write shapefile elevation and measure values. Floats required.")
def __dbfHeader(self):
"""Writes the dbf header and field descriptors."""
f = self.__getFileObj(self.dbf)
f.seek(0)
version = 3
year, month, day = time.localtime()[:3]
year -= 1900
# Remove deletion flag placeholder from fields
for field in self.fields:
if field[0].startswith("Deletion"):
self.fields.remove(field)
numRecs = len(self.records)
numFields = len(self.fields)
headerLength = numFields * 32 + 33
recordLength = sum([int(field[2]) for field in self.fields]) + 1
header = pack('<BBBBLHH20x', version, year, month, day, numRecs,
headerLength, recordLength)
f.write(header)
# Field descriptors
for field in self.fields:
name, fieldType, size, decimal = field
name = b(name)
name = name.replace(b(' '), b('_'))
name = name.ljust(11).replace(b(' '), b('\x00'))
fieldType = b(fieldType)
size = int(size)
fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)
f.write(fld)
# Terminator
f.write(b('\r'))
def __shpRecords(self):
"""Write the shp records"""
f = self.__getFileObj(self.shp)
f.seek(100)
recNum = 1
for s in self._shapes:
self._offsets.append(f.tell())
# Record number, Content length place holder
f.write(pack(">2i", recNum, 0))
recNum += 1
start = f.tell()
# Shape Type
f.write(pack("<i", s.shapeType))
# All shape types capable of having a bounding box
if s.shapeType in (3,5,8,13,15,18,23,25,28,31):
try:
f.write(pack("<4d", *self.__bbox([s])))
except error:
                    raise ShapefileException("Failed to write bounding box for record %s. Expected floats." % recNum)
# Shape types with parts
if s.shapeType in (3,5,13,15,23,25,31):
# Number of parts
f.write(pack("<i", len(s.parts)))
# Shape types with multiple points per record
if s.shapeType in (3,5,8,13,15,23,25,31):
# Number of points
f.write(pack("<i", len(s.points)))
# Write part indexes
if s.shapeType in (3,5,13,15,23,25,31):
for p in s.parts:
f.write(pack("<i", p))
# Part types for Multipatch (31)
if s.shapeType == 31:
for pt in s.partTypes:
f.write(pack("<i", pt))
# Write points for multiple-point records
if s.shapeType in (3,5,8,13,15,23,25,31):
try:
[f.write(pack("<2d", *p[:2])) for p in s.points]
except error:
raise ShapefileException("Failed to write points for record %s. Expected floats." % recNum)
# Write z extremes and values
if s.shapeType in (13,15,18,31):
try:
f.write(pack("<2d", *self.__zbox([s])))
except error:
raise ShapefileException("Failed to write elevation extremes for record %s. Expected floats." % recNum)
try:
[f.write(pack("<d", p[2])) for p in s.points]
except error:
raise ShapefileException("Failed to write elevation values for record %s. Expected floats." % recNum)
# Write m extremes and values
if s.shapeType in (23,25,31):
try:
f.write(pack("<2d", *self.__mbox([s])))
except error:
raise ShapefileException("Failed to write measure extremes for record %s. Expected floats" % recNum)
try:
[f.write(pack("<d", p[3])) for p in s.points]
except error:
raise ShapefileException("Failed to write measure values for record %s. Expected floats" % recNum)
# Write a single point
if s.shapeType in (1,11,21):
try:
f.write(pack("<2d", s.points[0][0], s.points[0][1]))
except error:
raise ShapefileException("Failed to write point for record %s. Expected floats." % recNum)
# Write a single Z value
if s.shapeType == 11:
try:
f.write(pack("<1d", s.points[0][2]))
except error:
raise ShapefileException("Failed to write elevation value for record %s. Expected floats." % recNum)
# Write a single M value
if s.shapeType in (11,21):
try:
f.write(pack("<1d", s.points[0][3]))
except error:
raise ShapefileException("Failed to write measure value for record %s. Expected floats." % recNum)
# Finalize record length as 16-bit words
finish = f.tell()
length = (finish - start) // 2
self._lengths.append(length)
# start - 4 bytes is the content length field
f.seek(start-4)
f.write(pack(">i", length))
f.seek(finish)
def __shxRecords(self):
"""Writes the shx records."""
f = self.__getFileObj(self.shx)
f.seek(100)
for i in range(len(self._shapes)):
f.write(pack(">i", self._offsets[i] // 2))
f.write(pack(">i", self._lengths[i]))
def __dbfRecords(self):
"""Writes the dbf records."""
f = self.__getFileObj(self.dbf)
for record in self.records:
if not self.fields[0][0].startswith("Deletion"):
f.write(b(' ')) # deletion flag
for (fieldName, fieldType, size, dec), value in zip(self.fields, record):
fieldType = fieldType.upper()
size = int(size)
if fieldType.upper() == "N":
value = str(value).rjust(size)
elif fieldType == 'L':
value = str(value)[0].upper()
else:
value = str(value)[:size].ljust(size)
assert len(value) == size
value = b(value)
f.write(value)
def null(self):
"""Creates a null shape."""
self._shapes.append(_Shape(NULL))
def point(self, x, y, z=0, m=0):
"""Creates a point shape."""
pointShape = _Shape(self.shapeType)
pointShape.points.append([x, y, z, m])
self._shapes.append(pointShape)
    def line(self, parts=[], shapeType=POLYLINE):
        """Creates a line shape. This method is just a convenience method
which wraps 'poly()'.
"""
self.poly(parts, shapeType, [])
def poly(self, parts=[], shapeType=POLYGON, partTypes=[]):
"""Creates a shape that has multiple collections of points (parts)
including lines, polygons, and even multipoint shapes. If no shape type
is specified it defaults to 'polygon'. If no part types are specified
(which they normally won't be) then all parts default to the shape type.
"""
polyShape = _Shape(shapeType)
polyShape.parts = []
polyShape.points = []
for part in parts:
polyShape.parts.append(len(polyShape.points))
for point in part:
# Ensure point is list
if not isinstance(point, list):
point = list(point)
# Make sure point has z and m values
while len(point) < 4:
point.append(0)
polyShape.points.append(point)
if polyShape.shapeType == 31:
if not partTypes:
for part in parts:
partTypes.append(polyShape.shapeType)
polyShape.partTypes = partTypes
self._shapes.append(polyShape)
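    # A hedged usage sketch for poly(): parts is a list of point lists, and z/m
    # values are padded to 0 for 2D input (coordinates below are illustrative).
    #
    #   w = Writer(shapeType=POLYGON)
    #   w.poly(parts=[[[122, 37], [117, 36], [115, 32], [118, 20], [122, 37]]])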
def field(self, name, fieldType="C", size="50", decimal=0):
"""Adds a dbf field descriptor to the shapefile."""
self.fields.append((name, fieldType, size, decimal))
def record(self, *recordList, **recordDict):
"""Creates a dbf attribute record. You can submit either a sequence of
field values or keyword arguments of field names and values. Before
        adding records you must add fields for the record values using the
        field() method. If the record values exceed the number of fields the
        extra ones won't be added. When using keyword arguments to specify
        field/value pairs, only fields matching the already registered fields
        will be added."""
record = []
fieldCount = len(self.fields)
# Compensate for deletion flag
if self.fields[0][0].startswith("Deletion"): fieldCount -= 1
if recordList:
[record.append(recordList[i]) for i in range(fieldCount)]
elif recordDict:
for field in self.fields:
if field[0] in recordDict:
val = recordDict[field[0]]
if val:
record.append(val)
else:
record.append("")
if record:
self.records.append(record)
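    # A hedged usage sketch for field()/record(), assuming a Writer instance
    # `w` (field name and values are illustrative): fields must be registered
    # before records are added.
    #
    #   w.field("NAME", "C", "40")
    #   w.record("Appleton")               # positional form
    #   w.record(NAME="Appleton")          # keyword form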
def shape(self, i):
return self._shapes[i]
def shapes(self):
"""Return the current list of shapes."""
return self._shapes
def saveShp(self, target):
"""Save an shp file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shp'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shp = self.__getFileObj(target)
self.__shapefileHeader(self.shp, headerType='shp')
self.__shpRecords()
def saveShx(self, target):
"""Save an shx file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shx'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shx = self.__getFileObj(target)
self.__shapefileHeader(self.shx, headerType='shx')
self.__shxRecords()
def saveDbf(self, target):
"""Save a dbf file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.dbf'
self.dbf = self.__getFileObj(target)
self.__dbfHeader()
self.__dbfRecords()
def save(self, target=None, shp=None, shx=None, dbf=None):
"""Save the shapefile data to three files or
three file-like objects. SHP and DBF files can also
be written exclusively using saveShp, saveShx, and saveDbf respectively."""
# TODO: Create a unique filename for target if None.
if shp:
self.saveShp(shp)
if shx:
self.saveShx(shx)
if dbf:
self.saveDbf(dbf)
elif target:
self.saveShp(target)
self.shp.close()
self.saveShx(target)
self.shx.close()
self.saveDbf(target)
self.dbf.close()
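# A minimal end-to-end sketch for the Writer (the base file name is
# illustrative): saving with a single target writes the .shp, .shx and .dbf
# files side by side.
#
#   w = Writer(shapeType=POINT)
#   w.field("NAME", "C", "40")
#   w.point(122.0, 37.0)
#   w.record("Appleton")
#   w.save("my_shapes")                # -> my_shapes.shp/.shx/.dbf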
class Editor(Writer):
def __init__(self, shapefile=None, shapeType=POINT, autoBalance=1):
self.autoBalance = autoBalance
if not shapefile:
Writer.__init__(self, shapeType)
elif is_string(shapefile):
base = os.path.splitext(shapefile)[0]
if os.path.isfile("%s.shp" % base):
r = Reader(base)
Writer.__init__(self, r.shapeType)
self._shapes = r.shapes()
self.fields = r.fields
self.records = r.records()
def select(self, expr):
"""Select one or more shapes (to be implemented)"""
# TODO: Implement expressions to select shapes.
pass
def delete(self, shape=None, part=None, point=None):
"""Deletes the specified part of any shape by specifying a shape
number, part number, or point number."""
# shape, part, point
if shape and part and point:
del self._shapes[shape][part][point]
# shape, part
elif shape and part and not point:
del self._shapes[shape][part]
# shape
elif shape and not part and not point:
del self._shapes[shape]
# point
elif not shape and not part and point:
for s in self._shapes:
if s.shapeType == 1:
del self._shapes[point]
else:
for part in s.parts:
del s[part][point]
# part, point
elif not shape and part and point:
for s in self._shapes:
del s[part][point]
# part
elif not shape and part and not point:
for s in self._shapes:
del s[part]
    def point(self, x=None, y=None, z=None, m=None, shape=None, part=None, point=None, addr=None):
        """Creates/updates a point shape. The arguments allow
        you to update a specific point by shape, part and point index for any
        shape type."""
# shape, part, point
if shape and part and point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
try: self._shapes[shape][part][point]
except IndexError: self._shapes[shape][part].append([])
p = self._shapes[shape][part][point]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][point] = p
# shape, part
elif shape and part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
points = self._shapes[shape][part]
for i in range(len(points)):
p = points[i]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][i] = p
# shape
elif shape and not part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
# point
# part
if addr:
shape, part, point = addr
self._shapes[shape][part][point] = [x, y, z, m]
else:
Writer.point(self, x, y, z, m)
if self.autoBalance:
self.balance()
def validate(self):
"""An optional method to try and validate the shapefile
as much as possible before writing it (not implemented)."""
#TODO: Implement validation method
pass
def balance(self):
"""Adds a corresponding empty attribute or null geometry record depending
on which type of record was created to make sure all three files
        are in sync."""
if len(self.records) > len(self._shapes):
self.null()
elif len(self.records) < len(self._shapes):
self.record()
def __fieldNorm(self, fieldName):
"""Normalizes a dbf field name to fit within the spec and the
expectations of certain ESRI software."""
if len(fieldName) > 11: fieldName = fieldName[:11]
        fieldName = fieldName.upper()
        return fieldName.replace(' ', '_')
# Begin Testing
def test():
import doctest
doctest.NORMALIZE_WHITESPACE = 1
doctest.testfile("README.txt", verbose=1)
if __name__ == "__main__":
"""
Doctests are contained in the module 'pyshp_usage.py'. This library was developed
using Python 2.3. Python 2.4 and above have some excellent improvements in the built-in
testing libraries but for now unit testing is done using what's available in
2.3.
"""
test()
| janhui/test_engine | dev/plugins/define_boundary_ids/shapefile.py | Python | lgpl-2.1 | 38,004 | 0.005289 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <brunocalogero@hotmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_access_port_to_interface_policy_leaf_profile
short_description: Manage Fabric interface policy leaf profile interface selectors (infra:HPortS, infra:RsAccBaseGrp, infra:PortBlk)
description:
- Manage Fabric interface policy leaf profile interface selectors on Cisco ACI fabrics.
notes:
- More information about the internal APIC classes B(infra:HPortS), B(infra:RsAccBaseGrp) and B(infra:PortBlk) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/)
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
options:
leaf_interface_profile:
description:
- The name of the Fabric access policy leaf interface profile.
required: yes
aliases: [ leaf_interface_profile_name ]
access_port_selector:
description:
- The name of the Fabric access policy leaf interface profile access port selector.
required: yes
aliases: [ name, access_port_selector_name ]
description:
description:
- The description to assign to the C(access_port_selector)
leaf_port_blk:
description:
- The name of the Fabric access policy leaf interface profile access port block.
required: yes
aliases: [ leaf_port_blk_name ]
leaf_port_blk_description:
description:
- The description to assign to the C(leaf_port_blk)
required: no
from:
description:
    - The beginning (from range) of the port range block for the leaf access port block.
required: yes
aliases: [ fromPort, from_port_range ]
to:
description:
- The end (to range) of the port range block for the leaf access port block.
required: yes
aliases: [ toPort, to_port_range ]
policy_group:
description:
- The name of the fabric access policy group to be associated with the leaf interface profile interface selector.
aliases: [ policy_group_name ]
interface_type:
version_added: '2.6'
description:
    - The type of interface for the static EPG deployment.
choices: [ fex, port_channel, switch_port, vpc ]
default: switch_port
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Associate an Interface Access Port Selector to an Interface Policy Leaf Profile with a Policy Group
aci_access_port_to_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
access_port_selector: accessportselectorname
leaf_port_blk: leafportblkname
from: 13
to: 16
policy_group: policygroupname
state: present
- name: Associate an interface access port selector to an Interface Policy Leaf Profile (w/o policy group) (check if this works)
aci_access_port_to_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
access_port_selector: accessportselectorname
leaf_port_blk: leafportblkname
from: 13
to: 16
state: present
- name: Remove an interface access port selector associated with an Interface Policy Leaf Profile
aci_access_port_to_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
access_port_selector: accessportselectorname
state: absent
- name: Query Specific access_port_selector under given leaf_interface_profile
aci_access_port_to_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
access_port_selector: accessportselectorname
state: query
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update({
'leaf_interface_profile': dict(type='str', aliases=['leaf_interface_profile_name']), # Not required for querying all objects
'access_port_selector': dict(type='str', aliases=['name', 'access_port_selector_name']), # Not required for querying all objects
        'description': dict(type='str'),
'leaf_port_blk': dict(type='str', aliases=['leaf_port_blk_name']),
'leaf_port_blk_description': dict(type='str'),
# NOTE: Keyword 'from' is a reserved word in python, so we need it as a string
'from': dict(type='str', aliases=['fromPort', 'from_port_range']),
'to': dict(type='str', aliases=['toPort', 'to_port_range']),
'policy_group': dict(type='str', aliases=['policy_group_name']),
'interface_type': dict(type='str', default='switch_port', choices=['fex', 'port_channel', 'switch_port', 'vpc']),
'state': dict(type='str', default='present', choices=['absent', 'present', 'query']),
})
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['leaf_interface_profile', 'access_port_selector']],
['state', 'present', ['leaf_interface_profile', 'access_port_selector']],
],
)
leaf_interface_profile = module.params['leaf_interface_profile']
access_port_selector = module.params['access_port_selector']
description = module.params['description']
leaf_port_blk = module.params['leaf_port_blk']
leaf_port_blk_description = module.params['leaf_port_blk_description']
from_ = module.params['from']
to_ = module.params['to']
policy_group = module.params['policy_group']
interface_type = module.params['interface_type']
state = module.params['state']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='infraAccPortP',
aci_rn='infra/accportprof-{0}'.format(leaf_interface_profile),
filter_target='eq(infraAccPortP.name, "{0}")'.format(leaf_interface_profile),
module_object=leaf_interface_profile
),
subclass_1=dict(
aci_class='infraHPortS',
# NOTE: normal rn: hports-{name}-typ-{type}, hence here hardcoded to range for purposes of module
aci_rn='hports-{0}-typ-range'.format(access_port_selector),
filter_target='eq(infraHPortS.name, "{0}")'.format(access_port_selector),
module_object=access_port_selector,
),
child_classes=['infraPortBlk', 'infraRsAccBaseGrp']
)
INTERFACE_TYPE_MAPPING = dict(
fex='uni/infra/funcprof/accportgrp-{0}'.format(policy_group),
port_channel='uni/infra/funcprof/accbundle-{0}'.format(policy_group),
switch_port='uni/infra/funcprof/accportgrp-{0}'.format(policy_group),
vpc='uni/infra/funcprof/accbundle-{0}'.format(policy_group),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraHPortS',
class_config=dict(
descr=description,
name=access_port_selector,
),
child_configs=[
dict(
infraPortBlk=dict(
attributes=dict(
descr=leaf_port_blk_description,
name=leaf_port_blk,
fromPort=from_,
toPort=to_,
),
),
),
dict(
infraRsAccBaseGrp=dict(
attributes=dict(
tDn=INTERFACE_TYPE_MAPPING[interface_type],
),
),
),
],
)
aci.get_diff(aci_class='infraHPortS')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| lmprice/ansible | lib/ansible/modules/network/aci/aci_access_port_to_interface_policy_leaf_profile.py | Python | gpl-3.0 | 11,226 | 0.002494 |
import convis
import numpy as np
import matplotlib.pylab as plt
plt.figure()
plt.imshow(convis.numerical_filters.gauss_filter_2d(4.0,4.0))
plt.figure()
plt.plot(convis.numerical_filters.exponential_filter_1d(tau=0.01)) | jahuth/convis | docs/filters-1.py | Python | gpl-3.0 | 218 | 0.009174 |
#!/usr/bin/env python
import lib_v2 as lib
import sys
import os
def main(argv=None):
"""
Usage is:
submit.py [--account <chargecode>] [--url <url>] -- <commandline>
Run from the working dir of the job which must contain (in addition
to the job files) a file named scheduler.conf with scheduler properties for the job.
<chargecode>, if present, gives the project to charge the job to.
Url is the url of the submitting website including the taskid parameter.
Returns 0 with "jobid=<jobid>" on stdout if job submitted ok
Returns 1 with multiline error message on stdout if error.
Returns 2 for the specific error of queue limit exceeded.
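    Hedged example invocation (chargecode, url and command line are illustrative):
        submit.py --account ABC123 --url "https://example.org/task?taskid=42" -- mb batch.nex
    On success this prints "jobid=<jobid>" and exits 0.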
"""
#COMMAND LINE PARSING
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--account', metavar="ACCOUNT", type=str, default=lib.account,
help="The account string to use when submitting jobs. Default is read from config files.")
parser.add_argument('--url', metavar="URL", dest="URL", type=str,
help="Notification URL")
try:
cmdline_options, cmdline = parser.parse_known_args(argv)
cmdline = cmdline[1:] if not ('--' in cmdline) else cmdline[cmdline.index('--')+1:]
except Exception as e:
print "There was a problem submitting your job"
print e
sys.exit(1)
account = cmdline_options.account
url = cmdline_options.URL
#cmdline as an array (and already set)
tooltype = lib.getToolType(cmdline)
scheduler_properties = lib.getProperties("scheduler.conf")
# print scheduler_properties
scheduler_info = lib.schedulerInfo(scheduler_properties, tooltype)
# print scheduler_info
    # If this is a "direct" run type job we don't need to create a qsub script, we'll just run batch_command.cmdline.
if scheduler_info["is_direct"]:
return lib.submitDirectJob(account, url, lib.email, lib.jobname, cmdline)
runtime = int(scheduler_info["runtime"])
useLocalDisk = False
"""
Workaround for problems with file io on oasis and longer mrbayes runs. Instead of running on
oasis, we'll copy the working dir to the compute nodes local storage and copy the results back
    when the job completes. Since many mrbayes jobs time out we need a special trick to copy results
    of jobs that time out: right before we launch mrbayes we launch a shell script in the background
that sleeps a few min less than the job's runtime and then copies the results. If mrbayes terminates
normally the background sleep is killed automatically.
"""
if (tooltype == "mrbayes" and runtime > 60):
useLocalDisk = True
# I'm backing out the workaround by setting useLocalDisk to false.
useLocalDisk = False
# Write the command line to a file, batch_command.cmdline.
rfile = open(lib.cmdfile, "w")
rfile.write("#!/bin/sh\n")
rfile.writelines((" ".join(cmdline), "\n"))
rfile.close()
os.chmod(lib.cmdfile, 0744);
# Create the qsub script
rfile = open(lib.runfile, "w")
text = """#!/bin/sh
#PBS -q %s
#PBS -N %s
#PBS -l walltime=00:%d:00
#PBS -o scheduler_stdout.txt
#PBS -e scheduler_stderr.txt
#PBS -W umask=0007
##PBS -V
#PBS -v QOS=2
#PBS -M %s
#PBS -m ae
#PBS -A %s
""" % (scheduler_info["queue"], lib.jobname, scheduler_info["runtime"], lib.email, account)
rfile.write(text)
text = "#PBS -l nodes=%d:ppn=%d\n" % (scheduler_info["nodes"], scheduler_info["ppn"])
rfile.write(text)
rfile.write("cd %s\n" % (lib.jobdir, lib.local_jobdir)[useLocalDisk])
if useLocalDisk == True:
# Note that it's critical that newlines in the text string are all within the double
# quotes; otherwise the echo command line would be split across lines and make no sense.
text = """"Due to filesystem problems intermediate results for longer mrbayes runs
will not be available while the job is running. The result files will be
available when mrbayes finishes.
We're working to find a solution." """
rfile.write("echo %s > %s/INTERMEDIATE_RESULTS_README.TXT\n" % (text, lib.jobdir))
rfile.write("cp -r %s/* .\n" % lib.jobdir);
sleepTime = int(scheduler_info["runtime"]) - 10
rfile.write("sleep_cp.sh %s %s &\n" % (sleepTime, lib.jobdir))
text = """
source /etc/profile.d/modules.sh
echo Job starting at `date` > start.txt
curl %s\&status=START
export CIPRES_THREADSPP=%d
export CIPRES_NP=%d
%s 1>stdout.txt 2>stderr.txt
echo Job finished at `date` > done.txt
""" % (url,
int(scheduler_info["threads_per_process"]),
int(scheduler_info["mpi_processes"]),
lib.cmdfile)
rfile.write(text)
if (useLocalDisk):
text = """
echo "Job completed, starting to copy working directory."
echo "mkdir %s.complete"
mkdir %s.complete
echo "cp -r * %s.complete"
cp -r * %s.complete
echo "mv %s %s.sleep"
mv %s %s.sleep
echo "mv %s.complete %s"
mv %s.complete %s
echo "rm -rf %s.sleep"
rm -rf %s.sleep
echo "Finished copying working directory."
""" % (lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir, lib.jobdir)
rfile.write(text)
rfile.write("curl %s\&status=DONE\n" % url)
rfile.close()
return lib.submitJob()
return 0
if __name__ == "__main__":
sys.exit(main())
| SciGaP/DEPRECATED-Cipres-Airavata-POC | saminda/cipres-airavata/sdk/scripts/remote_resource/trestles/submit_v2.py | Python | apache-2.0 | 5,478 | 0.011135 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
# Work around a bug which causes segfaults if uuid is imported after
# PyQt. See here for details :
#
# https://bugs.gentoo.org/show_bug.cgi?id=317557
# http://www.riverbankcomputing.com/pipermail/pyqt/2010-December/028773.html
#
# Using __import__ rather than import so that we don't pollute the GafferUI
# namespace.
__import__( "uuid" )
## Deprecated. This legacy function only supports use with Qt4. For
# combined Qt4/Qt5 support use `from Qt import name` instead.
# Also note that the lazy argument is no longer effective, because Qt.py
# imports all modules at startup.
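# For instance, legacy code along the lines of
#   QtGui = GafferUI._qtImport( "QtGui" )
# is expected to migrate to
#   from Qt import QtGui
# (the module name here is purely illustrative).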
__qtModuleName = None
def _qtImport( name, lazy=False ) :
# decide which qt bindings to use, and apply any fix-ups we need
# to shield us from PyQt/PySide differences.
global __qtModuleName
if __qtModuleName is None :
import os
if "GAFFERUI_QT_BINDINGS" in os.environ :
__qtModuleName = os.environ["GAFFERUI_QT_BINDINGS"]
else :
# no preference stated via environment - see what we shipped with
if os.path.exists( os.environ["GAFFER_ROOT"] + "/python/PySide" ) :
__qtModuleName = "PySide"
else :
__qtModuleName = "PyQt4"
# PyQt unfortunately uses an implementation-specific
# naming scheme for its new-style signal and slot classes.
# We use this to make it compatible with PySide, according to :
#
# http://qt-project.org/wiki/Differences_Between_PySide_and_PyQt
if "PyQt" in __qtModuleName :
QtCore = __import__( __qtModuleName + ".QtCore" ).QtCore
QtCore.Signal = QtCore.pyqtSignal
# import the submodule from those bindings and return it
if lazy :
import Gaffer
return Gaffer.lazyImport( __qtModuleName + "." + name )
else :
qtModule = __import__( __qtModuleName + "." + name )
return getattr( qtModule, name )
##########################################################################
# Function to return the C++ address of a wrapped Qt object. This can
# be useful if needing to implement part of the UI in C++ and the rest
# in Python.
##########################################################################
def _qtAddress( o ) :
import Qt
if "PyQt" in Qt.__binding__ :
import sip
return sip.unwrapinstance( o )
else :
return __shiboken().getCppPointer( o )[0]
##########################################################################
# Function to return a wrapped Qt object from the given C++ address.
# This can be useful if needing to implement part of the UI in C++ and
# the rest in Python.
##########################################################################
def _qtObject( address, type ) :
import Qt
if "PyQt" in Qt.__binding__ :
import sip
return sip.wrapinstance( address, type )
else :
return __shiboken().wrapInstance( address, type )
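# A hedged round-trip sketch for the two helpers above (the widget and type are
# illustrative, and `_qtWidget()` is assumed to return the wrapped Qt widget) :
#
#   address = GafferUI._qtAddress( someWidget._qtWidget() )
#   sameWidget = GafferUI._qtObject( address, QtWidgets.QWidget )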
##########################################################################
# Determines if the wrapped Qt object is still valid
# Useful when having to deal with the consequences of C++/Python deletion
# order challenges, see:
# https://github.com/GafferHQ/gaffer/pull/3179
##########################################################################
def _qtObjectIsValid( o ) :
import Qt
if "PyQt" in Qt.__binding__ :
import sip
return not sip.isdeleted( o )
else :
return __shiboken().isValid( o )
##########################################################################
# Shiboken lives in a variety of places depending on which PySide it is.
##########################################################################
def __shiboken() :
import Qt
assert( "PyQt" not in Qt.__binding__ )
if Qt.__binding__ == "PySide2" :
try :
import PySide2.shiboken2 as shiboken
except ImportError :
import shiboken2 as shiboken
else :
try :
import PySide.shiboken
except ImportError :
import shiboken
return shiboken
##########################################################################
# now import our actual functionality
##########################################################################
# Import modules that must be imported before _GafferUI, using __import__
# to avoid polluting the GafferUI namespace.
__import__( "IECore" )
__import__( "Gaffer" )
from _GafferUI import *
# general ui stuff first
from Enums import *
from Widget import Widget
from LazyMethod import LazyMethod
from Menu import Menu
from ContainerWidget import ContainerWidget
from Window import Window
from SplitContainer import SplitContainer
from ListContainer import ListContainer
from GridContainer import GridContainer
from MenuBar import MenuBar
from EventLoop import EventLoop
from TabbedContainer import TabbedContainer
from TextWidget import TextWidget
from NumericWidget import NumericWidget
from Button import Button
from MultiLineTextWidget import MultiLineTextWidget
from Label import Label
from GLWidget import GLWidget
from ScrolledContainer import ScrolledContainer
from PathWidget import PathWidget
from PathListingWidget import PathListingWidget
from PathChooserWidget import PathChooserWidget
from Dialogue import Dialogue
from PathChooserDialogue import PathChooserDialogue
from TextInputDialogue import TextInputDialogue
from Collapsible import Collapsible
from ColorSwatch import ColorSwatch
from Slider import Slider
from ShowURL import showURL
from Spacer import Spacer
from BoolWidget import BoolWidget, CheckBox
from Image import Image
from ErrorDialogue import ErrorDialogue
from _Variant import _Variant
from VectorDataWidget import VectorDataWidget
from PathVectorDataWidget import PathVectorDataWidget
from ProgressBar import ProgressBar
from SelectionMenu import SelectionMenu
from PathFilterWidget import PathFilterWidget
from CompoundPathFilterWidget import CompoundPathFilterWidget
from InfoPathFilterWidget import InfoPathFilterWidget
from MatchPatternPathFilterWidget import MatchPatternPathFilterWidget
from FileSequencePathFilterWidget import FileSequencePathFilterWidget
from BusyWidget import BusyWidget
from NumericSlider import NumericSlider
from ColorChooser import ColorChooser
from ColorChooserDialogue import ColorChooserDialogue
from MessageWidget import MessageWidget
from NotificationMessageHandler import NotificationMessageHandler
from MenuButton import MenuButton
from MultiSelectionMenu import MultiSelectionMenu
from PopupWindow import PopupWindow
from ConfirmationDialogue import ConfirmationDialogue
from DisplayTransform import DisplayTransform
from Divider import Divider
import _Pointer
from SplineWidget import SplineWidget
from Bookmarks import Bookmarks
import WidgetAlgo
# then all the PathPreviewWidgets. note that the order
# of import controls the order of display.
from PathPreviewWidget import PathPreviewWidget
from CompoundPathPreview import CompoundPathPreview
from DeferredPathPreview import DeferredPathPreview
from InfoPathPreview import InfoPathPreview
from HeaderPathPreview import HeaderPathPreview
from DataPathPreview import DataPathPreview
# then stuff specific to graph uis
from BackgroundMethod import BackgroundMethod
from PlugValueWidget import PlugValueWidget
from StringPlugValueWidget import StringPlugValueWidget
from NumericPlugValueWidget import NumericPlugValueWidget
from BoolPlugValueWidget import BoolPlugValueWidget
from PathPlugValueWidget import PathPlugValueWidget
from FileSystemPathPlugValueWidget import FileSystemPathPlugValueWidget
from VectorDataPlugValueWidget import VectorDataPlugValueWidget
from PathVectorDataPlugValueWidget import PathVectorDataPlugValueWidget
from FileSystemPathVectorDataPlugValueWidget import FileSystemPathVectorDataPlugValueWidget
from PlugWidget import PlugWidget
from PlugLayout import PlugLayout
from Editor import Editor
from PythonEditor import PythonEditor
from GadgetWidget import GadgetWidget
from GraphEditor import GraphEditor
from ScriptWindow import ScriptWindow
from CompoundEditor import CompoundEditor
from NameWidget import NameWidget
from NameLabel import NameLabel
from NodeSetEditor import NodeSetEditor
from NodeEditor import NodeEditor
from Layouts import Layouts
from NodeMenu import NodeMenu
import FileMenu
import LayoutMenu
import EditMenu
import UserPlugs
from Frame import Frame
from CompoundNumericPlugValueWidget import CompoundNumericPlugValueWidget
from BoxPlugValueWidget import BoxPlugValueWidget
from NodeUI import NodeUI
from StandardNodeUI import StandardNodeUI
from NodeToolbar import NodeToolbar
from StandardNodeToolbar import StandardNodeToolbar
from Viewer import Viewer
from ColorSwatchPlugValueWidget import ColorSwatchPlugValueWidget
from ColorPlugValueWidget import ColorPlugValueWidget
from AboutWindow import AboutWindow
import ApplicationMenu
from BrowserEditor import BrowserEditor
from Timeline import Timeline
from MultiLineStringPlugValueWidget import MultiLineStringPlugValueWidget
from PresetsPlugValueWidget import PresetsPlugValueWidget
from GraphComponentBrowserMode import GraphComponentBrowserMode
from ToolPlugValueWidget import ToolPlugValueWidget
from LabelPlugValueWidget import LabelPlugValueWidget
from CompoundDataPlugValueWidget import CompoundDataPlugValueWidget
from LayoutPlugValueWidget import LayoutPlugValueWidget
import ScriptNodeUI
from RefreshPlugValueWidget import RefreshPlugValueWidget
import PreferencesUI
from SplinePlugValueWidget import SplinePlugValueWidget
from RampPlugValueWidget import RampPlugValueWidget
from NodeFinderDialogue import NodeFinderDialogue
from ConnectionPlugValueWidget import ConnectionPlugValueWidget
from ButtonPlugValueWidget import ButtonPlugValueWidget
import ViewUI
import ToolUI
from Playback import Playback
import MetadataWidget
from UIEditor import UIEditor
import GraphBookmarksUI
import DocumentationAlgo
import _PlugAdder
from Backups import Backups
from AnimationEditor import AnimationEditor
import CompoundNumericNoduleUI
import Examples
from NameValuePlugValueWidget import NameValuePlugValueWidget
# and then specific node uis
import DependencyNodeUI
import ComputeNodeUI
import RandomUI
import SpreadsheetUI
import ExpressionUI
import BoxUI
import ReferenceUI
import BackdropUI
import DotUI
import SubGraphUI
import SwitchUI
import ContextProcessorUI
import ContextVariablesUI
import DeleteContextVariablesUI
import TimeWarpUI
import LoopUI
import AnimationUI
import BoxIOUI
import BoxInUI
import BoxOutUI
import NameSwitchUI
# backwards compatibility
## \todo Remove me
Metadata = __import__( "Gaffer" ).Metadata
__import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", subdirectory = "GafferUI" )
| appleseedhq/gaffer | python/GafferUI/__init__.py | Python | bsd-3-clause | 12,297 | 0.024152 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_concat = rnn_cell_impl._concat
# pylint: enable=protected-access
def _transpose_batch_time(x):
"""Transposes the batch and time dimensions of a Tensor.
If the input tensor has rank < 2 it returns the original tensor. Retains as
much of the static shape information as possible.
Args:
x: A Tensor.
Returns:
x transposed along the first two dimensions.
"""
x_static_shape = x.get_shape()
if x_static_shape.rank is not None and x_static_shape.rank < 2:
return x
x_rank = array_ops.rank(x)
x_t = array_ops.transpose(
x, array_ops.concat(
([1, 0], math_ops.range(2, x_rank)), axis=0))
x_t.set_shape(
tensor_shape.TensorShape([
x_static_shape.dims[1].value, x_static_shape.dims[0].value
]).concatenate(x_static_shape[2:]))
return x_t
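# Shape-only illustration of the helper above: an input of shape
# [batch_size, max_time, depth] comes back as [max_time, batch_size, depth],
# while rank-0 and rank-1 tensors are returned unchanged.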
def _best_effort_input_batch_size(flat_input):
"""Get static input batch size if available, with fallback to the dynamic one.
Args:
flat_input: An iterable of time major input Tensors of shape
`[max_time, batch_size, ...]`.
All inputs should have compatible batch sizes.
Returns:
The batch size in Python integer if available, or a scalar Tensor otherwise.
Raises:
ValueError: if there is any input with an invalid shape.
"""
for input_ in flat_input:
shape = input_.shape
if shape.rank is None:
continue
if shape.rank < 2:
raise ValueError(
"Expected input tensor %s to have rank at least 2" % input_)
batch_size = shape.dims[1].value
if batch_size is not None:
return batch_size
# Fallback to the dynamic batch size of the first input.
return array_ops.shape(flat_input[0])[1]
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all(x == inferred_dtypes[0] for x in inferred_dtypes)
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
def _maybe_tensor_shape_from_tensor(shape):
if isinstance(shape, ops.Tensor):
return tensor_shape.as_shape(tensor_util.constant_value(shape))
else:
return shape
def _should_cache():
"""Returns True if a default caching device should be set, otherwise False."""
if context.executing_eagerly():
return False
# Don't set a caching device when running in a loop, since it is possible that
# train steps could be wrapped in a tf.while_loop. In that scenario caching
# prevents forward computations in loop iterations from re-reading the
# updated weights.
ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
return control_flow_util.GetContainingWhileContext(ctxt) is None
def _is_keras_rnn_cell(rnn_cell):
"""Check whether the cell is a Keras RNN cell.
  The Keras RNN cell accepts the state as a list even when the state is a
  single tensor, whereas the TF RNN cell does not wrap a single state tensor
  in a list. This behavior difference should be unified in a future version.
Args:
rnn_cell: An RNN cell instance that either follow the Keras interface or TF
RNN interface.
Returns:
    Boolean, whether the cell is a Keras RNN cell.
"""
  # Cell type check is not strict enough since there are cells created by other
  # libraries (e.g. DeepMind's) that don't inherit from tf.nn.rnn_cell.RNNCell.
  # Keras cells never had a zero_state method, which was part of the original
  # TF RNN cell interface.
return (not isinstance(rnn_cell, rnn_cell_impl.RNNCell)
and isinstance(rnn_cell, base_layer.Layer)
and getattr(rnn_cell, "zero_state", None) is None)
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on `sequence_length`.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on whether we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_length[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_length[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: int32 `Tensor` scalar.
sequence_length: int32 `Tensor` vector of size [batch_size].
min_sequence_length: int32 `Tensor` scalar, min of sequence_length.
max_sequence_length: int32 `Tensor` scalar, max of sequence_length.
zero_output: `Tensor` vector of shape [output_size].
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
# Convert state to a list for ease of use
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
# Vector describing which batch entries are finished.
copy_cond = time >= sequence_length
def _copy_one_through(output, new_output):
# TensorArray and scalar get passed through.
if isinstance(output, tensor_array_ops.TensorArray):
return new_output
if output.shape.rank == 0:
return new_output
# Otherwise propagate the old or the new value.
with ops.colocate_with(new_output):
return array_ops.where(copy_cond, output, new_output)
def _copy_some_through(flat_new_output, flat_new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
# else copy some of it through
lambda: _copy_some_through(flat_new_output, flat_new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
if not isinstance(substate, tensor_array_ops.TensorArray):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
rank=sequence[0].get_shape().rank)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.stack(sequence)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results
@deprecation.deprecated(None, "Please use `keras.layers.Bidirectional("
"keras.layers.RNN(cell))`, which is equivalent to "
"this API")
@tf_export(v1=["nn.bidirectional_dynamic_rnn"])
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
  Takes input and builds independent forward and backward RNNs. The input_size
  of the forward and backward cells must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such elements.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such elements.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences in the batch.
If not provided, all batch entries are assumed to be full sequences; and
time reversal is applied from time `0` to `max_time` for each sequence.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
rnn_cell_impl.assert_like_rnncell("cell_fw", cell_fw)
rnn_cell_impl.assert_like_rnncell("cell_bw", cell_bw)
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
# Backward direction
if not time_major:
time_axis = 1
batch_axis = 0
else:
time_axis = 0
batch_axis = 1
def _reverse(input_, seq_lengths, seq_axis, batch_axis):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_, seq_lengths=seq_lengths,
seq_axis=seq_axis, batch_axis=batch_axis)
else:
return array_ops.reverse(input_, axis=[seq_axis])
with vs.variable_scope("bw") as bw_scope:
def _map_reverse(inp):
return _reverse(
inp,
seq_lengths=sequence_length,
seq_axis=time_axis,
batch_axis=batch_axis)
inputs_reverse = nest.map_structure(_map_reverse, inputs)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = _reverse(
tmp, seq_lengths=sequence_length,
seq_axis=time_axis, batch_axis=batch_axis)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
@deprecation.deprecated(
None,
"Please use `keras.layers.RNN(cell)`, which is equivalent to this API")
@tf_export(v1=["nn.dynamic_rnn"])
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
Performs fully dynamic unrolling of `inputs`.
Example:
```python
# create a BasicRNNCell
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
# 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
# defining initial state
initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
# 'state' is a tensor of shape [batch_size, cell_state_size]
outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data,
initial_state=initial_state,
dtype=tf.float32)
```
```python
# create 2 LSTMCells
rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.contrib.rnn.LSTMStateTuple for each cell
outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=data,
dtype=tf.float32)
```
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
Used to copy-through state and zero-out outputs when past a batch
element's sequence length. So it's more for performance than correctness.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes. If cells are `LSTMCells`
`state` will be a tuple containing a `LSTMStateTuple` for each cell.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
with vs.variable_scope(scope or "rnn") as varscope:
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (B,T,D) => (T,B,D)
flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().rank not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
batch_size = _best_effort_input_batch_size(flat_input)
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If there is no initial_state, you must give a dtype.")
if getattr(cell, "get_initial_state", None) is not None:
state = cell.get_initial_state(
inputs=None, batch_size=batch_size, dtype=dtype)
else:
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if not context.executing_eagerly() and sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (T,B,D) => (B,T,D)
outputs = nest.map_structure(_transpose_batch_time, outputs)
return (outputs, final_state)
def _dynamic_rnn_loop(cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
ValueError: If time_step is not the same for all the elements in the
inputs.
ValueError: If batch_size is not the same for all the elements in the
inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = _best_effort_input_batch_size(flat_input)
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape.dims[0].value
got_batch_size = shape.dims[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _concat(batch_size, size)
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
else:
max_sequence_length = time_steps
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, element_shape, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
element_shape=element_shape,
tensor_array_name=base_name + name)
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
output_ta = tuple(
_create_ta(
"output_%d" % i,
element_shape=(tensor_shape.TensorShape([const_batch_size])
.concatenate(
_maybe_tensor_shape_from_tensor(out_size))),
dtype=_infer_state_dtype(dtype, state))
for i, out_size in enumerate(flat_output_size))
input_ta = tuple(
_create_ta(
"input_%d" % i,
element_shape=flat_input_i.shape[1:],
dtype=flat_input_i.dtype)
for i, flat_input_i in enumerate(flat_input))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
else:
output_ta = tuple([0 for _ in range(time_steps.numpy())]
for i in range(len(flat_output_size)))
input_ta = flat_input
def _time_step(time, output_ta_t, state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
if in_graph_mode:
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
else:
input_t = tuple(ta[time.numpy()] for ta in input_ta)
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
# Keras RNN cells only accept state as list, even if it's a single tensor.
is_keras_rnn_cell = _is_keras_rnn_cell(cell)
if is_keras_rnn_cell and not nest.is_sequence(state):
state = [state]
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Keras cells always wrap state as list, even if it's a single tensor.
if is_keras_rnn_cell and len(new_state) == 1:
new_state = new_state[0]
# Pack state if using state tuples
output = nest.flatten(output)
if in_graph_mode:
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
else:
for ta, out in zip(output_ta_t, output):
ta[time.numpy()] = out
return (time + 1, output_ta_t, new_state)
if in_graph_mode:
# Make sure that we run at least 1 step, if necessary, to ensure
# the TensorArrays pick up the dynamic shape.
loop_bound = math_ops.minimum(
time_steps, math_ops.maximum(1, max_sequence_length))
else:
# Using max_sequence_length isn't currently supported in the Eager branch.
loop_bound = time_steps
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < loop_bound,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
maximum_iterations=time_steps,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
if in_graph_mode:
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _concat(
[const_time_steps, const_batch_size], output_size, static=True)
output.set_shape(shape)
else:
final_outputs = output_final_ta
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
if not in_graph_mode:
final_outputs = nest.map_structure_up_to(
cell.output_size, lambda x: array_ops.stack(x, axis=0), final_outputs)
return (final_outputs, final_state)
@tf_export(v1=["nn.raw_rnn"])
def raw_rnn(cell, loop_fn,
parallel_iterations=None, swap_memory=False, scope=None):
"""Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, emit_structure, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit_structure), emit)
emit_ta = emit_ta.write(time, emit)
# If any new minibatch entries are marked as finished, mark these.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors which is aggregated in the `emit_ta` inside the
`while_loop`. For the first call to `loop_fn`, the `emit_output`
corresponds to the `emit_structure` which is then used to determine the
size of the `zero_tensor` for the `emit_ta` (defaults to
`cell.output_size`). For the subsequent calls to the `loop_fn`, the
`emit_output` corresponds to the actual output tensor
that is to be aggregated in the `emit_ta`. The parameter `cell_state`
and output `next_cell_state` may be either a single or (possibly nested)
tuple of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
If `loop_fn` returns a (possibly nested) set of Tensors for
`emit_output` during initialization, (inputs `time = 0`,
`cell_output = None`, and `loop_state = None`), then `emit_ta` will
have the same structure, dtypes, and shapes as `emit_output` instead.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None) # time, cell_output, cell_state, loop_state
flat_input = nest.flatten(next_input)
# Need a surrogate loop state for the while_loop if none is available.
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = tensor_shape.dimension_at_index(input_shape[0], 0)
for input_shape_i in input_shape:
# Static verification that batch sizes all match
static_batch_size.merge_with(
tensor_shape.dimension_at_index(input_shape_i, 0))
batch_size = tensor_shape.dimension_value(static_batch_size)
const_batch_size = batch_size
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.shape if emit.shape.is_fully_defined() else
array_ops.shape(emit) for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i,
dynamic_size=True,
element_shape=(tensor_shape.TensorShape([const_batch_size])
.concatenate(
_maybe_tensor_shape_from_tensor(size_i))),
size=0,
name="rnn_output_%d" % i)
for i, (dtype_i, size_i)
in enumerate(zip(flat_emit_dtypes, flat_emit_size))]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(_concat(batch_size, size_i), dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
def copy_fn(cur_i, cand_i):
# TensorArray and scalar get passed through.
if isinstance(cur_i, tensor_array_ops.TensorArray):
return cand_i
if cur_i.shape.rank == 0:
return cand_i
# Otherwise propagate the old or the new value.
with ops.colocate_with(cand_i):
return array_ops.where(elements_finished, cur_i, cand_i)
return nest.map_structure(copy_fn, current, candidate)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_ta = nest.map_structure(
lambda ta, emit: ta.write(time, emit), emit_ta, emit_output)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state)
@deprecation.deprecated(
None, "Please use `keras.layers.RNN(cell, unroll=True)`, "
"which is equivalent to this API")
@tf_export(v1=["nn.static_rnn"])
def static_rnn(cell,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
The simplest form of RNN network generated is:
```python
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
```
However, a few other options are available:
An initial state can be provided.
If the sequence_length vector is provided, dynamic calculation is performed.
This method of calculation does not compute the RNN steps past the maximum
sequence length of the minibatch (thus saving computational time),
and properly propagates the state at an example's sequence length
to the final state output.
The dynamic calculation performed is, at time `t` for batch row `b`,
```python
(output, state)(b, t) =
(t >= sequence_length(b))
? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
: cell(input(b, t), state(b, t - 1))
```
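  A minimal usage sketch (illustrative; `x` is assumed to be a
  `[batch_size, max_time, depth]` tensor, not defined in this module):
  ```python
  cell = tf.nn.rnn_cell.GRUCell(64)
  # static_rnn expects a length `max_time` list of [batch_size, depth] tensors.
  inputs = tf.unstack(x, axis=1)
  outputs, state = tf.nn.static_rnn(cell, inputs, dtype=tf.float32)
  ```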
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`, or a nested tuple of such elements.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs.
An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
- outputs is a length T list of outputs (one for each input), or a nested
tuple of such elements.
- state is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the input depth
(column size) cannot be inferred from inputs via shape inference.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
if not nest.is_sequence(inputs):
raise TypeError("inputs must be a sequence")
if not inputs:
raise ValueError("inputs must not be empty")
outputs = []
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
# Obtain the first sequence of the input
first_input = inputs
while nest.is_sequence(first_input):
first_input = first_input[0]
# Temporarily avoid EmbeddingWrapper and seq2seq badness
# TODO(lukaszkaiser): remove EmbeddingWrapper
if first_input.get_shape().rank != 1:
input_shape = first_input.get_shape().with_rank_at_least(2)
fixed_batch_size = input_shape.dims[0]
flat_inputs = nest.flatten(inputs)
for flat_input in flat_inputs:
input_shape = flat_input.get_shape().with_rank_at_least(2)
batch_size, input_size = tensor_shape.dimension_at_index(
input_shape, 0), input_shape[1:]
fixed_batch_size.merge_with(batch_size)
for i, size in enumerate(input_size.dims):
if tensor_shape.dimension_value(size) is None:
raise ValueError(
"Input size (dimension %d of inputs) must be accessible via "
"shape inference, but saw value None." % i)
else:
fixed_batch_size = first_input.get_shape().with_rank_at_least(1)[0]
if tensor_shape.dimension_value(fixed_batch_size):
batch_size = tensor_shape.dimension_value(fixed_batch_size)
else:
batch_size = array_ops.shape(first_input)[0]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, "
"dtype must be specified")
if getattr(cell, "get_initial_state", None) is not None:
state = cell.get_initial_state(
inputs=None, batch_size=batch_size, dtype=dtype)
else:
state = cell.zero_state(batch_size, dtype)
if sequence_length is not None: # Prepare variables
sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if sequence_length.get_shape().rank not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size")
def _create_zero_output(output_size):
# convert int to TensorShape if necessary
size = _concat(batch_size, output_size)
output = array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
shape = _concat(tensor_shape.dimension_value(fixed_batch_size),
output_size,
static=True)
output.set_shape(tensor_shape.TensorShape(shape))
return output
output_size = cell.output_size
flat_output_size = nest.flatten(output_size)
flat_zero_output = tuple(
_create_zero_output(size) for size in flat_output_size)
zero_output = nest.pack_sequence_as(
structure=output_size, flat_sequence=flat_zero_output)
sequence_length = math_ops.to_int32(sequence_length)
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
# Keras RNN cells only accept state as list, even if it's a single tensor.
is_keras_rnn_cell = _is_keras_rnn_cell(cell)
if is_keras_rnn_cell and not nest.is_sequence(state):
state = [state]
for time, input_ in enumerate(inputs):
if time > 0:
varscope.reuse_variables()
# pylint: disable=cell-var-from-loop
call_cell = lambda: cell(input_, state)
# pylint: enable=cell-var-from-loop
if sequence_length is not None:
(output, state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=cell.state_size)
else:
(output, state) = call_cell()
outputs.append(output)
# Keras RNN cells only return state as list, even if it's a single tensor.
if is_keras_rnn_cell and len(state) == 1:
state = state[0]
return (outputs, state)
@tf_export("nn.static_state_saving_rnn")
def static_state_saving_rnn(cell,
inputs,
state_saver,
state_name,
sequence_length=None,
scope=None):
"""RNN that accepts a state saver for time-truncated RNN calculation.
Args:
cell: An instance of `RNNCell`.
inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`.
state_saver: A state saver object with methods `state` and `save_state`.
state_name: Python string or tuple of strings. The name to use with the
state_saver. If the cell returns tuples of states (i.e.,
`cell.state_size` is a tuple) then `state_name` should be a tuple of
strings having the same length as `cell.state_size`. Otherwise it should
be a single string.
sequence_length: (optional) An int32/int64 vector size [batch_size].
See the documentation for rnn() for more details about sequence_length.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
      state is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the arity and
type of `state_name` does not match that of `cell.state_size`.
"""
state_size = cell.state_size
state_is_tuple = nest.is_sequence(state_size)
state_name_tuple = nest.is_sequence(state_name)
if state_is_tuple != state_name_tuple:
raise ValueError("state_name should be the same type as cell.state_size. "
"state_name: %s, cell.state_size: %s" % (str(state_name),
str(state_size)))
if state_is_tuple:
state_name_flat = nest.flatten(state_name)
state_size_flat = nest.flatten(state_size)
if len(state_name_flat) != len(state_size_flat):
raise ValueError("#elems(state_name) != #elems(state_size): %d vs. %d" %
(len(state_name_flat), len(state_size_flat)))
initial_state = nest.pack_sequence_as(
structure=state_size,
flat_sequence=[state_saver.state(s) for s in state_name_flat])
else:
initial_state = state_saver.state(state_name)
(outputs, state) = static_rnn(
cell,
inputs,
initial_state=initial_state,
sequence_length=sequence_length,
scope=scope)
if state_is_tuple:
flat_state = nest.flatten(state)
state_name = nest.flatten(state_name)
save_state = [
state_saver.save_state(name, substate)
for name, substate in zip(state_name, flat_state)
]
else:
save_state = [state_saver.save_state(state_name, state)]
with ops.control_dependencies(save_state):
last_output = outputs[-1]
flat_last_output = nest.flatten(last_output)
flat_last_output = [
array_ops.identity(output) for output in flat_last_output
]
outputs[-1] = nest.pack_sequence_as(
structure=last_output, flat_sequence=flat_last_output)
if state_is_tuple:
state = nest.pack_sequence_as(
structure=state,
flat_sequence=[array_ops.identity(s) for s in flat_state])
else:
state = array_ops.identity(state)
return (outputs, state)
@deprecation.deprecated(None, "Please use `keras.layers.Bidirectional("
"keras.layers.RNN(cell, unroll=True))`, which is "
"equivalent to this API")
@tf_export(v1=["nn.static_bidirectional_rnn"])
def static_bidirectional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
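  Example (illustrative sketch; `x` is an assumed `[batch_size, T, input_size]`
  tensor, not defined in this module):
  ```python
  cell_fw = tf.nn.rnn_cell.GRUCell(32)
  cell_bw = tf.nn.rnn_cell.GRUCell(32)
  inputs = tf.unstack(x, axis=1)  # length T list of [batch_size, input_size]
  outputs, state_fw, state_bw = tf.nn.static_bidirectional_rnn(
      cell_fw, cell_bw, inputs, dtype=tf.float32)
  # Each element of `outputs` is [batch_size, 64]: fw and bw depth-concatenated.
  ```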
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, input_size], or a nested tuple of such elements.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length `T` list of outputs (one for each input), which
are depth-concatenated forward and backward outputs.
output_state_fw is the final state of the forward rnn.
output_state_bw is the final state of the backward rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
ValueError: If inputs is None or an empty list.
"""
rnn_cell_impl.assert_like_rnncell("cell_fw", cell_fw)
rnn_cell_impl.assert_like_rnncell("cell_bw", cell_bw)
if not nest.is_sequence(inputs):
raise TypeError("inputs must be a sequence")
if not inputs:
raise ValueError("inputs must not be empty")
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = static_rnn(
cell_fw,
inputs,
initial_state_fw,
dtype,
sequence_length,
scope=fw_scope)
# Backward direction
with vs.variable_scope("bw") as bw_scope:
reversed_inputs = _reverse_seq(inputs, sequence_length)
tmp, output_state_bw = static_rnn(
cell_bw,
reversed_inputs,
initial_state_bw,
dtype,
sequence_length,
scope=bw_scope)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
flat_output_fw = nest.flatten(output_fw)
flat_output_bw = nest.flatten(output_bw)
flat_outputs = tuple(
array_ops.concat([fw, bw], 1)
for fw, bw in zip(flat_output_fw, flat_output_bw))
outputs = nest.pack_sequence_as(
structure=output_fw, flat_sequence=flat_outputs)
return (outputs, output_state_fw, output_state_bw)
| theflofly/tensorflow | tensorflow/python/ops/rnn.py | Python | apache-2.0 | 66,326 | 0.004568 |
from kivy.graphics import Color, Line, Quad
from modeful.ui.diagram.relationship import Trigonometry
from modeful.ui.diagram.relationship.association import Association
class Composition(Association):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
with self.canvas.before:
Color(0, 0, 0)
self._diamond_bg = Quad(points=[0]*8)
Color(0, 0, 0, .5)
self._diamond_line = Line(points=[], width=1, close=True)
def redraw(self, x1, y1, x2, y2):
super().redraw(x1, y1, x2, y2)
points = Trigonometry.get_diamond_points(x1, y1, x2, y2, size=15)
self._diamond_bg.points = points
self._diamond_line.points = points
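# Illustrative usage sketch (not part of the original module; it assumes
# `Composition` accepts the same constructor arguments as `Association` and
# that a diagram view calls `redraw` whenever either endpoint moves):
#
#   composition = Composition()
#   composition.redraw(0, 0, 100, 50)  # diamond drawn at the (x1, y1) end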
| Modeful/poc | modeful/ui/diagram/relationship/composition.py | Python | gpl-3.0 | 774 | 0.003876 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <shadowapex@gmail.com>,
# Benjamin Bean <superman2k5@gmail.com>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# William Edwards <shadowapex@gmail.com>
#
#
# core.components.ui User interface handling module.
#
#
import logging
import pygame
import operator
from core.components import pyganim
from core.components import plugin
from core import prepare
# Create a logger for optional handling of debug messages.
logger = logging.getLogger(__name__)
logger.debug("components.ui successfully imported")
class UserInterface(object):
"""A basic user interface object.
:param image: Path to the image to load or surface.
:param position: The [x, y] position to draw the UI element.
:param screen: The pygame surface to draw the element on.
:param scale: Whether or not to scale the surface based on game's scale.
:type image: String or pygame.Surface
:type position: List
:type screen: pygame.Surface
:type scale: Boolean
"""
def __init__(self, images, position, screen, scale=True,
animation_speed=0.2, animation_loop=False):
# Handle loading a single image, multiple images, or surfaces
if type(images) is str or type(images) is unicode:
surface = pygame.image.load(images).convert_alpha()
self.original_width = surface.get_width()
self.original_height = surface.get_height()
if scale:
surface = self.scale_surface(surface)
self.images = [(surface, animation_speed)]
elif type(images) is list or type(images) is tuple:
self.images = []
for item in images:
if type(item) is str or type(item) is unicode:
surface = pygame.image.load(item).convert_alpha()
self.original_width = surface.get_width()
self.original_height = surface.get_height()
if scale:
surface = self.scale_surface(surface)
                else:
                    self.original_width = item.get_width()
                    self.original_height = item.get_height()
                    if scale:
                        surface = self.scale_surface(item)
                    else:
                        surface = item
self.images.append((surface, animation_speed))
else:
self.original_width = images.get_width()
self.original_height = images.get_height()
if scale:
surface = self.scale_surface(images)
else:
surface = images
self.images = [(surface, animation_speed)]
# Create a pyganimation object using our loaded images.
self.animation = pyganim.PygAnimation(self.images, loop=animation_loop)
self.animation.play()
self.animation.pause()
self.position = position
self.last_position = position
self.screen = screen
self.visible = True
self.state = ""
self.width = self.images[0][0].get_width()
self.height = self.images[0][0].get_height()
self.moving = False
self.move_destination = (0, 0)
self.move_delta = (0, 0)
self.move_duration = 0.
self.move_time = 0.
self.fading = False
self.fade_duration = 0.
self.shaking = False
def scale_surface(self, surface):
"""Scales the interface based on the game's scale.
:param: None
:type: None
"""
width = surface.get_width()
height = surface.get_height()
scaled_surface = pygame.transform.scale(surface,
(width * prepare.SCALE,
height * prepare.SCALE))
return scaled_surface
def update(self, dt):
"""Updates the object based on its current state.
:param dt: Amount of time passed in seconds since the last frame.
:type dt: Float
"""
if self.moving:
self.move_time += dt
dest = self.move_destination
dur = self.move_duration
mdt = self.move_delta
mt = self.move_time
if mt > dur:
self.position = dest
self.moving = False
if self.state == "moving" or self.state == "back":
self.state = ""
elif self.state == "forward":
self.move(self.last_position, self.move_duration)
self.state == "back"
else:
if type(self.position) is tuple:
self.position = list(self.position)
self.position[0] -= (mdt[0] * dt) / dur
self.position[1] -= (mdt[1] * dt) / dur
def draw(self):
"""Draws the UI element to the screen.
:param: None
:type: None
"""
if self.visible:
if self.shaking:
# Do shaking stuff
pos = self.position
else:
pos = self.position
self.animation.blit(self.screen, pos)
def play(self):
self.animation.play()
def pause(self):
self.animation.pause()
def stop(self):
self.animation.stop()
def shake(self, intensity, direction="random"):
"""Shakes the object a given severity.
:param intensity: How much the object will shake.
:param direction: Direction to shake in degrees, defaults to "random".
:type intensity: Int
:type direction: Int or String
"""
pass
def fade_in(self, duration=1.):
"""Fades the object in.
:param duration: Fade the object in over n seconds, defaults to 1.
:type duration: Float
"""
if not self.state == "fading_in":
self.state = "fading_in"
self.fading = "in"
self.fade_duration = duration
def fade_out(self, duration=1.):
"""Fades the object out.
:param duration: Fade the object out over n seconds, defaults to 1.
:type duration: Float
"""
if not self.state == "fading_out":
self.state = "fading_out"
self.fading = "out"
self.fade_duration = duration
def move(self, destination, duration=1.):
"""Moves the object to position over n seconds.
:param destination: The (x, y) screen destination position to move to.
:param duration: Moves the object over n seconds, defaults to 1.
:type destination: Tuple
:type duration: Float
"""
if not self.state == "moving":
self.state = "moving"
self.moving = True
self.last_position = list(self.position)
self.move_destination = destination
self.move_time = 0.
self.move_delta = map(operator.sub, self.position, destination)
self.move_duration = float(duration)
def shake_once(self, destination, duration=0.3):
"""Moves the object to a position and then back to its original position.
"""
        if self.state != "forward" and self.state != "back":
self.move(destination, duration)
self.state = "forward"
def scale(self, width_height):
self.animation.scale(width_height)
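# Illustrative usage sketch (kept as a comment so it is not executed on import;
# `screen`, `dt` and the image path are assumptions, not part of this module):
#
#   ui = UserInterface("resources/gfx/arrow.png", [0, 0], screen)
#   ui.play()
#   ui.move((100, 40), duration=0.5)   # slide to (100, 40) over half a second
#   # then, once per frame:
#   ui.update(dt)
#   ui.draw()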
import core.components.ui.bar
| andrefbsantos/Tuxemon | tuxemon/core/components/ui/__init__.py | Python | gpl-3.0 | 8,220 | 0.001825 |
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from mymoney.core.utils.currencies import get_currencies
class BankAccountManager(models.Manager):
def get_user_bankaccounts(self, user):
if not hasattr(user, '_cache_bankaccounts'):
user._cache_bankaccounts = user.bankaccounts.order_by('label')
return user._cache_bankaccounts
def delete_orphans(self):
"""
        Delete bank accounts which have no more owners.
"""
self.filter(owners__isnull=True).delete()
class BankAccount(models.Model):
label = models.CharField(max_length=255, verbose_name=_('Label'))
balance = models.DecimalField(
max_digits=10,
decimal_places=2,
default=0,
verbose_name=_('Balance'),
)
balance_initial = models.DecimalField(
max_digits=10,
decimal_places=2,
default=0,
verbose_name=_('Initial balance'),
help_text=_('Initial balance will automatically update the balance.'),
)
currency = models.CharField(
max_length=3,
choices=get_currencies(),
verbose_name=_('Currency'),
)
owners = models.ManyToManyField(
settings.AUTH_USER_MODEL,
limit_choices_to={'is_staff': False, 'is_superuser': False},
verbose_name=_('Owners'),
related_name='bankaccounts',
db_table='bankaccounts_owners',
)
objects = BankAccountManager()
class Meta:
db_table = 'bankaccounts'
permissions = (("administer_owners", "Administer owners"),)
def __str__(self):
return self.label
def save(self, *args, **kwargs):
# Init balance. Merge both just in case.
if self.pk is None:
self.balance += self.balance_initial
# Otherwise update it with the new delta.
else:
original = BankAccount.objects.get(pk=self.pk)
self.balance += self.balance_initial - original.balance_initial
super(BankAccount, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('banktransactions:list', kwargs={
'bankaccount_pk': self.pk,
})
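# Illustrative sketch (not part of the module) of how ``save()`` keeps
# ``balance`` in sync with ``balance_initial``; the concrete values are
# assumptions for the example:
#
#   account = BankAccount.objects.create(
#       label="Checking", balance_initial=100, currency="EUR")
#   # account.balance == 100  (initial balance folded in on first save)
#   account.balance_initial = 150
#   account.save()
#   # account.balance == 150  (only the +50 delta is applied)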
| ychab/mymoney | mymoney/apps/bankaccounts/models.py | Python | bsd-3-clause | 2,268 | 0 |
from cyder.cydns.views import CydnsDeleteView
from cyder.cydns.views import CydnsDetailView
from cyder.cydns.views import CydnsCreateView
from cyder.cydns.views import CydnsUpdateView
from cyder.cydns.views import CydnsListView
from cyder.cydns.cname.models import CNAME
from cyder.cydns.cname.forms import CNAMEForm
class CNAMEView(object):
model = CNAME
form_class = CNAMEForm
queryset = CNAME.objects.all().order_by('fqdn')
class CNAMEDeleteView(CNAMEView, CydnsDeleteView):
""" """
class CNAMEDetailView(CNAMEView, CydnsDetailView):
""" """
template_name = "cname/cname_detail.html"
class CNAMECreateView(CNAMEView, CydnsCreateView):
""" """
class CNAMEUpdateView(CNAMEView, CydnsUpdateView):
""" """
class CNAMEListView(CNAMEView, CydnsListView):
""" """
| ngokevin/cyder | cyder/cydns/cname/views.py | Python | bsd-3-clause | 808 | 0 |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
from Qt import QtWidgets
# This was the predecessor to the far superior GafferUI.PlugLayout
# class that we now use. It survives here as a relic because it is
# still relied upon by CompoundParameterValueWidget and
# ClassVectorParameterValueWidget. Do not use it for anything else!
class CompoundPlugValueWidget( GafferUI.PlugValueWidget ) :
## Possible values for collapsed are :
#
# True : use Collapsible container which starts off collapsed
# False : use Collapsible container which starts off opened
# None : don't use Collapsible container
#
# Note that the True/False values for collapsible just set the initial state -
# after this the current state is stored for the session on a per-node basis
# for user convenience.
#
# If summary is specified it will be called each time a child plug changes value,
# and the result used to provide a summary in the collapsible header.
def __init__( self, plug, collapsed=True, label=None, summary=None, **kw ) :
self.__column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 )
self.__label = label if label else IECore.CamelCase.toSpaced( plug.getName() )
self.__collapsible = None
if collapsed is not None :
self.__collapsible = GafferUI.Collapsible(
self.__label,
collapsed = self.__getStoredCollapseState( plug, collapsed ),
)
self.__collapsible.setChild( self.__column )
self.__collapsible.setCornerWidget( GafferUI.Label(), True )
## \todo This is fighting the default sizing applied in the Label constructor. Really we need a standard
# way of controlling size behaviours for all widgets in the public API.
self.__collapsible.getCornerWidget()._qtWidget().setSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed )
self.__collapseStateChangedConnection = self.__collapsible.stateChangedSignal().connect( Gaffer.WeakMethod( self.__collapseStateChanged ) )
GafferUI.PlugValueWidget.__init__(
self,
self.__collapsible if self.__collapsible is not None else self.__column,
plug,
**kw
)
self.__plugAddedConnection = plug.childAddedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ) )
self.__plugRemovedConnection = plug.childRemovedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ) )
self.__childrenChangedPending = False
# arrange to build the rest of the ui in a deferred fashion. this means that we will be
# fully constructed when we call _childPlugWidget etc, rather than expecting derived
# class' implementations to work even before their constructor has completed.
# it also means we don't pay the cost of building huge uis upfront, and rather do it incrementally
# as the user opens up sections. for non-collapsed uis, we build when a parent is received, which
		# allows the top level window to get the sizing right, and for collapsed uis we build when
		# the ui first becomes visible due to being opened.
if collapsed == True :
self.__visibilityChangedConnection = self.__column.visibilityChangedSignal().connect( Gaffer.WeakMethod( self.__visibilityChanged ) )
else :
self.__parentChangedConnection = self.parentChangedSignal().connect( Gaffer.WeakMethod( self.__parentChanged ) )
self.__visibilityChangedConnection = self.__column.visibilityChangedSignal().connect( Gaffer.WeakMethod( self.__visibilityChanged ) )
self.__childPlugUIs = {} # mapping from child plug to PlugWidget
self.__summary = summary
CompoundPlugValueWidget._updateFromPlug( self )
## Returns a PlugValueWidget representing the specified child plug.
# Because the ui is built lazily on demand, this might return None due
# to the user not having opened up the ui - in this case lazy=False may
# be passed to force the creation of the ui.
def childPlugValueWidget( self, childPlug, lazy=True ) :
if not lazy and len( self.__childPlugUIs ) == 0 :
self.__updateChildPlugUIs()
w = self.__childPlugUIs.get( childPlug, None )
if w is None :
return w
elif isinstance( w, GafferUI.PlugValueWidget ) :
return w
else :
return w.plugValueWidget()
def hasLabel( self ) :
return True
## Overridden to propagate status to children.
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
GafferUI.PlugValueWidget.setReadOnly( self, readOnly )
for w in self.__childPlugUIs.values() :
if w is None :
continue
if isinstance( w, GafferUI.PlugValueWidget ) :
w.setReadOnly( readOnly )
elif isinstance( w, GafferUI.PlugWidget ) :
w.labelPlugValueWidget().setReadOnly( readOnly )
w.plugValueWidget().setReadOnly( readOnly )
else :
w.plugValueWidget().setReadOnly( readOnly )
def _updateFromPlug( self ) :
if self.__summary is not None and self.__collapsible is not None :
with self.getContext() :
s = self.__summary( self.getPlug() )
if s :
s = "<small>" + " ( " + s + " ) </small>"
self.__collapsible.getCornerWidget().setText( s )
## May be overridden by derived classes to return a widget to be placed
# at the top of the layout.
def _headerWidget( self ) :
return None
## May be overridden by derived classes to customise the creation of widgets
# to represent the child plugs. The returned widget must either derive from
# PlugValueWidget or must have a plugValueWidget() method which returns
# a PlugValueWidget.
def _childPlugWidget( self, childPlug ) :
result = GafferUI.PlugValueWidget.create( childPlug )
if isinstance( result, GafferUI.PlugValueWidget ) and not result.hasLabel() :
result = GafferUI.PlugWidget( result )
return result
## May be overridden by derived classes to return a widget to be placed
# at the bottom of the layout.
def _footerWidget( self ) :
return None
## Returns the Collapsible widget used to contain the child widgets,
# or None if this ui is not collapsible.
def _collapsible( self ) :
return self.__collapsible
## May be overridden by derived classes to specify which child plugs
# are represented and in what order.
def _childPlugs( self ) :
return self.getPlug().children()
## \todo Mapping plugName->widget makes us vulnerable to name changes.
# See similar comments in StandardNodeUI and StandardNodeToolbar.
def __updateChildPlugUIs( self ) :
# ditch child uis we don't need any more
childPlugs = self._childPlugs()
for childPlug in self.__childPlugUIs.keys() :
if childPlug not in childPlugs :
del self.__childPlugUIs[childPlug]
# make (or reuse existing) uis for each child plug
orderedChildUIs = []
for childPlug in childPlugs :
if childPlug.getName().startswith( "__" ) :
continue
if childPlug not in self.__childPlugUIs :
widget = self._childPlugWidget( childPlug )
assert( isinstance( widget, ( GafferUI.PlugValueWidget, type( None ) ) ) or hasattr( widget, "plugValueWidget" ) )
self.__childPlugUIs[childPlug] = widget
if widget is not None :
if isinstance( widget, GafferUI.PlugValueWidget ) :
widget.setReadOnly( self.getReadOnly() )
elif isinstance( widget, GafferUI.PlugWidget ) :
widget.labelPlugValueWidget().setReadOnly( self.getReadOnly() )
widget.plugValueWidget().setReadOnly( self.getReadOnly() )
else :
widget.plugValueWidget().setReadOnly( self.getReadOnly() )
else :
widget = self.__childPlugUIs[childPlug]
if widget is not None :
orderedChildUIs.append( widget )
if Gaffer.Metadata.value( childPlug, "divider" ) :
orderedChildUIs.append( GafferUI.Divider() )
# add header and footer
headerWidget = self._headerWidget()
if headerWidget is not None :
orderedChildUIs.insert( 0, headerWidget )
footerWidget = self._footerWidget()
if footerWidget is not None :
orderedChildUIs.append( footerWidget )
# and update the column to display them
self.__column[:] = orderedChildUIs
def __visibilityChanged( self, column ) :
assert( column is self.__column )
if self.__column.visible() :
self.__updateChildPlugUIs()
self.__visibilityChangedConnection = None # only need to build once
def __parentChanged( self, widget ) :
assert( widget is self )
if not len( self.__column ) :
self.__updateChildPlugUIs()
self.__parentChangedConnection = None # only need to build once
def __childAddedOrRemoved( self, *unusedArgs ) :
# typically many children are added and removed at once. we don't
# want to be rebuilding the ui for each individual event, so we
# add an idle callback to do the rebuild once the
# upheaval is over.
if not self.__childrenChangedPending :
GafferUI.EventLoop.addIdleCallback( self.__childrenChanged )
self.__childrenChangedPending = True
def __childrenChanged( self ) :
if not self.__column.visible() :
return
self.__updateChildPlugUIs()
self.__childrenChangedPending = False
return False # removes the callback
def __collapseStateChanged( self, widget ) :
assert( widget is self.__collapsible )
self.__setStoredCollapseState( self.getPlug(), widget.getCollapsed() )
def __setStoredCollapseState( self, plug, collapsed ) :
node = plug.node()
if "__uiCollapsed" in node :
storagePlug = node["__uiCollapsed"]
else :
storagePlug = Gaffer.ObjectPlug(
defaultValue = IECore.CompoundData(),
flags = Gaffer.Plug.Flags.Default & ~Gaffer.Plug.Flags.Serialisable,
)
node["__uiCollapsed"] = storagePlug
storage = storagePlug.getValue()
# we use the label in the key so that SectionedPlugValueWidgets and the like
# store a state per section.
key = plug.relativeName( node ) + "|" + self.__label
storage[key] = IECore.BoolData( collapsed )
storagePlug.setValue( storage )
def __getStoredCollapseState( self, plug, default ) :
node = plug.node()
if "__uiCollapsed" not in node :
return default
storagePlug = node["__uiCollapsed"]
storage = storagePlug.getValue()
key = plug.relativeName( node ) + "|" + self.__label
value = storage.get( key )
if value is None :
return default
return value.value
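# A minimal usage sketch (not part of the original file), assuming a hypothetical
# node with a compound plug called "options" and a child plug called "enabled".
# As noted above, new code should use GafferUI.PlugLayout instead.
#
#   def _optionsSummary( plug ) :
#       # Summarise whichever child values matter for the collapsed header.
#       return "enabled" if plug["enabled"].getValue() else ""
#
#   widget = CompoundPlugValueWidget(
#       node["options"],          # hypothetical compound plug
#       collapsed = True,         # start collapsed; state is then stored per node
#       summary = _optionsSummary,
#   )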
| appleseedhq/gaffer | python/GafferCortexUI/CompoundPlugValueWidget.py | Python | bsd-3-clause | 11,940 | 0.039698 |
from rest_framework.serializers import ModelSerializer
from ..models import BackScratcher
class ProductsSerializer(ModelSerializer):
class Meta:
model = BackScratcher
fields = ['id', 'name', 'description', 'price', 'sizes']
| agustin380/scratchbling | src/products/api/serializers.py | Python | gpl-3.0 | 247 | 0 |
from a10sdk.common.A10BaseClass import A10BaseClass
class AddressList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ipv4_address: {"type": "string", "description": "IP address", "format": "ipv4-address"}
:param ipv4_netmask: {"type": "string", "description": "IP subnet mask", "format": "ipv4-netmask"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "address-list"
self.DeviceProxy = ""
self.ipv4_address = ""
self.ipv4_netmask = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Ip(A10BaseClass):
"""Class Description::
Global IP configuration subcommands.
Class ip supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param address_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"ipv4-address": {"type": "string", "description": "IP address", "format": "ipv4-address"}, "optional": true, "ipv4-netmask": {"type": "string", "description": "IP subnet mask", "format": "ipv4-netmask"}}}]}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/interface/loopback/{ifnum}/ip`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ip"
self.a10_url="/axapi/v3/interface/loopback/{ifnum}/ip"
self.DeviceProxy = ""
self.address_list = []
self.ospf = {}
self.uuid = ""
self.rip = {}
self.router = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
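# Illustrative sketch (not part of the SDK): constructing the `ip` object for a
# loopback interface. The address dict shape follows the schema in the class
# docstring above; the device session/proxy wiring is assumed and omitted here.
#
#   ip = Ip(
#       address_list=[{"ipv4-address": "10.0.0.1",
#                      "ipv4-netmask": "255.255.255.255"}],
#   )
#   # The URL template still needs {ifnum} substituted before any REST call,
#   # e.g. /axapi/v3/interface/loopback/0/ip for loopback 0.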
| amwelch/a10sdk-python | a10sdk/core/interface/interface_loopback_ip.py | Python | apache-2.0 | 2,191 | 0.008672 |
import saas_server
import res_config
| fevxie/odoo-saas-tools | saas_server/models/__init__.py | Python | lgpl-3.0 | 37 | 0 |
""" Compute HalsteadMetric Metrics.
    Halstead metrics, created by Maurice H. Halstead in 1977, consist
of a number of measures, including:
Program length (N): N = N1 + N2
Program vocabulary (n): n = n1 + n2
Volume (V): V = N * LOG2(n)
Difficulty (D): D = (n1/2) * (N2/n2)
Effort (E): E = D * V
Average Volume (avgV) avgV = sum(V)/m
Average Effort (avgE) avgE = sum(E)/m
where:
n1 = number of distinct operands
n2 = number of distinct operators
N1 = total number of operands
N2 = total number of operators
m = number of modules
    What constitutes an operand or operator is often open to
interpretation. In this implementation for the Python language:
operators are of type OP, INDENT, DEDENT, or NEWLINE since these
serve the same purpose as braces and semicolon in C/C++, etc.
operands are not operators or whitespace or comments
(this means operands include keywords)
$Id: halstead.py,v 1.3 2005/09/17 04:28:12 rcharney Exp $
"""
__version__ = "$Revision: 1.3 $"[11:-2]
__author__ = 'Reg. Charney <pymetrics@charneyday.com>'
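# Worked example of the formulas above (illustrative, not part of the original
# module; the counts are made up), using this module's convention that n1/N1
# refer to operands and n2/N2 to operators:
#   with n1 = 4 distinct operands, n2 = 3 distinct operators,
#        N1 = 10 total operands,   N2 = 8 total operators:
#   N = N1 + N2 = 18
#   n = n1 + n2 = 7
#   V = N * LOG2(n)      = 18 * log2(7)  ~= 50.5
#   D = (n1/2) * (N2/n2) = 2 * (8/3)     ~= 5.33
#   E = D * V                            ~= 269.5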
import math
import time
from metricbase import MetricBase
from globals import *
class HalsteadMetric( MetricBase ):
""" Compute various HalsteadMetric metrics. """
totalV = 0
totalE = 0
numModules = 0
def __init__( self, context, runMetrics, metrics, pa, *args, **kwds ):
""" Initialization for the HalsteadMetric metrics."""
self.inFile = context['inFile']
self.context = context
self.runMetrics = runMetrics
self.metrics = metrics
self.pa = pa
self.inFile = context['inFile']
self.numOperators = 0
self.numOperands = 0
self.uniqueOperators = {}
self.uniqueOperands = {}
HalsteadMetric.numModules += 1
# initialize category accummulators as dictionaries
self.hsDict = {}
for t in ['token','stmt','block','function','class','module','run']:
self.uniqueOperators[t] = {}
self.uniqueOperands[t] = {}
#for v in ['N','N1','N2','n','n1','n2','V','D','E','avgV','avgE']:
# self.hsDict[(t,v)] = 0
def processToken( self, currentFcn, currentClass, tok, *args, **kwds ):
""" Collect token data for Halstead metrics."""
if tok.type in [WS, EMPTY, ENDMARKER, NEWLINE, EMPTY, COMMENT]:
pass
elif tok.type in [OP, INDENT, DEDENT]:
self.numOperators += 1
self.uniqueOperators['token'][tok.text] = self.uniqueOperators['token'].get(tok.text, 0) + 1
else:
self.numOperands += 1
sDict = self.context.__repr__()
k = (sDict,tok.text)
self.uniqueOperands['token'][k] = self.uniqueOperands['token'].get(tok.text, 0) + 1
def processStmt( self, currentFcn, currentClass, stmt, *args, **kwds ):
""" Collect statement data for Halstead metrics."""
result = None
# the two lines following this comment would compute the Halstead
# metrics for each statement in the run, However, it is
# normally overkill, so these lines are commented out.
#lineNum = stmt[0].row
#result = self.computeCategory( 'stmt', lineNum, stmt )
return result
def processBlock( self, currentFcn, currentClass, block, *args, **kwds ):
""" Collect block data for Halstead metrics."""
result = None
# the two lines following this comment would compute the Halstead
# metrics for each statement in the run, However, it is
# normally overkill, so the two lines are commented out.
#blockNum = self.context['blockNum']
#result = self.computeCategory( 'block', blockNum, block )
return result
def processFunction( self, currentFcn, currentClass, fcn, *args, **kwds ):
""" Collect function data for Halstead metrics."""
result = self.computeCategory( 'function', currentFcn, fcn )
return result
def processClass( self, currentFcn, currentClass, cls, *args, **kwds ):
""" Collect class data for Halstead metrics."""
result = self.computeCategory( 'class', currentClass, cls )
return result
def processModule( self, moduleName, mod, *args, **kwds ):
""" Collect module data for Halstead metrics."""
result = self.computeCategory( 'module', moduleName, mod )
return result
def processRun( self, run, *args, **kwds ):
""" Collect run data for Halstead metrics."""
datestamp = time.strftime("%Y-%m-%d.%H:%m%Z",time.localtime())
result = self.computeCategory( 'run', datestamp, run )
return result
def __LOGb( self, x, b ):
""" convert to LOGb(x) from natural logs."""
try:
result = math.log( x ) / math.log ( b )
except OverflowError:
result = 1.0
return result
def computeIncr( self, cat, tok, uniqueOperators, uniqueOperands ):
""" Compute increment for token depending on which category it falls into."""
operatorIncr = operandIncr = 0
if tok.type in [WS, EMPTY, ENDMARKER, NEWLINE, EMPTY, COMMENT]:
return (operatorIncr,operandIncr)
if tok.type in [OP, INDENT, DEDENT]:
operatorIncr = 1
uniqueOperators[tok.text] = uniqueOperators.get(tok.text, 0) + 1
else:
operandIncr = 1
uniqueOperands[tok.text] = uniqueOperands.get(tok.text,0) + 1
return (operatorIncr,operandIncr)
def computeCategory( self, cat, mod, lst ):
""" Collection data for cat of code."""
modID= id( mod )
numOperators = numOperands = 0
for tok in lst:
result = self.computeIncr( cat, tok, self.uniqueOperators[cat], self.uniqueOperands[cat] )
numOperators += result[0]
numOperands += result[1]
result = self.compute( cat, modID, numOperators, numOperands, self.uniqueOperators[cat], self.uniqueOperands[cat] )
return result
def compute( self, cat, modID, numOperators, numOperands, uniqueOperators, uniqueOperands, *args, **kwds ):
""" Do actual calculations here."""
n1 = len( uniqueOperands )
n2 = len( uniqueOperators )
N1 = numOperands
N2 = numOperators
N = N1 + N2
n = n1 + n2
V = float(N) * self.__LOGb( n, 2 )
try:
D = (float(n1)/2.0) * (float(N2)/float(n2))
except ZeroDivisionError:
D = 0.0
E = D * V
HalsteadMetric.totalV += V
HalsteadMetric.totalE += E
avgV = HalsteadMetric.totalV / HalsteadMetric.numModules
avgE = HalsteadMetric.totalE / HalsteadMetric.numModules
self.hsDict[(cat,modID,'n1')] = n1
self.hsDict[(cat,modID,'n2')] = n2
self.hsDict[(cat,modID,'N1')] = N1
self.hsDict[(cat,modID,'N2')] = N2
self.hsDict[(cat,modID,'N')] = N
self.hsDict[(cat,modID,'n')] = n
self.hsDict[(cat,modID,'V')] = V
self.hsDict[(cat,modID,'D')] = D
self.hsDict[(cat,modID,'E')] = E
self.hsDict[(cat,modID,'numModules')] = HalsteadMetric.numModules
self.hsDict[(cat,modID,'avgV')] = avgV
self.hsDict[(cat,modID,'avgE')] = avgE
return self.hsDict
def display( self, cat=None ):
""" Display the computed Halstead Metrics."""
if self.pa.quietSw:
return self.hsDict
hdr = "\nHalstead Metrics for %s" % self.inFile
print hdr
print "-"*len(hdr) + '\n'
if len( self.hsDict ) == 0:
print "%-8s %-30s " % ('**N/A**','All Halstead metrics are zero')
return self.hsDict
keyList = self.hsDict.keys()
keyList.sort()
if 0:
for k,i,v in keyList:
if cat:
if k!=cat:
continue
print "%14.2f %s %s %s" % (self.hsDict[(k,i,v)],k,i,v)
print
hdr1 = "Category Identifier D E N N1 N2 V avgE avgV n n1 n2"
hdr2 = "-------- ---------------------------------- -------- -------- ----- ---- ---- -------- -------- -------- ----- ---- ----"
# 12345678 123456789012345678901234567890 12345678 12345678 12345 1234 1234 12345678 12345678 12345678 12345 1234 1234
fmt1 = "%-8s %-33s "
fmt2 = "%8.2e %8.2e %5d %4d %4d %8.2e %8.2e %8.2e %5d %4d %4d"
# this loop uses the Main Line Standards break logic. It does this to convert the
# normal vertical output to a horizontal format. The control variables are the
# category name and the identifier value.
oldK = oldI = None
vDict = {}
vList = []
hdrSw = True # output header for first time thru
for k,i,v in keyList:
# only print data for the category we want
if cat:
if k != cat:
continue
if v == "numModules": # ignore this value for now
continue
if (oldK,oldI) != (k,i): # change in category/id
if oldK and oldI: # this is not first time thru
#t = tuple([self.hsDict[(k,i,v)] for v in vList])
t = tuple([vDict[v] for v in vList])
print fmt1 % (k,i),
print fmt2 % t
# initialize for next set of category/id
vDict = {}
vDict[v] = self.hsDict[(k,i,v)]
vList = []
vList.append( v )
oldK = k
oldI = i
if hdrSw:
print hdr1
print hdr2
hdrSw = False
else: # we are still in the same category/id
vDict[v] = self.hsDict[(k,i,v)]
vList.append( v )
print
return self.hsDict
| ipmb/PyMetrics | PyMetrics/halstead.py | Python | gpl-2.0 | 10,366 | 0.016593 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-28 13:17
from __future__ import unicode_literals
from django.db import migrations
import ideascube.models
class Migration(migrations.Migration):
dependencies = [
('library', '0005_auto_20160712_1324'),
]
operations = [
migrations.AlterField(
model_name='book',
name='lang',
field=ideascube.models.LanguageField(choices=[
('af', 'Afrikaans'), ('am', 'አማርኛ'), ('ar', 'العربيّة'),
('ast', 'Asturianu'), ('az', 'Azərbaycanca'),
('be', 'Беларуская'), ('bg', 'Български'), ('bm', 'Bambara'),
('bn', 'বাংলা'), ('br', 'Brezhoneg'), ('bs', 'Bosanski'),
('ca', 'Català'), ('cs', 'Česky'), ('cy', 'Cymraeg'),
('da', 'Dansk'), ('de', 'Deutsch'), ('el', 'Ελληνικά'),
('en', 'English'), ('en-au', 'Australian english'),
('en-gb', 'British english'), ('eo', 'Esperanto'),
('es', 'Español'), ('es-ar', 'Español de argentina'),
('es-co', 'Español de colombia'),
('es-mx', 'Español de mexico'),
('es-ni', 'Español de nicaragua'),
('es-ve', 'Español de venezuela'), ('et', 'Eesti'),
('eu', 'Basque'), ('fa', 'فارسی'), ('fi', 'Suomi'),
('fr', 'Français'), ('fy', 'Frysk'), ('ga', 'Gaeilge'),
('gd', 'Gàidhlig'), ('gl', 'Galego'), ('he', 'עברית'),
('hi', 'Hindi'), ('hr', 'Hrvatski'), ('hu', 'Magyar'),
('ia', 'Interlingua'), ('id', 'Bahasa indonesia'),
('io', 'Ido'), ('is', 'Íslenska'), ('it', 'Italiano'),
('ja', '日本語'), ('ka', 'ქართული'), ('kk', 'Қазақ'),
('km', 'Khmer'), ('kn', 'Kannada'), ('ko', '한국어'),
('ku', 'Kurdî'), ('lb', 'Lëtzebuergesch'), ('ln', 'Lingála'),
('lt', 'Lietuviškai'), ('lv', 'Latviešu'),
('mk', 'Македонски'), ('ml', 'Malayalam'), ('mn', 'Mongolian'),
('mr', 'मराठी'), ('my', 'မြန်မာဘာသာ'),
('nb', 'Norsk (bokmål)'), ('ne', 'नेपाली'),
('nl', 'Nederlands'), ('nn', 'Norsk (nynorsk)'),
('no', 'Norsk'), ('os', 'Ирон'), ('pa', 'Punjabi'),
('pl', 'Polski'), ('ps', 'پښتو'), ('pt', 'Português'),
('pt-br', 'Português brasileiro'), ('rn', 'Kirundi'),
('ro', 'Română'), ('ru', 'Русский'), ('sk', 'Slovensky'),
('sl', 'Slovenščina'), ('so', 'Af-soomaali'), ('sq', 'Shqip'),
('sr', 'Српски'), ('sr-latn', 'Srpski (latinica)'),
('sv', 'Svenska'), ('sw', 'Kiswahili'), ('ta', 'தமிழ்'),
('te', 'తెలుగు'), ('th', 'ภาษาไทย'), ('ti', 'ትግርኛ'),
('tr', 'Türkçe'), ('tt', 'Татарча'), ('udm', 'Удмурт'),
('uk', 'Українська'), ('ur', 'اردو'), ('vi', 'Tiếng việt'),
('wo', 'Wolof'), ('zh-hans', '简体中文'), ('zh-hant', '繁體中文')
], max_length=10, verbose_name='Language'),
),
]
| ideascube/ideascube | ideascube/library/migrations/0006_auto_20160728_1317.py | Python | agpl-3.0 | 3,408 | 0 |
from sqlalchemy import Column, String, Integer
from BusTrack.repository import Base, session
from BusTrack.repository.models import STRING_LEN_SMALL
class UserType(Base):
__tablename__ = 'user_type'
id = Column(Integer, primary_key=True)
role_name = Column(String(STRING_LEN_SMALL))
@staticmethod
def __create_default_role__():
if session.query(UserType).count() != 0:
return
driver = UserType()
driver.role_name = 'Driver'
parent = UserType()
parent.role_name = 'Parent'
admin = UserType()
admin.role_name = 'Admin'
session.add(driver)
session.add(parent)
session.add(admin)
session.commit()
session.close()
| Rjtsahu/School-Bus-Tracking | BusTrack/repository/models/UserType.py | Python | gpl-3.0 | 743 | 0 |
import subprocess, threading
from subprocess import PIPE
class TimedSubProc (object):
def __init__(self, cmd):
self.cmd = cmd.split()
self.process = None
def run(self, timeout=5, stdin=None, stdout=PIPE, stderr=PIPE):
self.output = None
def target():
self.process = subprocess.Popen(self.cmd, stdin=stdin, stdout=stdout, stderr=stderr)
self.output = self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print 'Process timeout! Terminating...'
self.process.terminate()
thread.join()
return False
return (self.process.returncode, self.output[0], self.output[1])
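# Usage sketch (assumed command, not part of the original file): run a command with
# a 3-second budget; run() returns False on timeout, otherwise a tuple of
# (returncode, stdout, stderr).
#
#   proc = TimedSubProc('sleep 10')
#   result = proc.run(timeout=3)
#   if result is False:
#       print 'command timed out'
#   else:
#       returncode, out, err = result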
| maxspad/MGrader | autograder/modules/questions/timedsubproc.py | Python | bsd-3-clause | 861 | 0.013937 |
# -*- coding: utf-8 -*-
"""
Various utilities
Authors: Ed Rousseau <rousseau@redhat.com>, Zack Cerza <zcerza@redhat.com>, David Malcolm <dmalcolm@redhat.com>
"""
__author__ = """Ed Rousseau <rousseau@redhat.com>,
Zack Cerza <zcerza@redhat.com>,
David Malcolm <dmalcolm@redhat.com>
"""
import os
import sys
import subprocess
import cairo
import predicate
import errno
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk
from gi.repository import GObject
from config import config
from time import sleep
from logging import debugLogger as logger
from logging import TimeStamp
from __builtin__ import file
def screenshot(file='screenshot.png', timeStamp=True):
"""
    This function takes a screenshot of the root window using GdkPixbuf.
    The file argument may be specified as 'foo', 'foo.png', or using any other
    extension that GdkPixbuf supports. PNG is the default.
By default, screenshot filenames are in the format of foo_YYYYMMDD-hhmmss.png .
The timeStamp argument may be set to False to name the file foo.png.
"""
if not isinstance(timeStamp, bool):
        raise TypeError("timeStamp must be True or False")
# config is supposed to create this for us. If it's not there, bail.
assert os.path.isdir(config.scratchDir)
baseName = ''.join(file.split('.')[0:-1])
fileExt = file.split('.')[-1].lower()
if not baseName:
baseName = file
fileExt = 'png'
if timeStamp:
ts = TimeStamp()
newFile = ts.fileStamp(baseName) + '.' + fileExt
path = config.scratchDir + newFile
else:
newFile = baseName + '.' + fileExt
path = config.scratchDir + newFile
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import GdkPixbuf
rootWindow = Gdk.get_default_root_window()
geometry = rootWindow.get_geometry()
pixbuf = GdkPixbuf.Pixbuf(colorspace=GdkPixbuf.Colorspace.RGB,
has_alpha=False,
bits_per_sample=8,
width=geometry[2],
height=geometry[3])
pixbuf = Gdk.pixbuf_get_from_window(rootWindow, 0, 0,
geometry[2], geometry[3])
# GdkPixbuf.Pixbuf.save() needs 'jpeg' and not 'jpg'
if fileExt == 'jpg':
fileExt = 'jpeg'
try:
pixbuf.savev(path, fileExt, [], [])
except GObject.GError:
raise ValueError("Failed to save screenshot in %s format" % fileExt)
assert os.path.exists(path)
logger.log("Screenshot taken: " + path)
return path
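# Naming examples for the behaviour described above (paths are illustrative; the
# directory actually used is config.scratchDir):
#   screenshot('login_window')                       -> <scratchDir>/login_window_20240101-120000.png
#   screenshot('login_window.png', timeStamp=False)  -> <scratchDir>/login_window.png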
def run(string, timeout=config.runTimeout, interval=config.runInterval, desktop=None, dumb=False, appName=''):
"""
Runs an application. [For simple command execution such as 'rm *', use os.popen() or os.system()]
If dumb is omitted or is False, polls at interval seconds until the application is finished starting, or until timeout is reached.
If dumb is True, returns when timeout is reached.
"""
if not desktop:
from tree import root as desktop
args = string.split()
os.environ['GTK_MODULES'] = 'gail:atk-bridge'
pid = subprocess.Popen(args, env=os.environ).pid
if not appName:
appName = args[0]
if dumb:
# We're starting a non-AT-SPI-aware application. Disable startup
# detection.
doDelay(timeout)
else:
# Startup detection code
# The timing here is not totally precise, but it's good enough for now.
time = 0
while time < timeout:
time = time + interval
try:
for child in desktop.children[::-1]:
if child.name == appName:
for grandchild in child.children:
if grandchild.roleName == 'frame':
from procedural import focus
focus.application.node = child
doDelay(interval)
return pid
except AttributeError: # pragma: no cover
pass
doDelay(interval)
return pid
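# Example (the application name is an assumption): launch gedit and poll until its
# frame appears, leaving focus.application pointed at it.
#   pid = run('gedit')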
def doDelay(delay=None):
"""
Utility function to insert a delay (with logging and a configurable
default delay)
"""
if delay is None:
delay = config.defaultDelay
if config.debugSleep:
logger.log("sleeping for %f" % delay)
sleep(delay)
class Highlight (Gtk.Window): # pragma: no cover
def __init__(self, x, y, w, h): # pragma: no cover
super(Highlight, self).__init__()
self.set_decorated(False)
self.set_has_resize_grip(False)
self.set_default_size(w, h)
self.screen = self.get_screen()
self.visual = self.screen.get_rgba_visual()
if self.visual is not None and self.screen.is_composited():
self.set_visual(self.visual)
self.set_app_paintable(True)
self.connect("draw", self.area_draw)
self.show_all()
self.move(x, y)
def area_draw(self, widget, cr): # pragma: no cover
cr.set_source_rgba(.0, .0, .0, 0.0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
cr.set_operator(cairo.OPERATOR_OVER)
cr.set_source_rgb(0.9, 0.1, 0.1)
cr.set_line_width(6)
cr.rectangle(0, 0, self.get_size()[0], self.get_size()[1])
cr.stroke()
class Blinker(object): # pragma: no cover
INTERVAL_MS = 1000
main_loop = GObject.MainLoop()
def __init__(self, x, y, w, h): # pragma: no cover
self.highlight_window = Highlight(x, y, w, h)
if self.highlight_window.screen.is_composited() is not False:
self.timeout_handler_id = GObject.timeout_add(
Blinker.INTERVAL_MS, self.destroyHighlight)
self.main_loop.run()
else:
self.highlight_window.destroy()
def destroyHighlight(self): # pragma: no cover
self.highlight_window.destroy()
self.main_loop.quit()
return False
class Lock(object):
"""
A mutex implementation that uses atomicity of the mkdir operation in UNIX-like
    systems. This can be used by scripts to provide for mutual exclusion, either in single
    scripts using threads etc., or e.g. to handle situations of possible collisions among
    multiple running scripts. You can choose to make randomized single-script locks
    or more general locks if you do not choose to randomize the lockdir name
"""
def __init__(self, location='/tmp', lockname='dogtail_lockdir_', randomize=True):
"""
You can change the default lockdir location or name. Setting randomize to
        False will result in no random string being appended to the lockdir name.
"""
self.lockdir = os.path.join(os.path.normpath(location), lockname)
if randomize:
self.lockdir = "%s%s" % (self.lockdir, self.__getPostfix())
def lock(self):
"""
        Creates a lockdir based on the settings given at Lock() instance creation.
        Raises an OSError exception if the lock is already present. Should be
atomic on POSIX compliant systems.
"""
locked_msg = 'Dogtail lock: Already locked with the same lock'
if not os.path.exists(self.lockdir):
try:
os.mkdir(self.lockdir)
return self.lockdir
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(self.lockdir):
raise OSError(locked_msg)
else:
raise OSError(locked_msg)
def unlock(self):
"""
Removes a lock. Will raise OSError exception if the lock was not present.
Should be atomic on POSIX compliant systems.
"""
import os # have to import here for situations when executed from __del__
if os.path.exists(self.lockdir):
try:
os.rmdir(self.lockdir)
except OSError as e:
                if e.errno == errno.EEXIST:
raise OSError('Dogtail unlock: lockdir removed elsewhere!')
else:
raise OSError('Dogtail unlock: not locked')
def __del__(self):
"""
        Makes sure the lock is removed when the process ends (though not when the process is killed).
"""
self.unlock()
def __getPostfix(self):
import random
import string
return ''.join(random.choice(string.letters + string.digits) for x in range(5))
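# Illustrative Lock usage (a sketch, not part of the original module): serialise a
# critical section shared between independently started dogtail scripts.
#
#   script_lock = Lock(lockname='my_suite_lock_', randomize=False)
#   try:
#       script_lock.lock()
#       # ... interact with the application exclusively ...
#   finally:
#       script_lock.unlock()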
a11yDConfKey = 'org.gnome.desktop.interface'
def isA11yEnabled():
"""
Checks if accessibility is enabled via DConf.
"""
from gi.repository.Gio import Settings
InterfaceSettings = Settings(a11yDConfKey)
dconfEnabled = InterfaceSettings.get_boolean('toolkit-accessibility')
if os.environ.get('GTK_MODULES', '').find('gail:atk-bridge') == -1:
envEnabled = False
else:
envEnabled = True # pragma: no cover
return (dconfEnabled or envEnabled)
def bailBecauseA11yIsDisabled():
if sys.argv[0].endswith("pydoc"):
return # pragma: no cover
try:
if file("/proc/%s/cmdline" % os.getpid()).read().find('epydoc') != -1:
return # pragma: no cover
except: # pragma: no cover
pass # pragma: no cover
logger.log("Dogtail requires that Assistive Technology support be enabled."
"\nYou can enable accessibility with sniff or by running:\n"
"'gsettings set org.gnome.desktop.interface toolkit-accessibility true'\nAborting...")
sys.exit(1)
def enableA11y(enable=True):
"""
Enables accessibility via DConf.
"""
from gi.repository.Gio import Settings
InterfaceSettings = Settings(a11yDConfKey)
InterfaceSettings.set_boolean('toolkit-accessibility', enable)
def checkForA11y():
"""
Checks if accessibility is enabled, and halts execution if it is not.
"""
if not isA11yEnabled(): # pragma: no cover
bailBecauseA11yIsDisabled()
def checkForA11yInteractively(): # pragma: no cover
"""
Checks if accessibility is enabled, and presents a dialog prompting the
user if it should be enabled if it is not already, then halts execution.
"""
if isA11yEnabled():
return
from gi.repository import Gtk
dialog = Gtk.Dialog('Enable Assistive Technology Support?',
None,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
(Gtk.STOCK_QUIT, Gtk.ResponseType.CLOSE,
"_Enable", Gtk.ResponseType.ACCEPT))
question = """Dogtail requires that Assistive Technology Support be enabled for it to function. Would you like to enable Assistive Technology support now?
Note that you will have to log out for the change to fully take effect.
""".strip()
dialog.set_default_response(Gtk.ResponseType.ACCEPT)
questionLabel = Gtk.Label(label=question)
questionLabel.set_line_wrap(True)
dialog.vbox.pack_start(questionLabel, True, True, 0)
dialog.show_all()
result = dialog.run()
if result == Gtk.ResponseType.ACCEPT:
logger.log("Enabling accessibility...")
enableA11y()
elif result == Gtk.ResponseType.CLOSE:
bailBecauseA11yIsDisabled()
dialog.destroy()
class GnomeShell(object): # pragma: no cover
"""
    Utility class to help work with certain attributes of gnome-shell.
    Currently that means handling the Application menu available for apps
    on the top gnome-shell panel. Searching for the menu and its items is
    somewhat tricky due to the fuzzy a11y tree of gnome-shell, mainly since the
    actual menu is not present as a child of the menu-spawning button. Also,
    the menus get constructed/destroyed on the fly with application focus
    changes. Thus the current application name as displayed, plus a reference
    known menu item (with 'Quit' as default), are required by these methods.
"""
def __init__(self, classic_mode=False):
from tree import root
self.shell = root.application('gnome-shell')
def getApplicationMenuList(self, search_by_item='Quit'):
"""
Returns list of all menu item nodes. Searches for the menu by a reference item.
Provide a different item name, if the 'Quit' is not present - but beware picking one
present elsewhere, like 'Lock' or 'Power Off' present under the user menu.
"""
matches = self.shell.findChildren(
predicate.GenericPredicate(name=search_by_item, roleName='label'))
for match in matches:
ancestor = match.parent.parent.parent
if ancestor.roleName == 'panel':
return ancestor.findChildren(predicate.GenericPredicate(roleName='label'))
from tree import SearchError
raise SearchError("Could not find the Application menu based on '%s' item. Please provide an existing reference item"
% search_by_item)
def getApplicationMenuButton(self, app_name):
"""
Returns the application menu 'button' node as present on the gnome-shell top panel.
"""
try:
return self.shell[0][0][3].child(app_name, roleName='label')
except:
from tree import SearchError
raise SearchError(
"Application menu button of %s could not be found within gnome-shell!" % app_name)
def getApplicationMenuItem(self, item, search_by_item='Quit'):
"""
        Returns a particular menu item node. Uses 'Quit' or a custom item name for reference, but also
attempts to use the given item if the general reference fails.
"""
try:
menu_items = self.getApplicationMenuList(search_by_item)
except:
menu_items = self.getApplicationMenuList(item)
for node in menu_items:
if node.name == item:
return node
raise Exception(
'Could not find the item, did application focus change?')
def clickApplicationMenuItem(self, app_name, item, search_by_item='Quit'):
"""
Executes the given menu item through opening the menu first followed
by a click at the particular item. The menu search reference 'Quit'
may be customized. Also attempts to use the given item for reference
if search fails with the default/custom one.
"""
self.getApplicationMenuButton(app_name).click()
self.getApplicationMenuItem(item, search_by_item).click()
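# Illustrative GnomeShell usage (the application name 'gedit' is an assumption):
# quit a focused application through its gnome-shell Application menu.
#
#   shell = GnomeShell()
#   shell.clickApplicationMenuItem('gedit', 'Quit')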
| Lorquas/dogtail | dogtail/utils.py | Python | gpl-2.0 | 14,664 | 0.002523 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Burmese language.
.. seealso:: http://en.wikipedia.org/wiki/Burmese_language
"""
from translate.lang import common
class my(common.Common):
"""This class represents Burmese."""
puncdict = {
u".": u"။",
}
ignoretests = ["startcaps", "simplecaps"]
| bluemini/kuma | vendor/packages/translate/lang/my.py | Python | mpl-2.0 | 1,082 | 0 |
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from answer import Answer
from base import Base
class Question(Base):
__tablename__ = 'question'
id = Column(Integer, primary_key=True)
question = Column(String(256))
answers = relationship('Answer', backref='question',
lazy='dynamic')
test_id = Column(Integer, ForeignKey('test.id'))
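# Usage sketch (assumes a configured SQLAlchemy session elsewhere in the package):
# 'answers' is a dynamic relationship, so it can be counted or filtered lazily, and
# each Answer gains a 'question' backref.
#
#   question = session.query(Question).get(1)
#   total = question.answers.count()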
| ZhuChara2004/MA-Summer-Practice-2016 | homeworks/team-2/prof-test/backend/models/question.py | Python | gpl-3.0 | 438 | 0.004566 |
###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block and return EWOULDBLOCK error
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
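# Example WSGI application for this server (a sketch, not part of the original file).
# Saved as wsgiapp.py, it could be served with: python <this server file> wsgiapp:app
#
#   def app(environ, start_response):
#       body = 'Hello from a forked worker!\n'
#       start_response('200 OK', [('Content-Type', 'text/plain'),
#                                 ('Content-Length', str(len(body)))])
#       return [body]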
| hamishcunningham/fishy-wifi | wegrow-cloudside/elf-data-collector/webserver4/server-again.py | Python | agpl-3.0 | 6,539 | 0.001682 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="serverless-wsgi",
version="3.0.0",
python_requires=">3.6",
author="Logan Raarup",
author_email="logan@logan.dk",
description="Amazon AWS API Gateway WSGI wrapper",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/logandk/serverless-wsgi",
py_modules=["serverless_wsgi"],
install_requires=["werkzeug>2"],
classifiers=(
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
keywords="wsgi serverless aws lambda api gateway apigw flask django pyramid",
)
| logandk/serverless-wsgi | setup.py | Python | mit | 867 | 0.001153 |
# Copyright (c) 2012 Adam Stokes <adam.stokes@canonical.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, DebianPlugin, UbuntuPlugin
class Dpkg(Plugin, DebianPlugin, UbuntuPlugin):
"""Debian Package Management
"""
plugin_name = 'dpkg'
profiles = ('sysmgmt', 'packagemanager')
def setup(self):
self.add_cmd_output("dpkg -l", root_symlink="installed-debs")
if self.get_option("verify"):
self.add_cmd_output("dpkg -V")
self.add_cmd_output("dpkg -C")
self.add_copy_spec([
"/var/cache/debconf/config.dat",
"/etc/debconf.conf"
])
if not self.get_option("all_logs"):
limit = self.get_option("log_size")
self.add_copy_spec_limit("/var/log/dpkg.log",
sizelimit=limit)
else:
self.add_copy_spec("/var/log/dpkg.log*")
# vim: et ts=4 sw=4
| lmiccini/sos | sos/plugins/dpkg.py | Python | gpl-2.0 | 1,590 | 0 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test classes for code snippet for modeling article."""
from google.appengine.ext import ndb
from tests import AppEngineTestbedCase
from . import relation_model_models as models
class ContactTestCase(AppEngineTestbedCase):
"""A test case for the Contact model with relationship model."""
def setUp(self):
"""Creates 1 contact and 1 company.
Assuming the contact belongs to tmatsuo's addressbook.
"""
super(ContactTestCase, self).setUp()
self.myaddressbook_key = ndb.Key('AddressBook', 'tmatsuo')
mary = models.Contact(parent=self.myaddressbook_key, name='Mary')
mary.put()
self.mary_key = mary.key
google = models.Company(name='Google')
google.put()
self.google_key = google.key
candit = models.Company(name='Candit')
candit.put()
self.candit_key = candit.key
def test_relationship(self):
"""Two companies hire Mary."""
mary = self.mary_key.get()
google = self.google_key.get()
candit = self.candit_key.get()
# first google hires Mary
models.ContactCompany(parent=self.myaddressbook_key,
contact=mary.key,
company=google.key,
title='engineer').put()
# then another company named 'candit' hires Mary too
models.ContactCompany(parent=self.myaddressbook_key,
contact=mary.key,
company=candit.key,
title='president').put()
# get the list of companies that Mary belongs to
self.assertEqual(len(mary.companies), 2)
| JPO1/python-docs-samples | appengine/ndb/modeling/relation_model_models_test.py | Python | apache-2.0 | 2,299 | 0 |
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the following classes:
* :class:`~aeneas.validator.Validator`, assessing whether user input is well-formed;
* :class:`~aeneas.validator.ValidatorResult`, a record holding validation result and possibly messages.
"""
from __future__ import absolute_import
from __future__ import print_function
import io
from aeneas.analyzecontainer import AnalyzeContainer
from aeneas.container import Container
from aeneas.container import ContainerFormat
from aeneas.executetask import AdjustBoundaryAlgorithm
from aeneas.hierarchytype import HierarchyType
from aeneas.idsortingalgorithm import IDSortingAlgorithm
from aeneas.logger import Loggable
from aeneas.runtimeconfiguration import RuntimeConfiguration
from aeneas.syncmap import SyncMapFormat
from aeneas.syncmap import SyncMapHeadTailFormat
from aeneas.textfile import TextFileFormat
import aeneas.globalconstants as gc
import aeneas.globalfunctions as gf
class Validator(Loggable):
"""
A validator to assess whether user input is well-formed.
:param rconf: a runtime configuration
:type rconf: :class:`~aeneas.runtimeconfiguration.RuntimeConfiguration`
:param logger: the logger object
:type logger: :class:`~aeneas.logger.Logger`
"""
ALLOWED_VALUES = [
#
# NOTE disabling the check on language since now we support multiple TTS
# COMMENTED (
# COMMENTED gc.PPN_JOB_LANGUAGE,
# COMMENTED Language.ALLOWED_VALUES
# COMMENTED ),
# COMMENTED (
# COMMENTED gc.PPN_TASK_LANGUAGE,
# COMMENTED Language.ALLOWED_VALUES
# COMMENTED ),
#
(
gc.PPN_JOB_IS_HIERARCHY_TYPE,
HierarchyType.ALLOWED_VALUES
),
(
gc.PPN_JOB_OS_CONTAINER_FORMAT,
ContainerFormat.ALLOWED_VALUES
),
(
gc.PPN_JOB_OS_HIERARCHY_TYPE,
HierarchyType.ALLOWED_VALUES
),
(
gc.PPN_TASK_IS_TEXT_FILE_FORMAT,
TextFileFormat.ALLOWED_VALUES
),
(
gc.PPN_TASK_OS_FILE_FORMAT,
SyncMapFormat.ALLOWED_VALUES
),
(
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_SORT,
IDSortingAlgorithm.ALLOWED_VALUES
),
(
gc.PPN_TASK_ADJUST_BOUNDARY_ALGORITHM,
AdjustBoundaryAlgorithm.ALLOWED_VALUES
),
(
gc.PPN_TASK_OS_FILE_HEAD_TAIL_FORMAT,
SyncMapHeadTailFormat.ALLOWED_VALUES
)
]
IMPLIED_PARAMETERS = [
(
# is_hierarchy_type=paged => is_task_dir_name_regex
gc.PPN_JOB_IS_HIERARCHY_TYPE,
[HierarchyType.PAGED],
[gc.PPN_JOB_IS_TASK_DIRECTORY_NAME_REGEX]
),
(
# is_text_type=unparsed => is_text_unparsed_id_sort
gc.PPN_TASK_IS_TEXT_FILE_FORMAT,
[TextFileFormat.UNPARSED],
[gc.PPN_TASK_IS_TEXT_UNPARSED_ID_SORT]
),
(
# is_text_type=munparsed => is_text_munparsed_l1_id_regex
gc.PPN_TASK_IS_TEXT_FILE_FORMAT,
[TextFileFormat.MUNPARSED],
[gc.PPN_TASK_IS_TEXT_MUNPARSED_L1_ID_REGEX]
),
(
# is_text_type=munparsed => is_text_munparsed_l2_id_regex
gc.PPN_TASK_IS_TEXT_FILE_FORMAT,
[TextFileFormat.MUNPARSED],
[gc.PPN_TASK_IS_TEXT_MUNPARSED_L2_ID_REGEX]
),
(
# is_text_type=munparsed => is_text_munparsed_l3_id_regex
gc.PPN_TASK_IS_TEXT_FILE_FORMAT,
[TextFileFormat.MUNPARSED],
[gc.PPN_TASK_IS_TEXT_MUNPARSED_L3_ID_REGEX]
),
(
# is_text_type=unparsed => is_text_unparsed_class_regex or
# is_text_unparsed_id_regex
gc.PPN_TASK_IS_TEXT_FILE_FORMAT,
[TextFileFormat.UNPARSED],
[
gc.PPN_TASK_IS_TEXT_UNPARSED_CLASS_REGEX,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_REGEX
]
),
(
# os_task_file_format=smil => os_task_file_smil_audio_ref
# os_task_file_format=smilh => os_task_file_smil_audio_ref
# os_task_file_format=smilm => os_task_file_smil_audio_ref
gc.PPN_TASK_OS_FILE_FORMAT,
[SyncMapFormat.SMIL, SyncMapFormat.SMILH, SyncMapFormat.SMILM],
[gc.PPN_TASK_OS_FILE_SMIL_AUDIO_REF]
),
(
# os_task_file_format=smil => os_task_file_smil_page_ref
# os_task_file_format=smilh => os_task_file_smil_page_ref
# os_task_file_format=smilm => os_task_file_smil_page_ref
gc.PPN_TASK_OS_FILE_FORMAT,
[SyncMapFormat.SMIL, SyncMapFormat.SMILH, SyncMapFormat.SMILM],
[gc.PPN_TASK_OS_FILE_SMIL_PAGE_REF]
),
(
# task_adjust_boundary_algorithm=percent => task_adjust_boundary_percent_value
gc.PPN_TASK_ADJUST_BOUNDARY_ALGORITHM,
[AdjustBoundaryAlgorithm.PERCENT],
[gc.PPN_TASK_ADJUST_BOUNDARY_PERCENT_VALUE]
),
(
# task_adjust_boundary_algorithm=rate => task_adjust_boundary_rate_value
gc.PPN_TASK_ADJUST_BOUNDARY_ALGORITHM,
[AdjustBoundaryAlgorithm.RATE],
[gc.PPN_TASK_ADJUST_BOUNDARY_RATE_VALUE]
),
(
# task_adjust_boundary_algorithm=rate_aggressive => task_adjust_boundary_rate_value
gc.PPN_TASK_ADJUST_BOUNDARY_ALGORITHM,
[AdjustBoundaryAlgorithm.RATEAGGRESSIVE],
[gc.PPN_TASK_ADJUST_BOUNDARY_RATE_VALUE]
),
(
# task_adjust_boundary_algorithm=currentend => task_adjust_boundary_currentend_value
gc.PPN_TASK_ADJUST_BOUNDARY_ALGORITHM,
[AdjustBoundaryAlgorithm.AFTERCURRENT],
[gc.PPN_TASK_ADJUST_BOUNDARY_AFTERCURRENT_VALUE]
),
(
# task_adjust_boundary_algorithm=rate => task_adjust_boundary_nextstart_value
gc.PPN_TASK_ADJUST_BOUNDARY_ALGORITHM,
[AdjustBoundaryAlgorithm.BEFORENEXT],
[gc.PPN_TASK_ADJUST_BOUNDARY_BEFORENEXT_VALUE]
),
(
# task_adjust_boundary_algorithm=offset => task_adjust_boundary_offset_value
gc.PPN_TASK_ADJUST_BOUNDARY_ALGORITHM,
[AdjustBoundaryAlgorithm.OFFSET],
[gc.PPN_TASK_ADJUST_BOUNDARY_OFFSET_VALUE]
)
]
JOB_REQUIRED_PARAMETERS = [
gc.PPN_JOB_LANGUAGE,
gc.PPN_JOB_OS_CONTAINER_FORMAT,
gc.PPN_JOB_OS_FILE_NAME,
]
TASK_REQUIRED_PARAMETERS = [
gc.PPN_TASK_IS_TEXT_FILE_FORMAT,
gc.PPN_TASK_LANGUAGE,
gc.PPN_TASK_OS_FILE_FORMAT,
gc.PPN_TASK_OS_FILE_NAME,
]
TASK_REQUIRED_PARAMETERS_EXTERNAL_NAME = [
gc.PPN_TASK_IS_TEXT_FILE_FORMAT,
gc.PPN_TASK_LANGUAGE,
gc.PPN_TASK_OS_FILE_FORMAT,
]
TXT_REQUIRED_PARAMETERS = [
gc.PPN_JOB_IS_AUDIO_FILE_NAME_REGEX,
gc.PPN_JOB_IS_AUDIO_FILE_RELATIVE_PATH,
gc.PPN_JOB_IS_HIERARCHY_PREFIX,
gc.PPN_JOB_IS_HIERARCHY_TYPE,
gc.PPN_JOB_IS_TEXT_FILE_NAME_REGEX,
gc.PPN_JOB_IS_TEXT_FILE_RELATIVE_PATH,
gc.PPN_JOB_LANGUAGE,
gc.PPN_JOB_OS_CONTAINER_FORMAT,
gc.PPN_JOB_OS_FILE_NAME,
gc.PPN_JOB_OS_HIERARCHY_PREFIX,
gc.PPN_JOB_OS_HIERARCHY_TYPE,
gc.PPN_TASK_IS_TEXT_FILE_FORMAT,
gc.PPN_TASK_OS_FILE_FORMAT,
gc.PPN_TASK_OS_FILE_NAME,
]
XML_JOB_REQUIRED_PARAMETERS = [
gc.PPN_JOB_OS_CONTAINER_FORMAT,
gc.PPN_JOB_OS_FILE_NAME,
gc.PPN_JOB_OS_HIERARCHY_PREFIX,
gc.PPN_JOB_OS_HIERARCHY_TYPE,
]
XML_TASK_REQUIRED_PARAMETERS = [
gc.PPN_TASK_IS_AUDIO_FILE_XML,
gc.PPN_TASK_IS_TEXT_FILE_FORMAT,
gc.PPN_TASK_IS_TEXT_FILE_XML,
gc.PPN_TASK_LANGUAGE,
gc.PPN_TASK_OS_FILE_FORMAT,
gc.PPN_TASK_OS_FILE_NAME,
]
TAG = u"Validator"
def __init__(self, rconf=None, logger=None):
super(Validator, self).__init__(rconf=rconf, logger=logger)
self.result = None
def check_file_encoding(self, input_file_path):
"""
Check whether the given file is UTF-8 encoded.
:param string input_file_path: the path of the file to be checked
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log([u"Checking encoding of file '%s'", input_file_path])
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_file_encoding"):
return self.result
if not gf.file_can_be_read(input_file_path):
self._failed(u"File '%s' cannot be read." % (input_file_path))
return self.result
with io.open(input_file_path, "rb") as file_object:
bstring = file_object.read()
self._check_utf8_encoding(bstring)
return self.result
def check_raw_string(self, string, is_bstring=True):
"""
Check whether the given string
        is properly UTF-8 encoded (if ``is_bstring`` is ``True``),
it is not empty, and
it does not contain reserved characters.
:param string string: the byte string or Unicode string to be checked
:param bool is_bstring: if True, string is a byte string
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log(u"Checking the given byte string")
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_raw_string"):
return self.result
if is_bstring:
self._check_utf8_encoding(string)
if not self.result.passed:
return self.result
string = gf.safe_unicode(string)
self._check_not_empty(string)
if not self.result.passed:
return self.result
self._check_reserved_characters(string)
return self.result
def check_configuration_string(
self,
config_string,
is_job=True,
external_name=False
):
"""
Check whether the given job or task configuration string
is well-formed (if ``is_bstring`` is ``True``)
and it has all the required parameters.
:param string config_string: the byte string or Unicode string to be checked
:param bool is_job: if ``True``, ``config_string`` is a job config string
:param bool external_name: if ``True``, the task name is provided externally,
and it is not required to appear
in the config string
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
if is_job:
self.log(u"Checking job configuration string")
else:
self.log(u"Checking task configuration string")
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_configuration_string"):
return self.result
if is_job:
required_parameters = self.JOB_REQUIRED_PARAMETERS
elif external_name:
required_parameters = self.TASK_REQUIRED_PARAMETERS_EXTERNAL_NAME
else:
required_parameters = self.TASK_REQUIRED_PARAMETERS
is_bstring = gf.is_bytes(config_string)
if is_bstring:
self.log(u"Checking that config_string is well formed")
self.check_raw_string(config_string, is_bstring=True)
if not self.result.passed:
return self.result
config_string = gf.safe_unicode(config_string)
self.log(u"Checking required parameters")
parameters = gf.config_string_to_dict(config_string, self.result)
self._check_required_parameters(required_parameters, parameters)
self.log([u"Checking config_string: returning %s", self.result.passed])
return self.result
def check_config_txt(self, contents, is_config_string=False):
"""
Check whether the given TXT config file contents
(if ``is_config_string`` is ``False``) or
TXT config string (if ``is_config_string`` is ``True``)
is well-formed and it has all the required parameters.
:param string contents: the TXT config file contents or TXT config string
:param bool is_config_string: if ``True``, contents is a config string
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log(u"Checking contents TXT config file")
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_config_txt"):
return self.result
is_bstring = gf.is_bytes(contents)
if is_bstring:
self.log(u"Checking that contents is well formed")
self.check_raw_string(contents, is_bstring=True)
if not self.result.passed:
return self.result
contents = gf.safe_unicode(contents)
if not is_config_string:
self.log(u"Converting file contents to config string")
contents = gf.config_txt_to_string(contents)
self.log(u"Checking required parameters")
required_parameters = self.TXT_REQUIRED_PARAMETERS
parameters = gf.config_string_to_dict(contents, self.result)
self._check_required_parameters(required_parameters, parameters)
self.log([u"Checking contents: returning %s", self.result.passed])
return self.result
def check_config_xml(self, contents):
"""
Check whether the given XML config file contents
        is well-formed and has all the required parameters.
        :param string contents: the XML config file contents
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log(u"Checking contents XML config file")
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_config_xml"):
return self.result
contents = gf.safe_bytes(contents)
self.log(u"Checking that contents is well formed")
self.check_raw_string(contents, is_bstring=True)
if not self.result.passed:
return self.result
self.log(u"Checking required parameters for job")
job_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=True)
self._check_required_parameters(self.XML_JOB_REQUIRED_PARAMETERS, job_parameters)
if not self.result.passed:
return self.result
self.log(u"Checking required parameters for task")
tasks_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=False)
for parameters in tasks_parameters:
self.log([u"Checking required parameters for task: '%s'", parameters])
self._check_required_parameters(self.XML_TASK_REQUIRED_PARAMETERS, parameters)
if not self.result.passed:
return self.result
return self.result
def check_container(self, container_path, container_format=None, config_string=None):
"""
Check whether the given container is well-formed.
:param string container_path: the path of the container to be checked
:param container_format: the format of the container
:type container_format: :class:`~aeneas.container.ContainerFormat`
:param string config_string: the configuration string generated by the wizard
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log([u"Checking container '%s'", container_path])
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_container"):
return self.result
if not (gf.file_exists(container_path) or gf.directory_exists(container_path)):
self._failed(u"Container '%s' not found." % container_path)
return self.result
container = Container(container_path, container_format)
try:
self.log(u"Checking container has config file")
if config_string is not None:
self.log(u"Container with config string from wizard")
self.check_config_txt(config_string, is_config_string=True)
elif container.has_config_xml:
self.log(u"Container has XML config file")
contents = container.read_entry(container.entry_config_xml)
if contents is None:
self._failed(u"Unable to read the contents of XML config file.")
return self.result
self.check_config_xml(contents)
elif container.has_config_txt:
self.log(u"Container has TXT config file")
contents = container.read_entry(container.entry_config_txt)
if contents is None:
self._failed(u"Unable to read the contents of TXT config file.")
return self.result
self.check_config_txt(contents, is_config_string=False)
else:
self._failed(u"Container does not have a TXT or XML configuration file.")
self.log(u"Checking we have a valid job in the container")
if not self.result.passed:
return self.result
self.log(u"Analyze the contents of the container")
analyzer = AnalyzeContainer(container)
if config_string is not None:
job = analyzer.analyze(config_string=config_string)
else:
job = analyzer.analyze()
self._check_analyzed_job(job, container)
except OSError:
self._failed(u"Unable to read the contents of the container.")
return self.result
def _are_safety_checks_disabled(self, caller=u"unknown_function"):
"""
Return ``True`` if safety checks are disabled.
:param string caller: the name of the caller function
:rtype: bool
"""
if self.rconf.safety_checks:
return False
self.log_warn([u"Safety checks disabled => %s passed", caller])
return True
def _failed(self, msg):
"""
Log a validation failure.
:param string msg: the error message
"""
self.log(msg)
self.result.passed = False
self.result.add_error(msg)
self.log(u"Failed")
def _check_utf8_encoding(self, bstring):
"""
Check whether the given sequence of bytes
is properly encoded in UTF-8.
:param bytes bstring: the byte string to be checked
"""
if not gf.is_bytes(bstring):
self._failed(u"The given string is not a sequence of bytes")
return
if not gf.is_utf8_encoded(bstring):
self._failed(u"The given string is not encoded in UTF-8.")
def _check_not_empty(self, string):
"""
Check whether the given string has zero length.
:param string string: the byte string or Unicode string to be checked
"""
if len(string) == 0:
self._failed(u"The given string has zero length")
def _check_reserved_characters(self, ustring):
"""
Check whether the given Unicode string contains reserved characters.
:param string ustring: the string to be checked
"""
forbidden = [c for c in gc.CONFIG_RESERVED_CHARACTERS if c in ustring]
if len(forbidden) > 0:
self._failed(u"The given string contains the reserved characters '%s'." % u" ".join(forbidden))
def _check_allowed_values(self, parameters):
"""
Check whether the given parameter value is allowed.
Log messages into ``self.result``.
:param dict parameters: the given parameters
"""
for key, allowed_values in self.ALLOWED_VALUES:
self.log([u"Checking allowed values for parameter '%s'", key])
if key in parameters:
value = parameters[key]
if value not in allowed_values:
self._failed(u"Parameter '%s' has value '%s' which is not allowed." % (key, value))
return
self.log(u"Passed")
def _check_implied_parameters(self, parameters):
"""
        Check that, whenever ``parameters`` contains a ``key`` whose value
        is one of ``values``, at least one of the keys in ``implied_keys``
        is also present in ``parameters``.
Log messages into ``self.result``.
:param dict parameters: the given parameters
"""
for key, values, implied_keys in self.IMPLIED_PARAMETERS:
self.log([u"Checking implied parameters by '%s'='%s'", key, values])
if (key in parameters) and (parameters[key] in values):
found = False
for implied_key in implied_keys:
if implied_key in parameters:
found = True
if not found:
if len(implied_keys) == 1:
msg = u"Parameter '%s' is required when '%s'='%s'." % (implied_keys[0], key, parameters[key])
else:
msg = u"At least one of [%s] is required when '%s'='%s'." % (",".join(implied_keys), key, parameters[key])
self._failed(msg)
return
self.log(u"Passed")
def _check_required_parameters(
self,
required_parameters,
parameters
):
"""
Check whether the given parameter dictionary contains
        all the required parameters.
Log messages into ``self.result``.
:param list required_parameters: required parameters
:param dict parameters: parameters specified by the user
"""
self.log([u"Checking required parameters '%s'", required_parameters])
self.log(u"Checking input parameters are not empty")
if (parameters is None) or (len(parameters) == 0):
self._failed(u"No parameters supplied.")
return
self.log(u"Checking no required parameter is missing")
for req_param in required_parameters:
if req_param not in parameters:
self._failed(u"Required parameter '%s' not set." % req_param)
return
self.log(u"Checking all parameter values are allowed")
self._check_allowed_values(parameters)
self.log(u"Checking all implied parameters are present")
self._check_implied_parameters(parameters)
return self.result
def _check_analyzed_job(self, job, container):
"""
Check that the job object generated from the given container
is well formed, that it has at least one task,
and that the text file of each task has the correct encoding.
Log messages into ``self.result``.
:param job: the Job object generated from container
:type job: :class:`~aeneas.job.Job`
:param container: the Container object
:type container: :class:`~aeneas.container.Container`
"""
self.log(u"Checking the Job object generated from container")
self.log(u"Checking that the Job is not None")
if job is None:
self._failed(u"Unable to create a Job from the container.")
return
self.log(u"Checking that the Job has at least one Task")
if len(job) == 0:
self._failed(u"Unable to create at least one Task from the container.")
return
if self.rconf[RuntimeConfiguration.JOB_MAX_TASKS] > 0:
self.log(u"Checking that the Job does not have too many Tasks")
if len(job) > self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]:
self._failed(u"The Job has %d Tasks, more than the maximum allowed (%d)." % (
len(job),
self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]
))
return
self.log(u"Checking that each Task text file is well formed")
for task in job.tasks:
self.log([u"Checking Task text file '%s'", task.text_file_path])
text_file_bstring = container.read_entry(task.text_file_path)
if (text_file_bstring is None) or (len(text_file_bstring) == 0):
self._failed(u"Text file '%s' is empty" % task.text_file_path)
return
self._check_utf8_encoding(text_file_bstring)
if not self.result.passed:
self._failed(u"Text file '%s' is not encoded in UTF-8" % task.text_file_path)
return
self._check_not_empty(text_file_bstring)
if not self.result.passed:
self._failed(u"Text file '%s' is empty" % task.text_file_path)
return
self.log([u"Checking Task text file '%s': passed", task.text_file_path])
self.log(u"Checking each Task text file is well formed: passed")
class ValidatorResult(object):
"""
A structure to contain the result of a validation.
"""
TAG = u"ValidatorResult"
def __init__(self):
self.passed = True
self.warnings = []
self.errors = []
def __unicode__(self):
msg = [
u"Passed: %s" % self.passed,
self.pretty_print(warnings=True)
]
return u"\n".join(msg)
def __str__(self):
return gf.safe_str(self.__unicode__())
def pretty_print(self, warnings=False):
"""
Pretty print warnings and errors.
:param bool warnings: if ``True``, also print warnings.
:rtype: string
"""
msg = []
if (warnings) and (len(self.warnings) > 0):
msg.append(u"Warnings:")
for warning in self.warnings:
msg.append(u" %s" % warning)
if len(self.errors) > 0:
msg.append(u"Errors:")
for error in self.errors:
msg.append(u" %s" % error)
return u"\n".join(msg)
@property
def passed(self):
"""
The result of a validation.
Return ``True`` if passed, possibly with emitted warnings.
Return ``False`` if not passed, that is, at least one error emitted.
:rtype: bool
"""
return self.__passed
@passed.setter
def passed(self, passed):
self.__passed = passed
@property
def warnings(self):
"""
The list of emitted warnings.
:rtype: list of strings
"""
return self.__warnings
@warnings.setter
def warnings(self, warnings):
self.__warnings = warnings
@property
def errors(self):
"""
The list of emitted errors.
:rtype: list of strings
"""
return self.__errors
@errors.setter
def errors(self, errors):
self.__errors = errors
def add_warning(self, message):
"""
Add a message to the warnings.
:param string message: the message to be added
"""
self.warnings.append(message)
def add_error(self, message):
"""
Add a message to the errors.
:param string message: the message to be added
"""
self.errors.append(message)
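# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: it assumes the
# enclosing checker class above is the public ``Validator`` of the aeneas
# package and that it can be constructed with no arguments; the configuration
# string is only an illustrative example.
if __name__ == "__main__":
    example_config = u"task_language=eng|is_text_type=plain|os_task_file_format=json"
    example_result = Validator().check_configuration_string(
        example_config, is_job=False, external_name=True)
    print(example_result.passed)
    print(example_result.pretty_print(warnings=True))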
| danielbair/aeneas | aeneas/validator.py | Python | agpl-3.0 | 28,502 | 0.001193 |
# Copyright (c) 2013 Calin Crisan
# This file is part of motionEye.
#
# motionEye is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import re
import subprocess
import utils
def _list_mounts():
logging.debug('listing mounts...')
seen_targets = set()
mounts = []
with open('/proc/mounts', 'r') as f:
for line in f:
line = line.strip()
if not line:
continue
parts = line.split()
if len(parts) < 4:
continue
target = parts[0]
mount_point = parts[1]
fstype = parts[2]
opts = parts[3]
if not os.access(mount_point, os.W_OK):
continue
if target in seen_targets:
continue # probably a bind mount
seen_targets.add(target)
if fstype == 'fuseblk':
fstype = 'ntfs' # most likely
logging.debug('found mount "%s" at "%s"' % (target, mount_point))
mounts.append({
'target': target,
'mount_point': mount_point,
'fstype': fstype,
'opts': opts,
})
return mounts
def _list_disks():
if os.path.exists('/dev/disk/by-id/'):
return _list_disks_dev_by_id()
else: # fall back to fdisk -l
return _list_disks_fdisk()
def _list_disks_dev_by_id():
logging.debug('listing disks using /dev/disk/by-id/')
disks_by_dev = {}
partitions_by_dev = {}
for entry in os.listdir('/dev/disk/by-id/'):
parts = entry.split('-', 1)
if len(parts) < 2:
continue
target = os.path.realpath(os.path.join('/dev/disk/by-id/', entry))
bus, entry = parts
        m = re.search(r'-part(\d+)$', entry)
        if m:
            part_no = int(m.group(1))
            entry = re.sub(r'-part\d+$', '', entry)
else:
part_no = None
parts = entry.split('_')
if len(parts) < 2:
vendor = parts[0]
model = ''
else:
vendor, model = parts[:2]
if part_no is not None:
logging.debug('found partition "%s" at "%s" on bus "%s": "%s %s"' % (part_no, target, bus, vendor, model))
partitions_by_dev[target] = {
'target': target,
'bus': bus,
'vendor': vendor,
'model': model,
'part_no': part_no,
'unmatched': True
}
else:
logging.debug('found disk at "%s" on bus "%s": "%s %s"' % (target, bus, vendor, model))
disks_by_dev[target] = {
'target': target,
'bus': bus,
'vendor': vendor,
'model': model,
'partitions': []
}
# group partitions by disk
for dev, partition in partitions_by_dev.items():
for disk_dev, disk in disks_by_dev.items():
if dev.startswith(disk_dev):
disk['partitions'].append(partition)
partition.pop('unmatched', None)
# add separate partitions that did not match any disk
for partition in partitions_by_dev.values():
if partition.pop('unmatched', False):
disks_by_dev[partition['target']] = partition
partition['partitions'] = [dict(partition)]
# prepare flat list of disks
disks = disks_by_dev.values()
disks.sort(key=lambda d: d['vendor'])
for disk in disks:
disk['partitions'].sort(key=lambda p: p['part_no'])
return disks
def _list_disks_fdisk():
try:
output = subprocess.check_output(['fdisk', '-l'], stderr=utils.DEV_NULL)
except Exception as e:
logging.error('failed to list disks using "fdisk -l": %s' % e, exc_info=True)
return []
disks = []
disk = None
def add_disk(d):
logging.debug('found disk at "%s" on bus "%s": "%s %s"' %
(d['target'], d['bus'], d['vendor'], d['model']))
for part in d['partitions']:
logging.debug('found partition "%s" at "%s" on bus "%s": "%s %s"' %
(part['part_no'], part['target'], part['bus'], part['vendor'], part['model']))
disks.append(d)
for line in output.split('\n'):
line = line.replace('*', '')
        line = re.sub(r'\s+', ' ', line.strip())
if not line:
continue
if line.startswith('Disk /dev/'):
if disk and disk['partitions']:
add_disk(disk)
parts = line.split()
disk = {
'target': parts[1].strip(':'),
'bus': '',
'vendor': '',
'model': parts[2] + ' ' + parts[3].strip(','),
'partitions': []
}
elif line.startswith('/dev/') and disk:
parts = line.split()
            part_no = re.findall(r'\d+$', parts[0])
partition = {
'part_no': int(part_no[0]) if part_no else None,
'target': parts[0],
'bus': '',
'vendor': '',
'model': parts[4] + ' ' + ' '.join(parts[6:]),
}
disk['partitions'].append(partition)
if disk and disk['partitions']:
add_disk(disk)
disks.sort(key=lambda d: d['target'])
for disk in disks:
disk['partitions'].sort(key=lambda p: p['part_no'])
return disks
def list_mounted_disks():
mounted_disks = []
try:
disks = _list_disks()
mounts_by_target = dict((m['target'], m) for m in _list_mounts())
for disk in disks:
for partition in disk['partitions']:
mount = mounts_by_target.get(partition['target'])
if mount:
partition.update(mount)
# filter out unmounted partitions
disk['partitions'] = [p for p in disk['partitions'] if p.get('mount_point')]
# filter out unmounted disks
mounted_disks = [d for d in disks if d['partitions']]
except Exception as e:
logging.error('failed to list mounted disks: %s' % e, exc_info=True)
return mounted_disks
def list_mounted_partitions():
mounted_partitions = {}
try:
disks = _list_disks()
mounts_by_target = dict((m['target'], m) for m in _list_mounts())
for disk in disks:
for partition in disk['partitions']:
mount = mounts_by_target.get(partition['target'])
if mount:
partition.update(mount)
mounted_partitions[partition['target']] = partition
except Exception as e:
logging.error('failed to list mounted partitions: %s' % e, exc_info=True)
return mounted_partitions
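# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: both public helpers
# return plain dict/list structures whose keys mirror the dictionaries built
# above ('target', 'vendor', 'model', 'partitions', 'mount_point', 'fstype').
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    for disk in list_mounted_disks():
        print('%s %s' % (disk['vendor'], disk['model']))
        for part in disk['partitions']:
            print('  %s mounted at %s (%s)' % (part['target'], part['mount_point'], part['fstype']))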
| ccrisan/motioneye | motioneye/diskctl.py | Python | gpl-3.0 | 7,702 | 0.006492 |
from ._compat import PY2, filename_to_ui, get_text_stderr
from .utils import echo
class ClickException(Exception):
"""An exception that Click can handle and show to the user."""
#: The exit code for this exception
exit_code = 1
def __init__(self, message):
if PY2:
if message is not None:
message = message.encode('utf-8')
Exception.__init__(self, message)
self.message = message
def format_message(self):
return self.message
def show(self, file=None):
if file is None:
file = get_text_stderr()
echo('Error: %s' % self.format_message(), file=file)
class UsageError(ClickException):
"""An internal exception that signals a usage error. This typically
aborts any further handling.
:param message: the error message to display.
:param ctx: optionally the context that caused this error. Click will
fill in the context automatically in some situations.
"""
exit_code = 2
def __init__(self, message, ctx=None):
ClickException.__init__(self, message)
self.ctx = ctx
def show(self, file=None):
if file is None:
file = get_text_stderr()
color = None
if self.ctx is not None:
color = self.ctx.color
echo(self.ctx.get_usage() + '\n', file=file, color=color)
echo('Error: %s' % self.format_message(), file=file, color=color)
class BadParameter(UsageError):
"""An exception that formats out a standardized error message for a
bad parameter. This is useful when thrown from a callback or type as
Click will attach contextual information to it (for instance, which
parameter it is).
.. versionadded:: 2.0
:param param: the parameter object that caused this error. This can
be left out, and Click will attach this info itself
if possible.
:param param_hint: a string that shows up as parameter name. This
can be used as alternative to `param` in cases
where custom validation should happen. If it is
a string it's used as such, if it's a list then
each item is quoted and separated.
"""
def __init__(self, message, ctx=None, param=None,
param_hint=None):
UsageError.__init__(self, message, ctx)
self.param = param
self.param_hint = param_hint
def format_message(self):
if self.param_hint is not None:
param_hint = self.param_hint
elif self.param is not None:
param_hint = self.param.opts or [self.param.human_readable_name]
else:
return 'Invalid value: %s' % self.message
if isinstance(param_hint, (tuple, list)):
param_hint = ' / '.join('"%s"' % x for x in param_hint)
return 'Invalid value for %s: %s' % (param_hint, self.message)
class MissingParameter(BadParameter):
"""Raised if click required an option or argument but it was not
provided when invoking the script.
.. versionadded:: 4.0
:param param_type: a string that indicates the type of the parameter.
The default is to inherit the parameter type from
the given `param`. Valid values are ``'parameter'``,
``'option'`` or ``'argument'``.
"""
def __init__(self, message=None, ctx=None, param=None,
param_hint=None, param_type=None):
BadParameter.__init__(self, message, ctx, param, param_hint)
self.param_type = param_type
def format_message(self):
if self.param_hint is not None:
param_hint = self.param_hint
elif self.param is not None:
param_hint = self.param.opts or [self.param.human_readable_name]
else:
param_hint = None
if isinstance(param_hint, (tuple, list)):
param_hint = ' / '.join('"%s"' % x for x in param_hint)
param_type = self.param_type
if param_type is None and self.param is not None:
param_type = self.param.param_type_name
msg = self.message
        if self.param is not None:
            msg_extra = self.param.type.get_missing_message(self.param)
            if msg_extra:
                if msg:
                    msg += '. ' + msg_extra
                else:
                    msg = msg_extra
return 'Missing %s%s%s%s' % (
param_type,
param_hint and ' %s' % param_hint or '',
msg and '. ' or '.',
msg or '',
)
class NoSuchOption(UsageError):
"""Raised if click attempted to handle an option that does not
exist.
.. versionadded:: 4.0
"""
def __init__(self, option_name, message=None, possibilities=None,
ctx=None):
if message is None:
message = 'no such option: %s' % option_name
UsageError.__init__(self, message, ctx)
self.option_name = option_name
self.possibilities = possibilities
def format_message(self):
bits = [self.message]
if self.possibilities:
if len(self.possibilities) == 1:
bits.append('Did you mean %s?' % self.possibilities[0])
else:
possibilities = sorted(self.possibilities)
bits.append('(Possible options: %s)' % ', '.join(possibilities))
return ' '.join(bits)
class BadOptionUsage(UsageError):
"""Raised if an option is generally supplied but the use of the option
was incorrect. This is for instance raised if the number of arguments
for an option is not correct.
.. versionadded:: 4.0
"""
    def __init__(self, option_name, message, ctx=None):
        UsageError.__init__(self, message, ctx)
        self.option_name = option_name
class FileError(ClickException):
"""Raised if a file cannot be opened."""
def __init__(self, filename, hint=None):
ui_filename = filename_to_ui(filename)
if hint is None:
hint = 'unknown error'
ClickException.__init__(self, hint)
self.ui_filename = ui_filename
self.filename = filename
def format_message(self):
return 'Could not open file %s: %s' % (self.ui_filename, self.message)
class Abort(RuntimeError):
"""An internal signalling exception that signals Click to abort."""
| sourlows/rating-cruncher | src/lib/click/exceptions.py | Python | apache-2.0 | 6,390 | 0.000156 |
from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = CharField(_("Name of User"), blank=True, max_length=255)
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
| mistalaba/cookiecutter-django | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/models.py | Python | bsd-3-clause | 466 | 0 |
#! /usr/bin/python
'''
Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.
An example is the root-to-leaf path 1->2->3 which represents the number 123.
Find the total sum of all root-to-leaf numbers.
For example,
1
/ \
2 3
The root-to-leaf path 1->2 represents the number 12.
The root-to-leaf path 1->3 represents the number 13.
Return the sum = 12 + 13 = 25.
'''
from node_struct import TreeNode
class Solution:
def leafNode(self, root):
if not root.left and not root.right:
return True
return False
def inOrderTraversal(self, root, currentPath, path):
if not root:
return
# visit()
currentPath = 10 * currentPath + root.val
if self.leafNode(root):
path.append(currentPath)
else:
self.inOrderTraversal(root.left, currentPath, path)
self.inOrderTraversal(root.right, currentPath, path)
# @param root, a tree node
# @return an integer
def sumNumbers(self, root):
path = list()
self.inOrderTraversal(root, 0, path)
return sum(path)
if __name__ == '__main__':
solution = Solution()
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.right = TreeNode(4)
root.left.left = TreeNode(5)
print solution.sumNumbers(root)
print solution.sumNumbers(None)
| shub0/algorithm-data-structure | python/sum_roof_to_leaf.py | Python | bsd-3-clause | 1,428 | 0.002101 |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
from jrunner.common import *
from oslo.config import cfg
opts = [
cfg.StrOpt(
'worker_module',
default='jrunner.jobqueue.workers.simple',
help='Worker module'),
]
CONF = cfg.CONF
CONF.register_opts(opts, 'jobqueue')
def get_worker_module():
try:
return importlib.import_module(CONF.jobqueue.worker_module)
except Exception as err:
LOG.exception(err)
raise Exception("Failed to import worker module")
| gabriel-samfira/jrunner | jrunner/jobqueue/workers/__init__.py | Python | apache-2.0 | 1,084 | 0 |
"""
Django settings for mysite2 project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f@d3+wz7y8uj!+alcvc!6du++db!-3jh6=vr(%z(e^2n5_fml-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myauthen',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| wasit7/PythonDay | django/mysite2/mysite2/settings.py | Python | bsd-3-clause | 3,183 | 0.001257 |
# -*- coding: utf-8 -*-
"""Converts an IRI to a URI."""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
import urllib.parse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
escape_range = [
(0xA0, 0xD7FF),
(0xE000, 0xF8FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFEF),
(0x10000, 0x1FFFD),
(0x20000, 0x2FFFD),
(0x30000, 0x3FFFD),
(0x40000, 0x4FFFD),
(0x50000, 0x5FFFD),
(0x60000, 0x6FFFD),
(0x70000, 0x7FFFD),
(0x80000, 0x8FFFD),
(0x90000, 0x9FFFD),
(0xA0000, 0xAFFFD),
(0xB0000, 0xBFFFD),
(0xC0000, 0xCFFFD),
(0xD0000, 0xDFFFD),
(0xE1000, 0xEFFFD),
(0xF0000, 0xFFFFD),
(0x100000, 0x10FFFD),
]
def encode(c):
retval = c
i = ord(c)
for low, high in escape_range:
if i < low:
break
if i >= low and i <= high:
retval = "".join(["%%%2X" % o for o in c.encode("utf-8")])
break
return retval
def iri2uri(uri):
"""Convert an IRI to a URI. Note that IRIs must be
passed in a unicode strings. That is, do not utf-8 encode
the IRI before passing it into the function."""
if isinstance(uri, str):
(scheme, authority, path, query, fragment) = urllib.parse.urlsplit(uri)
authority = authority.encode("idna").decode("utf-8")
# For each character in 'ucschar' or 'iprivate'
# 1. encode as utf-8
# 2. then %-encode each octet of that utf-8
uri = urllib.parse.urlunsplit((scheme, authority, path, query, fragment))
uri = "".join([encode(c) for c in uri])
return uri
if __name__ == "__main__":
import unittest
class Test(unittest.TestCase):
def test_uris(self):
"""Test that URIs are invariant under the transformation."""
invariant = [
"ftp://ftp.is.co.za/rfc/rfc1808.txt",
"http://www.ietf.org/rfc/rfc2396.txt",
"ldap://[2001:db8::7]/c=GB?objectClass?one",
"mailto:John.Doe@example.com",
"news:comp.infosystems.www.servers.unix",
"tel:+1-816-555-1212",
"telnet://192.0.2.16:80/",
"urn:oasis:names:specification:docbook:dtd:xml:4.1.2",
]
for uri in invariant:
self.assertEqual(uri, iri2uri(uri))
def test_iri(self):
"""Test that the right type of escaping is done for each part of the URI."""
self.assertEqual(
"http://xn--o3h.com/%E2%98%84",
iri2uri("http://\N{COMET}.com/\N{COMET}"),
)
self.assertEqual(
"http://bitworking.org/?fred=%E2%98%84",
iri2uri("http://bitworking.org/?fred=\N{COMET}"),
)
self.assertEqual(
"http://bitworking.org/#%E2%98%84",
iri2uri("http://bitworking.org/#\N{COMET}"),
)
self.assertEqual("#%E2%98%84", iri2uri("#\N{COMET}"))
self.assertEqual(
"/fred?bar=%E2%98%9A#%E2%98%84",
iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"),
)
self.assertEqual(
"/fred?bar=%E2%98%9A#%E2%98%84",
iri2uri(iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")),
)
self.assertNotEqual(
"/fred?bar=%E2%98%9A#%E2%98%84",
iri2uri(
"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode("utf-8")
),
)
unittest.main()
| endlessm/chromium-browser | tools/swarming_client/third_party/httplib2/python3/httplib2/iri2uri.py | Python | bsd-3-clause | 4,153 | 0.000963 |
from django.contrib import admin
from .models import Gallery, Photo
class PhotoInline(admin.StackedInline):
model = Photo
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PhotoInline]
admin.site.register(Gallery, GalleryAdmin)
| gygcnc/gygcnc | gygcnc/image_gallery/admin.py | Python | bsd-3-clause | 255 | 0.003922 |
import os
import platform
import subprocess
class SoundPlayer:
"""Simple audio file player, invokes afplay on macs, mpg123 otherwise."""
def __init__(self):
self.basedir = os.path.dirname(__file__)
self.sounds = {
'startup': 'run.mp3',
'shutdown': 'quit.mp3',
'run': 'run_command.mp3',
'red': 'red.mp3',
'green': 'green.mp3'
}
self.player = 'mpg123'
if platform.system() == 'Darwin':
self.player = 'afplay'
def play(self, name):
if name not in self.sounds:
print 'sound "%s" not found in mapping' % name
sound_file = os.path.join(self.basedir, 'sounds', self.sounds[name])
subprocess.call([self.player, sound_file], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
class BaseNotifier(object):
"Notifiers need only to implement notify"
def notify(self, event):
raise NotImplementedError()
class TextNotifier(BaseNotifier):
"Basic text notifier"
def notify(self, event):
print 'Notify: ', event
class SoundNotifier(BaseNotifier):
"Simple notifier that uses SoundPlayer"
def __init__(self):
self.player = SoundPlayer()
def notify(self, event):
print 'mutter: %s' % event
if event in self.player.sounds:
self.player.play(event)
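# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: all notifiers share
# the same notify(event) entry point, so callers can swap them freely. The
# sound notifier additionally needs the bundled sounds/ directory and an
# afplay/mpg123 binary on the PATH.
if __name__ == '__main__':
    for notifier in (TextNotifier(), SoundNotifier()):
        notifier.notify('green')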
| tmc/mutter | mutter/notifiers.py | Python | bsd-2-clause | 1,358 | 0.003682 |
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
int_or_none,
parse_iso8601,
strip_or_none,
)
class ToggleIE(InfoExtractor):
IE_NAME = 'toggle'
_VALID_URL = r'(?:https?://(?:(?:www\.)?mewatch|video\.toggle)\.sg/(?:en|zh)/(?:[^/]+/){2,}|toggle:)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.mewatch.sg/en/series/lion-moms-tif/trailers/lion-moms-premier/343115',
'info_dict': {
'id': '343115',
'ext': 'mp4',
'title': 'Lion Moms Premiere',
'description': 'md5:aea1149404bff4d7f7b6da11fafd8e6b',
'upload_date': '20150910',
'timestamp': 1441858274,
},
'params': {
'skip_download': 'm3u8 download',
}
}, {
'note': 'DRM-protected video',
'url': 'http://www.mewatch.sg/en/movies/dug-s-special-mission/341413',
'info_dict': {
'id': '341413',
'ext': 'wvm',
'title': 'Dug\'s Special Mission',
'description': 'md5:e86c6f4458214905c1772398fabc93e0',
'upload_date': '20150827',
'timestamp': 1440644006,
},
'params': {
'skip_download': 'DRM-protected wvm download',
}
}, {
# this also tests correct video id extraction
'note': 'm3u8 links are geo-restricted, but Android/mp4 is okay',
'url': 'http://www.mewatch.sg/en/series/28th-sea-games-5-show/28th-sea-games-5-show-ep11/332861',
'info_dict': {
'id': '332861',
'ext': 'mp4',
'title': '28th SEA Games (5 Show) - Episode 11',
'description': 'md5:3cd4f5f56c7c3b1340c50a863f896faa',
'upload_date': '20150605',
'timestamp': 1433480166,
},
'params': {
'skip_download': 'DRM-protected wvm download',
},
'skip': 'm3u8 links are geo-restricted'
}, {
'url': 'http://video.toggle.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331',
'only_matching': True,
}, {
'url': 'http://www.mewatch.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331',
'only_matching': True,
}, {
'url': 'http://www.mewatch.sg/zh/series/zero-calling-s2-hd/ep13/336367',
'only_matching': True,
}, {
'url': 'http://www.mewatch.sg/en/series/vetri-s2/webisodes/jeeva-is-an-orphan-vetri-s2-webisode-7/342302',
'only_matching': True,
}, {
'url': 'http://www.mewatch.sg/en/movies/seven-days/321936',
'only_matching': True,
}, {
'url': 'https://www.mewatch.sg/en/tv-show/news/may-2017-cna-singapore-tonight/fri-19-may-2017/512456',
'only_matching': True,
}, {
'url': 'http://www.mewatch.sg/en/channels/eleven-plus/401585',
'only_matching': True,
}]
_API_USER = 'tvpapi_147'
_API_PASS = '11111'
def _real_extract(self, url):
video_id = self._match_id(url)
params = {
'initObj': {
'Locale': {
'LocaleLanguage': '',
'LocaleCountry': '',
'LocaleDevice': '',
'LocaleUserState': 0
},
'Platform': 0,
'SiteGuid': 0,
'DomainID': '0',
'UDID': '',
'ApiUser': self._API_USER,
'ApiPass': self._API_PASS
},
'MediaID': video_id,
'mediaType': 0,
}
info = self._download_json(
'http://tvpapi.as.tvinci.com/v2_9/gateways/jsonpostgw.aspx?m=GetMediaInfo',
video_id, 'Downloading video info json', data=json.dumps(params).encode('utf-8'))
title = info['MediaName']
formats = []
for video_file in info.get('Files', []):
video_url, vid_format = video_file.get('URL'), video_file.get('Format')
if not video_url or video_url == 'NA' or not vid_format:
continue
ext = determine_ext(video_url)
vid_format = vid_format.replace(' ', '')
# if geo-restricted, m3u8 is inaccessible, but mp4 is okay
if ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
video_url, video_id, ext='mp4', m3u8_id=vid_format,
note='Downloading %s m3u8 information' % vid_format,
errnote='Failed to download %s m3u8 information' % vid_format,
fatal=False)
for f in m3u8_formats:
# Apple FairPlay Streaming
if '/fpshls/' in f['url']:
continue
formats.append(f)
elif ext == 'mpd':
formats.extend(self._extract_mpd_formats(
video_url, video_id, mpd_id=vid_format,
note='Downloading %s MPD manifest' % vid_format,
errnote='Failed to download %s MPD manifest' % vid_format,
fatal=False))
elif ext == 'ism':
formats.extend(self._extract_ism_formats(
video_url, video_id, ism_id=vid_format,
note='Downloading %s ISM manifest' % vid_format,
errnote='Failed to download %s ISM manifest' % vid_format,
fatal=False))
elif ext == 'mp4':
formats.append({
'ext': ext,
'url': video_url,
'format_id': vid_format,
})
if not formats:
for meta in (info.get('Metas') or []):
if meta.get('Key') == 'Encryption' and meta.get('Value') == '1':
raise ExtractorError(
'This video is DRM protected.', expected=True)
# Most likely because geo-blocked
raise ExtractorError('No downloadable videos found', expected=True)
self._sort_formats(formats)
thumbnails = []
for picture in info.get('Pictures', []):
if not isinstance(picture, dict):
continue
pic_url = picture.get('URL')
if not pic_url:
continue
thumbnail = {
'url': pic_url,
}
pic_size = picture.get('PicSize', '')
m = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', pic_size)
if m:
thumbnail.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
thumbnails.append(thumbnail)
def counter(prefix):
return int_or_none(
info.get(prefix + 'Counter') or info.get(prefix.lower() + '_counter'))
return {
'id': video_id,
'title': title,
'description': strip_or_none(info.get('Description')),
'duration': int_or_none(info.get('Duration')),
'timestamp': parse_iso8601(info.get('CreationDate') or None),
'average_rating': float_or_none(info.get('Rating')),
'view_count': counter('View'),
'like_count': counter('Like'),
'thumbnails': thumbnails,
'formats': formats,
}
class MeWatchIE(InfoExtractor):
IE_NAME = 'mewatch'
_VALID_URL = r'https?://(?:(?:www|live)\.)?mewatch\.sg/watch/[^/?#&]+-(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://www.mewatch.sg/watch/Recipe-Of-Life-E1-179371',
'info_dict': {
'id': '1008625',
'ext': 'mp4',
'title': 'Recipe Of Life 味之道',
'timestamp': 1603306526,
'description': 'md5:6e88cde8af2068444fc8e1bc3ebf257c',
'upload_date': '20201021',
},
'params': {
'skip_download': 'm3u8 download',
},
}, {
'url': 'https://www.mewatch.sg/watch/Little-Red-Dot-Detectives-S2-搜密。打卡。小红点-S2-E1-176232',
'only_matching': True,
}, {
'url': 'https://www.mewatch.sg/watch/Little-Red-Dot-Detectives-S2-%E6%90%9C%E5%AF%86%E3%80%82%E6%89%93%E5%8D%A1%E3%80%82%E5%B0%8F%E7%BA%A2%E7%82%B9-S2-E1-176232',
'only_matching': True,
}, {
'url': 'https://live.mewatch.sg/watch/Recipe-Of-Life-E41-189759',
'only_matching': True,
}]
def _real_extract(self, url):
item_id = self._match_id(url)
custom_id = self._download_json(
'https://cdn.mewatch.sg/api/items/' + item_id,
item_id, query={'segments': 'all'})['customId']
return self.url_result(
'toggle:' + custom_id, ToggleIE.ie_key(), custom_id)
| rg3/youtube-dl | youtube_dl/extractor/toggle.py | Python | unlicense | 8,970 | 0.0019 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import unittest
import boto3
from moto import mock_s3
from airflow.providers.amazon.aws.operators.s3 import S3DeleteObjectsOperator
class TestS3DeleteObjectsOperator(unittest.TestCase):
@mock_s3
def test_s3_delete_single_object(self):
bucket = "testbucket"
key = "path/data.txt"
conn = boto3.client('s3')
conn.create_bucket(Bucket=bucket)
conn.upload_fileobj(Bucket=bucket, Key=key, Fileobj=io.BytesIO(b"input"))
# The object should be detected before the DELETE action is taken
objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key)
assert len(objects_in_dest_bucket['Contents']) == 1
assert objects_in_dest_bucket['Contents'][0]['Key'] == key
op = S3DeleteObjectsOperator(task_id="test_task_s3_delete_single_object", bucket=bucket, keys=key)
op.execute(None)
# There should be no object found in the bucket created earlier
assert 'Contents' not in conn.list_objects(Bucket=bucket, Prefix=key)
@mock_s3
def test_s3_delete_multiple_objects(self):
bucket = "testbucket"
key_pattern = "path/data"
n_keys = 3
keys = [key_pattern + str(i) for i in range(n_keys)]
conn = boto3.client('s3')
conn.create_bucket(Bucket=bucket)
for k in keys:
conn.upload_fileobj(Bucket=bucket, Key=k, Fileobj=io.BytesIO(b"input"))
# The objects should be detected before the DELETE action is taken
objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key_pattern)
assert len(objects_in_dest_bucket['Contents']) == n_keys
assert sorted(x['Key'] for x in objects_in_dest_bucket['Contents']) == sorted(keys)
op = S3DeleteObjectsOperator(task_id="test_task_s3_delete_multiple_objects", bucket=bucket, keys=keys)
op.execute(None)
# There should be no object found in the bucket created earlier
assert 'Contents' not in conn.list_objects(Bucket=bucket, Prefix=key_pattern)
@mock_s3
def test_s3_delete_prefix(self):
bucket = "testbucket"
key_pattern = "path/data"
n_keys = 3
keys = [key_pattern + str(i) for i in range(n_keys)]
conn = boto3.client('s3')
conn.create_bucket(Bucket=bucket)
for k in keys:
conn.upload_fileobj(Bucket=bucket, Key=k, Fileobj=io.BytesIO(b"input"))
# The objects should be detected before the DELETE action is taken
objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key_pattern)
assert len(objects_in_dest_bucket['Contents']) == n_keys
assert sorted(x['Key'] for x in objects_in_dest_bucket['Contents']) == sorted(keys)
op = S3DeleteObjectsOperator(task_id="test_task_s3_delete_prefix", bucket=bucket, prefix=key_pattern)
op.execute(None)
# There should be no object found in the bucket created earlier
assert 'Contents' not in conn.list_objects(Bucket=bucket, Prefix=key_pattern)
| Acehaidrey/incubator-airflow | tests/providers/amazon/aws/operators/test_s3_delete_objects.py | Python | apache-2.0 | 3,825 | 0.003137 |
from flask import Blueprint
main = Blueprint('main', __name__)
from . import views, errors
| piratecb/up1and | app/main/__init__.py | Python | mit | 98 | 0.010204 |
from io import BytesIO
class PreDownload(BytesIO):
def __init__(self, initial_bytes, url):
super(PreDownload, self).__init__(initial_bytes)
if not url:
raise ValueError('url must be provided')
self.url = url
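# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: PreDownload behaves
# like an in-memory file while remembering where the bytes came from, which is
# useful when data has already been fetched but downstream code expects a
# file-like object together with its source URL.
if __name__ == '__main__':
    pd = PreDownload(b'already fetched bytes', 'http://example.com/audio.wav')
    print(pd.read())
    print(pd.url)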
| JohnVinyard/zounds | zounds/datasets/predownload.py | Python | mit | 250 | 0 |
from flask import Flask, request, redirect, url_for
app = Flask(__name__)
logged_in = False
LOGIN_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title></title>
</head>
<body>
<form method="POST" action="">
<input type="text" name="username">
<input type="password" name="password">
<input type="submit" value="submit">
</form>
</body>
</html>
"""
@app.route("/")
def index():
return "hello world"
@app.route("/two")
def two():
return "two"
@app.route("/login", methods=["GET", "POST"])
def login():
global logged_in
if logged_in:
return redirect(url_for("mypage"))
if request.method == "GET":
return LOGIN_TEMPLATE
if not request.form.get("username") or not request.form.get("password"):
return LOGIN_TEMPLATE
logged_in = True
return "logged in"
@app.route("/mypage")
def mypage():
global logged_in
if not logged_in:
return redirect(url_for("login"))
return "mypage"
@app.route("/logout")
def logout():
global logged_in
if not logged_in:
return redirect(url_for("login"))
logged_in = False
return "logout"
def main():
app.run(debug=True)
if __name__ == "__main__":
main()
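# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original file: with the development
# server running, the login flow can be exercised from a shell, e.g.
#
#     curl -i http://127.0.0.1:5000/mypage                                  # 302 -> /login
#     curl -i -d "username=a" -d "password=b" http://127.0.0.1:5000/login   # logs in
#
# Note that the module-level ``logged_in`` flag is shared by every client, so
# this pattern only suits a single-user demo.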
| altnight/individual-sandbox | diary/20171022/sample/auth_sample/server.py | Python | apache-2.0 | 1,243 | 0.006436 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'options_dialog_base.ui'
#
# Created: Mon Feb 17 11:50:09 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_OptionsDialogBase(object):
def setupUi(self, OptionsDialogBase):
OptionsDialogBase.setObjectName(_fromUtf8("OptionsDialogBase"))
OptionsDialogBase.resize(683, 453)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/inasafe/icon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
OptionsDialogBase.setWindowIcon(icon)
self.gridLayout_2 = QtGui.QGridLayout(OptionsDialogBase)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.buttonBox = QtGui.QDialogButtonBox(OptionsDialogBase)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Help|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout_2.addWidget(self.buttonBox, 1, 0, 1, 1)
self.tabWidget = QtGui.QTabWidget(OptionsDialogBase)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab_basic = QtGui.QWidget()
self.tab_basic.setObjectName(_fromUtf8("tab_basic"))
self.gridLayout_4 = QtGui.QGridLayout(self.tab_basic)
self.gridLayout_4.setMargin(0)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.scrollArea = QtGui.QScrollArea(self.tab_basic)
self.scrollArea.setFrameShape(QtGui.QFrame.NoFrame)
self.scrollArea.setFrameShadow(QtGui.QFrame.Sunken)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 638, 454))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.gridLayout = QtGui.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.cbxVisibleLayersOnly = QtGui.QCheckBox(self.scrollAreaWidgetContents)
self.cbxVisibleLayersOnly.setObjectName(_fromUtf8("cbxVisibleLayersOnly"))
self.gridLayout.addWidget(self.cbxVisibleLayersOnly, 0, 0, 1, 1)
self.cbxSetLayerNameFromTitle = QtGui.QCheckBox(self.scrollAreaWidgetContents)
self.cbxSetLayerNameFromTitle.setEnabled(True)
self.cbxSetLayerNameFromTitle.setObjectName(_fromUtf8("cbxSetLayerNameFromTitle"))
self.gridLayout.addWidget(self.cbxSetLayerNameFromTitle, 1, 0, 1, 1)
self.cbxZoomToImpact = QtGui.QCheckBox(self.scrollAreaWidgetContents)
self.cbxZoomToImpact.setEnabled(True)
self.cbxZoomToImpact.setObjectName(_fromUtf8("cbxZoomToImpact"))
self.gridLayout.addWidget(self.cbxZoomToImpact, 2, 0, 1, 1)
self.cbxHideExposure = QtGui.QCheckBox(self.scrollAreaWidgetContents)
self.cbxHideExposure.setEnabled(True)
self.cbxHideExposure.setObjectName(_fromUtf8("cbxHideExposure"))
self.gridLayout.addWidget(self.cbxHideExposure, 3, 0, 1, 1)
self.cbxClipToViewport = QtGui.QCheckBox(self.scrollAreaWidgetContents)
self.cbxClipToViewport.setChecked(False)
self.cbxClipToViewport.setObjectName(_fromUtf8("cbxClipToViewport"))
self.gridLayout.addWidget(self.cbxClipToViewport, 4, 0, 1, 1)
self.cbxClipHard = QtGui.QCheckBox(self.scrollAreaWidgetContents)
self.cbxClipHard.setObjectName(_fromUtf8("cbxClipHard"))
self.gridLayout.addWidget(self.cbxClipHard, 5, 0, 1, 1)
self.cbxShowPostprocessingLayers = QtGui.QCheckBox(self.scrollAreaWidgetContents)
self.cbxShowPostprocessingLayers.setObjectName(_fromUtf8("cbxShowPostprocessingLayers"))
self.gridLayout.addWidget(self.cbxShowPostprocessingLayers, 6, 0, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_6 = QtGui.QLabel(self.scrollAreaWidgetContents)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.horizontalLayout.addWidget(self.label_6)
self.dsbFemaleRatioDefault = QtGui.QDoubleSpinBox(self.scrollAreaWidgetContents)
self.dsbFemaleRatioDefault.setAccelerated(True)
self.dsbFemaleRatioDefault.setMaximum(1.0)
self.dsbFemaleRatioDefault.setSingleStep(0.01)
self.dsbFemaleRatioDefault.setObjectName(_fromUtf8("dsbFemaleRatioDefault"))
self.horizontalLayout.addWidget(self.dsbFemaleRatioDefault)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.gridLayout.addLayout(self.horizontalLayout, 7, 0, 1, 1)
self.grpNotImplemented = QtGui.QGroupBox(self.scrollAreaWidgetContents)
self.grpNotImplemented.setObjectName(_fromUtf8("grpNotImplemented"))
self.gridLayout_3 = QtGui.QGridLayout(self.grpNotImplemented)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.lineEdit_4 = QtGui.QLineEdit(self.grpNotImplemented)
self.lineEdit_4.setEnabled(True)
self.lineEdit_4.setObjectName(_fromUtf8("lineEdit_4"))
self.horizontalLayout_4.addWidget(self.lineEdit_4)
self.toolButton_4 = QtGui.QToolButton(self.grpNotImplemented)
self.toolButton_4.setEnabled(True)
self.toolButton_4.setObjectName(_fromUtf8("toolButton_4"))
self.horizontalLayout_4.addWidget(self.toolButton_4)
self.gridLayout_3.addLayout(self.horizontalLayout_4, 8, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.grpNotImplemented)
self.label_4.setEnabled(True)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_3.addWidget(self.label_4, 7, 0, 1, 1)
self.cbxBubbleLayersUp = QtGui.QCheckBox(self.grpNotImplemented)
self.cbxBubbleLayersUp.setEnabled(True)
self.cbxBubbleLayersUp.setObjectName(_fromUtf8("cbxBubbleLayersUp"))
self.gridLayout_3.addWidget(self.cbxBubbleLayersUp, 0, 0, 1, 1)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.label_5 = QtGui.QLabel(self.grpNotImplemented)
self.label_5.setEnabled(True)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.horizontalLayout_5.addWidget(self.label_5)
self.spinBox = QtGui.QSpinBox(self.grpNotImplemented)
self.spinBox.setEnabled(True)
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.horizontalLayout_5.addWidget(self.spinBox)
self.gridLayout_3.addLayout(self.horizontalLayout_5, 9, 0, 1, 1)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.lineEdit = QtGui.QLineEdit(self.grpNotImplemented)
self.lineEdit.setEnabled(True)
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.horizontalLayout_2.addWidget(self.lineEdit)
self.toolButton = QtGui.QToolButton(self.grpNotImplemented)
self.toolButton.setEnabled(True)
self.toolButton.setObjectName(_fromUtf8("toolButton"))
self.horizontalLayout_2.addWidget(self.toolButton)
self.gridLayout_3.addLayout(self.horizontalLayout_2, 2, 0, 1, 1)
self.label = QtGui.QLabel(self.grpNotImplemented)
self.label.setEnabled(True)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_3.addWidget(self.label, 1, 0, 1, 1)
self.cbxUseThread = QtGui.QCheckBox(self.grpNotImplemented)
self.cbxUseThread.setObjectName(_fromUtf8("cbxUseThread"))
self.gridLayout_3.addWidget(self.cbxUseThread, 10, 0, 1, 1)
self.gridLayout.addWidget(self.grpNotImplemented, 8, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem1, 9, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout_4.addWidget(self.scrollArea, 0, 0, 1, 1)
self.tabWidget.addTab(self.tab_basic, _fromUtf8(""))
self.tab_templates = QtGui.QWidget()
self.tab_templates.setObjectName(_fromUtf8("tab_templates"))
self.gridLayout_5 = QtGui.QGridLayout(self.tab_templates)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.lblOrganisationLogo = QtGui.QLabel(self.tab_templates)
self.lblOrganisationLogo.setEnabled(True)
self.lblOrganisationLogo.setObjectName(_fromUtf8("lblOrganisationLogo"))
self.gridLayout_5.addWidget(self.lblOrganisationLogo, 0, 0, 1, 1)
self.horizontalLayout_9 = QtGui.QHBoxLayout()
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.leOrgLogoPath = QtGui.QLineEdit(self.tab_templates)
self.leOrgLogoPath.setEnabled(True)
self.leOrgLogoPath.setObjectName(_fromUtf8("leOrgLogoPath"))
self.horizontalLayout_9.addWidget(self.leOrgLogoPath)
self.toolOrgLogoPath = QtGui.QToolButton(self.tab_templates)
self.toolOrgLogoPath.setEnabled(True)
self.toolOrgLogoPath.setObjectName(_fromUtf8("toolOrgLogoPath"))
self.horizontalLayout_9.addWidget(self.toolOrgLogoPath)
self.gridLayout_5.addLayout(self.horizontalLayout_9, 1, 0, 1, 1)
self.lblNorthArrowPath = QtGui.QLabel(self.tab_templates)
self.lblNorthArrowPath.setEnabled(True)
self.lblNorthArrowPath.setObjectName(_fromUtf8("lblNorthArrowPath"))
self.gridLayout_5.addWidget(self.lblNorthArrowPath, 2, 0, 1, 1)
self.horizontalLayout_8 = QtGui.QHBoxLayout()
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.leNorthArrowPath = QtGui.QLineEdit(self.tab_templates)
self.leNorthArrowPath.setEnabled(True)
self.leNorthArrowPath.setObjectName(_fromUtf8("leNorthArrowPath"))
self.horizontalLayout_8.addWidget(self.leNorthArrowPath)
self.toolNorthArrowPath = QtGui.QToolButton(self.tab_templates)
self.toolNorthArrowPath.setEnabled(True)
self.toolNorthArrowPath.setObjectName(_fromUtf8("toolNorthArrowPath"))
self.horizontalLayout_8.addWidget(self.toolNorthArrowPath)
self.gridLayout_5.addLayout(self.horizontalLayout_8, 3, 0, 1, 1)
self.lblReportTemplate = QtGui.QLabel(self.tab_templates)
self.lblReportTemplate.setEnabled(True)
self.lblReportTemplate.setObjectName(_fromUtf8("lblReportTemplate"))
self.gridLayout_5.addWidget(self.lblReportTemplate, 4, 0, 1, 1)
self.horizontalLayout_10 = QtGui.QHBoxLayout()
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.leReportTemplatePath = QtGui.QLineEdit(self.tab_templates)
self.leReportTemplatePath.setEnabled(True)
self.leReportTemplatePath.setObjectName(_fromUtf8("leReportTemplatePath"))
self.horizontalLayout_10.addWidget(self.leReportTemplatePath)
self.toolReportTemplatePath = QtGui.QToolButton(self.tab_templates)
self.toolReportTemplatePath.setEnabled(True)
self.toolReportTemplatePath.setObjectName(_fromUtf8("toolReportTemplatePath"))
self.horizontalLayout_10.addWidget(self.toolReportTemplatePath)
self.gridLayout_5.addLayout(self.horizontalLayout_10, 5, 0, 1, 1)
self.label_2 = QtGui.QLabel(self.tab_templates)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_5.addWidget(self.label_2, 6, 0, 1, 1)
self.txtDisclaimer = QtGui.QPlainTextEdit(self.tab_templates)
self.txtDisclaimer.setObjectName(_fromUtf8("txtDisclaimer"))
self.gridLayout_5.addWidget(self.txtDisclaimer, 7, 0, 1, 1)
self.tabWidget.addTab(self.tab_templates, _fromUtf8(""))
self.tab_advanced = QtGui.QWidget()
self.tab_advanced.setObjectName(_fromUtf8("tab_advanced"))
self.gridLayout_6 = QtGui.QGridLayout(self.tab_advanced)
self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
self.lblKeywordCache = QtGui.QLabel(self.tab_advanced)
self.lblKeywordCache.setEnabled(True)
self.lblKeywordCache.setObjectName(_fromUtf8("lblKeywordCache"))
self.gridLayout_6.addWidget(self.lblKeywordCache, 0, 0, 1, 1)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.leKeywordCachePath = QtGui.QLineEdit(self.tab_advanced)
self.leKeywordCachePath.setEnabled(True)
self.leKeywordCachePath.setObjectName(_fromUtf8("leKeywordCachePath"))
self.horizontalLayout_6.addWidget(self.leKeywordCachePath)
self.toolKeywordCachePath = QtGui.QToolButton(self.tab_advanced)
self.toolKeywordCachePath.setEnabled(True)
self.toolKeywordCachePath.setObjectName(_fromUtf8("toolKeywordCachePath"))
self.horizontalLayout_6.addWidget(self.toolKeywordCachePath)
self.gridLayout_6.addLayout(self.horizontalLayout_6, 1, 0, 1, 1)
self.cbxUseSentry = QtGui.QCheckBox(self.tab_advanced)
self.cbxUseSentry.setObjectName(_fromUtf8("cbxUseSentry"))
self.gridLayout_6.addWidget(self.cbxUseSentry, 2, 0, 1, 1)
self.textBrowser = QtGui.QTextBrowser(self.tab_advanced)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textBrowser.sizePolicy().hasHeightForWidth())
self.textBrowser.setSizePolicy(sizePolicy)
self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
self.gridLayout_6.addWidget(self.textBrowser, 3, 0, 1, 1)
self.cbxDevMode = QtGui.QCheckBox(self.tab_advanced)
self.cbxDevMode.setObjectName(_fromUtf8("cbxDevMode"))
self.gridLayout_6.addWidget(self.cbxDevMode, 4, 0, 1, 1)
self.cbxNativeZonalStats = QtGui.QCheckBox(self.tab_advanced)
self.cbxNativeZonalStats.setObjectName(_fromUtf8("cbxNativeZonalStats"))
self.gridLayout_6.addWidget(self.cbxNativeZonalStats, 5, 0, 1, 1)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_6.addItem(spacerItem2, 6, 0, 1, 1)
self.tabWidget.addTab(self.tab_advanced, _fromUtf8(""))
self.gridLayout_2.addWidget(self.tabWidget, 0, 0, 1, 1)
self.retranslateUi(OptionsDialogBase)
self.tabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), OptionsDialogBase.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), OptionsDialogBase.reject)
QtCore.QMetaObject.connectSlotsByName(OptionsDialogBase)
OptionsDialogBase.setTabOrder(self.cbxVisibleLayersOnly, self.lineEdit)
OptionsDialogBase.setTabOrder(self.lineEdit, self.cbxSetLayerNameFromTitle)
OptionsDialogBase.setTabOrder(self.cbxSetLayerNameFromTitle, self.lineEdit_4)
OptionsDialogBase.setTabOrder(self.lineEdit_4, self.toolButton_4)
OptionsDialogBase.setTabOrder(self.toolButton_4, self.spinBox)
OptionsDialogBase.setTabOrder(self.spinBox, self.tabWidget)
OptionsDialogBase.setTabOrder(self.tabWidget, self.cbxZoomToImpact)
OptionsDialogBase.setTabOrder(self.cbxZoomToImpact, self.cbxHideExposure)
OptionsDialogBase.setTabOrder(self.cbxHideExposure, self.cbxBubbleLayersUp)
OptionsDialogBase.setTabOrder(self.cbxBubbleLayersUp, self.toolButton)
OptionsDialogBase.setTabOrder(self.toolButton, self.cbxShowPostprocessingLayers)
OptionsDialogBase.setTabOrder(self.cbxShowPostprocessingLayers, self.buttonBox)
OptionsDialogBase.setTabOrder(self.buttonBox, self.cbxClipToViewport)
OptionsDialogBase.setTabOrder(self.cbxClipToViewport, self.cbxUseThread)
OptionsDialogBase.setTabOrder(self.cbxUseThread, self.dsbFemaleRatioDefault)
OptionsDialogBase.setTabOrder(self.dsbFemaleRatioDefault, self.cbxClipHard)
OptionsDialogBase.setTabOrder(self.cbxClipHard, self.scrollArea)
OptionsDialogBase.setTabOrder(self.scrollArea, self.txtDisclaimer)
OptionsDialogBase.setTabOrder(self.txtDisclaimer, self.leNorthArrowPath)
OptionsDialogBase.setTabOrder(self.leNorthArrowPath, self.toolNorthArrowPath)
OptionsDialogBase.setTabOrder(self.toolNorthArrowPath, self.leOrgLogoPath)
OptionsDialogBase.setTabOrder(self.leOrgLogoPath, self.toolOrgLogoPath)
OptionsDialogBase.setTabOrder(self.toolOrgLogoPath, self.cbxDevMode)
OptionsDialogBase.setTabOrder(self.cbxDevMode, self.textBrowser)
OptionsDialogBase.setTabOrder(self.textBrowser, self.cbxUseSentry)
OptionsDialogBase.setTabOrder(self.cbxUseSentry, self.leKeywordCachePath)
OptionsDialogBase.setTabOrder(self.leKeywordCachePath, self.toolKeywordCachePath)
def retranslateUi(self, OptionsDialogBase):
OptionsDialogBase.setWindowTitle(_translate("OptionsDialogBase", "InaSAFE - Options", None))
self.cbxVisibleLayersOnly.setText(_translate("OptionsDialogBase", "Only show visible layers in InaSAFE dock", None))
self.cbxSetLayerNameFromTitle.setText(_translate("OptionsDialogBase", "Set QGIS layer name from \'title\' in keywords", None))
self.cbxZoomToImpact.setText(_translate("OptionsDialogBase", "Zoom to impact layer on scenario estimate completion", None))
self.cbxHideExposure.setText(_translate("OptionsDialogBase", "Hide exposure layer on scenario estimate completion", None))
self.cbxClipToViewport.setToolTip(_translate("OptionsDialogBase", "Turn on to clip hazard and exposure layers to the currently visible extent on the map canvas", None))
self.cbxClipToViewport.setText(_translate("OptionsDialogBase", "Clip datasets to visible extent before analysis", None))
self.cbxClipHard.setText(_translate("OptionsDialogBase", "When clipping, also clip features (i.e. will clip polygon smaller)", None))
self.cbxShowPostprocessingLayers.setToolTip(_translate("OptionsDialogBase", "Turn on to see the intermediate files generated by the postprocessing steps in the map canvas", None))
self.cbxShowPostprocessingLayers.setText(_translate("OptionsDialogBase", "Show intermediate layers generated by postprocessing", None))
self.label_6.setText(_translate("OptionsDialogBase", "Female ratio default value", None))
self.grpNotImplemented.setTitle(_translate("OptionsDialogBase", "Not yet implemented", None))
self.toolButton_4.setText(_translate("OptionsDialogBase", "...", None))
self.label_4.setText(_translate("OptionsDialogBase", "Organisation name (for maps, reports etc.)", None))
self.cbxBubbleLayersUp.setText(_translate("OptionsDialogBase", "Bubble exposure and hazard layers to top when selected", None))
self.label_5.setText(_translate("OptionsDialogBase", "DPI (Maps and reports)", None))
self.toolButton.setText(_translate("OptionsDialogBase", "...", None))
self.label.setText(_translate("OptionsDialogBase", "Location for results", None))
self.cbxUseThread.setText(_translate("OptionsDialogBase", "Run analysis in a separate thread (experimental)", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_basic), _translate("OptionsDialogBase", "Basic Options", None))
self.lblOrganisationLogo.setText(_translate("OptionsDialogBase", "Organisation logo", None))
self.toolOrgLogoPath.setText(_translate("OptionsDialogBase", "...", None))
self.lblNorthArrowPath.setText(_translate("OptionsDialogBase", "North arrow image", None))
self.toolNorthArrowPath.setText(_translate("OptionsDialogBase", "...", None))
self.lblReportTemplate.setText(_translate("OptionsDialogBase", "Report templates directory", None))
self.toolReportTemplatePath.setText(_translate("OptionsDialogBase", "...", None))
self.label_2.setText(_translate("OptionsDialogBase", "Organisation disclaimer text", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_templates), _translate("OptionsDialogBase", "Template Options", None))
self.lblKeywordCache.setText(_translate("OptionsDialogBase", "Keyword cache for remote datasources", None))
self.toolKeywordCachePath.setText(_translate("OptionsDialogBase", "...", None))
self.cbxUseSentry.setText(_translate("OptionsDialogBase", "Help to improve InaSAFE by submitting errors to a remote server", None))
self.textBrowser.setHtml(_translate("OptionsDialogBase", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.Lucida Grande UI\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Cantarell\'; font-size:12pt; font-weight:600; color:#f50000;\">Note:</span><span style=\" font-family:\'Cantarell\'; font-size:12pt;\"> The above setting requires a QGIS restart to disable / enable. Error messages and diagnostic information will be posted to http://sentry.linfiniti.com/inasafe-desktop/. Some institutions may not allow you to enable this feature - check with your network administrator if unsure. Although the data is submitted anonymously, the information contained in tracebacks may contain file system paths which reveal your identity or other information from your system.</span></p></body></html>", None))
self.cbxDevMode.setText(_translate("OptionsDialogBase", "Enable developer mode for dock webkit (needs restart)", None))
self.cbxNativeZonalStats.setText(_translate("OptionsDialogBase", "Use QGIS zonal statistics (leave unchecked to use InaSAFE\'s zonal statistics)", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_advanced), _translate("OptionsDialogBase", "Advanced", None))
import resources_rc
| assefay/inasafe | safe_qgis/ui/options_dialog_base.py | Python | gpl-3.0 | 23,148 | 0.003629 |
import codecs
import os.path
import sys
import setuptools
# Read README.md for the long description on PyPI.
try:
long_description = open("README.md", encoding="utf-8").read()
except Exception as e:
sys.stderr.write(f"Failed to read README.md:\n {e}\n")
sys.stderr.flush()
long_description = ""
# Get the package's version number from the __init__.py file
def read(rel_path):
"""Read the file located at the provided relative path."""
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), "r") as fp:
return fp.read()
def get_version(rel_path):
"""Get the package's version number.
We fetch the version number from the `__version__` variable located in the
package root's `__init__.py` file. This way there is only a single source
of truth for the package's version number.
"""
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
INSTALL_REQS = read("requirements.txt").splitlines()
setuptools.setup(
name="blackjax",
author="The BlackJAX team",
version=get_version("blackjax/__init__.py"),
description="Flexible and fast inference in Python",
long_description=long_description,
packages=setuptools.find_packages(),
install_requires=INSTALL_REQS,
long_description_content_type="text/markdown",
keywords="probabilistic machine learning bayesian statistics sampling algorithms",
license="Apache License 2.0",
)
| blackjax-devs/blackjax | setup.py | Python | apache-2.0 | 1,650 | 0.000606 |
import collections
import itertools
import sys
def count_homopolymers( fh ):
s = []
print "building..."
for line in fh:
if line.startswith( '>' ):
continue
s.append( line.strip() )
print "counting..."
runs = collections.defaultdict(int)
best = collections.defaultdict(int)
last = None
for c in ''.join(s):
runs[c] += 1
best[c] = max(best[c], runs[c])
if last is not None and c != last:
runs[last] = 0
last = c
return best
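# Usage sketch (assumed invocation, not in the original script): feed a FASTA
# file on stdin and print the longest homopolymer run per base, e.g.
#   python count_homopolymers.py < reads.fa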
if __name__ == '__main__':
print count_homopolymers( sys.stdin )
| supernifty/mgsa | mgsa/count_homopolymers.py | Python | mit | 550 | 0.038182 |
from sympy import (pi, sin, cos, Symbol, Integral, summation, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities.pytest import skip, raises
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from sympy.core.decorators import wraps
from tempfile import NamedTemporaryFile
import os
import sys
class MockPrint(object):
def write(self, s):
pass
def disable_print(func, *args, **kwargs):
@wraps(func)
def wrapper(*args, **kwargs):
sys.stdout = MockPrint()
func(*args, **kwargs)
sys.stdout = sys.__stdout__
return wrapper
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
class TmpFileManager:
tmp_files = []
@classmethod
def tmp_file(cls, name=''):
cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
return cls.tmp_files[-1]
@classmethod
def cleanup(cls):
        for tmp_file in cls.tmp_files:
            os.remove(tmp_file)
def plot_and_save(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
###
# Examples from the 'introduction' notebook
###
p = plot(x)
p = plot(x*sin(x), x*cos(x))
p.extend(p)
p[0].line_color = lambda a: a
p[1].line_color = 'b'
p.title = 'Big title'
p.xlabel = 'the x axis'
p[1].label = 'straight line'
p.legend = True
p.aspect_ratio = (1, 1)
p.xlim = (-15, 20)
p.save(tmp_file('%s_basic_options_and_colors' % name))
p.extend(plot(x + 1))
p.append(plot(x + 3, x**2)[1])
p.save(tmp_file('%s_plot_extend_append' % name))
p[2] = plot(x**2, (x, -2, 3))
p.save(tmp_file('%s_plot_setitem' % name))
p = plot(sin(x), (x, -2*pi, 4*pi))
p.save(tmp_file('%s_line_explicit' % name))
p = plot(sin(x))
p.save(tmp_file('%s_line_default_range' % name))
p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
p.save(tmp_file('%s_line_multiple_range' % name))
raises(ValueError, lambda: plot(x, y))
#parametric 2d plots.
#Single plot with default range.
plot_parametric(sin(x), cos(x)).save(tmp_file())
#Single plot with range.
p = plot_parametric(sin(x), cos(x), (x, -5, 5))
p.save(tmp_file('%s_parametric_range' % name))
#Multiple plots with same range.
p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
p.save(tmp_file('%s_parametric_multiple' % name))
#Multiple plots with different ranges.
p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
p.save(tmp_file('%s_parametric_multiple_ranges' % name))
#depth of recursion specified.
p = plot_parametric(x, sin(x), depth=13)
p.save(tmp_file('%s_recursion_depth' % name))
#No adaptive sampling.
p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
p.save(tmp_file('%s_adaptive' % name))
#3d parametric plots
p = plot3d_parametric_line(sin(x), cos(x), x)
p.save(tmp_file('%s_3d_line' % name))
p = plot3d_parametric_line(
(sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
p.save(tmp_file('%s_3d_line_multiple' % name))
p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
p.save(tmp_file('%s_3d_line_points' % name))
# 3d surface single plot.
p = plot3d(x * y)
p.save(tmp_file('%s_surface' % name))
# Multiple 3D plots with same range.
p = plot3d(-x * y, x * y, (x, -5, 5))
p.save(tmp_file('%s_surface_multiple' % name))
# Multiple 3D plots with different ranges.
p = plot3d(
(x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_surface_multiple_ranges' % name))
# Single Parametric 3D plot
p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
p.save(tmp_file('%s_parametric_surface' % name))
# Multiple Parametric 3D plots.
p = plot3d_parametric_surface(
(x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
(sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
p.save(tmp_file('%s_parametric_surface' % name))
###
# Examples from the 'colors' notebook
###
p = plot(sin(x))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_line_arity2' % name))
p = plot(x*sin(x), x*cos(x), (x, 0, 10))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_param_line_arity1' % name))
p[0].line_color = lambda a, b: a
p.save(tmp_file('%s_colors_param_line_arity2a' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_param_line_arity2b' % name))
p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
cos(x) + 0.1*cos(x)*cos(7*x),
0.1*sin(7*x),
(x, 0, 2*pi))
p[0].line_color = lambda a: sin(4*a)
p.save(tmp_file('%s_colors_3d_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_3d_line_arity2' % name))
p[0].line_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_3d_line_arity3' % name))
p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_surface_arity1' % name))
p[0].surface_color = lambda a, b: b
p.save(tmp_file('%s_colors_surface_arity2' % name))
p[0].surface_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_surface_arity3a' % name))
p[0].surface_color = lambda a, b, c: sqrt((a - 3*pi)**2 + b**2)
p.save(tmp_file('%s_colors_surface_arity3b' % name))
p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
(x, -1, 1), (y, -1, 1))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_param_surf_arity1' % name))
p[0].surface_color = lambda a, b: a*b
p.save(tmp_file('%s_colors_param_surf_arity2' % name))
p[0].surface_color = lambda a, b, c: sqrt(a**2 + b**2 + c**2)
p.save(tmp_file('%s_colors_param_surf_arity3' % name))
###
# Examples from the 'advanced' notebook
###
i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
p = plot(i, (y, 1, 5))
p.save(tmp_file('%s_advanced_integral' % name))
s = summation(1/x**y, (x, 1, oo))
p = plot(s, (y, 2, 10))
p.save(tmp_file('%s_advanced_inf_sum' % name))
p = plot(summation(1/x, (x, 1, y)), (y, 2, 10), show=False)
p[0].only_integers = True
p[0].steps = True
p.save(tmp_file('%s_advanced_fin_sum' % name))
###
# Test expressions that can not be translated to np and generate complex
# results.
###
plot(sin(x) + I*cos(x)).save(tmp_file())
plot(sqrt(sqrt(-x))).save(tmp_file())
plot(LambertW(x)).save(tmp_file())
plot(sqrt(LambertW(x))).save(tmp_file())
#Characteristic function of a StudentT distribution with nu=10
plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
+ meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
x = Symbol('x')
f = lambdify([x], Max(x, 5))
# XXX should f be tested? If f(2) is attempted, an
# error is raised because a complex produced during wrapping of the arg
# is being compared with an int.
assert Max(2, 5) == 5
assert Max(5, 7) == 7
x = Symbol('x-3')
f = lambdify([x], x + 1)
assert f(1) == 2
@disable_print
def test_append_issue_7140():
x = Symbol('x')
p1 = plot(x)
p2 = plot(x**2)
p3 = plot(x + 2)
# append a series
p2.append(p1[0])
assert len(p2._series) == 2
with raises(TypeError):
p1.append(p2)
with raises(TypeError):
p1.append(p2._series)
| sahilshekhawat/sympy | sympy/plotting/tests/test_plot.py | Python | bsd-3-clause | 8,460 | 0.002719 |
from common.constants import gameModes
from pp import ez
from pp import wifipiano3
from pp import cicciobello
PP_CALCULATORS = {
gameModes.STD: ez.Ez,
gameModes.TAIKO: ez.Ez,
gameModes.CTB: cicciobello.Cicciobello,
gameModes.MANIA: wifipiano3.WiFiPiano
}
| osuripple/lets | pp/__init__.py | Python | agpl-3.0 | 272 | 0 |
from compiler.compiler import LambdaCompiler
def main():
f = open('input.txt', 'r')
compiler = LambdaCompiler(f)
compiler.perform('output.py')
if __name__ == "__main__":
main()
| felipewaku/compiladores-p2 | lambda_compiler/__main__.py | Python | mit | 195 | 0.010256 |
import re
from django.conf import settings
from tincan import (
Activity,
ActivityDefinition,
LanguageMap
)
from xapi.patterns.base import BasePattern
from xapi.patterns.eco_verbs import (
LearnerCreatesWikiPageVerb,
LearnerEditsWikiPageVerb
)
class BaseWikiRule(BasePattern): # pylint: disable=abstract-method
def convert(self, evt, course_id):
title = None
obj = None
try:
            # We need to do this because we receive a string instead of a dictionary
# event_data = json.loads(evt['event'])
event_data = evt['event']
title = event_data['POST'].get('title', None)
except: # pylint: disable=bare-except
pass
if title:
title = title[0] # from parametervalues to single value
verb = self.get_verb() # pylint: disable=no-member
obj = Activity(
id=self.fix_id(self.base_url, evt['context']['path']),
definition=ActivityDefinition(
name=LanguageMap({'en-US': title}),
type="http://www.ecolearning.eu/expapi/activitytype/wiki"
)
)
else:
verb = None # Skip the not really created pages
return verb, obj
class CreateWikiRule(BaseWikiRule, LearnerCreatesWikiPageVerb):
def match(self, evt, course_id):
return re.match(
'/courses/'+settings.COURSE_ID_PATTERN+'/wiki/_create/?',
evt['event_type'])
class EditWikiRule(BaseWikiRule, LearnerEditsWikiPageVerb):
def match(self, evt, course_id):
return re.match(
'/courses/'+settings.COURSE_ID_PATTERN+r'/wiki/\w+/_edit/?',
evt['event_type'])
| marcore/pok-eco | xapi/patterns/manage_wiki.py | Python | agpl-3.0 | 1,746 | 0.001145 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Animation'
db.create_table(u'aldryn_wow_animation', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('animation_class', self.gf('django.db.models.fields.CharField')(default='bounce', max_length=25)),
('infinite', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
))
db.send_create_signal(u'aldryn_wow', ['Animation'])
# Adding model 'WOWAnimation'
db.create_table(u'aldryn_wow_wowanimation', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('animation_class', self.gf('django.db.models.fields.CharField')(default='bounce', max_length=25)),
('duration', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('delay', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('offset', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('iteration', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
))
db.send_create_signal(u'aldryn_wow', ['WOWAnimation'])
def backwards(self, orm):
# Deleting model 'Animation'
db.delete_table(u'aldryn_wow_animation')
# Deleting model 'WOWAnimation'
db.delete_table(u'aldryn_wow_wowanimation')
models = {
u'aldryn_wow.animation': {
'Meta': {'object_name': 'Animation', '_ormbases': ['cms.CMSPlugin']},
'animation_class': ('django.db.models.fields.CharField', [], {'default': "'bounce'", 'max_length': '25'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'infinite': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'aldryn_wow.wowanimation': {
'Meta': {'object_name': 'WOWAnimation', '_ormbases': ['cms.CMSPlugin']},
'animation_class': ('django.db.models.fields.CharField', [], {'default': "'bounce'", 'max_length': '25'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'delay': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'iteration': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'offset': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
}
}
    complete_apps = ['aldryn_wow']
| narayanaditya95/aldryn-wow | aldryn_wow/south_migrations/0001_initial.py | Python | bsd-3-clause | 5058 | 0.007513 |
from rest_framework import serializers
from ..upload_handling.serializers import ArticleImageSerializer
class ArticleSerializer(serializers.Serializer):
main_title = serializers.CharField(max_length=255)
sub_title = serializers.CharField(max_length=255)
author = serializers.CharField(max_length=255)
image = ArticleImageSerializer()
date = serializers.CharField(max_length=40)
text = serializers.CharField()
| REBradley/WineArb | winearb/articles/serializers.py | Python | bsd-3-clause | 435 | 0.002299 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-like layers and utilities that implement Spectral Normalization.
Based on "Spectral Normalization for Generative Adversarial Networks" by Miyato,
et al in ICLR 2018. https://openreview.net/pdf?id=B1QRgziT-
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numbers
import re
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import base_layer_utils as keras_base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
__all__ = [
'compute_spectral_norm', 'spectral_normalize', 'spectral_norm_regularizer',
'spectral_normalization_custom_getter', 'keras_spectral_normalization'
]
# tf.bfloat16 should work, but tf.matmul converts those to tf.float32 which then
# can't directly be assigned back to the tf.bfloat16 variable.
_OK_DTYPES_FOR_SPECTRAL_NORM = (dtypes.float16, dtypes.float32, dtypes.float64)
_PERSISTED_U_VARIABLE_SUFFIX = 'spectral_norm_u'
def compute_spectral_norm(w_tensor, power_iteration_rounds=1, name=None):
"""Estimates the largest singular value in the weight tensor.
Args:
w_tensor: The weight matrix whose spectral norm should be computed.
power_iteration_rounds: The number of iterations of the power method to
      perform. A higher number yields a better approximation.
name: An optional scope name.
Returns:
The largest singular value (the spectral norm) of w.
"""
with variable_scope.variable_scope(name, 'spectral_norm'):
# The paper says to flatten convnet kernel weights from
# (C_out, C_in, KH, KW) to (C_out, C_in * KH * KW). But TensorFlow's Conv2D
# kernel weight shape is (KH, KW, C_in, C_out), so it should be reshaped to
# (KH * KW * C_in, C_out), and similarly for other layers that put output
# channels as last dimension.
# n.b. this means that w here is equivalent to w.T in the paper.
w = array_ops.reshape(w_tensor, (-1, w_tensor.get_shape()[-1]))
# Persisted approximation of first left singular vector of matrix `w`.
u_var = variable_scope.get_variable(
_PERSISTED_U_VARIABLE_SUFFIX,
shape=(w.shape[0], 1),
dtype=w.dtype,
initializer=init_ops.random_normal_initializer(),
trainable=False)
u = u_var
# Use power iteration method to approximate spectral norm.
for _ in range(power_iteration_rounds):
# `v` approximates the first right singular vector of matrix `w`.
v = nn.l2_normalize(math_ops.matmul(array_ops.transpose(w), u))
u = nn.l2_normalize(math_ops.matmul(w, v))
# Update persisted approximation.
with ops.control_dependencies([u_var.assign(u, name='update_u')]):
u = array_ops.identity(u)
u = array_ops.stop_gradient(u)
v = array_ops.stop_gradient(v)
# Largest singular value of `w`.
spectral_norm = math_ops.matmul(
math_ops.matmul(array_ops.transpose(u), w), v)
spectral_norm.shape.assert_is_fully_defined()
spectral_norm.shape.assert_is_compatible_with([1, 1])
return spectral_norm[0][0]
def spectral_normalize(w, power_iteration_rounds=1, name=None):
"""Normalizes a weight matrix by its spectral norm.
Args:
w: The weight matrix to be normalized.
power_iteration_rounds: The number of iterations of the power method to
      perform. A higher number yields a better approximation.
name: An optional scope name.
Returns:
A normalized weight matrix tensor.
"""
with variable_scope.variable_scope(name, 'spectral_normalize'):
w_normalized = w / compute_spectral_norm(
w, power_iteration_rounds=power_iteration_rounds)
return array_ops.reshape(w_normalized, w.get_shape())
def spectral_norm_regularizer(scale, power_iteration_rounds=1, scope=None):
"""Returns a functions that can be used to apply spectral norm regularization.
Small spectral norms enforce a small Lipschitz constant, which is necessary
for Wasserstein GANs.
Args:
scale: A scalar multiplier. 0.0 disables the regularizer.
power_iteration_rounds: The number of iterations of the power method to
      perform. A higher number yields a better approximation.
scope: An optional scope name.
Returns:
A function with the signature `sn(weights)` that applies spectral norm
regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.0:
raise ValueError(
'Setting a scale less than 0 on a regularizer: %g' % scale)
if scale == 0.0:
logging.info('Scale of 0 disables regularizer.')
return lambda _: None
def sn(weights, name=None):
"""Applies spectral norm regularization to weights."""
with ops.name_scope(scope, 'SpectralNormRegularizer', [weights]) as name:
scale_t = ops.convert_to_tensor(
scale, dtype=weights.dtype.base_dtype, name='scale')
return math_ops.multiply(
scale_t,
compute_spectral_norm(
weights, power_iteration_rounds=power_iteration_rounds),
name=name)
return sn
def _default_name_filter(name):
"""A filter function to identify common names of weight variables.
Args:
name: The variable name.
Returns:
Whether `name` is a standard name for a weight/kernel variables used in the
Keras, tf.layers, tf.contrib.layers or tf.contrib.slim libraries.
"""
match = re.match(r'(.*\/)?(depthwise_|pointwise_)?(weights|kernel)$', name)
return match is not None
def spectral_normalization_custom_getter(name_filter=_default_name_filter,
power_iteration_rounds=1):
"""Custom getter that performs Spectral Normalization on a weight tensor.
Specifically it divides the weight tensor by its largest singular value. This
is intended to stabilize GAN training, by making the discriminator satisfy a
local 1-Lipschitz constraint.
Based on [Spectral Normalization for Generative Adversarial Networks][sn-gan].
[sn-gan]: https://openreview.net/forum?id=B1QRgziT-
To reproduce an SN-GAN, apply this custom_getter to every weight tensor of
your discriminator. The last dimension of the weight tensor must be the number
of output channels.
Apply this to layers by supplying this as the `custom_getter` of a
`tf.variable_scope`. For example:
with tf.variable_scope('discriminator',
custom_getter=spectral_norm_getter()):
net = discriminator_fn(net)
IMPORTANT: Keras does not respect the custom_getter supplied by the
VariableScope, so Keras users should use `keras_spectral_normalization`
instead of (or in addition to) this approach.
It is important to carefully select to which weights you want to apply
Spectral Normalization. In general you want to normalize the kernels of
convolution and dense layers, but you do not want to normalize biases. You
also want to avoid normalizing batch normalization (and similar) variables,
but in general such layers play poorly with Spectral Normalization, since the
gamma can cancel out the normalization in other layers. By default we supply a
filter that matches the kernel variable names of the dense and convolution
layers of the tf.layers, tf.contrib.layers, tf.keras and tf.contrib.slim
libraries. If you are using anything else you'll need a custom `name_filter`.
This custom getter internally creates a variable used to compute the spectral
norm by power iteration. It will update every time the variable is accessed,
which means the normalized discriminator weights may change slightly whilst
training the generator. Whilst unusual, this matches how the paper's authors
implement it, and in general additional rounds of power iteration can't hurt.
Args:
name_filter: Optionally, a method that takes a Variable name as input and
returns whether this Variable should be normalized.
power_iteration_rounds: The number of iterations of the power method to
      perform per step. A higher number yields a better approximation of the
true spectral norm.
Returns:
A custom getter function that applies Spectral Normalization to all
Variables whose names match `name_filter`.
Raises:
ValueError: If name_filter is not callable.
"""
if not callable(name_filter):
raise ValueError('name_filter must be callable')
def _internal_getter(getter, name, *args, **kwargs):
"""A custom getter function that applies Spectral Normalization.
Args:
getter: The true getter to call.
name: Name of new/existing variable, in the same format as
tf.get_variable.
*args: Other positional arguments, in the same format as tf.get_variable.
**kwargs: Keyword arguments, in the same format as tf.get_variable.
Returns:
The return value of `getter(name, *args, **kwargs)`, spectrally
normalized.
Raises:
ValueError: If used incorrectly, or if `dtype` is not supported.
"""
if not name_filter(name):
return getter(name, *args, **kwargs)
if name.endswith(_PERSISTED_U_VARIABLE_SUFFIX):
raise ValueError(
'Cannot apply Spectral Normalization to internal variables created '
'for Spectral Normalization. Tried to normalized variable [%s]' %
name)
if kwargs['dtype'] not in _OK_DTYPES_FOR_SPECTRAL_NORM:
raise ValueError('Disallowed data type {}'.format(kwargs['dtype']))
# This layer's weight Variable/PartitionedVariable.
w_tensor = getter(name, *args, **kwargs)
if len(w_tensor.get_shape()) < 2:
raise ValueError(
'Spectral norm can only be applied to multi-dimensional tensors')
return spectral_normalize(
w_tensor,
power_iteration_rounds=power_iteration_rounds,
name=(name + '/spectral_normalize'))
return _internal_getter
@contextlib.contextmanager
def keras_spectral_normalization(name_filter=_default_name_filter,
power_iteration_rounds=1):
"""A context manager that enables Spectral Normalization for Keras.
Keras doesn't respect the `custom_getter` in the VariableScope, so this is a
bit of a hack to make things work.
Usage:
with keras_spectral_normalization():
net = discriminator_fn(net)
Args:
name_filter: Optionally, a method that takes a Variable name as input and
returns whether this Variable should be normalized.
power_iteration_rounds: The number of iterations of the power method to
      perform per step. A higher number yields a better approximation of the
true spectral norm.
Yields:
A context manager that wraps the standard Keras variable creation method
with the `spectral_normalization_custom_getter`.
"""
original_make_variable = keras_base_layer_utils.make_variable
sn_getter = spectral_normalization_custom_getter(
name_filter=name_filter, power_iteration_rounds=power_iteration_rounds)
def make_variable_wrapper(name, *args, **kwargs):
return sn_getter(original_make_variable, name, *args, **kwargs)
keras_base_layer_utils.make_variable = make_variable_wrapper
yield
keras_base_layer_utils.make_variable = original_make_variable
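# -----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the library API). It
# assumes TF 1.x graph mode and a toy one-layer "discriminator" whose kernel
# variable is named 'weights', so the default name filter matches it.
if __name__ == '__main__':
  import tensorflow as tf
  def _toy_discriminator(net):
    # A single dense kernel; its name ends in 'weights', so it is normalized.
    w = tf.get_variable('weights', shape=[4, 1], dtype=tf.float32)
    return tf.matmul(net, w)
  with tf.variable_scope(
      'discriminator',
      custom_getter=spectral_normalization_custom_getter()):
    out = _toy_discriminator(tf.ones([2, 4]))
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(out))  # computed with spectrally normalized weights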
| hfp/tensorflow-xsmm | tensorflow/contrib/gan/python/features/python/spectral_normalization_impl.py | Python | apache-2.0 | 12,318 | 0.003491 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pygmo(CMakePackage):
"""Parallel Global Multiobjective Optimizer (and its Python alter ego
PyGMO) is a C++ / Python platform to perform parallel computations of
optimisation tasks (global and local) via the asynchronous generalized
island model."""
homepage = "https://esa.github.io/pygmo2/"
url = "https://github.com/esa/pygmo2/archive/v2.18.0.tar.gz"
git = "https://github.com/esa/pygmo2.git"
version('master', branch='master')
version('2.18.0', sha256='9f081cc973297894af09f713f889870ac452bfb32b471f9f7ba08a5e0bb9a125')
depends_on('pagmo2', type=('build', 'link'))
depends_on('mpi', type='build')
depends_on('py-pybind11@2.6.0:2.6.2', type='build')
depends_on('cmake@3.1:', type='build')
variant('shared', default=True, description='Build shared libraries')
def cmake_args(self):
args = [
self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
]
return args
| LLNL/spack | var/spack/repos/builtin/packages/pygmo/package.py | Python | lgpl-2.1 | 1,202 | 0.002496 |
from app import constants
from flask_babel import lazy_gettext as _
from flask_wtf import FlaskForm
from flask_wtf.file import FileField
from wtforms import StringField, SelectField, DateField
from wtforms.validators import InputRequired, Optional
from app.models.examination import test_type_default
class CourseForm(FlaskForm):
title = StringField(_('Title'), validators=[InputRequired()])
description = StringField(_('Description'))
class EducationForm(FlaskForm):
title = StringField(_('Title'), validators=[InputRequired()])
class EditForm(FlaskForm):
date = DateField(_('Date'),
validators=[Optional()],
format=constants.DATE_FORMAT)
course = SelectField(_('Course'), coerce=int,
validators=[InputRequired()])
education = SelectField(_('Education'), coerce=int,
validators=[InputRequired()])
test_type = SelectField(_('Examination type'), coerce=str,
default=test_type_default,
validators=[InputRequired()])
comment = StringField(_('Comment'))
examination = FileField(_('Examination'))
answers = FileField(_('Answers'))
| viaict/viaduct | app/forms/examination.py | Python | mit | 1,224 | 0 |
"""
Registers signal handlers at startup.
"""
# pylint: disable=unused-import
import openedx.core.djangoapps.monitoring.exceptions
| synergeticsedx/deployment-wipro | openedx/core/djangoapps/monitoring/startup.py | Python | agpl-3.0 | 131 | 0 |
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Command to start up the Docker plugin.
"""
from os import umask
from stat import S_IRUSR, S_IWUSR, S_IXUSR
from twisted.python.usage import Options
from twisted.internet.endpoints import serverFromString
from twisted.application.internet import StreamServerEndpointService
from twisted.web.server import Site
from twisted.python.filepath import FilePath
from twisted.internet.address import UNIXAddress
from ..common.script import (
flocker_standard_options, FlockerScriptRunner, main_for_service)
from ._api import VolumePlugin
from ..node.script import get_configuration
from ..apiclient import FlockerClient
from ..control.httpapi import REST_API_PORT
PLUGIN_PATH = FilePath("/run/docker/plugins/flocker/flocker.sock")
@flocker_standard_options
class DockerPluginOptions(Options):
"""
Command-line options for the Docker plugin.
"""
optParameters = [
["rest-api-port", "p", REST_API_PORT,
"Port to connect to for control service REST API."],
["agent-config", "c", "/etc/flocker/agent.yml",
"The configuration file for the local agent."],
]
def postOptions(self):
self['agent-config'] = FilePath(self['agent-config'])
class DockerPluginScript(object):
"""
Start the Docker plugin.
"""
def _create_listening_directory(self, directory_path):
"""
Create the parent directory for the Unix socket if it doesn't exist.
:param FilePath directory_path: The directory to create.
"""
original_umask = umask(0)
try:
if not directory_path.exists():
directory_path.makedirs()
directory_path.chmod(S_IRUSR | S_IWUSR | S_IXUSR)
finally:
umask(original_umask)
def main(self, reactor, options):
# Many places in both twisted.web and Klein are unhappy with
# listening on Unix socket, e.g.
# https://twistedmatrix.com/trac/ticket/5406 "fix" that by
# pretending we have a port number. Yes, I feel guilty.
UNIXAddress.port = 0
# We can use /etc/flocker/agent.yml and /etc/flocker/node.crt to load
# some information we need:
agent_config = get_configuration(options)
control_host = agent_config['control-service']['hostname']
node_id = agent_config['node-credential'].uuid
certificates_path = options["agent-config"].parent()
control_port = options["rest-api-port"]
flocker_client = FlockerClient(reactor, control_host, control_port,
certificates_path.child(b"cluster.crt"),
certificates_path.child(b"plugin.crt"),
certificates_path.child(b"plugin.key"))
self._create_listening_directory(PLUGIN_PATH.parent())
endpoint = serverFromString(
reactor, "unix:{}:mode=600".format(PLUGIN_PATH.path))
service = StreamServerEndpointService(endpoint, Site(
VolumePlugin(reactor, flocker_client, node_id).app.resource()))
return main_for_service(reactor, service)
def docker_plugin_main():
"""
Script entry point that runs the Docker plugin.
"""
return FlockerScriptRunner(script=DockerPluginScript(),
options=DockerPluginOptions()).main()
| hackday-profilers/flocker | flocker/dockerplugin/_script.py | Python | apache-2.0 | 3,412 | 0 |
#!/usr/bin/env python
'''
Main entry to worch from a waf wscript file.
Use the following in the options(), configure() and build() waf wscript methods:
ctx.load('orch.tools', tooldir='.')
'''
def options(opt):
opt.add_option('--orch-config', action = 'store', default = 'orch.cfg',
help='Give an orchestration configuration file.')
opt.add_option('--orch-start', action = 'store', default = 'start',
help='Set the section to start the orchestration')
def configure(cfg):
import orch.configure
orch.configure.configure(cfg)
def build(bld):
import orch.build
orch.build.build(bld)
# the stuff below is for augmenting waf
import time
from orch.wafutil import exec_command
from orch.util import string2list
default_step_cwd = dict(
download = '{download_dir}',
unpack = '{source_dir}',
patch = '{source_dir}',
prepare = '{build_dir}',
build = '{build_dir}',
install = '{build_dir}',
)
# Main interface to worch configuration items
class WorchConfig(object):
def __init__(self, **pkgcfg):
self._config = pkgcfg
def __getattr__(self, name):
return self._config[name]
def get(self, name, default = None):
return self._config.get(name,default)
def format(self, string, **kwds):
'''
Return a string formatted with kwds and configuration items
'''
d = dict(self._config, **kwds)
return string.format(**d)
def depends_step(self, step):
'''
Return a list of steps that this step depends on
'''
d = self._config.get('depends')
if not d: return list()
ds = [x[1] for x in [s.split(':') for s in string2list(d)] if x[0] == step]
return ds
def dependencies(self):
'''
Return all dependencies set via "depends" configuration items
return list of tuples: (mystep, package, package_step)
eg: ('prepare', 'gcc', 'install')
'''
ret = list()
try:
deps = getattr(self, 'depends', None)
except KeyError:
return list()
for dep in string2list(deps):
mystep, other = dep.split(':')
pkg,pkg_step = other.split('_',1)
ret.append((mystep, pkg, pkg_step))
return ret
def exports(self):
'''
Return all environment settings via export_* configuration items
return list of tuples: (variable, value, operator) for exports
eg: ('PATH', '/blah/blah', 'prepend')
'''
ret = list()
for key,val in self._config.items():
if not key.startswith('export_'):
continue
var = key[len('export_'):]
oper = 'set'
for maybe in ['prepend', 'append', 'set']:
if val.startswith(maybe+':'):
oper = maybe
val = val[len(maybe)+1:]
ret.append((var, val, oper))
return ret
# Augment the task generator with worch-specific methods
from waflib.TaskGen import taskgen_method
@taskgen_method
def worch_hello(self):
'Just testing'
print ("%s" % self.worch.format('Hi from worch, my name is "{package}/{version}" and I am using "{dumpenv_cmd}" with extra {extra}', extra='spice'))
print ('My bld.env: %s' % (self.bld.env.keys(),))
print ('My all_envs: %s' % (sorted(self.bld.all_envs.keys()),))
print ('My env: %s' % (self.env.keys(),))
print ('My groups: %s' % (self.env['orch_group_dict'].keys(),))
print ('My packages: %s' % (self.env['orch_package_list'],))
# print ('My package dict: %s' % '\n'.join(['%s=%s' %kv for kv in sorted(self.bld.env['orch_package_dict'][self.worch.package].items())]))
@taskgen_method
def step(self, name, rule, **kwds):
'''
Make a worch installation step.
This invokes the build context on the rule with the following augmentations:
- the given step name is prefixed with the package name
- if the rule is a string (scriptlet) then the worch exec_command is used
- successful execution of the rule leads to a worch control file being produced.
'''
step_name = '%s_%s' % (self.worch.package, name)
# append control file as an additional output
target = string2list(kwds.get('target', ''))
if not isinstance(target, list):
target = [target]
cn = self.control_node(name)
if not cn in target:
target.append(cn)
kwds['target'] = target
kwds.setdefault('env', self.env)
cwd = kwds.get('cwd')
if not cwd:
cwd = default_step_cwd.get(name)
if cwd:
cwd = self.worch.format(cwd)
cwd = self.make_node(cwd)
msg.debug('orch: using cwd for step "%s": %s' % (step_name, cwd.abspath()))
kwds['cwd'] = cwd.abspath()
depends = self.worch.depends_step(name)
after = string2list(kwds.get('after',[])) + depends
if after:
kwds['after'] = after
msg.debug('orch: run %s AFTER: %s' % (step_name, after))
# functionalize scriptlet
rulefun = rule
if isinstance(rule, type('')):
rulefun = lambda t: exec_command(t, rule)
# curry the real rule function in order to write control file if successful
def runit(t):
rc = rulefun(t)
if not rc:
msg.debug('orch: successfully ran %s' % step_name)
cn.write(time.asctime(time.localtime()) + '\n')
return rc
# msg.debug('orch: step "%s" with %s in %s\nsource=%s\ntarget=%s' % \
# (step_name, rulefun, cwd, kwds.get('source'), kwds.get('target')))
# have to switch group each time as steps are called already asynchronously
self.bld.set_group(self.worch.group)
return self.bld(name=step_name, rule = runit, **kwds)
@taskgen_method
def control_node(self, step, package = None):
'''
Return a node for the control file given step of this package or optionally another package.
'''
if not package:
package = self.worch.package
filename = '%s_%s' % (package, step)
path = self.worch.format('{control_dir}/{filename}', filename=filename)
return self.path.find_or_declare(path)
@taskgen_method
def make_node(self, path, parent_node=None):
if not parent_node:
if path.startswith('/'):
parent_node = self.bld.root
else:
parent_node = self.bld.bldnode
return parent_node.make_node(path)
import waflib.Logs as msg
from waflib.Build import BuildContext
def worch_package(ctx, worch_config, *args, **kw):
# transfer waf-specific keywords explicitly
kw['name'] = worch_config['package']
kw['features'] = ' '.join(string2list(worch_config['features']))
kw['use'] = worch_config.get('use')
# make the TaskGen object for the package
worch=WorchConfig(**worch_config)
tgen = ctx(*args, worch=worch, **kw)
tgen.env = ctx.all_envs[worch.package]
tgen.env.env = tgen.env.munged_env
msg.debug('orch: package "%s" with features: %s' % \
(kw['name'], ', '.join(kw['features'].split())))
return tgen
BuildContext.worch_package = worch_package
del worch_package
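# Illustrative wscript sketch (an assumption, not from this module): per the
# module docstring above, a project's wscript only needs to load this tool in
# each phase, e.g.
#
#   def options(opt):
#       opt.load('orch.tools', tooldir='.')
#
#   def configure(cfg):
#       cfg.load('orch.tools', tooldir='.')
#
#   def build(bld):
#       bld.load('orch.tools', tooldir='.')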
| hwaf/hwaf | py-hwaftools/orch/tools.py | Python | bsd-3-clause | 7,185 | 0.010717 |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 27 13:35:59 2017
@author: mkammoun.lct
"""
import numpy as np
import matplotlib.pyplot as pl
from bisect import bisect
import math
n=200
n2=10000
def per(theta,n):
perm=[]
for i in range(1,n+1):
if np.random.binomial(1,theta/(float(theta)+i-1))==1:
perm.append(i)
else:
j=np.random.randint(i-1)
k=perm[j]
perm[j]=i
perm.append(k)
return perm
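# Added note: per(theta, n) samples a permutation of {1, ..., n} from the
# Ewens(theta) distribution via a sequential, Chinese-restaurant-style
# construction; theta = 1 reduces to a uniform random permutation.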
per(0.1,1000)
def RSK(p):
    '''Given a permutation p, build the RSK insertion tableau P and return its shape (row lengths)'''
P = []; Q = []
def insert(m, n=0):
        '''Insert m into P by row insertion (the position argument n is unused here)'''
for r in range(len(P)):
if m > P[r][-1]:
P[r].append(m);
return
c = bisect(P[r], m)
P[r][c],m = m,P[r][c]
P.append([m])
return P
for i in range(len(p)):
insert(int(p[i]), i+1)
return map(len,P)
def pointspos(per):
rsk=RSK(per)
return [rsk[i]-i-1 for i in range(len(rsk)) if (rsk[i]-i -1) >=0]
pointspos([1,2,3])
## only the points between [-3 sqrt(n) and 3 sqrt(n)]
alea1={}
alea2={}
for i in range(int(3*n**0.5)+1):
alea1[i]=0
alea2[i]=0
for j in range(n2):
per_unif=np.random.permutation(range(1,np.random.poisson(n)+1))
per_ewens=per(0.1,np.random.poisson(n))
print j
p1=pointspos(per_unif)
p2=pointspos(per_ewens)
for i in p1 :
if i<3*n**0.5:
alea1[i]+=1
for i in p2 :
if i<3*n**0.5:
alea2[i]+=1
x=range(int(3*n**0.5+1))
a1=np.array([alea1[i]for i in x])/float(n2)
a2=np.array([alea2[i]for i in x])/float(n2)
x2=np.array(range(int(1000*2*n**0.5+1)))/1000
a3=np.array(np.arccos(np.array(x2)/(2*n**0.5)))/math.pi
pl.plot(x,a1,"*",label="uniform")
pl.plot(x,a2,"+",label="Ewens")
pl.plot(x2,a3,label="approximation sinus")
pl.legend() | kammmoun/PFE | codes/Ewens&uniform+RSK_rho_1.py | Python | apache-2.0 | 2,011 | 0.03083 |
import datetime
import os
from unittest import mock
import pytest
from nanogen import models
example_post = """\
# Test Post
And this is my _markdown_ **content**.
Look, it also has:
* an
* unordered
* list
"""
example_config = """\
[site]
author = Example user
email = user@example.com
description = A test description
url = http://www.example.com
title = Test Example
"""
def test_post(tmpdir):
f = tmpdir.mkdir('blog').join('2018-01-01-test-post.md')
f.write(example_post)
file_path = os.path.join(str(tmpdir), 'blog', '2018-01-01-test-post.md')
p = models.Post(str(tmpdir), file_path)
assert p.filename == '2018-01-01-test-post.md'
assert p.title == 'Test Post'
assert p.raw_content == example_post
expected_markdown = example_post.strip().splitlines()
assert p.markdown_content == '\n'.join(expected_markdown[2:])
assert p.pub_date == datetime.datetime(2018, 1, 1, 0, 0, 0)
assert p.slug == 'test-post'
assert p.html_filename == 'test-post.html'
assert p.permapath == os.path.join(str(tmpdir), '2018', '01', 'test-post.html')
assert p.permalink == os.path.join('2018', '01', 'test-post.html')
def test_blog_create(tmpdir):
path = tmpdir.mkdir('blog')
config_file = path.join('blog.cfg')
config_file.write(example_config)
blog = models.Blog(str(path))
assert len(blog.posts) == 0
assert blog.config['site']['author'] == 'Example user'
assert blog.config['site']['email'] == 'user@example.com'
assert blog.config['site']['description'] == 'A test description'
assert blog.config['site']['url'] == 'http://www.example.com'
assert blog.config['site']['title'] == 'Test Example'
def test_blog_init(tmpdir):
path = tmpdir.mkdir('blog')
blog = models.Blog(str(path))
blog.init()
listing = [os.path.basename(str(file)) for file in path.listdir()]
assert len(listing) == 4
assert 'blog.cfg' in listing
assert '_layout' in listing
assert '_posts' in listing
assert '_drafts' in listing
def test_blog_new_post(tmpdir):
path = tmpdir.mkdir('blog')
blog = models.Blog(str(path))
blog.init()
before_posts = blog.collect_posts()
assert len(before_posts) == 0
with mock.patch('subprocess.call'):
blog.new_post('Test title', draft=False)
after_posts = blog.collect_posts()
assert len(after_posts) == 1
today = datetime.date.today()
expected_filename = '{}-{:02d}-{:02d}-test-title.md'.format(
today.year,
today.month,
today.day
)
assert after_posts[0].filename == expected_filename
def test_blog_new_draft(tmpdir):
path = tmpdir.mkdir('blog')
blog = models.Blog(str(path))
blog.init()
before_posts = blog.collect_posts()
assert len(before_posts) == 0
with mock.patch('subprocess.call'):
blog.new_post('Test title', draft=True)
after_posts = blog.collect_posts()
assert len(after_posts) == 0
def test_blog_copy_static_files(tmpdir):
path = tmpdir.mkdir('blog')
site_path = path.mkdir('_site')
    # Add a static file to the project
blog = models.Blog(str(path))
blog.init()
blog.copy_static_files()
site_static_path = site_path.join('static')
static_files = [os.path.basename(str(file)) for file in site_static_path.listdir()]
assert 'blog.css' in static_files
def test_blog_generate_posts(tmpdir):
path = tmpdir.mkdir('blog')
site_path = path.mkdir('_site')
# Set up a nanogen blog for posts
blog = models.Blog(str(path))
blog.init()
with mock.patch('subprocess.call'):
blog.new_post('Test title 1', draft=False)
post_template = path.join('_layout').join('post.html')
post_template.write("""\
<!doctype html>
<html>
<body>Single post template would go here.</body>
</html>
""")
# Refresh the blog instance to better emulate real-world usage
blog = models.Blog(str(path))
blog.generate_posts()
today = datetime.date.today()
expected_post_dir = site_path.join('{}'.format(today.year)).join('{:02d}'.format(today.month))
generated_posts = [os.path.basename(str(file)) for file in expected_post_dir.listdir()]
assert len(generated_posts) == 1
assert 'test-title-1.html' in generated_posts
def test_blog_generate_index_page(tmpdir):
path = tmpdir.mkdir('blog')
site_path = path.mkdir('_site')
# Set up a nanogen blog for posts
blog = models.Blog(str(path))
blog.init()
with mock.patch('subprocess.call'):
blog.new_post('Test title 1', draft=False)
# Refresh the blog instance to better emulate real-world usage
blog = models.Blog(str(path))
blog.generate_index_page()
site_dir = [os.path.basename(str(file)) for file in site_path.listdir()]
assert 'index.html' in site_dir
def test_blog_generate_feeds_no_feed_files(tmpdir):
path = tmpdir.mkdir('blog')
site_path = path.mkdir('_site')
# Set up a nanogen blog for posts
blog = models.Blog(str(path))
blog.init()
# Remove the feed files
os.unlink(os.path.join(blog.PATHS['layout'], 'rss.xml'))
os.unlink(os.path.join(blog.PATHS['layout'], 'feed.json'))
# Refresh the blog instance to better emulate real-world usage
blog = models.Blog(str(path))
blog.generate_feeds()
site_dir = [os.path.basename(str(file)) for file in site_path.listdir()]
assert 'rss.xml' not in site_dir
assert 'feed.json' not in site_dir
def test_blog_feeds(tmpdir):
path = tmpdir.mkdir('blog')
site_path = path.mkdir('_site')
# Set up a nanogen blog for posts
blog = models.Blog(str(path))
blog.init()
with mock.patch('subprocess.call'):
blog.new_post('Test title 1', draft=False)
# Refresh the blog instance to better emulate real-world usage
blog = models.Blog(str(path))
blog.generate_feeds()
site_dir = [os.path.basename(str(file)) for file in site_path.listdir()]
assert 'rss.xml' in site_dir
assert 'feed.json' in site_dir
def test_blog_build_and_clean(tmpdir):
path = tmpdir.mkdir('blog')
site_path = path.mkdir('_site')
# Set up a nanogen blog for posts
blog = models.Blog(str(path))
blog.init()
with mock.patch('subprocess.call'):
blog.new_post('Test title 1', draft=False)
post_template = path.join('_layout').join('post.html')
post_template.write("""\
<!doctype html>
<html>
<body>Post template would go here.</body>
</html>
""")
index_template = path.join('_layout').join('index.html')
index_template.write("""\
<!doctype html>
<html>
<body>Index template would go here.</body>
</html>
""")
blog_config = path.join('_layout').join('blog.cfg')
blog_config.write(example_config)
# Refresh the blog instance to better emulate real-world usage
blog = models.Blog(str(path))
blog.build()
site_dir = [os.path.basename(str(file)) for file in site_path.listdir()]
assert 'index.html' in site_dir
today = datetime.date.today()
expected_post_dir = site_path.join('{}'.format(today.year)).join('{:02d}'.format(today.month))
generated_posts = [os.path.basename(str(file)) for file in expected_post_dir.listdir()]
assert len(generated_posts) == 1
assert 'test-title-1.html' in generated_posts
blog.clean()
assert not os.path.isdir(str(site_path))
def test_blog_build_and_clean_with_drafts(tmpdir):
path = tmpdir.mkdir('blog')
preview_path = path.mkdir('_preview')
# Set up a nanogen blog for posts
blog = models.Blog(str(path))
blog.init()
with mock.patch('subprocess.call'):
blog.new_post('Test post', draft=False)
blog.new_post('Draft post', draft=True)
post_template = path.join('_layout').join('post.html')
post_template.write("""\
<!doctype html>
<html>
<body>Post template would go here.</body>
</html>
""")
index_template = path.join('_layout').join('index.html')
index_template.write("""\
<!doctype html>
<html>
<body>Index template would go here.</body>
</html>
""")
blog_config = path.join('_layout').join('blog.cfg')
blog_config.write(example_config)
# Refresh the blog instance to better emulate real-world usage
blog = models.Blog(str(path), is_preview=True)
blog.build()
site_dir = [os.path.basename(str(file)) for file in preview_path.listdir()]
assert 'index.html' in site_dir
today = datetime.date.today()
expected_post_dir = preview_path.join('{}'.format(today.year)).join('{:02d}'.format(today.month))
generated_posts = [os.path.basename(str(file)) for file in expected_post_dir.listdir()]
assert len(generated_posts) == 2
assert 'test-post.html' in generated_posts
assert 'draft-post.html' in generated_posts
blog.clean()
assert not os.path.isdir(str(preview_path))
def test_publish(tmpdir):
path = tmpdir.mkdir('blog')
blog = models.Blog(str(path))
blog.init()
with mock.patch('subprocess.call'):
blog.new_post('Draft post', draft=True)
today = datetime.date.today()
posts_dir = path.join('_posts')
drafts_dir = path.join('_drafts')
expected_filename = '{}-{:02d}-{:02d}-draft-post.md'.format(
today.year,
today.month,
today.day
)
blog.publish(expected_filename)
posts = [os.path.basename(str(file)) for file in posts_dir.listdir()]
drafts = [os.path.basename(str(file)) for file in drafts_dir.listdir()]
assert len(drafts) == 0
assert expected_filename in posts
def test_publish_raises_value_error(tmpdir):
path = tmpdir.mkdir('blog')
blog = models.Blog(str(path))
blog.init()
with mock.patch('subprocess.call'):
blog.new_post('Draft post', draft=True)
today = datetime.date.today()
posts_dir = path.join('_posts')
drafts_dir = path.join('_drafts')
failing_filename = 'post-doesnt-exist.md'
expected_filename = '{}-{:02d}-{:02d}-draft-post.md'.format(
today.year,
today.month,
today.day
)
with pytest.raises(ValueError) as e_info:
blog.publish(failing_filename)
posts = [os.path.basename(str(file)) for file in posts_dir.listdir()]
drafts = [os.path.basename(str(file)) for file in drafts_dir.listdir()]
assert len(posts) == 0
assert expected_filename in drafts
| epochblue/nanogen | tests/test_models.py | Python | mit | 10,406 | 0.001057 |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Adrien Vergé
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from tests.common import RuleTestCase
class YamllintDirectivesTestCase(RuleTestCase):
conf = ('commas: disable\n'
'trailing-spaces: {}\n'
'colons: {max-spaces-before: 1}\n')
def test_disable_directive(self):
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces \n'
'- bad : colon\n'
'- [valid , YAML]\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(3, 18, 'trailing-spaces'),
problem2=(4, 8, 'colons'),
problem3=(6, 7, 'colons'),
problem4=(6, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces \n'
'# yamllint disable\n'
'- bad : colon\n'
'- [valid , YAML]\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem=(3, 18, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'# yamllint disable\n'
'- trailing spaces \n'
'- bad : colon\n'
'- [valid , YAML]\n'
'# yamllint enable\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(8, 7, 'colons'),
problem2=(8, 26, 'trailing-spaces'))
def test_disable_directive_with_rules(self):
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces \n'
'# yamllint disable rule:trailing-spaces\n'
'- bad : colon\n'
'- [valid , YAML]\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(3, 18, 'trailing-spaces'),
problem2=(5, 8, 'colons'),
problem3=(7, 7, 'colons'))
self.check('---\n'
'- [valid , YAML]\n'
'# yamllint disable rule:trailing-spaces\n'
'- trailing spaces \n'
'- bad : colon\n'
'- [valid , YAML]\n'
'# yamllint enable rule:trailing-spaces\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(5, 8, 'colons'),
problem2=(8, 7, 'colons'),
problem3=(8, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'# yamllint disable rule:trailing-spaces\n'
'- trailing spaces \n'
'- bad : colon\n'
'- [valid , YAML]\n'
'# yamllint enable\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(5, 8, 'colons'),
problem2=(8, 7, 'colons'),
problem3=(8, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'# yamllint disable\n'
'- trailing spaces \n'
'- bad : colon\n'
'- [valid , YAML]\n'
'# yamllint enable rule:trailing-spaces\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem=(8, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'# yamllint disable rule:colons\n'
'- trailing spaces \n'
'# yamllint disable rule:trailing-spaces\n'
'- bad : colon\n'
'- [valid , YAML]\n'
'# yamllint enable rule:colons\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(4, 18, 'trailing-spaces'),
problem2=(9, 7, 'colons'))
def test_disable_line_directive(self):
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces \n'
'# yamllint disable-line\n'
'- bad : colon\n'
'- [valid , YAML]\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(3, 18, 'trailing-spaces'),
problem2=(7, 7, 'colons'),
problem3=(7, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces \n'
'- bad : colon # yamllint disable-line\n'
'- [valid , YAML]\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(3, 18, 'trailing-spaces'),
problem2=(6, 7, 'colons'),
problem3=(6, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces \n'
'- bad : colon\n'
'- [valid , YAML] # yamllint disable-line\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(3, 18, 'trailing-spaces'),
problem2=(4, 8, 'colons'),
problem3=(6, 7, 'colons'),
problem4=(6, 26, 'trailing-spaces'))
def test_disable_line_directive_with_rules(self):
self.check('---\n'
'- [valid , YAML]\n'
'# yamllint disable-line rule:colons\n'
'- trailing spaces \n'
'- bad : colon\n'
'- [valid , YAML]\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(4, 18, 'trailing-spaces'),
problem2=(5, 8, 'colons'),
problem3=(7, 7, 'colons'),
problem4=(7, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces # yamllint disable-line rule:colons \n'
'- bad : colon\n'
'- [valid , YAML]\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(3, 55, 'trailing-spaces'),
problem2=(4, 8, 'colons'),
problem3=(6, 7, 'colons'),
problem4=(6, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces \n'
'# yamllint disable-line rule:colons\n'
'- bad : colon\n'
'- [valid , YAML]\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(3, 18, 'trailing-spaces'),
problem2=(7, 7, 'colons'),
problem3=(7, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces \n'
'- bad : colon # yamllint disable-line rule:colons\n'
'- [valid , YAML]\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(3, 18, 'trailing-spaces'),
problem2=(6, 7, 'colons'),
problem3=(6, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces \n'
'- bad : colon\n'
'- [valid , YAML]\n'
'# yamllint disable-line rule:colons\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(3, 18, 'trailing-spaces'),
problem2=(4, 8, 'colons'),
problem3=(7, 26, 'trailing-spaces'))
self.check('---\n'
'- [valid , YAML]\n'
'- trailing spaces \n'
'- bad : colon\n'
'- [valid , YAML]\n'
'# yamllint disable-line rule:colons rule:trailing-spaces\n'
'- bad : colon and spaces \n'
'- [valid , YAML]\n',
self.conf,
problem1=(3, 18, 'trailing-spaces'),
problem2=(4, 8, 'colons'))
def test_disable_directive_with_rules_and_dos_lines(self):
conf = self.conf + 'new-lines: {type: dos}\n'
self.check('---\r\n'
'- [valid , YAML]\r\n'
'# yamllint disable rule:trailing-spaces\r\n'
'- trailing spaces \r\n'
'- bad : colon\r\n'
'- [valid , YAML]\r\n'
'# yamllint enable rule:trailing-spaces\r\n'
'- bad : colon and spaces \r\n'
'- [valid , YAML]\r\n',
conf,
problem1=(5, 8, 'colons'),
problem2=(8, 7, 'colons'),
problem3=(8, 26, 'trailing-spaces'))
self.check('---\r\n'
'- [valid , YAML]\r\n'
'- trailing spaces \r\n'
'- bad : colon\r\n'
'- [valid , YAML]\r\n'
'# yamllint disable-line rule:colons\r\n'
'- bad : colon and spaces \r\n'
'- [valid , YAML]\r\n',
conf,
problem1=(3, 18, 'trailing-spaces'),
problem2=(4, 8, 'colons'),
problem3=(7, 26, 'trailing-spaces'))
def test_directive_on_last_line(self):
conf = 'new-line-at-end-of-file: {}'
self.check('---\n'
'no new line',
conf,
problem=(2, 12, 'new-line-at-end-of-file'))
self.check('---\n'
'# yamllint disable\n'
'no new line',
conf)
self.check('---\n'
'no new line # yamllint disable',
conf)
def test_indented_directive(self):
conf = 'brackets: {min-spaces-inside: 0, max-spaces-inside: 0}'
self.check('---\n'
'- a: 1\n'
' b:\n'
' c: [ x]\n',
conf,
problem=(4, 12, 'brackets'))
self.check('---\n'
'- a: 1\n'
' b:\n'
' # yamllint disable-line rule:brackets\n'
' c: [ x]\n',
conf)
def test_directive_on_itself(self):
conf = ('comments: {min-spaces-from-content: 2}\n'
'comments-indentation: {}\n')
self.check('---\n'
'- a: 1 # comment too close\n'
' b:\n'
' # wrong indentation\n'
' c: [x]\n',
conf,
problem1=(2, 8, 'comments'),
problem2=(4, 2, 'comments-indentation'))
self.check('---\n'
'# yamllint disable\n'
'- a: 1 # comment too close\n'
' b:\n'
' # wrong indentation\n'
' c: [x]\n',
conf)
self.check('---\n'
'- a: 1 # yamllint disable-line\n'
' b:\n'
' # yamllint disable-line\n'
' # wrong indentation\n'
' c: [x]\n',
conf)
self.check('---\n'
'- a: 1 # yamllint disable-line rule:comments\n'
' b:\n'
' # yamllint disable-line rule:comments-indentation\n'
' # wrong indentation\n'
' c: [x]\n',
conf)
self.check('---\n'
'# yamllint disable\n'
'- a: 1 # comment too close\n'
' # yamllint enable rule:comments-indentation\n'
' b:\n'
' # wrong indentation\n'
' c: [x]\n',
conf,
problem=(6, 2, 'comments-indentation'))
def test_disable_file_directive(self):
conf = ('comments: {min-spaces-from-content: 2}\n'
'comments-indentation: {}\n')
self.check('# yamllint disable-file\n'
'---\n'
'- a: 1 # comment too close\n'
' b:\n'
' # wrong indentation\n'
' c: [x]\n',
conf)
self.check('# yamllint disable-file\n'
'---\n'
'- a: 1 # comment too close\n'
' b:\n'
' # wrong indentation\n'
' c: [x]\n',
conf)
self.check('#yamllint disable-file\n'
'---\n'
'- a: 1 # comment too close\n'
' b:\n'
' # wrong indentation\n'
' c: [x]\n',
conf)
self.check('#yamllint disable-file \n'
'---\n'
'- a: 1 # comment too close\n'
' b:\n'
' # wrong indentation\n'
' c: [x]\n',
conf)
self.check('---\n'
'# yamllint disable-file\n'
'- a: 1 # comment too close\n'
' b:\n'
' # wrong indentation\n'
' c: [x]\n',
conf,
problem1=(3, 8, 'comments'),
problem2=(5, 2, 'comments-indentation'))
self.check('# yamllint disable-file: rules cannot be specified\n'
'---\n'
'- a: 1 # comment too close\n'
' b:\n'
' # wrong indentation\n'
' c: [x]\n',
conf,
problem1=(3, 8, 'comments'),
problem2=(5, 2, 'comments-indentation'))
self.check('AAAA yamllint disable-file\n'
'---\n'
'- a: 1 # comment too close\n'
' b:\n'
' # wrong indentation\n'
' c: [x]\n',
conf,
problem1=(1, 1, 'document-start'),
problem2=(3, 8, 'comments'),
problem3=(5, 2, 'comments-indentation'))
def test_disable_file_directive_not_at_first_position(self):
self.check('# yamllint disable-file\n'
'---\n'
'- bad : colon and spaces \n',
self.conf)
self.check('---\n'
'# yamllint disable-file\n'
'- bad : colon and spaces \n',
self.conf,
problem1=(3, 7, 'colons'),
problem2=(3, 26, 'trailing-spaces'))
def test_disable_file_directive_with_syntax_error(self):
self.check('# This file is not valid YAML (it is a Jinja template)\n'
'{% if extra_info %}\n'
'key1: value1\n'
'{% endif %}\n'
'key2: value2\n',
self.conf,
problem=(2, 2, 'syntax'))
self.check('# yamllint disable-file\n'
'# This file is not valid YAML (it is a Jinja template)\n'
'{% if extra_info %}\n'
'key1: value1\n'
'{% endif %}\n'
'key2: value2\n',
self.conf)
def test_disable_file_directive_with_dos_lines(self):
self.check('# yamllint disable-file\r\n'
'---\r\n'
'- bad : colon and spaces \r\n',
self.conf)
self.check('# yamllint disable-file\r\n'
'# This file is not valid YAML (it is a Jinja template)\r\n'
'{% if extra_info %}\r\n'
'key1: value1\r\n'
'{% endif %}\r\n'
'key2: value2\r\n',
self.conf)
| adrienverge/yamllint | tests/test_yamllint_directives.py | Python | gpl-3.0 | 17,973 | 0 |
"""
Module to handle distortions in diffraction patterns.
"""
import numpy as np
import scipy.optimize
def filter_ring(points, center, rminmax):
"""Filter points to be in a certain radial distance range from center.
Parameters
----------
points : np.ndarray
Candidate points.
center : np.ndarray or tuple
Center position.
rminmax : tuple
Tuple of min and max radial distance.
Returns
-------
: np.ndarray
List of filtered points, two column array.
"""
try:
# points have to be 2D array with 2 columns
assert(isinstance(points, np.ndarray))
assert(points.shape[1] == 2)
assert(len(points.shape) == 2)
# center can either be tuple or np.array
center = np.array(center)
center = np.reshape(center, 2)
rminmax = np.array(rminmax)
rminmax = np.reshape(rminmax, 2)
except:
raise TypeError('Something wrong with the input!')
# calculate radii
rs = np.sqrt( np.square(points[:,0]-center[0]) + np.square(points[:,1]-center[1]) )
# filter by given limits
sel = (rs>=rminmax[0])*(rs<=rminmax[1])
if sel.any():
return points[sel]
else:
return None
def points_topolar(points, center):
"""Convert points to polar coordinate system.
Can be either in pixel or real dim, but should be the same for points and center.
Parameters
----------
points : np.ndarray
Positions as two column array.
center : np.ndarray or tuple
Origin of the polar coordinate system.
Returns
-------
: np.ndarray
Positions in polar coordinate system as two column array (r, theta).
"""
try:
# points have to be 2D array with 2 columns
assert(isinstance(points, np.ndarray))
assert(points.shape[1] == 2)
assert(len(points.shape) == 2)
# center can either be tuple or np.array
center = np.array(center)
center = np.reshape(center, 2)
except:
raise TypeError('Something wrong with the input!')
# calculate radii
rs = np.sqrt( np.square(points[:,0]-center[0]) + np.square(points[:,1]-center[1]) )
# calculate angle
thes = np.arctan2(points[:,1]-center[1], points[:,0]-center[0])
return np.array( [rs, thes] ).transpose()
def residuals_center( param, data):
"""Residual function for minimizing the deviations from the mean radial distance.
Parameters
----------
param : np.ndarray
The center to optimize.
data : np.ndarray
The points in x,y coordinates of the original image.
Returns
-------
: np.ndarray
Residuals.
"""
# manually calculating the radii, as we do not need the thetas
rs = np.sqrt( np.square(data[:,0]-param[0]) + np.square(data[:,1]-param[1]) )
return rs-np.mean(rs)
def optimize_center(points, center, maxfev=1000, verbose=None):
"""Optimize the center by minimizing the sum of square deviations from the mean radial distance.
Parameters
----------
points : np.ndarray
The points to which the optimization is done (x,y coords in org image).
center : np.ndarray or tuple
Initial center guess.
maxfev : int
Max number of iterations forwarded to scipy.optimize.leastsq().
verbose : bool
Set to get verbose output.
Returns
-------
: np.ndarray
The optimized center.
"""
try:
# points have to be 2D array with 2 columns
assert(isinstance(points, np.ndarray))
assert(points.shape[1] == 2)
assert(len(points.shape) == 2)
# center can either be tuple or np.array
center = np.array(center)
center = np.reshape(center, 2)
except:
raise TypeError('Something wrong with the input!')
# run the optimization
popt, flag = scipy.optimize.leastsq(residuals_center, center, args=points, maxfev=maxfev)
if flag not in [1, 2, 3, 4]:
print('WARNING: center optimization failed.')
if verbose:
        print('optimized center: ({}, {})'.format(popt[0], popt[1]))
return popt
def rad_dis(theta, alpha, beta, order=2):
"""Radial distortion due to ellipticity or higher order distortion.
Relative distortion, to be multiplied with radial distance.
Parameters
----------
theta : np.ndarray
Angles at which to evaluate. Must be float.
alpha : float
Orientation of major axis.
beta : float
        Strength of distortion (beta = (1-r_min/r_max)/(1+r_min/r_max)).
order : int
Order of distortion.
Returns
-------
: np.ndarray
Distortion factor.
"""
return (1.-np.square(beta))/np.sqrt(1.+np.square(beta)-2.*beta*np.cos(order*(theta+alpha)))
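# Worked illustration (added note): with beta = 0.05 the factor above swings
# between (1 - beta**2)/(1 + beta) = 0.95 when cos(order*(theta+alpha)) = -1
# and (1 - beta**2)/(1 - beta) = 1.05 when it is +1, so with order=2 a circle
# of radius r is mapped onto an ellipse-like ring between 0.95*r and 1.05*r.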
def residuals_dis(param, points, ns):
"""Residual function for distortions.
Parameters
----------
param : np.ndarray
Parameters for distortion.
points : np.ndarray
Points to fit to.
ns : tuple
List of orders to account for.
Returns
-------
: np.ndarray
Residuals.
"""
est = param[0]*np.ones(points[:, 1].shape)
for i in range(len(ns)):
        est *= rad_dis(points[:, 1], param[i*2+1], param[i*2+2], ns[i])
return points[:, 0] - est
def optimize_distortion(points, ns, maxfev=1000, verbose=False):
"""Optimize distortions.
    The orders in the list ns are first fitted one after another; the result is then refined in a final fit that treats all orders simultaneously.
Parameters
----------
points : np.ndarray
Points to optimize to (in polar coords).
ns : tuple
List of orders to correct for.
maxfev : int
Max number of iterations forwarded to scipy.optimize.leastsq().
verbose : bool
Set for verbose output.
Returns
-------
: np.ndarray
Optimized parameters according to ns.
"""
try:
assert(isinstance(points, np.ndarray))
assert(points.shape[1] == 2)
# check points to be sufficient for fitting
assert(points.shape[0] >= 3)
# check orders
assert(len(ns)>=1)
except:
raise TypeError('Something wrong with the input!')
# init guess for full fit
init_guess = np.ones(len(ns)*2+1)
init_guess[0] = np.mean(points[:,0])
# make a temporary copy
points_tmp = np.copy(points)
if verbose:
print('correction for {} order distortions.'.format(ns))
print('starting with subsequent fitting:')
# subsequently fit the orders
for i in range(len(ns)):
# optimize order to points_tmp
popt, flag = scipy.optimize.leastsq(residuals_dis, np.array((init_guess[0], 0.1, 0.1)),
args=(points_tmp, (ns[i],)), maxfev=maxfev)
if flag not in [1, 2, 3, 4]:
print('WARNING: optimization of distortions failed.')
# information
if verbose:
print('fitted order {}: R={} alpha={} beta={}'.format(ns[i], popt[0], popt[1], popt[2]))
# save for full fit
init_guess[i*2+1] = popt[1]
init_guess[i*2+2] = popt[2]
# do correction
points_tmp[:, 0] /= rad_dis(points_tmp[:, 1], popt[1], popt[2], ns[i])
# full fit
if verbose:
print('starting the full fit:')
popt, flag = scipy.optimize.leastsq(residuals_dis, init_guess, args=(points, ns), maxfev=maxfev)
if flag not in [1, 2, 3, 4]:
print('WARNING: optimization of distortions failed.')
if verbose:
print('fitted to: R={}'.format(popt[0]))
for i in range(len(ns)):
print('.. order={}, alpha={}, beta={}'.format(ns[i], popt[i*2+1], popt[i*2+2]))
return popt
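# --- Usage sketch (added example, not part of the original module) ---
# A minimal illustration of how the helpers above chain together; the
# synthetic ring, the guessed center and the radial window are assumptions
# made purely for this example.
def _example_workflow():
    # synthetic, slightly elliptical ring of peaks around (256, 256)
    thetas = np.linspace(0., 2.*np.pi, 90, endpoint=False)
    radii = 100.*rad_dis(thetas, alpha=0.3, beta=0.05, order=2)
    points = np.stack([256. + radii*np.cos(thetas),
                       256. + radii*np.sin(thetas)], axis=1)
    # keep only candidate peaks close to the expected ring radius
    ring = filter_ring(points, (250., 250.), (80., 120.))
    # refine the center, then express the peaks in polar coordinates around it
    center = optimize_center(ring, (250., 250.), verbose=True)
    polar = points_topolar(ring, center)
    # fit a second-order (elliptical) distortion to the ring
    return center, optimize_distortion(polar, (2,), verbose=True)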
| ercius/openNCEM | ncempy/algo/distortion.py | Python | gpl-3.0 | 8,278 | 0.010993 |
from db import get_engines,get_sessions,Song,Track,Note
from iter import TimeIterator
from utils import Counter
from sqlalchemy.orm import sessionmaker
from preference_rules import *
import music21,sys
from optparse import OptionParser
from multiprocessing import Process,Queue
class ChordSpan(object):
"""
A ChordSpan is a series of TimeInstances that all have the same root.
Each ChordSpan also maintains a pointer (prev_cs) to the previous ChordSpan computed in the song.
"""
def __init__(self,initial_ts,prev_cs):
"""
Initialize a ChordSpan
Args:
initial_ts: the first TimeInstance to consider
prev_cs: the previous TimeInstance
"""
self.tss = [initial_ts]
self.root = None
# a back-pointer to the previous best chord-span
self.prev_cs = prev_cs
def __repr__(self):
return "<ChordSpan: root=%r>" % (self.root)
def last_ts(self):
"""
Calculate and return the last TimeInstance in this ChordSpan.
Returns:
TimeInstance: the last time instance in the ChordSpan
"""
return max(self.tss,key=lambda ts: ts.time)
def add(self,ts):
"""
Add a TimeInstance to this ChordSpan
Args:
ts: the TimeInstance to add
"""
self.tss.append(ts)
def remove(self,ts):
"""
Remove a TimeInstance from this ChordSpan
Args:
ts: the TimeInstance to remove
"""
self.tss.remove(ts)
def notes(self):
"""
Flatten all notes in the TimeInstances that comprise this ChordSpan.
Returns:
All notes played in this ChordSpan
"""
res = []
# iterate through all chords
for ts in self.tss:
# all notes in this time instance
for note in ts.notes():
res.append(note)
return res
def roman_numeral(self,track):
"""
Calculate the roman numeral corresponding to the computed root and key of the corresponding track
Args:
track: The track to which a Note in this ChordSpan belongs. Note: Here we assume that at any moment in
time, there is only one key signature in all tracks of the song.
Returns:
the Music21 Roman Numeral object.
"""
pitch = music21.key.sharpsToPitch(track.key_sig_top)
key = music21.key.Key(pitch)
if track.key_sig_bottom == 0:
scale = music21.scale.MajorScale(self.root.name)
else:
scale = music21.scale.MelodicMinorScale(self.root.name)
chord = music21.chord.Chord([scale.chord.root(),scale.chord.third,scale.chord.fifth])
return music21.roman.romanNumeralFromChord(chord,key).scaleDegree
def label(self,depth=0):
"""
Label all the notes in this ChordSpan with the determined root.
Then proceed to recursively label the preceding ChordSpan
"""
rn = None
# label all the notes in this chord span
for note in self.notes():
if self.root:
note.root = self.root.midi
note.iso_root = self.root.name
if not rn:
rn = self.roman_numeral(note.track)
note.roman = rn
# label the previous chord span (assuming we haven't surpassed max recursion limit)
if self.prev_cs and depth < sys.getrecursionlimit() - 1:
            self.prev_cs.label(depth + 1)
def pr_score(self,m_root):
"""
Calculate the preference rule score, when using m_root as a root for this ChordSpan.
Note this method is the core of the Preference Rule approach to Harmonic Analysis
        Such an approach is heavily inspired by the work of Daniel Sleator and David Temperley at CMU
in their Melisma Music Analyzer: http://www.link.cs.cmu.edu/melisma/
Args:
m_root (Music21.note.Note): a note representing the proposed root of this chord
Returns:
the score obtained using this note as a root
"""
last_ts = self.last_ts()
ts_notes = last_ts.notes()
# calculate the beat strength
stren = beat_strength(ts_notes)
# compatibility scores
comp_score = compatibility(ts_notes,m_root)
# difference from previous chord root on line of fifths
lof = (lof_difference(self.prev_cs.root,m_root) if self.prev_cs else 0)
return STRENGTH_MULTIPLIER * stren + COMPATIBILITY_MULTIPLIER * comp_score + LOF_MULTIPLIER * lof
def calc_best_root(self):
"""
Calculate the best root for this chord span
Returns:
the combined score of this ChordSpan and its predecessor
"""
# start with C, weight of 0
best_root,best_weight = music21.note.Note('C'),-len(line_of_fifths)
# try all possible roots
for m_root in music21.scale.ChromaticScale('C').pitches:
val = self.pr_score(m_root)
if val > best_weight:
best_root,best_weight = m_root,val
# use this as the chord-span root
self.root = best_root
# calculate the combined score
prev_cs_score = (self.prev_cs.score if self.prev_cs else 0)
return prev_cs_score + best_weight
# increased recursion limit for dynamic programming back-pointer labelling process
RECURSION_LIMIT = 10000
class HarmonicAnalyzer(Process):
"""
Run Harmonic Analysis in a separate process.
    Such an approach is heavily inspired by the work of Daniel Sleator and David Temperley at CMU
in their Melisma Music Analyzer: http://www.link.cs.cmu.edu/melisma/
"""
def __init__(self,durk_step,engine,counter):
"""
Initialize the Harmonic Analyzer process
Args:
durk_step: steps between TimeInstances
engine: the database engine to draw songs from
counter (Counter): atomic song counter
"""
# Initialize the Process
Process.__init__(self)
# time step used in TimeIterator
self.durk_step = durk_step
# session to pull songs from
Session = sessionmaker(bind=engine)
self.session = Session()
# Counter object representing number of songs that have been processed
self.counter = counter
# increase the recursion limit
sys.setrecursionlimit(RECURSION_LIMIT)
def run(self):
"""
Start the Process. Note that this method overrides Process.run()
"""
# Iterate through every song in the database attached to this process
for song in self.session.query(Song).all():
# Atomically increment the song counter
count = self.counter.incrementAndGet()
print count, ". ", song
# skip songs that have already been analyzed
if song.analyzed:
print count, ". Already analyzed. Skipping."
continue
# and run the analysis
try:
self.analyze(song)
# mark this song as analyzed
song.analyzed = True
self.session.commit()
except Exception,e:
sys.stderr.write("Exception when processing " + str(song) + ":\n")
sys.stderr.write("\t" + str(e) + "\n")
def analyze(self,song):
"""
Run Harmonic Analysis on a particular Song
Args:
song (Song): the song to analyze
"""
cs,idx = None,0
try:
# construct the iterator
ti = TimeIterator(song,self.durk_step)
except ValueError,e:
# something is very wrong with this song... let's skip it!
sys.stderr.write("Exception when processing " + str(song) + ":\n")
sys.stderr.write("\t" + str(e) + "\n")
return False
# iterate through every TimeInstance in the song
for ts in ti:
# and consider what to do...
cs = self.consider_ts(cs,ts)
# print idx, ts, "--", cs.score, ":", cs
idx += 1
cs.label()
self.session.commit()
def consider_ts(self,cs,ts):
"""
Consider how to segment a new TimeInstance.
Note: this method is the core of the Dynamic Programming approach to Harmonic Analysis.
Args:
cs: the latest determined ChordSpan in the Song being evaluated
ts: the TimeInstance under consideration
Returns:
the new latest ChordSpan
"""
# if this is the first ChordSpan created.
if not cs:
res = ChordSpan(ts,None)
score = res.calc_best_root()
else:
# we already have a ChordSpan in progress.
# option 1: start a new chord-span
opt1_cs = ChordSpan(ts,cs)
opt1_score = cs.calc_best_root()
# option 2: add to prior segment
cs.add(ts)
opt2_score = cs.calc_best_root()
# determine which option is superior
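            # Illustration (added note, hypothetical numbers): if the span
            # scores 7.0 without the new TimeInstance (option 1) but only 5.5
            # once it has absorbed it (option 2), the instance is kept in a
            # fresh ChordSpan, which becomes the head of the back-pointer
            # chain later walked by label().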
if opt1_score > opt2_score:
cs.remove(ts)
res = opt1_cs
score = opt1_score
else:
res = cs
score = opt2_score
# set the score on this cs
res.score = score
return res
def main():
"""
Run harmonic analysis on all songs in all databases. This will take a LONG time.
"""
parser = OptionParser()
parser.add_option("-d", "--durk-step", dest="durk_step", default=4, type="int")
parser.add_option("-t", "--pool-size", dest="pool_size", default=8, type="int")
parser.add_option("-u", "--username", dest="db_username", default="postgres")
parser.add_option("-p", "--password", dest="db_password", default="postgres")
(options, args) = parser.parse_args()
print "Creating", options.pool_size, "processes."
processes = []
# Initialize the counter to 0
counter = Counter(0)
# get all database engines
engines = get_engines(options.pool_size,options.db_username,options.db_password)
# Construct a new HarmonicAnalyzer process for each database.
for i in xrange(options.pool_size):
p = HarmonicAnalyzer(options.durk_step,engines[i],counter)
processes.append(p)
# Start the processes
print "Starting", options.pool_size, "processes."
for p in processes:
p.start()
# And wait for them to finish
for p in processes:
p.join()
if __name__ == '__main__':
main()
| jasonsbrooks/ARTIST | src/artist_generator/analyze/chords.py | Python | mit | 10,666 | 0.008063 |
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/MSCommon/sdk.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
__doc__ = """Module to detect the Platform/Windows SDK
PSDK 2003 R1 is the earliest version detected.
"""
import os
import SCons.Errors
import SCons.Util
import common
debug = common.debug
# SDK Checks. This is of course a mess, like everything else on MS platforms. Here
# is what we do to detect the SDK:
#
# For Windows SDK >= 6.0: just look into the registry entries:
# HKLM\Software\Microsoft\Microsoft SDKs\Windows
# All the keys in there are the available versions.
#
# For Platform SDK before 6.0 (2003 server R1 and R2, etc...), there does not
# seem to be any sane registry key, so the precise location is hardcoded.
#
# For versions below 2003R1, it seems the PSDK is included with Visual Studio?
#
# Also, per the following:
# http://benjamin.smedbergs.us/blog/tag/atl/
# VC++ Professional comes with the SDK, VC++ Express does not.
# Location of the SDK (checked for 6.1 only)
_CURINSTALLED_SDK_HKEY_ROOT = \
r"Software\Microsoft\Microsoft SDKs\Windows\CurrentInstallFolder"
class SDKDefinition(object):
"""
An abstract base class for trying to find installed SDK directories.
"""
def __init__(self, version, **kw):
self.version = version
self.__dict__.update(kw)
def find_sdk_dir(self):
"""Try to find the MS SDK from the registry.
Return None if failed or the directory does not exist.
"""
if not SCons.Util.can_read_reg:
debug('find_sdk_dir(): can not read registry')
return None
hkey = self.HKEY_FMT % self.hkey_data
debug('find_sdk_dir(): checking registry:%s'%hkey)
try:
sdk_dir = common.read_reg(hkey)
except SCons.Util.WinError, e:
debug('find_sdk_dir(): no SDK registry key %s' % repr(hkey))
return None
debug('find_sdk_dir(): Trying SDK Dir: %s'%sdk_dir)
if not os.path.exists(sdk_dir):
debug('find_sdk_dir(): %s not on file system' % sdk_dir)
return None
ftc = os.path.join(sdk_dir, self.sanity_check_file)
if not os.path.exists(ftc):
debug("find_sdk_dir(): sanity check %s not found" % ftc)
return None
return sdk_dir
def get_sdk_dir(self):
"""Return the MSSSDK given the version string."""
try:
return self._sdk_dir
except AttributeError:
sdk_dir = self.find_sdk_dir()
self._sdk_dir = sdk_dir
return sdk_dir
def get_sdk_vc_script(self,host_arch, target_arch):
""" Return the script to initialize the VC compiler installed by SDK
"""
if (host_arch == 'amd64' and target_arch == 'x86'):
# No cross tools needed compiling 32 bits on 64 bit machine
host_arch=target_arch
arch_string=target_arch
if (host_arch != target_arch):
arch_string='%s_%s'%(host_arch,target_arch)
debug("sdk.py: get_sdk_vc_script():arch_string:%s host_arch:%s target_arch:%s"%(arch_string,
host_arch,
target_arch))
file=self.vc_setup_scripts.get(arch_string,None)
debug("sdk.py: get_sdk_vc_script():file:%s"%file)
return file
class WindowsSDK(SDKDefinition):
"""
A subclass for trying to find installed Windows SDK directories.
"""
HKEY_FMT = r'Software\Microsoft\Microsoft SDKs\Windows\v%s\InstallationFolder'
def __init__(self, *args, **kw):
SDKDefinition.__init__(self, *args, **kw)
self.hkey_data = self.version
class PlatformSDK(SDKDefinition):
"""
A subclass for trying to find installed Platform SDK directories.
"""
HKEY_FMT = r'Software\Microsoft\MicrosoftSDK\InstalledSDKS\%s\Install Dir'
def __init__(self, *args, **kw):
SDKDefinition.__init__(self, *args, **kw)
self.hkey_data = self.uuid
#
# The list of VC initialization scripts installed by the SDK
# These should be tried if the vcvarsall.bat TARGET_ARCH fails
preSDK61VCSetupScripts = { 'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\vcvarsamd64.bat',
'x86_amd64': r'bin\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\vcvarsx86_ia64.bat',
'ia64' : r'bin\vcvarsia64.bat'}
SDK61VCSetupScripts = {'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\amd64\vcvarsamd64.bat',
'x86_amd64': r'bin\x86_amd64\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\x86_ia64\vcvarsx86_ia64.bat',
'ia64' : r'bin\ia64\vcvarsia64.bat'}
SDK70VCSetupScripts = { 'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\vcvars64.bat',
'x86_amd64': r'bin\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\vcvarsx86_ia64.bat',
'ia64' : r'bin\vcvarsia64.bat'}
# The list of support SDKs which we know how to detect.
#
# The first SDK found in the list is the one used by default if there
# are multiple SDKs installed. Barring good reasons to the contrary,
# this means we should list SDKs from most recent to oldest.
#
# If you update this list, update the documentation in Tool/mssdk.xml.
SupportedSDKList = [
WindowsSDK('7.1',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('7.0A',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('7.0',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('6.1',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK61VCSetupScripts,
),
WindowsSDK('6.0A',
sanity_check_file=r'include\windows.h',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = preSDK61VCSetupScripts,
),
WindowsSDK('6.0',
sanity_check_file=r'bin\gacutil.exe',
include_subdir='include',
lib_subdir='lib',
vc_setup_scripts = preSDK61VCSetupScripts,
),
PlatformSDK('2003R2',
sanity_check_file=r'SetEnv.Cmd',
uuid="D2FF9F89-8AA2-4373-8A31-C838BF4DBBE1",
vc_setup_scripts = preSDK61VCSetupScripts,
),
PlatformSDK('2003R1',
sanity_check_file=r'SetEnv.Cmd',
uuid="8F9E5EF3-A9A5-491B-A889-C58EFFECE8B3",
vc_setup_scripts = preSDK61VCSetupScripts,
),
]
SupportedSDKMap = {}
for sdk in SupportedSDKList:
SupportedSDKMap[sdk.version] = sdk
# Finding installed SDKs isn't cheap, because it goes not only to the
# registry but also to the disk to sanity-check that there is, in fact,
# an SDK installed there and that the registry entry isn't just stale.
# Find this information once, when requested, and cache it.
InstalledSDKList = None
InstalledSDKMap = None
def get_installed_sdks():
global InstalledSDKList
global InstalledSDKMap
debug('sdk.py:get_installed_sdks()')
if InstalledSDKList is None:
InstalledSDKList = []
InstalledSDKMap = {}
for sdk in SupportedSDKList:
debug('MSCommon/sdk.py: trying to find SDK %s' % sdk.version)
if sdk.get_sdk_dir():
debug('MSCommon/sdk.py:found SDK %s' % sdk.version)
InstalledSDKList.append(sdk)
InstalledSDKMap[sdk.version] = sdk
return InstalledSDKList
# We may be asked to update multiple construction environments with
# SDK information. When doing this, we check on-disk for whether
# the SDK has 'mfc' and 'atl' subdirectories. Since going to disk
# is expensive, cache results by directory.
SDKEnvironmentUpdates = {}
def set_sdk_by_directory(env, sdk_dir):
global SDKEnvironmentUpdates
debug('set_sdk_by_directory: Using dir:%s'%sdk_dir)
try:
env_tuple_list = SDKEnvironmentUpdates[sdk_dir]
except KeyError:
env_tuple_list = []
SDKEnvironmentUpdates[sdk_dir] = env_tuple_list
include_path = os.path.join(sdk_dir, 'include')
mfc_path = os.path.join(include_path, 'mfc')
atl_path = os.path.join(include_path, 'atl')
if os.path.exists(mfc_path):
env_tuple_list.append(('INCLUDE', mfc_path))
if os.path.exists(atl_path):
env_tuple_list.append(('INCLUDE', atl_path))
env_tuple_list.append(('INCLUDE', include_path))
env_tuple_list.append(('LIB', os.path.join(sdk_dir, 'lib')))
env_tuple_list.append(('LIBPATH', os.path.join(sdk_dir, 'lib')))
env_tuple_list.append(('PATH', os.path.join(sdk_dir, 'bin')))
for variable, directory in env_tuple_list:
env.PrependENVPath(variable, directory)
def get_sdk_by_version(mssdk):
if mssdk not in SupportedSDKMap:
msg = "SDK version %s is not supported" % repr(mssdk)
raise SCons.Errors.UserError(msg)
get_installed_sdks()
return InstalledSDKMap.get(mssdk)
def get_default_sdk():
"""Set up the default Platform/Windows SDK."""
get_installed_sdks()
if not InstalledSDKList:
return None
return InstalledSDKList[0]
def mssdk_setup_env(env):
debug('sdk.py:mssdk_setup_env()')
if 'MSSDK_DIR' in env:
sdk_dir = env['MSSDK_DIR']
if sdk_dir is None:
return
sdk_dir = env.subst(sdk_dir)
debug('sdk.py:mssdk_setup_env: Using MSSDK_DIR:%s'%sdk_dir)
elif 'MSSDK_VERSION' in env:
sdk_version = env['MSSDK_VERSION']
if sdk_version is None:
msg = "SDK version is specified as None"
raise SCons.Errors.UserError(msg)
sdk_version = env.subst(sdk_version)
mssdk = get_sdk_by_version(sdk_version)
if mssdk is None:
msg = "SDK version %s is not installed" % sdk_version
raise SCons.Errors.UserError(msg)
sdk_dir = mssdk.get_sdk_dir()
debug('sdk.py:mssdk_setup_env: Using MSSDK_VERSION:%s'%sdk_dir)
elif 'MSVS_VERSION' in env:
msvs_version = env['MSVS_VERSION']
debug('sdk.py:mssdk_setup_env:Getting MSVS_VERSION from env:%s'%msvs_version)
if msvs_version is None:
debug('sdk.py:mssdk_setup_env thinks msvs_version is None')
return
msvs_version = env.subst(msvs_version)
import vs
msvs = vs.get_vs_by_version(msvs_version)
debug('sdk.py:mssdk_setup_env:msvs is :%s'%msvs)
if not msvs:
            debug('sdk.py:mssdk_setup_env: no VS version detected, bailing out:%s'%msvs)
return
sdk_version = msvs.sdk_version
debug('sdk.py:msvs.sdk_version is %s'%sdk_version)
if not sdk_version:
return
mssdk = get_sdk_by_version(sdk_version)
if not mssdk:
mssdk = get_default_sdk()
if not mssdk:
return
sdk_dir = mssdk.get_sdk_dir()
debug('sdk.py:mssdk_setup_env: Using MSVS_VERSION:%s'%sdk_dir)
else:
mssdk = get_default_sdk()
if not mssdk:
return
sdk_dir = mssdk.get_sdk_dir()
debug('sdk.py:mssdk_setup_env: not using any env values. sdk_dir:%s'%sdk_dir)
set_sdk_by_directory(env, sdk_dir)
#print "No MSVS_VERSION: this is likely to be a bug"
def mssdk_exists(version=None):
sdks = get_installed_sdks()
if version is None:
return len(sdks) > 0
    return version in [sdk.version for sdk in sdks]
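# --- Usage sketch (added example) ---
# Hypothetical illustration of how the helpers above are usually driven from
# a SConstruct; the construction variables shown are the ones this module
# reads, but the surrounding Environment setup is an assumption.
#
#     env = Environment(MSSDK_VERSION='7.1')   # or MSSDK_DIR / MSVS_VERSION
#     if mssdk_exists('7.1'):                  # registry + on-disk sanity check
#         mssdk_setup_env(env)                 # prepends INCLUDE/LIB/LIBPATH/PATH
#     else:
#         mssdk_setup_env(Environment())       # falls back to the default SDK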
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| EmanueleCannizzaro/scons | src/engine/SCons/Tool/MSCommon/sdk.py | Python | mit | 14,245 | 0.00702 |
from . import common
import os
import hglib
class test_paths(common.basetest):
def test_basic(self):
f = open('.hg/hgrc', 'a')
f.write('[paths]\nfoo = bar\n')
f.close()
# hgrc isn't watched for changes yet, have to reopen
self.client = hglib.open()
paths = self.client.paths()
self.assertEquals(len(paths), 1)
self.assertEquals(paths['foo'], os.path.abspath('bar'))
self.assertEquals(self.client.paths('foo'), os.path.abspath('bar'))
| beckjake/python3-hglib | tests/test-paths.py | Python | mit | 512 | 0.001953 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
# from tools.translate import _
from .. import ftpserver
class document_ftp_browse(osv.osv_memory):
_name = 'document.ftp.browse'
_description = 'Document FTP Browse'
_columns = {
'url' : fields.char('FTP Server', size=64, required=True),
}
def default_get(self, cr, uid, fields, context=None):
res = {}
if 'url' in fields:
user_pool = self.pool.get('res.users')
current_user = user_pool.browse(cr, uid, uid, context=context)
data_pool = self.pool.get('ir.model.data')
aid = data_pool._get_id(cr, uid, 'document_ftp', 'action_document_browse')
aid = data_pool.browse(cr, uid, aid, context=context).res_id
ftp_url = self.pool.get('ir.actions.url').browse(cr, uid, aid, context=context)
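            # the stored action URL looks like "ftp://host:port/": strip the
            # scheme and any trailing slash, otherwise fall back to the
            # embedded FTP server's default host and port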
url = ftp_url.url and ftp_url.url.split('ftp://') or []
if url:
url = url[1]
if url[-1] == '/':
url = url[:-1]
else:
url = '%s:%s' %(ftpserver.HOST, ftpserver.PORT)
res['url'] = 'ftp://%s@%s'%(current_user.login, url)
return res
def browse_ftp(self, cr, uid, ids, context=None):
data_id = ids and ids[0] or False
data = self.browse(cr, uid, data_id, context=context)
final_url = data.url
return {
'type': 'ir.actions.act_url',
'url':final_url,
'target': 'new'
}
document_ftp_browse()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| crmccreary/openerp_server | openerp/addons/document_ftp/wizard/ftp_browse.py | Python | agpl-3.0 | 2,556 | 0.004304 |
# this one is like your scripts with argv
def print_two(*args):
arg1,arg2 = args
print "arg1: %r, arg2: %r" % (arg1, arg2)
# ok, that *args is actually pointless, we can just do this
def print_two_again(arg1, arg2):
print "arg1: %r, arg2: %r" % (arg1, arg2)
# this just takes one arguments
def print_one(arg1):
print "arg1: %r" % arg1
# this just takes no arguments
def print_none():
print "I got nothin'."
print_two("Zed", "Shaw")
print_two_again("Zed", "Shaw")
print_one("First")
print_none()
| Cloudlie/pythonlearning | ex18.py | Python | mit | 514 | 0.015564 |
def extractCnoveltranslationsCom(item):
'''
Parser for 'cnoveltranslations.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('what if my brother is too good?', 'What if My Brother is Too Good?', 'translated'),
('i am this type of woman', 'I Am This Type of Woman', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractCnoveltranslationsCom.py | Python | bsd-3-clause | 784 | 0.026786 |
#!/home/dante/Projects/free-art/venv/bin/python3
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
try:
from tkinter import Tk, Canvas, NW
except ImportError:
from Tkinter import Tk, Canvas, NW
from PIL import Image, ImageTk
import sys
#
# painter widget
class PaintCanvas(Canvas):
def __init__(self, master, image):
Canvas.__init__(self, master, width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
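        # snap the damaged box to the tile grid so every overlapped tile,
        # not just the one containing the top-left corner, is repainted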
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
if len(sys.argv) != 2:
print("Usage: painter file")
sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| DanteOnline/free-art | venv/bin/painter.py | Python | gpl-3.0 | 2,138 | 0.000935 |
from django.core.management.base import BaseCommand
from faceDB.face_db import FaceDB
from faceDB.face import FaceCluster
from faceDB.util import * # only required for saving cluster images
from carnie_helper import RudeCarnie
from query.models import *
import random
import json
class Command(BaseCommand):
help = 'Find genders for all the detected faces'
def add_arguments(self, parser):
parser.add_argument('path')
def handle(self, *args, **options):
with open(options['path']) as f:
paths = [s.strip() for s in f.readlines()]
model_dir = '/app/deps/rude-carnie/inception_gender_checkpoint'
rc = RudeCarnie(model_dir=model_dir)
for path in paths:
confident = 0
if path == '':
return
video = Video.objects.filter(path=path).get()
labelset = video.detected_labelset()
tracks = Track.objects.filter(first_frame__labelset=labelset).all()
for track in tracks:
if track.gender != '0':
print 'skipping_track', track.id
continue
faces = Face.objects.filter(track=track)
print track.id
print("len of faces for path {}, is {}".format(path, len(faces)))
imgs = ['./assets/thumbnails/{}_{}.jpg'.format(labelset.id, f.id)
for f in faces]
best = rc.get_gender(imgs)
                # Tally gender confidence across the track's faces.
male_sum = 0.0
female_sum = 0.0
for i, face in enumerate(faces):
if best[i] is None:
# couldn't get gender output for some reason
continue
if best[i][0] == 'M':
male_sum += best[i][1]
elif best[i][0] == 'F':
female_sum += best[i][1]
track.gender = 'M' if male_sum>female_sum else 'F'
track.save()
| MattPerron/esper | esper/query/management/commands/gender_tracks.py | Python | apache-2.0 | 2,055 | 0.001946 |
import unittest
import testRObject
import testVector
import testArray
import testDataFrame
import testFormula
import testFunction
import testEnvironment
import testRobjects
import testMethods
import testPackages
import testHelp
import testLanguage
# wrap this nicely so a warning is issued if no numpy present
import testNumpyConversions
def suite():
suite_RObject = testRObject.suite()
suite_Vector = testVector.suite()
suite_Array = testArray.suite()
suite_DataFrame = testDataFrame.suite()
suite_Function = testFunction.suite()
suite_Environment = testEnvironment.suite()
suite_Formula = testFormula.suite()
suite_Robjects = testRobjects.suite()
suite_NumpyConversions = testNumpyConversions.suite()
suite_Methods = testMethods.suite()
suite_Packages = testPackages.suite()
suite_Help = testHelp.suite()
suite_Language = testLanguage.suite()
alltests = unittest.TestSuite([suite_RObject,
suite_Vector,
suite_Array,
suite_DataFrame,
suite_Function,
suite_Environment,
suite_Formula,
suite_Robjects,
suite_Methods,
suite_NumpyConversions,
suite_Packages,
suite_Help,
suite_Language
])
return alltests
def main():
r = unittest.TestResult()
suite().run(r)
return r
if __name__ == '__main__':
tr = unittest.TextTestRunner(verbosity = 2)
suite = suite()
tr.run(suite)
| welltempered/rpy2-heroku | rpy/robjects/tests/__init__.py | Python | gpl-2.0 | 1,832 | 0.003821 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.utils import formats, dateparse, timezone
from .models import Period, Traineeship, Student
from django.core.exceptions import ValidationError
from datetime import datetime, date
from io import BytesIO
from docx import Document
from docx.shared import Pt
def json_access_error(request):
return JsonResponse(
{
"errors": [
{
"status": "403",
"source": { "pointer": request.path },
"detail": "vous n'êtes plus autorisé à utiliser cette période"
},
]
},
status=403
)
def time_limit():
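    # Cutoff used by the calendar view to reject student edits on periods that
    # start before it: midnight on the Thursday of the current week
    # (weekday() is 0 for Monday, so 3 - weekday() lands on Thursday).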
today = timezone.localdate()
days_offset = 3-today.weekday()
return timezone.make_aware(datetime.combine(today+timezone.timedelta(days=days_offset), datetime.min.time()))
def calendar(request, action, traineeship):
user = request.user
traineeship = Traineeship.objects.get(id=int(traineeship))
try:
student = user.student
except Student.DoesNotExist:
student = None
# calendar read
if action=='read':
time_start = timezone.make_aware(datetime.combine(dateparse.parse_date(request.GET['start']), datetime.min.time()))
time_end = timezone.make_aware(datetime.combine(dateparse.parse_date(request.GET['end']), datetime.min.time()))
base_criteria = {
'traineeship' : traineeship
}
if request.GET['type']=='past':
base_criteria['start__gte'] = time_start
base_criteria['end__lt'] = time_limit()
if request.GET['type']=='future':
base_criteria['start__gte'] = time_limit()
base_criteria['end__lt'] = time_end
ps = Period.objects.filter(**base_criteria)
d = []
for p in ps:
d.append({
'id': p.id,
'start': p.start,
'end': p.end,
})
return JsonResponse(d, safe=False)
# create period
if action=='create':
time_start = dateparse.parse_datetime(request.GET['start'])
time_end = dateparse.parse_datetime(request.GET['end'])
if student and time_start<time_limit():
return json_access_error(request)
try:
p = traineeship.periods.create(start=time_start, end=time_end)
return JsonResponse({"event_id" : p.id}, safe=False)
except ValidationError as e:
return JsonResponse(
{
"errors": [
{
"status": "422",
"source": { "pointer": request.path },
"detail": "%s" % e.args[0]
},
]
},
status=422
)
# delete event
if action=='delete':
p = traineeship.periods.get(id=int(request.GET['event_id']))
if student and p.start<time_limit():
return json_access_error(request)
p.delete()
return JsonResponse({"event_id" : 0}, safe=False)
# update event
if action=='update':
try:
p = traineeship.periods.get(id=int(request.GET['event_id']))
time_start = dateparse.parse_datetime(request.GET['start'])
time_end = dateparse.parse_datetime(request.GET['end'])
if student and time_start<time_limit():
return json_access_error(request)
p.start = time_start
p.end = time_end
p.save()
return JsonResponse({"event_id" : p.id}, safe=False)
except ValidationError as e:
return JsonResponse(
{
"errors": [
{
"status": "422",
"source": { "pointer": request.path },
"detail": "%s" % e.args[0]
},
]
},
status=422
)
    # we should never get here...
return JsonResponse(
{
"errors": [
{
"status": "400",
"source": { "pointer": request.path },
"detail": "action not found"
},
]
},
status=400
)
# DOCX
def download_schedule(request, traineeship):
user = request.user
ts = Traineeship.objects.get(id=int(traineeship))
try:
student = user.student
except Student.DoesNotExist:
student = None
# Create the HttpResponse object with the appropriate docx headers.
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
response['Content-Disposition'] = 'attachment; filename="horaire.docx"'
buffer = BytesIO()
document = Document()
document.add_heading("%s %s : Stage d'%s" % (ts.student.first_name, ts.student.last_name, ts.category), 0)
document.save(buffer)
# Get the value of the BytesIO buffer and write it to the response.
doc = buffer.getvalue()
buffer.close()
response.write(doc)
return response
JOURS = ['Lundi', 'Mardi', 'Mercredi', 'Jeudi', 'Vendredi', 'Samedi', 'Dimanche']
def download_schedule_for_student(request, student, from_date=None):
    # default resolved per call; putting timezone.localdate() in the signature
    # would freeze the date at import time
    if from_date is None:
        from_date = timezone.localdate()
    next_monday = from_date + timezone.timedelta(days=7-from_date.weekday())
    # download the schedule of a particular student for the week following the
    # given date, or today's date if none is given
student = Student.objects.get(id=student)
#ts = student.traineeships.filter(date_start__lte=from_date, is_closed=False)[0]
ts = student.traineeships.filter(is_closed=False)[0]
    # TODO: no open traineeship, more than one open traineeship, student not found
# Create the HttpResponse object with the appropriate docx headers.
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
response['Content-Disposition'] = 'attachment; filename="horaire %s %s.docx"' % (student.last_name,
student.first_name)
buffer = BytesIO()
document = Document()
document.styles["Title"].font.size = Pt(18)
document.styles["Subtitle"].font.size = Pt(16)
document.add_heading("%s %s : du %s au %s" % (
ts.student.first_name,
ts.student.last_name,
next_monday.strftime("%d-%m-%Y"),
(next_monday + timezone.timedelta(days=6)).strftime("%d-%m-%Y"),
)
,0)
document.add_paragraph("Stage d'%s - %s" % (ts.category, ts.place,), style="Subtitle")
table = document.add_table(rows=1, cols=5)
table.style = 'Light Shading Accent 1'
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Jour'
hdr_cells[1].text = 'De'
hdr_cells[2].text = 'A'
hdr_cells[3].text = 'Périodes'
hdr_cells[4].text = 'Heures'
for x in range(7):
row_day = next_monday + timezone.timedelta(days=x)
day_periods = ts.periods.filter(start__date=row_day).order_by('start')
row_cells = table.add_row().cells
row_cells[0].text = JOURS[x]
num_p = 0
for p in day_periods :
num_p += 1
row_cells[1].text = timezone.localtime(p.start).strftime("%H:%M")
row_cells[2].text = timezone.localtime(p.end).strftime("%H:%M")
row_cells[3].text = str(p.period_duration())
row_cells[4].text = str(p.hour_duration())
if not num_p == len(day_periods):
row_cells = table.add_row().cells
document.save(buffer)
# Get the value of the BytesIO buffer and write it to the response.
doc = buffer.getvalue()
buffer.close()
response.write(doc)
return response
| Lapin-Blanc/AS_STAGES | django_calendar/views.py | Python | mit | 8,052 | 0.007211 |
#!/usr/bin/env python
# Copyright 2015 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, platform, subprocess
def install_dependencies():
env = dict(os.environ.iteritems())
if platform.system() == 'Darwin':
clang_tarball = 'clang+llvm-3.4.2-x86_64-apple-darwin10.9.xz'
clang_dir = os.path.abspath('clang+llvm-3.4.2-x86_64-apple-darwin10.9')
print('http://llvm.org/releases/3.4.2/%s' % clang_tarball)
subprocess.check_call(
['curl', '-O', 'http://llvm.org/releases/3.4.2/%s' % clang_tarball])
shasum = subprocess.Popen(['shasum', '-c'], stdin=subprocess.PIPE)
shasum.communicate(
'b182ca49f8e4933041daa8ed466f1e4a589708bf %s' % clang_tarball)
assert shasum.wait() == 0
subprocess.check_call(['tar', 'xfJ', clang_tarball])
env.update({
'PATH': ':'.join(
[os.path.join(clang_dir, 'bin'), os.environ['PATH']]),
'DYLD_LIBRARY_PATH': ':'.join(
[os.path.join(clang_dir, 'lib')] +
([os.environ['DYLD_LIBRARY_PATH']]
if 'DYLD_LIBRARY_PATH' in os.environ else [])),
})
return env
def test(root_dir, install_args, install_env):
subprocess.check_call(
['./install.py'] + install_args,
env = install_env,
cwd = root_dir)
subprocess.check_call(
['./test.py'],
cwd = root_dir)
if __name__ == '__main__':
root_dir = os.path.realpath(os.path.dirname(__file__))
legion_dir = os.path.dirname(root_dir)
runtime_dir = os.path.join(legion_dir, 'runtime')
env = install_dependencies()
env.update({
'LG_RT_DIR': runtime_dir,
})
test(root_dir, ['--debug'], env)
test(root_dir, [], env)
| SKA-ScienceDataProcessor/legion-sdp-clone | language/travis.py | Python | apache-2.0 | 2,295 | 0.004793 |