text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---|
from kwonly_args import kwonly_defaults
from .parser import ParserListener
class ObjectBuilderParams(object):
# By creating a subclass of this class and overriding these
# attributes you can change the default values of __init__().
default_object_creator = None
default_array_creator = None
default_string_to_scalar_converter = None
@kwonly_defaults
def __init__(self, object_creator=None, array_creator=None, string_to_scalar_converter=None):
"""
:param object_creator: A callable with signature object_creator(listener) that has to
return a tuple: (json_object, insert_function). You can access line/column information
through listener.parser.line and listener.parser.column.
The returned json_object will be used as a json object (dict) in the hierarchy returned by
the loads() function. The insert_function(key, value) will be used to insert items.
Besides this, the json_object has to support the 'in' operator (by implementing __contains__).
:param array_creator: A callable with signature array_creator(listener) that has to
return a tuple: (json_array, append_function).
The returned json_array will be used as a json array (list) in the hierarchy returned by
the loads() function. The append_function(item) will be used to add items.
:param string_to_scalar_converter: A callable with signature
string_to_scalar_converter(listener, scalar_str, scalar_str_quoted).
While parsing, this function receives every json value that is not an
object or an array. This includes quoted strings and all other non-quoted values
(like the null, true, false literals and numbers/strings). Note that scalar_str is always a
string and scalar_str_quoted is a boolean that indicates whether scalar_str was quoted or
not in the input json string. The parser interprets every scalar as a quoted or non-quoted
string.
If scalar_str_quoted is True then scalar_str contains the unescaped string. If
scalar_str_quoted is False then it may contain "null", "true", "false" or the string
representation of anything else (e.g. a number: "1.564") and it's up to you how to interpret
it. You can define your own constant scalar literals if you want, for example interpreting
the unquoted "yes" and "no" literals as boolean values.
In case of a conversion error you should call listener.error() with an error message; this
raises an exception with information about the error location.
"""
def get_default(name):
# We use type(self).__dict__['X'] because these class attributes are often simple
# functions and we don't want to convert them to instance methods by retrieving them
# with self.X statements.
return type(self).__dict__[name]
self.object_creator = object_creator or get_default('default_object_creator')
self.array_creator = array_creator or get_default('default_array_creator')
self.string_to_scalar_converter = string_to_scalar_converter or\
get_default('default_string_to_scalar_converter')
class ObjectBuilderParserListener(ParserListener):
""" A configurable parser listener implementation that can be configured to
build a json tree using the user supplied object/array factories and scalar converter. """
def __init__(self, params):
super(ObjectBuilderParserListener, self).__init__()
self.params = params
self._object_key = None
# The lambda could simply be None, but then we would get a warning in
# self._new_value() about insert_function not being callable...
self._container_stack = [(None, None, lambda *args: None)]
self._result = None
@property
def result(self):
""" This property holds the parsed object or array after a successful parsing. """
return self._result
class ContainerType(object):
object = 0
array = 1
@property
def _state(self):
return self._container_stack[-1]
def _new_value(self, value):
container_type, _, insert_function = self._state
if container_type == self.ContainerType.object:
insert_function(self._object_key, value)
self._object_key = None
elif container_type == self.ContainerType.array:
insert_function(value)
def _pop_container_stack(self):
if len(self._container_stack) == 2:
self._result = self._container_stack[-1][1]
self._container_stack.pop()
def begin_object(self):
obj, insert_function = self.params.object_creator(self)
self._new_value(obj)
self._container_stack.append((self.ContainerType.object, obj, insert_function))
def end_object(self):
self._pop_container_stack()
def begin_object_item(self, key, key_quoted):
if key in self._state[1]:
self.error('Duplicate key: "%s"' % (key,))
self._object_key = key
def begin_array(self):
arr, append_function = self.params.array_creator(self)
self._new_value(arr)
self._container_stack.append((self.ContainerType.array, arr, append_function))
def end_array(self):
self._pop_container_stack()
def scalar(self, scalar_str, scalar_str_quoted):
value = self.params.string_to_scalar_converter(self.parser, scalar_str, scalar_str_quoted)
self._new_value(value)
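# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how ObjectBuilderParams and ObjectBuilderParserListener
# could be wired together. The creator/converter callables below are hypothetical;
# only their signatures follow the docstring of ObjectBuilderParams.__init__().
if __name__ == '__main__':
    from collections import OrderedDict

    def my_object_creator(listener):
        obj = OrderedDict()
        return obj, obj.__setitem__        # (json_object, insert_function)

    def my_array_creator(listener):
        arr = []
        return arr, arr.append             # (json_array, append_function)

    def my_string_to_scalar_converter(listener, scalar_str, scalar_str_quoted):
        if scalar_str_quoted:
            return scalar_str
        literals = {'null': None, 'true': True, 'false': False}
        if scalar_str in literals:
            return literals[scalar_str]
        try:
            return float(scalar_str)
        except ValueError:
            return scalar_str

    params = ObjectBuilderParams(
        object_creator=my_object_creator,
        array_creator=my_array_creator,
        string_to_scalar_converter=my_string_to_scalar_converter,
    )
    listener = ObjectBuilderParserListener(params)
    # A parser from the accompanying .parser module would now be driven with this
    # listener; after a successful parse the tree is available as listener.result.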
| pasztorpisti/json-cfg | src/jsoncfg/parser_listener.py | Python | mit | 5,570 | 0.005027 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class SubnetPaged(Paged):
"""
A paging container for iterating over a list of :class:`Subnet <azure.mgmt.network.v2017_06_01.models.Subnet>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Subnet]'}
}
def __init__(self, *args, **kwargs):
super(SubnetPaged, self).__init__(*args, **kwargs)
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/subnet_paged.py | Python | mit | 922 | 0.001085 |
"""
Virtualization installation functions.
Currently somewhat Xen/paravirt specific, will evolve later.
Copyright 2006-2008 Red Hat, Inc.
Michael DeHaan <mdehaan@redhat.com>
Original version based on virtguest-install
Jeremy Katz <katzj@redhat.com>
Option handling added by Andrew Puch <apuch@redhat.com>
Simplified for use as library by koan, Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os, sys, time, stat
import tempfile
import random
import exceptions
import errno
import re
import virtinst
import app as koan
try:
import virtinst.DistroManager as DistroManager
except:
# older virtinst, this is probably ok
# but we know we can't do Xen fullvirt installs
pass
import traceback
def random_mac():
"""
from xend/server/netif.py
Generate a random MAC address.
Uses OUI 00-16-3E, allocated to
Xensource, Inc. Last 3 fields are random.
return: MAC address string
"""
mac = [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
return ':'.join(map(lambda x: "%02x" % x, mac))
def start_install(name=None, ram=None, disks=None,
uuid=None,
extra=None,
vcpus=None,
profile_data=None, arch=None, no_gfx=False, fullvirt=False, bridge=None):
if profile_data.has_key("file"):
raise koan.InfoException("Xen does not work with --image yet")
if fullvirt:
# FIXME: add error handling here to explain when it's not supported
guest = virtinst.FullVirtGuest(installer=DistroManager.PXEInstaller())
else:
guest = virtinst.ParaVirtGuest()
extra = extra.replace("&","&amp;")
if not fullvirt:
guest.set_boot((profile_data["kernel_local"], profile_data["initrd_local"]))
# fullvirt OS's will get this from the PXE config (managed by Cobbler)
guest.extraargs = extra
else:
print "- fullvirt mode"
if profile_data.has_key("breed"):
breed = profile_data["breed"]
if breed != "other" and breed != "":
if breed in [ "debian", "suse", "redhat" ]:
guest.set_os_type("linux")
elif breed in [ "windows" ]:
guest.set_os_type("windows")
else:
guest.set_os_type("unix")
if profile_data.has_key("os_version"):
# FIXME: when os_version is not defined and it's linux, do we use generic24/generic26 ?
version = profile_data["os_version"]
if version != "other" and version != "":
try:
guest.set_os_variant(version)
except:
print "- virtinst library does not understand variant %s, treating as generic" % version
pass
guest.set_name(name)
guest.set_memory(ram)
guest.set_vcpus(vcpus)
if not no_gfx:
guest.set_graphics("vnc")
else:
guest.set_graphics(False)
if uuid is not None:
guest.set_uuid(uuid)
for d in disks:
if d[1] != 0:
guest.disks.append(virtinst.XenDisk(d[0], size=d[1]))
counter = 0
if profile_data.has_key("interfaces"):
interfaces = profile_data["interfaces"].keys()
interfaces.sort()
counter = -1
for iname in interfaces:
counter = counter + 1
intf = profile_data["interfaces"][iname]
mac = intf["mac_address"]
if mac == "":
mac = random_mac()
if not bridge:
profile_bridge = profile_data["virt_bridge"]
intf_bridge = intf["virt_bridge"]
if intf_bridge == "":
if profile_bridge == "":
raise koan.InfoException("virt-bridge setting is not defined in cobbler")
intf_bridge = profile_bridge
else:
if bridge.find(",") == -1:
intf_bridge = bridge
else:
bridges = bridge.split(",")
intf_bridge = bridges[counter]
nic_obj = virtinst.XenNetworkInterface(macaddr=mac, bridge=intf_bridge)
guest.nics.append(nic_obj)
counter = counter + 1
else:
# for --profile you just get one NIC, go define a system if you want more.
# FIXME: can mac still be sent on command line in this case?
if bridge is None:
profile_bridge = profile_data["virt_bridge"]
else:
profile_bridge = bridge
if profile_bridge == "":
raise koan.InfoException("virt-bridge setting is not defined in cobbler")
nic_obj = virtinst.XenNetworkInterface(macaddr=random_mac(), bridge=profile_bridge)
guest.nics.append(nic_obj)
guest.start_install()
return "use virt-manager or reconnect with virsh console %s" % name
| charles-dyfis-net/koan | koan/xencreate.py | Python | gpl-2.0 | 5,855 | 0.008027 |
"""
Craft is a script to access the plugins which craft a gcode file.
The plugin buttons which are commonly used are bolded and the ones which are rarely used have normal font weight.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to work around the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import settings
from skeinforge.skeinforge_utilities import skeinforge_analyze
from skeinforge.skeinforge_utilities import skeinforge_polyfile
from skeinforge.skeinforge_utilities import skeinforge_profile
import os
import time
__author__ = "Enrique Perez (perez_enrique@yahoo.com)"
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
def getChainText( fileName, procedure ):
"Get a crafted shape file."
text = gcodec.getFileText( fileName )
procedures = getProcedures( procedure, text )
return getChainTextFromProcedures( fileName, procedures, text )
def getChainTextFromProcedures( fileName, procedures, text ):
"Get a crafted shape file from a list of procedures."
lastProcedureTime = time.time()
for procedure in procedures:
craftModule = getCraftModule( procedure )
if craftModule != None:
text = craftModule.getCraftedText( fileName, text )
if gcodec.isProcedureDone( text, procedure ):
print( '%s procedure took %s seconds.' % ( procedure.capitalize(), int( round( time.time() - lastProcedureTime ) ) ) )
Filehandle = open ('report.txt', 'a')
Filehandle.write ('%s procedure took %s seconds.' % ( procedure.capitalize(), int( round( time.time() - lastProcedureTime ) ) )+'\n')
Filehandle.close ()
lastProcedureTime = time.time()
return text
def getCraftModule( fileName ):
"Get craft module."
return gcodec.getModuleWithDirectoryPath( getPluginsDirectoryPath(), fileName )
def getLastModule():
"Get the last tool."
craftSequence = getReadCraftSequence()
if len( craftSequence ) < 1:
return None
return getCraftModule( craftSequence[ - 1 ] )
def getNewRepository():
"Get the repository constructor."
return CraftRepository()
def getPluginsDirectoryPath():
"Get the plugins directory path."
return gcodec.getAbsoluteFolderPath( os.path.dirname( __file__ ), os.path.join( 'skeinforge_tools', 'craft_plugins' ) )
def getPluginFileNames():
"Get craft plugin fileNames."
craftSequence = getReadCraftSequence()
craftSequence.sort()
return craftSequence
def getProcedures( procedure, text ):
"Get the procedures up to and including the given procedure."
craftSequence = getReadCraftSequence()
sequenceIndexPlusOneFromText = getSequenceIndexPlusOneFromText( text )
sequenceIndexFromProcedure = getSequenceIndexFromProcedure( procedure )
return craftSequence[ sequenceIndexPlusOneFromText : sequenceIndexFromProcedure + 1 ]
def getReadCraftSequence():
"Get profile sequence."
return skeinforge_profile.getCraftTypePluginModule().getCraftSequence()
def getSequenceIndexFromProcedure( procedure ):
"Get the profile sequence index of the procedure. Return None if the procedure is not in the sequence"
craftSequence = getReadCraftSequence()
if procedure not in craftSequence:
return 0
return craftSequence.index( procedure )
def getSequenceIndexPlusOneFromText( fileText ):
"Get the profile sequence index of the file plus one. Return zero if the procedure is not in the file"
craftSequence = getReadCraftSequence()
for craftSequenceIndex in xrange( len( craftSequence ) - 1, - 1, - 1 ):
procedure = craftSequence[ craftSequenceIndex ]
if gcodec.isProcedureDone( fileText, procedure ):
return craftSequenceIndex + 1
return 0
def writeChainTextWithNounMessage( fileName, procedure ):
"Get and write a crafted shape file."
print( '' )
print( 'The %s tool is parsing the file:' % procedure )
print( os.path.basename( fileName ) )
print( '' )
startTime = time.time()
suffixFileName = fileName[ : fileName.rfind( '.' ) ] + '_' + procedure + '.gcode'
craftText = getChainText( fileName, procedure )
if craftText == '':
return
gcodec.writeFileText( suffixFileName, craftText )
print( '' )
print( 'The %s tool has created the file:' % procedure )
print( suffixFileName )
print( '' )
print( 'It took %s seconds to craft the file.' % int( time.time() - startTime ) )
skeinforge_analyze.writeOutput( suffixFileName, craftText )
def writeOutput( fileName ):
"Craft a gcode file with the last module."
pluginModule = getLastModule()
if pluginModule != None:
pluginModule.writeOutput( fileName )
class CraftRadioButtonsSaveListener:
"A class to update the craft radio buttons."
def addToDialog( self, gridPosition ):
"Add this to the dialog."
euclidean.addElementToListTableIfNotThere( self, self.repository.repositoryDialog, settings.globalProfileSaveListenerListTable )
self.gridPosition = gridPosition.getCopy()
self.gridPosition.row = gridPosition.rowStart
self.gridPosition.increment()
self.setRadioButtons()
def getFromRadioPlugins( self, radioPlugins, repository ):
"Initialize."
self.name = 'CraftRadioButtonsSaveListener'
self.radioPlugins = radioPlugins
self.repository = repository
repository.displayEntities.append( self )
return self
def save( self ):
"Profile has been saved and craft radio plugins should be updated."
self.setRadioButtons()
def setRadioButtons( self ):
"Profile has been saved and craft radio plugins should be updated."
craftSequence = skeinforge_profile.getCraftTypePluginModule().getCraftSequence()
craftSequence.append( 'bfb' )
craftSequence.remove( 'home' )
craftSequence.remove( 'chamber' )
craftSequence.remove( 'lash' )
craftSequence.remove( 'oozebane' )
craftSequence.remove( 'splodge' )
craftSequence.remove( 'unpause' )
craftSequence.remove( 'wipe' )
gridPosition = self.gridPosition.getCopy()
maximumValue = False
activeRadioPlugins = []
for radioPlugin in self.radioPlugins:
if radioPlugin.name in craftSequence:
activeRadioPlugins.append( radioPlugin )
radioPlugin.incrementGridPosition( gridPosition )
maximumValue = max( radioPlugin.value, maximumValue )
else:
radioPlugin.radiobutton.grid_remove()
if not maximumValue:
selectedRadioPlugin = settings.getSelectedRadioPlugin( self.repository.importantFileNames + [ activeRadioPlugins[ 0 ].name ], activeRadioPlugins ).setSelect()
self.repository.pluginFrame.update()
class CraftRepository:
"A class to handle the craft settings."
def __init__( self ):
"Set the default settings, execute title & settings fileName."
settings.addListsToRepository( 'skeinforge.skeinforge_utilities.skeinforge_craft.html', '', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Craft', self, '' )
self.importantFileNames = ['bfb', 'carve', 'chop', 'feed', 'flow', 'lift', 'raft', 'speed' ]
allCraftNames = gcodec.getPluginFileNamesFromDirectoryPath( getPluginsDirectoryPath() )
radioPlugins = settings.getRadioPluginsAddPluginFrame( getPluginsDirectoryPath(), self.importantFileNames, allCraftNames, self )
CraftRadioButtonsSaveListener().getFromRadioPlugins( radioPlugins, self )
self.executeTitle = 'Craft'
def execute( self ):
"Craft button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, [], self.fileNameInput.wasCancelled )
for fileName in fileNames:
writeOutput( fileName )
| tobykurien/MakerDroid | assetsrc/public.mp3/skeinforge/skeinforge_utilities/skeinforge_craft.py | Python | gpl-3.0 | 7,636 | 0.043609 |
# Copyright 2010 http://www.collabq.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
from django import http
from django import template
from django.conf import settings
from django.template import loader
from django.shortcuts import render_to_response
from administration import helper as admin_helper
from common import api
from common import clean
from common import decorator
from common import models
from common import exception
from common import util
from common import validate
from common import views as common_views
ITEMS_BY_PAGE = 20
@decorator.gae_admin_required
def install(request):
try:
root_user = api.actor_get(api.ROOT, settings.ROOT_NICK)
if root_user:
return util.RedirectFlash('/', 'Already Installed')
except:
root_user = None
post_name = util.get_metadata('POST_NAME')
default_channel = util.get_metadata('DEFAULT_CHANNEL')
if request.POST:
site_name = request.POST.get('site_name', None)
tagline = request.POST.get('tagline', None)
post_name = request.POST.get('post_name', None)
root_mail = request.POST.get('root_mail', None)
password = request.POST.get('password', None)
confirm = request.POST.get('confirm', None)
default_channel = request.POST.get('default_channel', None)
try:
logging.info('saving values')
validate.nonce(request, 'install')
validate.email(root_mail)
validate.password(password)
validate.password_and_confirm(password, confirm)
channel = clean.channel(default_channel)
admin_helper.validate_and_save_sitesettings(site_name, tagline, post_name)
root_user = api.user_create_root(api.ROOT, password=password)
api.email_associate(api.ROOT, root_user.nick, root_mail)
channel_ref = api.channel_create(api.ROOT, nick=api.ROOT.nick, channel=channel, tags=[],
type='', description='Support Channel')
util.set_metadata('DEFAULT_CHANNEL', default_channel)
logging.info('Installed and Redirecting to front')
return util.RedirectFlash('/', 'Installed Successfully')
except:
exception.handle_exception(request)
redirect_to = '/'
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/install.html', c)
@decorator.gae_admin_required
def admin(request):
page = 'admin'
group_menuitem = 'admin'
title = 'Administration'
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/admin.html', c)
@decorator.gae_admin_required
def admin_site(request):
page = 'site'
title = 'Site Settings'
site_name = util.get_metadata('SITE_NAME')
tagline = util.get_metadata('TAGLINE')
post_name = util.get_metadata('POST_NAME')
if request.POST:
site_name = request.POST.get('site_name', None)
tagline = request.POST.get('tagline', None)
post_name = request.POST.get('post_name', None)
site_description = request.POST.get('site_description', None)
try:
validate.nonce(request, 'site')
admin_helper.validate_and_save_sitesettings(site_name, tagline, post_name, site_description)
except exception.ValidationError:
exception.handle_exception(request)
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/site.html', c)
@decorator.gae_admin_required
def admin_channel(request):
page = 'channel'
title = 'Channels Settings'
enable_channels = util.get_metadata('ENABLE_CHANNELS')
enable_channel_types = util.get_metadata('ENABLE_CHANNEL_TYPES')
if request.POST:
enable_channels = request.POST.get('enable_channels', False)
enable_channel_types = request.POST.get('enable_channel_types', False)
try:
validate.nonce(request, 'admin_channel')
validate.bool_checkbox(enable_channels)
validate.bool_checkbox(enable_channel_types)
util.set_metadata('ENABLE_CHANNELS', str(enable_channels), 0, {'type':'bool'})
util.set_metadata('ENABLE_CHANNEL_TYPES', str(enable_channel_types), 0, {'type':'bool'})
except exception.ValidationError:
exception.handle_exception(request)
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/channel.html', c)
@decorator.gae_admin_required
def admin_channel_list(request):
page = 'channel_list'
title = 'Channels'
page = util.paging_get_page(request)
offset = util.paging_get_offset(page, ITEMS_BY_PAGE)
filter = request.GET.get('filter', 'all')
#owner = api.actor_lookup_nick(request.user, util.get_owner(request))
new_link = '/admin/channels/new'
size, items = api.admin_get_channels(api.ROOT, ITEMS_BY_PAGE, offset, filter)
start, end, next, prev, first, last = util.paging(page, ITEMS_BY_PAGE, size)
base_url = '/admin/channels?'
if filter is not None:
filter_url = '&filter=%s' % filter
group_menuitem = 'channel'
menuitem = 'channel-list'
channel_types = api.get_config_values(api.ROOT, 'channel_type')
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/channel_list.html', c)
@decorator.gae_admin_required
def admin_channel_new(request):
page = 'channel_new'
title = 'Create a Channel'
if request.method == 'POST':
params = {
'nick': api.ROOT.nick,
'channel': request.POST.get('channel'),
'description': request.POST.get('description', ''),
'type':request.POST.get('type'),
'tags': request.POST.getlist('tags[]'),
}
channel_ref = api.channel_create(api.ROOT, **params)
if channel_ref is not None:
logging.info('Channel created %s' % channel_ref)
return util.RedirectFlash('/admin/channels', "Channel created successfully")
group_menuitem = 'channel'
menuitem = 'channel-new'
channel_types = api.get_config_values(api.ROOT, 'channel_type')
c = template.RequestContext(request, locals())
return render_to_response('administration/templates/channel_new.html', c)
@decorator.gae_admin_required
def admin_channel_enable(request, nick):
logging.info("admin_channel_enable")
nick = clean.channel(nick)
channel = api.channel_get_safe(api.ROOT, nick)
channel.enabled = True
channel.put()
logging.info("Channel %s" % channel.nick)
logging.info("Is enabled? %s" % channel.is_enabled())
return util.RedirectFlash('/admin/channels', "Channel has been enabled successfully")
@decorator.gae_admin_required
def admin_channel_disable(request, nick):
logging.info("admin_channel_disable")
nick = clean.channel(nick)
channel = api.channel_get_safe(api.ROOT, nick)
channel.enabled = False
channel.put()
logging.info("Channel %s" % channel.nick)
logging.info("Is enabled? %s" % channel.is_enabled())
return util.RedirectFlash('/admin/channels', "Channel has been disabled successfully")
@decorator.gae_admin_required
def admin_auto(request, action):
page = util.paging_get_page(request)
offset = util.paging_get_offset(page, ITEMS_BY_PAGE)
next = str(int(page)+1)
redirect_url = 'admin/auto/%s?page=%s' % (action, next)
action = "administration.actions.%s" % action
__import__(action)
action_call = sys.modules[action]
redirect, output = action_call.process(page, ITEMS_BY_PAGE, offset)
c = template.RequestContext(request, locals())
t = loader.get_template('administration/templates/auto.html')
return http.HttpResponse(t.render(c)) | CollabQ/CollabQ | administration/views.py | Python | apache-2.0 | 8,027 | 0.017815 |
# -*- coding: utf-8 -*-
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
# --------------------------------------------------------------------------
# Blender 2.5 Extensions Framework
# --------------------------------------------------------------------------
#
# Authors:
# Doug Hammond
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# ***** END GPL LICENCE BLOCK *****
#
"""
Pure logic and validation class.
By using a Subject object, and a dict of described logic tests, it
is possible to arrive at a True or False result for various purposes:
1. Data validation
2. UI control visibility
A Subject can be any object whose members are readable with getattr() :
class Subject(object):
a = 0
b = 1
c = 'foo'
d = True
e = False
f = 8
g = 'bar'
Tests are described thus:
Use the special list types Logic_AND and Logic_OR to describe
combinations of values and other members. Use Logic_Operator for
numerical comparison.
With regards to Subject, each of these evaluate to True:
TESTA = {
'a': 0,
'c': Logic_OR([ 'foo', 'bar' ]),
'd': Logic_AND([True, True]),
'f': Logic_AND([8, {'b': 1}]),
'e': {'b': Logic_Operator({'gte':1, 'lt':3}) },
'g': Logic_OR([ 'baz', Logic_AND([{'b': 1}, {'f': 8}]) ])
}
With regards to Subject, each of these evaluate to False:
TESTB = {
'a': 'foo',
'c': Logic_OR([ 'bar', 'baz' ]),
'd': Logic_AND([ True, 'foo' ]),
'f': Logic_AND([9, {'b': 1}]),
'e': {'b': Logic_Operator({'gte':-10, 'lt': 1}) },
'g': Logic_OR([ 'baz', Logic_AND([{'b':0}, {'f': 8}]) ])
}
With regards to Subject, this test is invalid
TESTC = {
'n': 0
}
Tests are executed thus:
S = Subject()
L = Logician(S)
L.execute(TESTA)
"""
class Logic_AND(list):
pass
class Logic_OR(list):
pass
class Logic_Operator(dict):
pass
class Logician(object):
"""Given a subject and a dict that describes tests to perform on
its members, this class will evaluate True or False results for
each member/test pair. See the examples below for test syntax.
"""
subject = None
def __init__(self, subject):
self.subject = subject
def get_member(self, member_name):
"""Get a member value from the subject object. Raise exception
if subject is None or member not found.
"""
if self.subject is None:
raise Exception('Cannot run tests on a subject which is None')
return getattr(self.subject, member_name)
def test_logic(self, member, logic, operator='eq'):
"""Find the type of test to run on member, and perform that test"""
if type(logic) is dict:
return self.test_dict(member, logic)
elif type(logic) is Logic_AND:
return self.test_and(member, logic)
elif type(logic) is Logic_OR:
return self.test_or(member, logic)
elif type(logic) is Logic_Operator:
return self.test_operator(member, logic)
else:
# compare the value, I think using Logic_Operator() here
# allows completeness in test_operator(), but I can't put
# my finger on why for the minute
return self.test_operator(member,
Logic_Operator({operator: logic}))
def test_operator(self, member, value):
"""Execute the operators contained within value and expect that
ALL operators are True
"""
# something in this method is incomplete, what if operand is
# a dict, Logic_AND, Logic_OR or another Logic_Operator ?
# Do those constructs even make any sense ?
result = True
for operator, operand in value.items():
operator = operator.lower().strip()
if operator in ['eq', '==']:
result &= member==operand
if operator in ['not', '!=']:
result &= member!=operand
if operator in ['lt', '<']:
result &= member<operand
if operator in ['lte', '<=']:
result &= member<=operand
if operator in ['gt', '>']:
result &= member>operand
if operator in ['gte', '>=']:
result &= member>=operand
if operator in ['and', '&']:
result &= member&operand
if operator in ['or', '|']:
result &= member|operand
if operator in ['len']:
result &= len(member)==operand
# I can think of some more, but they're probably not useful.
return result
def test_or(self, member, logic):
"""Member is a value, logic is a set of values, ANY of which
can be True
"""
result = False
for test in logic:
result |= self.test_logic(member, test)
return result
def test_and(self, member, logic):
"""Member is a value, logic is a list of values, ALL of which
must be True
"""
result = True
for test in logic:
result &= self.test_logic(member, test)
return result
def test_dict(self, member, logic):
"""Member is a value, logic is a dict of other members to
compare to. All other member tests must be True
"""
result = True
for other_member, test in logic.items():
result &= self.test_logic(self.get_member(other_member), test)
return result
def execute(self, test):
"""Subject is an object, test is a dict of {member: test} pairs
to perform on subject's members. Each key in test is a member
of subject.
"""
for member_name, logic in test.items():
result = self.test_logic(self.get_member(member_name), logic)
print('member %s is %s' % (member_name, result))
# A couple of name aliases
class Validation(Logician):
pass
class Visibility(Logician):
pass
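# --- Runnable demonstration (not part of the original module) ---
# Re-creates the Subject and TESTA example from the module docstring above and
# runs it through Logician; each member test should print as True.
if __name__ == '__main__':
    class Subject(object):
        a = 0
        b = 1
        c = 'foo'
        d = True
        e = False
        f = 8
        g = 'bar'

    TESTA = {
        'a': 0,
        'c': Logic_OR(['foo', 'bar']),
        'd': Logic_AND([True, True]),
        'f': Logic_AND([8, {'b': 1}]),
        'e': {'b': Logic_Operator({'gte': 1, 'lt': 3})},
        'g': Logic_OR(['baz', Logic_AND([{'b': 1}, {'f': 8}])]),
    }

    Logician(Subject()).execute(TESTA)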
| Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons/modules/extensions_framework/validate.py | Python | gpl-3.0 | 6,509 | 0.002612 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.providers.amazon.aws.operators.glue_crawler import GlueCrawlerOperator
mock_crawler_name = 'test-crawler'
mock_role_name = 'test-role'
mock_config = {
'Name': mock_crawler_name,
'Description': 'Test glue crawler from Airflow',
'DatabaseName': 'test_db',
'Role': mock_role_name,
'Targets': {
'S3Targets': [
{
'Path': 's3://test-glue-crawler/foo/',
'Exclusions': [
's3://test-glue-crawler/bar/',
],
'ConnectionName': 'test-s3-conn',
}
],
'JdbcTargets': [
{
'ConnectionName': 'test-jdbc-conn',
'Path': 'test_db/test_table>',
'Exclusions': [
'string',
],
}
],
'MongoDBTargets': [
{'ConnectionName': 'test-mongo-conn', 'Path': 'test_db/test_collection', 'ScanAll': True}
],
'DynamoDBTargets': [{'Path': 'test_db/test_table', 'scanAll': True, 'scanRate': 123.0}],
'CatalogTargets': [
{
'DatabaseName': 'test_glue_db',
'Tables': [
'test',
],
}
],
},
'Classifiers': ['test-classifier'],
'TablePrefix': 'test',
'SchemaChangePolicy': {
'UpdateBehavior': 'UPDATE_IN_DATABASE',
'DeleteBehavior': 'DEPRECATE_IN_DATABASE',
},
'RecrawlPolicy': {'RecrawlBehavior': 'CRAWL_EVERYTHING'},
'LineageConfiguration': 'ENABLE',
'Configuration': """
{
"Version": 1.0,
"CrawlerOutput": {
"Partitions": { "AddOrUpdateBehavior": "InheritFromTable" }
}
}
""",
'SecurityConfiguration': 'test',
'Tags': {'test': 'foo'},
}
class TestGlueCrawlerOperator(unittest.TestCase):
def setUp(self):
self.glue = GlueCrawlerOperator(task_id='test_glue_crawler_operator', config=mock_config)
@mock.patch('airflow.providers.amazon.aws.operators.glue_crawler.GlueCrawlerHook')
def test_execute_without_failure(self, mock_hook):
mock_hook.return_value.has_crawler.return_value = True
self.glue.execute({})
mock_hook.assert_has_calls(
[
mock.call('aws_default'),
mock.call().has_crawler('test-crawler'),
mock.call().update_crawler(**mock_config),
mock.call().start_crawler(mock_crawler_name),
mock.call().wait_for_crawler_completion(crawler_name=mock_crawler_name, poll_interval=5),
]
)
| Acehaidrey/incubator-airflow | tests/providers/amazon/aws/operators/test_glue_crawler.py | Python | apache-2.0 | 3,458 | 0.001735 |
import json
import unittest
from boto.s3.key import Key
from mock import create_autospec, Mock, call
from arbalest.s3 import Bucket
from arbalest.pipeline import S3SortedDataSources
def mock_key(name):
return Key(Mock(), name)
class S3SortedDataSourcesShould(unittest.TestCase):
def setUp(self):
parents = ['event.entity.created/2014-11-03/',
'event.entity.created/2014-11-04/',
'event.entity.created/2014-11-05/',
'event.entity.created/2014-11-06/',
'event.entity.created/2014-11-07/']
first_children = ['event.entity.created/2014-11-04/00/',
'event.entity.created/2014-11-04/01/']
second_children = ['event.entity.created/2014-11-05/00/']
self.bucket = create_autospec(Bucket)
self.bucket.list = Mock(
side_effect=[[mock_key(key) for key in parents],
[mock_key(key) for key in first_children],
[mock_key(key) for key in second_children]])
def test_have_source_journal_key(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket)
self.assertEqual('/event.entity.created_source_journal.json',
source.source_journal_key)
def test_get_all_dates_as_sources(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket)
self.assertEqual(['event.entity.created/2014-11-03',
'event.entity.created/2014-11-04',
'event.entity.created/2014-11-05',
'event.entity.created/2014-11-06',
'event.entity.created/2014-11-07'],
list(source.get()))
self.bucket.list.assert_called_once_with(source.source + '/', '/')
def test_get_all_dates_as_sources_with_empty_dates(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket,
'', '')
self.assertEqual(['event.entity.created/2014-11-03',
'event.entity.created/2014-11-04',
'event.entity.created/2014-11-05',
'event.entity.created/2014-11-06',
'event.entity.created/2014-11-07'],
list(source.get()))
self.bucket.list.assert_called_once_with(source.source + '/', '/')
def test_get_all_dates_including_and_after_start_date_as_sources(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket,
'2014-11-04')
self.assertEqual(['event.entity.created/2014-11-04',
'event.entity.created/2014-11-05',
'event.entity.created/2014-11-06',
'event.entity.created/2014-11-07'],
list(source.get()))
self.bucket.list.assert_called_once_with(source.source + '/', '/')
def test_get_all_dates_including_and_before_end_date_as_sources(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket,
end='2014-11-05')
self.assertEqual(['event.entity.created/2014-11-03',
'event.entity.created/2014-11-04',
'event.entity.created/2014-11-05'],
list(source.get()))
self.bucket.list.assert_called_once_with(source.source + '/', '/')
def test_get_all_dates_including_and_between_start_and_end_date_as_sources(
self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket,
start='2014-11-04/01',
end='2014-11-06')
self.assertEqual(['event.entity.created/2014-11-04/01',
'event.entity.created/2014-11-05/00',
'event.entity.created/2014-11-06'],
list(source.get()))
self.bucket.list.assert_has_calls(
[call(source.source + '/', '/'),
call('event.entity.created/2014-11-04/', '/'),
call('event.entity.created/2014-11-05/', '/'),
call('event.entity.created/2014-11-06/', '/')])
def test_committed(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket)
key = mock_key(source.source_journal_key)
key.exists = Mock(return_value=True)
source.bucket.get = Mock(return_value=key)
self.assertEqual(True, source.committed().exists())
def test_commit(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket)
committed_point = '2014-11-04'
source.commit(committed_point)
self.bucket.save.assert_called_once_with(source.source_journal_key,
json.dumps({
'committed': committed_point}))
def test_rollback(self):
source = S3SortedDataSources('', 'event.entity.created', self.bucket)
source.rollback()
self.bucket.delete.assert_called_once_with(source.source_journal_key)
| Dwolla/arbalest | test/pipeline/test_s3_sorted_data_sources.py | Python | mit | 5,292 | 0.000189 |
#!/usr/bin/env python
"""
lit - LLVM Integrated Tester.
See lit.pod for more information.
"""
import math, os, platform, random, re, sys, time, threading, traceback
import ProgressBar
import TestRunner
import Util
import LitConfig
import Test
import lit.discovery
class TestingProgressDisplay:
def __init__(self, opts, numTests, progressBar=None):
self.opts = opts
self.numTests = numTests
self.current = None
self.lock = threading.Lock()
self.progressBar = progressBar
self.completed = 0
def update(self, test):
# Avoid locking overhead in quiet mode
if self.opts.quiet and not test.result.isFailure:
self.completed += 1
return
# Output lock.
self.lock.acquire()
try:
self.handleUpdate(test)
finally:
self.lock.release()
def finish(self):
if self.progressBar:
self.progressBar.clear()
elif self.opts.quiet:
pass
elif self.opts.succinct:
sys.stdout.write('\n')
def handleUpdate(self, test):
self.completed += 1
if self.progressBar:
self.progressBar.update(float(self.completed)/self.numTests,
test.getFullName())
if self.opts.succinct and not test.result.isFailure:
return
if self.progressBar:
self.progressBar.clear()
print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
self.completed, self.numTests)
if test.result.isFailure and self.opts.showOutput:
print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
'*'*20)
print test.output
print "*" * 20
sys.stdout.flush()
class TestProvider:
def __init__(self, tests, maxTime):
self.maxTime = maxTime
self.iter = iter(tests)
self.lock = threading.Lock()
self.startTime = time.time()
def get(self):
# Check if we have run out of time.
if self.maxTime is not None:
if time.time() - self.startTime > self.maxTime:
return None
# Otherwise take the next test.
self.lock.acquire()
try:
item = self.iter.next()
except StopIteration:
item = None
self.lock.release()
return item
class Tester(threading.Thread):
def __init__(self, litConfig, provider, display):
threading.Thread.__init__(self)
self.litConfig = litConfig
self.provider = provider
self.display = display
def run(self):
while 1:
item = self.provider.get()
if item is None:
break
self.runTest(item)
def runTest(self, test):
result = None
startTime = time.time()
try:
result, output = test.config.test_format.execute(test,
self.litConfig)
except KeyboardInterrupt:
# This is a sad hack. Unfortunately subprocess goes
# bonkers with ctrl-c and we start forking merrily.
print '\nCtrl-C detected, goodbye.'
os.kill(0,9)
except:
if self.litConfig.debug:
raise
result = Test.UNRESOLVED
output = 'Exception during script execution:\n'
output += traceback.format_exc()
output += '\n'
elapsed = time.time() - startTime
test.setResult(result, output, elapsed)
self.display.update(test)
def runTests(numThreads, litConfig, provider, display):
# If only using one testing thread, don't use threads at all; this lets us
# profile, among other things.
if numThreads == 1:
t = Tester(litConfig, provider, display)
t.run()
return
# Otherwise spin up the testing threads and wait for them to finish.
testers = [Tester(litConfig, provider, display)
for i in range(numThreads)]
for t in testers:
t.start()
try:
for t in testers:
t.join()
except KeyboardInterrupt:
sys.exit(2)
def main(builtinParameters = {}):
# Bump the GIL check interval; it's more important to get any one thread to a
# blocking operation (hopefully exec) than to try and unblock other threads.
#
# FIXME: This is a hack.
import sys
sys.setcheckinterval(1000)
global options
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options] {file-or-path}")
parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
help="Number of testing threads",
type=int, action="store", default=None)
parser.add_option("", "--config-prefix", dest="configPrefix",
metavar="NAME", help="Prefix for 'lit' config files",
action="store", default=None)
parser.add_option("", "--param", dest="userParameters",
metavar="NAME=VAL",
help="Add 'NAME' = 'VAL' to the user defined parameters",
type=str, action="append", default=[])
group = OptionGroup(parser, "Output Format")
# FIXME: I find these names very confusing, although I like the
# functionality.
group.add_option("-q", "--quiet", dest="quiet",
help="Suppress no error output",
action="store_true", default=False)
group.add_option("-s", "--succinct", dest="succinct",
help="Reduce amount of output",
action="store_true", default=False)
group.add_option("-v", "--verbose", dest="showOutput",
help="Show all test output",
action="store_true", default=False)
group.add_option("", "--no-progress-bar", dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false", default=True)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Execution")
group.add_option("", "--path", dest="path",
help="Additional paths to add to testing environment",
action="append", type=str, default=[])
group.add_option("", "--vg", dest="useValgrind",
help="Run tests under valgrind",
action="store_true", default=False)
group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
help="Check for memory leaks under valgrind",
action="store_true", default=False)
group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
help="Specify an extra argument for valgrind",
type=str, action="append", default=[])
group.add_option("", "--time-tests", dest="timeTests",
help="Track elapsed wall time for each test",
action="store_true", default=False)
group.add_option("", "--no-execute", dest="noExecute",
help="Don't execute any tests (assume PASS)",
action="store_true", default=False)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Selection")
group.add_option("", "--max-tests", dest="maxTests", metavar="N",
help="Maximum number of tests to run",
action="store", type=int, default=None)
group.add_option("", "--max-time", dest="maxTime", metavar="N",
help="Maximum time to spend testing (in seconds)",
action="store", type=float, default=None)
group.add_option("", "--shuffle", dest="shuffle",
help="Run tests in random order",
action="store_true", default=False)
group.add_option("", "--filter", dest="filter", metavar="REGEX",
help=("Only run tests with paths matching the given "
"regular expression"),
action="store", default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Debug and Experimental Options")
group.add_option("", "--debug", dest="debug",
help="Enable debugging (for 'lit' development)",
action="store_true", default=False)
group.add_option("", "--show-suites", dest="showSuites",
help="Show discovered test suites",
action="store_true", default=False)
group.add_option("", "--repeat", dest="repeatTests", metavar="N",
help="Repeat tests N times (for timing)",
action="store", default=None, type=int)
parser.add_option_group(group)
(opts, args) = parser.parse_args()
if not args:
parser.error('No inputs specified')
if opts.numThreads is None:
# Python <2.5 has a race condition causing lit to always fail with numThreads>1
# http://bugs.python.org/issue1731717
# I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
# threads by default there.
if sys.hexversion >= 0x2050200:
opts.numThreads = Util.detectCPUs()
else:
opts.numThreads = 1
inputs = args
# Create the user defined parameters.
userParams = dict(builtinParameters)
for entry in opts.userParameters:
if '=' not in entry:
name,val = entry,''
else:
name,val = entry.split('=', 1)
userParams[name] = val
# Create the global config object.
litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
path = opts.path,
quiet = opts.quiet,
useValgrind = opts.useValgrind,
valgrindLeakCheck = opts.valgrindLeakCheck,
valgrindArgs = opts.valgrindArgs,
noExecute = opts.noExecute,
ignoreStdErr = False,
debug = opts.debug,
isWindows = (platform.system()=='Windows'),
params = userParams,
config_prefix = opts.configPrefix)
tests = lit.discovery.find_tests_for_inputs(litConfig, inputs)
if opts.showSuites:
suitesAndTests = {}
for t in tests:
if t.suite not in suitesAndTests:
suitesAndTests[t.suite] = []
suitesAndTests[t.suite].append(t)
print '-- Test Suites --'
suitesAndTests = suitesAndTests.items()
suitesAndTests.sort(key = lambda (ts,_): ts.name)
for ts,ts_tests in suitesAndTests:
print ' %s - %d tests' %(ts.name, len(ts_tests))
print ' Source Root: %s' % ts.source_root
print ' Exec Root : %s' % ts.exec_root
# Select and order the tests.
numTotalTests = len(tests)
# First, select based on the filter expression if given.
if opts.filter:
try:
rex = re.compile(opts.filter)
except:
parser.error("invalid regular expression for --filter: %r" % (
opts.filter))
tests = [t for t in tests
if rex.search(t.getFullName())]
# Then select the order.
if opts.shuffle:
random.shuffle(tests)
else:
tests.sort(key = lambda t: t.getFullName())
# Finally limit the number of tests, if desired.
if opts.maxTests is not None:
tests = tests[:opts.maxTests]
# Don't create more threads than tests.
opts.numThreads = min(len(tests), opts.numThreads)
extra = ''
if len(tests) != numTotalTests:
extra = ' of %d' % numTotalTests
header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
opts.numThreads)
if opts.repeatTests:
tests = [t.copyWithIndex(i)
for t in tests
for i in range(opts.repeatTests)]
progressBar = None
if not opts.quiet:
if opts.succinct and opts.useProgressBar:
try:
tc = ProgressBar.TerminalController()
progressBar = ProgressBar.ProgressBar(tc, header)
except ValueError:
print header
progressBar = ProgressBar.SimpleProgressBar('Testing: ')
else:
print header
startTime = time.time()
display = TestingProgressDisplay(opts, len(tests), progressBar)
provider = TestProvider(tests, opts.maxTime)
runTests(opts.numThreads, litConfig, provider, display)
display.finish()
if not opts.quiet:
print 'Testing Time: %.2fs'%(time.time() - startTime)
# Update results for any tests which weren't run.
for t in tests:
if t.result is None:
t.setResult(Test.UNRESOLVED, '', 0.0)
# List test results organized by kind.
hasFailures = False
byCode = {}
for t in tests:
if t.result not in byCode:
byCode[t.result] = []
byCode[t.result].append(t)
if t.result.isFailure:
hasFailures = True
# FIXME: Show unresolved and (optionally) unsupported tests.
for title,code in (('Unexpected Passing Tests', Test.XPASS),
('Failing Tests', Test.FAIL)):
elts = byCode.get(code)
if not elts:
continue
print '*'*20
print '%s (%d):' % (title, len(elts))
for t in elts:
print ' %s' % t.getFullName()
print
if opts.timeTests:
# Collate, in case we repeated tests.
times = {}
for t in tests:
key = t.getFullName()
times[key] = times.get(key, 0.) + t.elapsed
byTime = list(times.items())
byTime.sort(key = lambda (name,elapsed): elapsed)
if byTime:
Util.printHistogram(byTime, title='Tests')
for name,code in (('Expected Passes ', Test.PASS),
('Expected Failures ', Test.XFAIL),
('Unsupported Tests ', Test.UNSUPPORTED),
('Unresolved Tests ', Test.UNRESOLVED),
('Unexpected Passes ', Test.XPASS),
('Unexpected Failures', Test.FAIL),):
if opts.quiet and not code.isFailure:
continue
N = len(byCode.get(code,[]))
if N:
print ' %s: %d' % (name,N)
# If we encountered any additional errors, exit abnormally.
if litConfig.numErrors:
print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
sys.exit(2)
# Warn about warnings.
if litConfig.numWarnings:
print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings
if hasFailures:
sys.exit(1)
sys.exit(0)
if __name__=='__main__':
main()
| dbrumley/recfi | llvm-3.3/utils/lit/lit/main.py | Python | mit | 15,230 | 0.00499 |
# This file is part of HDL Checker.
#
# Copyright (c) 2015 - 2019 suoto (Andre Souto)
#
# HDL Checker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HDL Checker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HDL Checker. If not, see <http://www.gnu.org/licenses/>.
"HDL Checker installation script"
import setuptools # type: ignore
import versioneer
LONG_DESCRIPTION = open("README.md", "rb").read().decode(encoding='utf8', errors='replace')
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: GNU General Public License v3 (GPLv3)
Operating System :: Microsoft :: Windows
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Topic :: Software Development
Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)
Topic :: Text Editors :: Integrated Development Environments (IDE)
"""
setuptools.setup(
name = 'hdl_checker',
version = versioneer.get_version(),
description = 'HDL code checker',
long_description = LONG_DESCRIPTION,
long_description_content_type = "text/markdown",
author = 'Andre Souto',
author_email = 'andre820@gmail.com',
url = 'https://github.com/suoto/hdl_checker',
license = 'GPLv3',
keywords = 'VHDL Verilog SystemVerilog linter LSP language server protocol vimhdl vim-hdl',
platforms = 'any',
packages = setuptools.find_packages(),
install_requires = ['argcomplete',
'argparse',
'backports.functools_lru_cache; python_version<"3.2"',
'bottle>=0.12.9',
'enum34>=1.1.6; python_version<"3.3"',
'future>=0.14.0',
'futures; python_version<"3.2"',
'prettytable>=0.7.2',
'pygls==0.9.1',
'requests>=2.20.0',
'six>=1.10.0',
'tabulate>=0.8.5',
'typing>=3.7.4',
'waitress>=0.9.0', ],
cmdclass = versioneer.get_cmdclass(),
entry_points = {
'console_scripts' : ['hdl_checker=hdl_checker.server:main', ]
},
classifiers=CLASSIFIERS.splitlines(),
)
| suoto/hdlcc | setup.py | Python | gpl-3.0 | 3,321 | 0.014453 |
#!/usr/pkg/bin/python
import os, sys, time
from reportlab.graphics.barcode.common import *
from reportlab.graphics.barcode.code39 import *
from reportlab.graphics.barcode.code93 import *
from reportlab.graphics.barcode.code128 import *
from reportlab.graphics.barcode.usps import *
from reportlab.graphics.barcode.usps4s import USPS_4State
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle, Preformatted, PageBreak
from reportlab.lib.units import inch, cm
from reportlab.lib import colors
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.frames import Frame
from reportlab.platypus.flowables import XBox, KeepTogether
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.barcode import getCodes, getCodeNames, createBarcodeDrawing, createBarcodeImageInMemory
def run():
styles = getSampleStyleSheet()
styleN = styles['Normal']
styleH = styles['Heading1']
story = []
#for codeNames in code
story.append(Paragraph('I2of5', styleN))
story.append(I2of5(1234, barWidth = inch*0.02, checksum=0))
story.append(Paragraph('MSI', styleN))
story.append(MSI(1234))
story.append(Paragraph('Codabar', styleN))
story.append(Codabar("A012345B", barWidth = inch*0.02))
story.append(Paragraph('Code 11', styleN))
story.append(Code11("01234545634563"))
story.append(Paragraph('Code 39', styleN))
story.append(Standard39("A012345B%R"))
story.append(Paragraph('Extended Code 39', styleN))
story.append(Extended39("A012345B}"))
story.append(Paragraph('Code93', styleN))
story.append(Standard93("CODE 93"))
story.append(Paragraph('Extended Code93', styleN))
story.append(Extended93("L@@K! Code 93 :-)")) #, barWidth=0.005 * inch))
story.append(Paragraph('Code 128', styleN))
c=Code128("AB-12345678") #, barWidth=0.005 * inch)
#print 'WIDTH =', (c.width / inch), 'barWidth =', (c.barWidth / inch)
#print 'LQ =', (c.lquiet / inch), 'RQ =', (c.rquiet / inch)
story.append(c)
story.append(Paragraph('USPS FIM', styleN))
story.append(FIM("A"))
story.append(Paragraph('USPS POSTNET', styleN))
story.append(POSTNET('78247-1043'))
story.append(Paragraph('USPS 4 State', styleN))
story.append(USPS_4State('01234567094987654321','01234567891'))
from reportlab.graphics.barcode import createBarcodeDrawing
story.append(Paragraph('EAN13', styleN))
bcd = createBarcodeDrawing('EAN13', value='123456789012')
story.append(bcd)
story.append(Paragraph('EAN8', styleN))
bcd = createBarcodeDrawing('EAN8', value='1234567')
story.append(bcd)
story.append(Paragraph('UPCA', styleN))
bcd = createBarcodeDrawing('UPCA', value='03600029145')
story.append(bcd)
story.append(Paragraph('USPS_4State', styleN))
bcd = createBarcodeDrawing('USPS_4State', value='01234567094987654321',routing='01234567891')
story.append(bcd)
story.append(Paragraph('Label Size', styleN))
story.append(XBox((2.0 + 5.0/8.0)*inch, 1 * inch, '1x2-5/8"'))
story.append(Paragraph('Label Size', styleN))
story.append(XBox((1.75)*inch, .5 * inch, '1/2x1-3/4"'))
c = Canvas('out.pdf')
f = Frame(inch, inch, 6*inch, 9*inch, showBoundary=1)
f.addFromList(story, c)
c.save()
print 'saved out.pdf'
def fullTest(fileName="test_full.pdf"):
"""Creates large-ish test document with a variety of parameters"""
story = []
styles = getSampleStyleSheet()
styleN = styles['Normal']
styleH = styles['Heading1']
styleH2 = styles['Heading2']
story = []
story.append(Paragraph('ReportLab Barcode Test Suite - full output', styleH))
story.append(Paragraph('Generated on %s' % time.ctime(time.time()), styleN))
story.append(Paragraph('', styleN))
story.append(Paragraph('Repository information for this build:', styleN))
#see if we can figure out where it was built, if we're running in source
if os.path.split(os.getcwd())[-1] == 'barcode' and os.path.isdir('.svn'):
#runnning in a filesystem svn copy
infoLines = os.popen('svn info').read()
story.append(Preformatted(infoLines, styles["Code"]))
story.append(Paragraph('About this document', styleH2))
story.append(Paragraph('History and Status', styleH2))
story.append(Paragraph("""
This is the test suite and documentation for the ReportLab open source barcode API,
being re-released as part of the forthcoming ReportLab 2.0 release.
""", styleN))
story.append(Paragraph("""
Several years ago Ty Sarna contributed a barcode module to the ReportLab community.
Several of the codes were used by him in his work and to the best of our knowledge
this was correct. These were written as flowable objects and were available in PDFs,
but not in our graphics framework. However, we had no knowledge of barcodes ourselves
and did not advertise or extend the package.
""", styleN))
story.append(Paragraph("""
We "wrapped" the barcodes to be usable within our graphics framework; they are now available
as Drawing objects which can be rendered to EPS files or bitmaps. For the last 2 years this
has been available in our Diagra and Report Markup Language products. However, we did not
charge separately and use was on an "as is" basis.
""", styleN))
story.append(Paragraph("""
A major licensee of our technology has kindly agreed to part-fund proper productisation
of this code on an open source basis in Q1 2006. This has involved addition of EAN codes
as well as a proper testing program. Henceforth we intend to publicise the code more widely,
gather feedback, accept contributions of code and treat it as "supported".
""", styleN))
story.append(Paragraph("""
This involved making available both downloads and testing resources. This PDF document
is the output of the current test suite. It contains codes you can scan (if you use a nice sharp
laser printer!), and will be extended over coming weeks to include usage examples and notes on
each barcode and how widely tested they are. This is being done through documentation strings in
the barcode objects themselves so should always be up to date.
""", styleN))
story.append(Paragraph('Usage examples', styleH2))
story.append(Paragraph("""
To be completed
""", styleN))
story.append(Paragraph('The codes', styleH2))
story.append(Paragraph("""
Below we show a scannable code from each barcode, with and without human-readable text.
These are magnified about 2x from the natural size done by the original author to aid
inspection. This will be expanded to include several test cases per code, and to add
explanations of checksums. Be aware that (a) if you enter numeric codes which are too
short they may be prefixed for you (e.g. "123" for an 8-digit code becomes "00000123"),
and that the scanned results and readable text will generally include extra checksums
at the end.
""", styleN))
codeNames = getCodeNames()
from reportlab.lib.utils import flatten
width = [float(x[8:]) for x in sys.argv if x.startswith('--width=')]
height = [float(x[9:]) for x in sys.argv if x.startswith('--height=')]
isoScale = [int(x[11:]) for x in sys.argv if x.startswith('--isoscale=')]
options = {}
if width: options['width'] = width[0]
if height: options['height'] = height[0]
if isoScale: options['isoScale'] = isoScale[0]
scales = [x[8:].split(',') for x in sys.argv if x.startswith('--scale=')]
    scales = map(float,scales and flatten(scales) or [1])
for scale in scales:
story.append(PageBreak())
story.append(Paragraph('Scale = %.1f'%scale, styleH2))
story.append(Spacer(36, 12))
for codeName in codeNames:
s = [Paragraph('Code: ' + codeName, styleH2)]
for hr in (0,1):
s.append(Spacer(36, 12))
dr = createBarcodeDrawing(codeName, humanReadable=hr,**options)
dr.renderScale = scale
s.append(dr)
s.append(Spacer(36, 12))
s.append(Paragraph('Barcode should say: ' + dr._bc.value, styleN))
story.append(KeepTogether(s))
SimpleDocTemplate(fileName).build(story)
print 'created', fileName
if __name__=='__main__':
run()
fullTest()
def createSample(name,memory):
f = open(name,'wb')
f.write(memory)
f.close()
createSample('test_cbcim.png',createBarcodeImageInMemory('EAN13', value='123456789012'))
createSample('test_cbcim.gif',createBarcodeImageInMemory('EAN8', value='1234567', format='gif'))
createSample('test_cbcim.pdf',createBarcodeImageInMemory('UPCA', value='03600029145',format='pdf'))
createSample('test_cbcim.tiff',createBarcodeImageInMemory('USPS_4State', value='01234567094987654321',routing='01234567891',format='tiff'))
| TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/thirdparty/src/reportlab/graphics/barcode/test.py | Python | gpl-3.0 | 9,268 | 0.007769 |
# -*- coding: utf-8 -*-
' Check whether a file extension is allowed '
__author__ = 'Ellery'
from app import app
import datetime, random
from PIL import Image
import os
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config.get('ALLOWED_EXTENSIONS')
def unique_name():
now_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
random_num = random.randint(0, 100)
if random_num <= 10:
random_num = str(0) + str(random_num)
unique_num = str(now_time) + str(random_num)
return unique_num
def image_thumbnail(filename):
filepath = os.path.join(app.config.get('UPLOAD_FOLDER'), filename)
im = Image.open(filepath)
w, h = im.size
if w > h:
im.thumbnail((106, 106*h/w))
else:
im.thumbnail((106*w/h, 106))
im.save(os.path.join(app.config.get('UPLOAD_FOLDER'),
os.path.splitext(filename)[0] + '_thumbnail' + os.path.splitext(filename)[1]))
def image_delete(filename):
thumbnail_filepath = os.path.join(app.config.get('UPLOAD_FOLDER'), filename)
filepath = thumbnail_filepath.replace('_thumbnail', '')
os.remove(filepath)
os.remove(thumbnail_filepath)
def cut_image(filename, box):
filepath = os.path.join(app.config.get('UPLOAD_AVATAR_FOLDER'), filename)
im = Image.open(filepath)
new_im = im.crop(box)
new_im.save(os.path.join(app.config.get('UPLOAD_AVATAR_FOLDER'), filename)) | allotory/basilinna | app/main/upload_file.py | Python | mit | 1,433 | 0.007774 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# Easy AVR USB Keyboard Firmware Keymapper
# Copyright (C) 2018 David Howland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""This module contains code to load legacy user save data."""
import pickle
import re
from .build import NUM_MACROS, NULL_SYMBOL, key_mode_map, led_modes, led_assignments
from .scancodes import scancodes
from .userdata import Map
legacy_layers = ["Default", "Layer 1", "Layer 2", "Layer 3", "Layer 4",
"Layer 5", "Layer 6", "Layer 7", "Layer 8", "Layer 9"]
class LegacySaveFileException(Exception):
"""Raised when an error is encountered while loading a legacy layout file."""
pass
def load_legacy(user_data, datfile):
"""Load the legacy .dat save file from the path given by `datfile` and populate
the UserData object given by `user_data`.
"""
legacy_data = open_legacy(datfile)
convert_legacy(user_data, legacy_data)
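# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal, hedged example of driving load_legacy(); the UserData import and
# the .dat path below are assumptions for illustration only.
#
#     from .userdata import UserData
#     user_data = UserData()
#     load_legacy(user_data, "my_old_layout.dat")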
def open_legacy(datfile):
"""Opens and decodes the pickled data in a legacy .dat save file. `datfile`
is a path to the file. The function returns a dictionary with an item for each
component of the legacy file.
"""
with open(datfile, 'rb') as fdin:
data = pickle.load(fdin)
if len(data) < 12:
raise LegacySaveFileException("The .dat file is either broken or too old.")
unique_id = data[1]
maps = data[2]
macros = data[3]
actions = data[4]
modes = data[5]
wmods = data[6]
layout_mod = data[8]
leds = data[9]
if len(data) > 11:
advancedleds = data[11]
useadvancedleds = data[12]
else:
advancedleds = [(255, 0)] * len(led_assignments)
useadvancedleds = False
if len(data) > 13:
ledlayers = data[13]
else:
ledlayers = [0, 0, 0, 0, 0]
# fixes for older versions (renamed layers)
for kmap in (maps, actions, modes, wmods):
if 'Fn' in kmap:
kmap['Layer 1'] = kmap['Fn']
del kmap['Fn']
# fixes for older versions (renamed/removed scancodes)
for layer in maps:
for row in maps[layer]:
for i, k in enumerate(row):
if k == "SCANCODE_DEBUG":
row[i] = "SCANCODE_CONFIG"
elif k == "SCANCODE_LOCKINGCAPS":
row[i] = "HID_KEYBOARD_SC_LOCKING_CAPS_LOCK"
elif k == "SCANCODE_FN":
row[i] = "SCANCODE_FN1"
elif k not in scancodes:
row[i] = NULL_SYMBOL
# fixes for older versions (renamed leds)
leds = ['Any Fn Active' if (x == 'Fn Lock') else x for x in leds]
leds = ['Fn1 Active' if (x == 'Fn Active') else x for x in leds]
# fixes for older versions (added macros)
extention = NUM_MACROS - len(macros)
if extention > 0:
macros.extend([''] * extention)
return {
'unique_id': unique_id,
'layout_mod': layout_mod,
'maps': maps,
'actions': actions,
'modes': modes,
'wmods': wmods,
'macros': macros,
'leds': leds,
'advancedleds': advancedleds,
'useadvancedleds': useadvancedleds,
'ledlayers': ledlayers,
}
def convert_legacy(user_data, legacy_data):
"""Converts the data from a legacy save file into a `user_data` object. `user_data`
should be a fresh instance of UserData and `legacy_data` is the output from a
successful call to open_legacy().
"""
# can't save to legacy file
user_data.path = None
# get good defaults to start from
user_data.new(legacy_data['unique_id'], legacy_data['layout_mod'])
# transmogrify the keymap data
for li, layer in enumerate(legacy_layers):
for ri, rowdef in enumerate(user_data.config.keyboard_definition):
if isinstance(rowdef, int):
continue
for ci, keydef in enumerate(rowdef):
keydim, matrix, _ = keydef
if user_data.layout_mod:
mod_map = user_data.config.alt_layouts[user_data.layout_mod]
keydim = mod_map.get((ri, ci), keydim)
if isinstance(keydim, tuple) and isinstance(matrix, tuple):
row, col = matrix
map = Map(legacy_data['maps'][layer][ri][ci],
key_mode_map[legacy_data['modes'][layer][ri][ci]],
legacy_data['actions'][layer][ri][ci],
legacy_data['wmods'][layer][ri][ci])
user_data.keymap[li][row][col] = map
# translate the macro data
user_data.macros = [translate_macro(macro) for macro in legacy_data['macros']]
# adapt the led data
user_data.led_modes = []
for old_assignment in legacy_data['leds']:
if old_assignment == 'Backlight':
user_data.led_modes.append(led_modes.index('Backlight'))
elif old_assignment in led_assignments:
user_data.led_modes.append(led_modes.index('Indicator'))
else:
user_data.led_modes.append(led_modes.index('Disabled'))
if legacy_data['useadvancedleds']:
for i, func in enumerate(legacy_data['advancedleds']):
led_id, _ = func
if led_id < len(user_data.led_modes):
user_data.led_modes[led_id] = led_modes.index('Indicator')
user_data.led_funcs[i] = func
# copy the rest
user_data.led_layers = legacy_data['ledlayers']
def translate_macro(input):
"""Translate the escape sequences in the original macro mini-language into
the equivalent representations in the new macro mini-language.
"""
# remove the special characters
input = input.replace("\\\\,", "\\")
input = input.replace("\\n,", "\n")
input = input.replace("\\t,", "\t")
# escape any $ symbols
input = input.replace("$", "$$")
# convert keyword format
input = re.sub(r'\\([A-Z0-9_]+\()', r'$\1', input)
# convert function/mod format
input = re.sub(r'\\([A-Z0-9_]+),', r'${\1}', input)
return input
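# --- Illustrative example (added; not part of the original module) ---
# A hedged sketch of what translate_macro() produces; the macro names are
# hypothetical and chosen only to exercise both regex rules:
#
#     translate_macro("\\LSHIFT,hello \\WAIT(500) world")
#     # -> "${LSHIFT}hello $WAIT(500) world"
#     # modifier form "\NAME," becomes "${NAME}"; function form "\NAME(" becomes "$NAME("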
| dhowland/EasyAVR | keymapper/easykeymap/legacy.py | Python | gpl-2.0 | 6,758 | 0.001776 |
# =============================================================================
# Author: Teerapat Jenrungrot - https://github.com/mjenrungrot/
# FileName: 10433.py
# Description: UVa Online Judge - 10433
# =============================================================================
while True:
try:
str_N = input()
except EOFError:
break
N = int(str_N)
N2 = N * N
str_N2 = str(N2)
len_N = len(str_N)
if str_N2[-len_N:] == str_N:
print("Automorphic number of {}-digit.".format(len_N))
else:
print("Not an Automorphic number.")
| mjenrungrot/competitive_programming | UVa Online Judge/v104/10433.py | Python | mit | 619 | 0 |
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import copy
import matplotlib
from grid_cell_model.submitting import flagparse
import noisefigs
from noisefigs.env import NoiseEnvironment
import config
parser = flagparse.FlagParser()
parser.add_flag('--gammaSweep')
args = parser.parse_args()
env = NoiseEnvironment(user_config=config.get_config())
if args.gammaSweep or args.all:
env.register_plotter(noisefigs.plotters.GammaSweepsPlotter)
env.plot()
| MattNolanLab/ei-attractor | grid_cell_model/simulations/007_noise/figures/cosyne2015-abstract/figure_gamma.py | Python | gpl-3.0 | 492 | 0 |
############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
############################################################################
from django.contrib.messages import get_messages, SUCCESS
from django.test import TestCase
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from waffle.testutils import override_flag
from base.models.enums.entity_type import FACULTY
from base.models.enums.learning_container_year_types import EXTERNAL
from base.models.enums.organization_type import MAIN
from base.tests.factories.academic_calendar import generate_learning_unit_edition_calendars
from base.tests.factories.academic_year import create_current_academic_year
from base.tests.factories.entity import EntityWithVersionFactory
from base.tests.factories.external_learning_unit_year import ExternalLearningUnitYearFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFullFactory
from base.tests.factories.person import PersonFactory
from base.tests.forms.test_external_learning_unit import get_valid_external_learning_unit_form_data
from base.views.learning_units.update import update_learning_unit
from learning_unit.tests.factories.central_manager import CentralManagerFactory
@override_flag('learning_unit_update', active=True)
class TestUpdateExternalLearningUnitView(TestCase):
@classmethod
def setUpTestData(cls):
cls.entity = EntityWithVersionFactory(organization__type=MAIN, version__entity_type=FACULTY)
cls.manager = CentralManagerFactory(entity=cls.entity, with_child=True)
cls.person = cls.manager.person
cls.academic_year = create_current_academic_year()
generate_learning_unit_edition_calendars([cls.academic_year])
cls.luy = LearningUnitYearFullFactory(
academic_year=cls.academic_year,
internship_subtype=None,
acronym="EFAC1000",
learning_container_year__container_type=EXTERNAL,
learning_container_year__requirement_entity=cls.entity,
learning_container_year__allocation_entity=cls.entity,
)
cls.data = get_valid_external_learning_unit_form_data(cls.academic_year, cls.luy, cls.entity)
cls.url = reverse(update_learning_unit, args=[cls.luy.pk])
def setUp(self):
self.external = ExternalLearningUnitYearFactory(learning_unit_year=self.luy)
self.client.force_login(self.person.user)
def test_update_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_update_get_permission_denied(self):
self.client.force_login(PersonFactory().user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
def test_update_post(self):
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.level for m in get_messages(response.wsgi_request)]
self.assertEqual(messages, [SUCCESS])
def test_update_message_with_report(self):
self.data['postponement'] = "1"
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.message for m in get_messages(response.wsgi_request)]
self.assertEqual(messages[0], _("The learning unit has been updated (with report)."))
def test_update_message_without_report(self):
self.data['postponement'] = "0"
response = self.client.post(self.url, data=self.data)
self.assertEqual(response.status_code, 302)
messages = [m.message for m in get_messages(response.wsgi_request)]
self.assertEqual(messages[0], _("The learning unit has been updated (without report)."))
| uclouvain/OSIS-Louvain | base/tests/views/learning_units/external/test_update.py | Python | agpl-3.0 | 4,921 | 0.001829 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010-2014 Zuza Software Foundation
#
# This file is part of amaGama.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""A translation memory server using tmdb for storage, communicates
with clients using JSON over HTTP."""
from flask import Flask
from amagama import tmdb
from amagama.views import api
class AmagamaServer(Flask):
def __init__(self, settings, *args, **kwargs):
super(AmagamaServer, self).__init__(*args, **kwargs)
self.config.from_pyfile(settings)
self.config.from_envvar('AMAGAMA_CONFIG', silent=True)
self.tmdb = tmdb.TMDB(self)
def amagama_server_factory():
app = AmagamaServer("settings.py", __name__)
app.register_blueprint(api.read_api, url_prefix='/tmserver')
app.register_blueprint(api.read_api, url_prefix='/api/v1')
if app.config['ENABLE_DATA_ALTERING_API']:
app.register_blueprint(api.write_api, url_prefix='/tmserver')
app.register_blueprint(api.write_api, url_prefix='/api/v1')
if app.config['ENABLE_WEB_UI']:
from amagama.views import web
app.register_blueprint(web.web_ui, url_prefix='')
return app
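# --- Illustrative usage sketch (added; not part of the original module) ---
# A hedged example of serving the factory-produced WSGI app with Flask's
# built-in development server; the host and port values are assumptions.
#
#     app = amagama_server_factory()
#     app.run(host="0.0.0.0", port=8888, debug=True)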
| translate/amagama | amagama/application.py | Python | gpl-3.0 | 1,780 | 0 |
# Copyright(c) 2017, Dimitar Venkov
# @5devene, dimitar.ven@gmail.com
# www.badmonkeys.net
from itertools import imap, repeat
import System
badChars = set(System.IO.Path.GetInvalidFileNameChars() )
def tolist(x):
if hasattr(x,'__iter__'): return x
else : return [x]
def fixName(n, rep="", badChars=badChars):
n1 = (c if c not in badChars else rep for c in iter(n) )
return ''.join(n1)
names = tolist(IN[0])
replacement = str(IN[1])
other = tolist(IN[2])
badChars.update(other)
OUT = map(fixName, imap(str, names), repeat(replacement, len(names) ) ) | dimven/SpringNodes | py/String.ReplaceIllegalChars.py | Python | mit | 557 | 0.028725 |
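# --- Illustrative example (added; not part of the original node) ---
# With the default replacement "" and no extra characters, a name such as
# 'plan:view?.png' would come back as 'planview.png', because ':' and '?'
# are in .NET's invalid-filename set; the exact character set is platform
# dependent, so treat this as an assumption.
#
#     fixName('plan:view?.png')  # -> 'planview.png' (default arguments)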
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from flask_script import Manager, Shell, Server
from flask_script.commands import Clean, ShowUrls
from flask_migrate import MigrateCommand
from inspectors.app import create_app
from inspectors.settings import DevConfig, ProdConfig
from inspectors.database import db
app = create_app()
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
manager = Manager(app)
def _make_context():
"""Return context dict for a shell session so you can access
app, and db.
"""
return {'app': app, 'db': db}
@manager.command
def test():
"""Run the tests."""
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
manager.add_command("urls", ShowUrls())
manager.add_command("clean", Clean())
if __name__ == '__main__':
manager.run()
| codeforamerica/mdc-inspectors | manage.py | Python | bsd-3-clause | 1,026 | 0 |
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from shoop.notify import Context
from shoop_tests.notify.fixtures import get_initialized_test_event
@pytest.mark.django_db
def test_log_entries():
event = get_initialized_test_event()
ctx = Context.from_event(event)
order = ctx.get("order")
n_log_entries = ctx.log_entry_queryset.count()
ctx.add_log_entry_on_log_target("blap", "blorr")
order.add_log_entry("blep")
assert ctx.log_entry_queryset.count() == n_log_entries + 2 # they got added
assert order.log_entries.last().message == "blep" # it's what we added
assert ctx.log_entry_queryset.last().message == "blep" # from this perspective too
@pytest.mark.django_db
@pytest.mark.parametrize("target_obj", (None, object()))
def test_log_entry_on_unloggable_object(target_obj):
event = get_initialized_test_event()
event.variable_values["order"] = target_obj # invalidate log target _before_ creating context
ctx = Context.from_event(event)
n_log_entries = ctx.log_entry_queryset.count()
ctx.add_log_entry_on_log_target("blap", "blorr")
assert ctx.log_entry_queryset.count() == n_log_entries # couldn't add :(
| taedori81/shoop | shoop_tests/notify/test_log_entries.py | Python | agpl-3.0 | 1,388 | 0.002161 |
# constraints should be multiplicative so we can do them in any order
# ok, maybe not multiplicative. not sure yet.
# want to avoid plateaus in the space.
import copy
import numpy as np
from scipy.special import binom
import scipy
import librosa_analysis
import novelty
BEAT_DUR_KEY = "med_beat_duration"
class ConstraintPipeline(object):
def __init__(self, constraints=None):
if constraints is None:
self.constraints = []
else:
self.constraints = constraints
def add_constraint(self, constraint):
self.constraints.append(constraint)
def apply(self, song, target_n_length):
n_beats = len(song.analysis["beats"])
beat_names = copy.copy(song.analysis["beats"])
transition_cost = np.zeros((n_beats, n_beats))
penalty = np.zeros((n_beats, target_n_length))
for constraint in self.constraints:
# print constraint
transition_cost, penalty, beat_names = constraint.apply(
transition_cost, penalty, song, beat_names)
return transition_cost, penalty, beat_names
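# --- Illustrative usage sketch (added; not part of the original module) ---
# A hedged example of composing constraints, assuming `song` is an analysed
# track object and `n_target` is the desired number of output beats (both
# names are hypothetical here):
#
#     pipeline = ConstraintPipeline(constraints=[
#         TimbrePitchConstraint(timbre_weight=1, chroma_weight=1),
#         MinimumLoopConstraint(min_loop=8),
#         RandomJitterConstraint(),
#     ])
#     trans_cost, penalty, beat_names = pipeline.apply(song, n_target)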
class Constraint(object):
def __init__(self):
pass
def apply(self, transition_cost, penalty, song, beat_names):
return transition_cost, penalty, beat_names
class RandomJitterConstraint(Constraint):
def __init__(self, jitter_max=.001):
self.jitter = jitter_max
def apply(self, transition_cost, penalty, song, beat_names):
return (
transition_cost + self.jitter * np.random.rand(
transition_cost.shape[0], transition_cost.shape[1]),
penalty + self.jitter * np.random.rand(
penalty.shape[0], penalty.shape[1]),
beat_names)
class TimbrePitchConstraint(Constraint):
def __init__(self, timbre_weight=1, chroma_weight=1,
context=0):
self.tw = timbre_weight
self.cw = chroma_weight
self.m = context
def apply(self, transition_cost, penalty, song, beat_names):
timbre_dist = librosa_analysis.structure(
np.array(song.analysis['timbres']).T)
chroma_dist = librosa_analysis.structure(
np.array(song.analysis['chroma']).T)
dists = self.tw * timbre_dist + self.cw * chroma_dist
if self.m > 0:
new_dists = np.copy(dists)
coefs = [binom(self.m * 2, i) for i in range(self.m * 2 + 1)]
coefs = np.array(coefs) / np.sum(coefs)
for beat_i in xrange(self.m, dists.shape[0] - self.m):
for beat_j in xrange(self.m, dists.shape[1] - self.m):
new_dists[beat_i, beat_j] = 0.0
for i, c in enumerate(coefs):
t = i - self.m
new_dists[beat_i, beat_j] +=\
c * dists[beat_i + t, beat_j + t]
dists = new_dists
# dists = np.copy(song.analysis["dense_dist"])
# shift it over
dists[:-1, :] = dists[1:, :]
dists[-1, :] = np.inf
# don't use the final beat
dists[:, -1] = np.inf
transition_cost[:dists.shape[0], :dists.shape[1]] += dists
return transition_cost, penalty, beat_names
def __repr__(self):
return "TimbrePitchConstraint:" +\
"%f(timbre) + %f(chroma), %f(context)" % (self.tw, self.cw, self.m)
class RhythmConstraint(Constraint):
def __init__(self, beats_per_measure, penalty):
self.p = penalty
self.time = beats_per_measure
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
for i in range(self.time):
for j in set(range(self.time)) - set([(i + 1) % self.time]):
transition_cost[i:n_beats:self.time][j:n_beats:self.time] +=\
self.p
return transition_cost, penalty, beat_names
class MinimumLoopConstraint(Constraint):
def __init__(self, min_loop):
self.min_loop = min_loop
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
for i in range(n_beats):
for j in range(-(self.min_loop - 1), 1):
if 0 <= i + j < n_beats:
transition_cost[i, i + j] += np.inf
return transition_cost, penalty, beat_names
def __repr__(self):
return "MinimumLoopConstraint: min_loop(%d)" % self.min_loop
class LabelConstraint(Constraint):
def __init__(self, in_labels, target_labels, penalty, penalty_window=0):
self.in_labels = copy.copy(in_labels)
self.out_labels = target_labels
self.penalty = penalty
self.window = penalty_window
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
# extend in_labels to work with pauses that we may have added
if n_beats < transition_cost.shape[0]:
self.in_labels.extend(
[None] * (transition_cost.shape[0] - n_beats))
new_pen = np.ones(penalty.shape) * np.array(self.penalty)
# new_pen = np.ones((n_beats, len(self.penalty))) *\
# np.array(self.penalty)
n_target = penalty.shape[1]
for n_i in xrange(transition_cost.shape[0]):
node_label = self.in_labels[n_i]
for l in xrange(1, n_target - 1):
prev_target = self.out_labels[l - 1]
next_target = self.out_labels[l + 1]
target_label = self.out_labels[l]
if node_label == target_label or target_label is None:
new_pen[n_i, l] = 0.0
elif node_label is None:
# should this have a penalty?
new_pen[n_i, l] = 0.0
if self.window > 0:
if target_label != prev_target:
# reduce penalty for beats prior
span = min(self.window, l)
new_pen[n_i, l - span:l] =\
np.linspace(1.0, 0.01, num=span)
if target_label != next_target:
# reduce penalty for beats later
span = min(self.window, len(self.out_labels) - l - 1)
new_pen[n_i, l + 1:l + span + 1] =\
np.linspace(0.01, 1.0, num=span)
for l in [0, n_target - 1]:
target_label = self.out_labels[l]
if node_label == target_label or target_label is None:
new_pen[n_i, l] = 0.0
elif node_label is None:
new_pen[n_i, l] = 0.0
penalty += new_pen
return transition_cost, penalty, beat_names
def __repr__(self):
return "LabelConstraint"
class ValenceArousalConstraint(Constraint):
def __init__(self, in_va, target_va, penalty, penalty_window=0):
self.in_va = np.copy(in_va)
self.target_va = np.copy(target_va)
self.penalty = penalty
self.window = penalty_window
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
# extend in_va to work with pauses that have been added
if n_beats < transition_cost.shape[0]:
n_pauses = transition_cost.shape[0] - n_beats
extra_va = np.zeros((n_pauses, 2))
self.in_va = np.r_[self.in_va, extra_va]
new_pen = np.ones(penalty.shape) * np.array(self.penalty)
n_target = penalty.shape[1]
for n_i in xrange(transition_cost.shape[0]):
node_va = self.in_va[n_i]
for l in xrange(n_target):
if n_i < n_beats:
new_pen[n_i, l] *=\
np.linalg.norm(self.target_va[l] - node_va)
else:
# pauses have no penalty here
new_pen[n_i, l] *= 0
penalty += new_pen
return transition_cost, penalty, beat_names
def __repr__(self):
return "ValenceArousalConstraint"
class GenericTimeSensitivePenalty(Constraint):
def __init__(self, penalty):
self.penalty = penalty
def apply(self, transition_cost, penalty, song, beat_names):
        n_beats = len(song.analysis["beats"])
        penalty[:n_beats, :] += self.penalty
return transition_cost, penalty, beat_names
class EnergyConstraint(Constraint):
# does not work with music duration constraint yet
def __init__(self, penalty=0.5):
self.penalty = penalty
def apply(self, transition_cost, penalty, song, beat_names):
sr = song.samplerate
frames = song.all_as_mono()
n_beats = len(song.analysis["beats"])
energies = np.zeros(n_beats)
for i, beat in enumerate(beat_names[:n_beats - 1]):
start_frame = int(sr * beat)
end_frame = int(sr * beat_names[:n_beats][i + 1])
beat_frames = frames[start_frame:end_frame]
beat_frames *= np.hamming(len(beat_frames))
energies[i] = np.sqrt(np.mean(beat_frames * beat_frames))
energies[-1] = energies[-2]
energies = [[x] for x in energies]
dist_matrix = 10 * scipy.spatial.distance.squareform(
scipy.spatial.distance.pdist(energies, 'euclidean'))
# shift it
dist_matrix[:-1, :] = dist_matrix[1:, :]
dist_matrix[-1, :] = np.inf
transition_cost[:n_beats, :n_beats] += (dist_matrix * self.penalty)
return transition_cost, penalty, beat_names
def __repr__(self):
return "EnergyConstraint: penalty=%f" % self.penalty
class PauseConstraint(Constraint):
def __init__(self, min_length, max_length, to_penalty=1.4,
between_penalty=0.05, unit="seconds"):
self.min_len = self.max_len = self.min_beats = self.max_beats = None
if unit == "beats":
self.min_beats = min_length
self.max_beats = max_length
elif unit == "seconds":
self.min_len = min_length
self.max_len = max_length
else:
raise RuntimeWarning(
"unit must be seconds or beats, defaulting to seconds")
self.min_len = min_length
self.max_len = max_length
# perhaps these costs should be based on the cost of a
# "bad" transition in the music.
self.to_cost = to_penalty
self.bw_cost = between_penalty
def apply(self, transition_cost, penalty, song, beat_names):
# we have to manage the pauses...
n_beats = len(song.analysis["beats"])
if self.min_len and self.max_len:
beat_len = song.analysis[BEAT_DUR_KEY]
self.min_beats = int(np.ceil(self.min_len / float(beat_len)))
self.max_beats = int(np.floor(self.max_len / float(beat_len)))
tc = self.to_cost
# tc = self.to_cost * min_beats
bc = self.bw_cost
new_trans = np.zeros(
(n_beats + self.max_beats, n_beats + self.max_beats))
new_trans[:n_beats, :n_beats] = transition_cost
new_pen = np.zeros((n_beats + self.max_beats, penalty.shape[1]))
new_pen[:n_beats, :] = penalty
# beat to first pause
p0 = n_beats
p_n = p0 + self.max_beats - 1
new_trans[:n_beats, p0] = tc
# beat to other pauses
new_trans[:n_beats, p0 + 1:] = np.inf
# pause to pause default
new_trans[p0:, p0:] = np.inf
# pause to beat default
new_trans[p0:, :p0] = np.inf
# must stay in pauses until min pause
for i in range(p0, p0 + self.min_beats):
new_trans[i, :n_beats] = np.inf
new_trans[i, i + 1] = 0.
# after that, pause-to-pause costs something
for i in range(p0 + self.min_beats, p0 + self.max_beats - 2):
new_trans[i, :n_beats] = np.inf
# new_trans[i, :n_beats] = 0.
new_trans[i, p_n] = 0.
new_trans[i, i + 1] = bc
# last pause must go back to beats
# Also, must exit through last pause
new_trans[p_n, :n_beats] = 0.
new_pen[p0 + 1:, 0] = np.inf
# add pauses to beat_names
beat_names.extend(["p%d" % i for i in xrange(self.max_beats)])
return new_trans, new_pen, beat_names
def __repr__(self):
return "PauseConstraint: ({}, {}, {}, {})".format(
self.min_len, self.max_len, self.min_beats, self.max_beats)
class StartWithMusicConstraint(Constraint):
def apply(self, transition_cost, penalty, song, beat_names):
p0 = len(song.analysis["beats"])
penalty[p0:, 0] += np.inf
return transition_cost, penalty, beat_names
def __str__(self):
return "StartWithMusicConstraint"
def __repr__(self):
return "StartWithMusicConstraint()"
class EndWithMusicConstraint(Constraint):
def apply(self, transition_cost, penalty, song, beat_names):
p0 = len(song.analysis["beats"])
penalty[p0:, -1] += np.inf
return transition_cost, penalty, beat_names
def __str__(self):
return "EndWithMusicConstraint"
def __repr__(self):
return "EndWithMusicConstraint()"
class StartAtStartConstraint(Constraint):
def __init__(self, padding=0):
self.padding = padding
def apply(self, transition_cost, penalty, song, beat_names):
penalty[self.padding + 1:, 0] += np.inf
return transition_cost, penalty, beat_names
def __str__(self):
return "StartAtStartConstraint"
def __repr__(self):
return "StartAtStartConstraint()"
class StartAtTimeConstraint(Constraint):
def __init__(self, time):
self.time = float(time)
def apply(self, transition_cost, penalty, song, beat_names):
deltas = []
deltas_i = []
for i, bn in enumerate(beat_names):
try:
deltas.append(abs(float(bn) - self.time))
deltas_i.append(i)
except:
pass
if len(deltas) == 0:
beat_i = 0
else:
beat_i = deltas_i[np.argmin(deltas)]
penalty[:beat_i, 0] += np.inf
penalty[beat_i + 1:, 0] += np.inf
return transition_cost, penalty, beat_names
def __str__(self):
return "StartAtTimeConstraint time({})".format(self.time)
def __repr__(self):
return "StartAtTimeConstraint({})".format(self.time)
class EndAtTimeConstraint(Constraint):
def __init__(self, time):
self.time = float(time)
def apply(self, transition_cost, penalty, song, beat_names):
deltas = []
deltas_i = []
for i, bn in enumerate(beat_names):
try:
deltas.append(abs(float(bn) - self.time))
deltas_i.append(i)
except:
pass
if len(deltas) == 0:
beat_i = 0
else:
beat_i = deltas_i[np.argmin(deltas)]
penalty[:beat_i - 1, -1] += np.inf
penalty[beat_i:, -1] += np.inf
return transition_cost, penalty, beat_names
def __str__(self):
return "EndAtTimeConstraint time({})".format(self.time)
def __repr__(self):
return "EndAtTimeConstraint({})".format(self.time)
class EndAtEndConstraint(Constraint):
def __init__(self, padding=0):
self.padding = padding
def apply(self, transition_cost, penalty, song, beat_names):
last_beat = len(song.analysis["beats"])
penalty[last_beat:, -1] += np.inf
penalty[:last_beat - self.padding, -1] += np.inf
return transition_cost, penalty, beat_names
def __str__(self):
return "EndAtEndConstraint"
def __repr__(self):
return "EndAtEndConstraint()"
class PauseEntryLabelChangeConstraint(Constraint):
def __init__(self, target_labels, penalty_value):
self.out_labels = target_labels
self.p = penalty_value
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
n_pauses = transition_cost.shape[0] - n_beats
p0 = n_beats
if n_pauses > 0:
target_changes = [0]
for l in xrange(1, len(self.out_labels)):
target = self.out_labels[l]
prev_target = self.out_labels[l - 1]
if target != prev_target:
target_changes.append(l)
# target_changes.append(max(l - 4, 0))
target_changes = np.array(target_changes)
penalty[p0, :] += self.p
penalty[p0, target_changes] -= self.p
return transition_cost, penalty, beat_names
def __repr__(self):
return "PauseEntryLabelChangeConstraint: penalty(%f)" % self.p
class PauseEntryVAChangeConstraint(Constraint):
def __init__(self, target_va, penalty_value):
self.out_va = target_va
self.p = penalty_value
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
n_pauses = transition_cost.shape[0] - n_beats
p0 = n_beats
if n_pauses > 0:
target_changes = [0]
for l in xrange(1, len(self.out_va)):
target = self.out_va[l]
prev_target = self.out_va[l - 1]
if np.linalg.norm(target - prev_target) > 0:
target_changes.append(l)
# target_changes.append(max(l - 4, 0))
target_changes = np.array(target_changes)
penalty[p0, :] += self.p
penalty[p0, target_changes] -= self.p
return transition_cost, penalty, beat_names
def __repr__(self):
return "PauseEntryVAChangeConstraint: penalty(%f)" % self.p
class PauseExitLabelChangeConstraint(Constraint):
def __init__(self, target_labels, penalty_value):
self.out_labels = target_labels
self.p = penalty_value
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
if transition_cost.shape[0] > n_beats:
p_n = transition_cost.shape[0] - 1
target_changes = [0]
for l in xrange(1, len(self.out_labels)):
target = self.out_labels[l]
prev_target = self.out_labels[l - 1]
if target != prev_target:
target_changes.append(l)
target_changes = np.array(target_changes)
penalty[p_n, :] += self.p
penalty[p_n, target_changes] -= self.p
return transition_cost, penalty, beat_names
def __repr__(self):
return "PauseExitLabelChangeConstraint: penalty(%f)" % self.p
class PauseExitVAChangeConstraint(Constraint):
def __init__(self, target_va, penalty_value):
self.out_va = target_va
self.p = penalty_value
def apply(self, transition_cost, penalty, song, beat_names):
n_beats = len(song.analysis["beats"])
if transition_cost.shape[0] > n_beats:
p_n = transition_cost.shape[0] - 1
target_changes = [0]
for l in xrange(1, len(self.out_va)):
target = self.out_va[l]
prev_target = self.out_va[l - 1]
if np.linalg.norm(target - prev_target) > 0:
target_changes.append(l)
target_changes = np.array(target_changes)
penalty[p_n, :] += self.p
penalty[p_n, target_changes] -= self.p
return transition_cost, penalty, beat_names
def __repr__(self):
return "PauseExitVAChangeConstraint: penalty(%f)" % self.p
class NoveltyConstraint(Constraint):
def __init__(self, in_labels, target_labels, penalty):
self.in_labels = in_labels
self.out_labels = target_labels
self.penalty = penalty
def apply(self, transition_cost, penalty, song, beat_names):
changepoints = np.array(novelty.novelty(song))
beats = song.analysis["beats"]
n_beats = len(beats)
n_target = penalty.shape[1]
cp_beats_i = [np.argmin(np.abs(beats - cp)) for cp in changepoints]
cp_beats = [beats[i] for i in cp_beats_i]
# find emotional changes at each changepoint, if any
changes = []
for i in cp_beats_i:
# check the previous and next 4 beats
n_prev = min(4, i)
n_next = min(4, n_beats - i)
labs = [self.in_labels[j]
for j in range(i - n_prev, i + n_next + 1)]
# check first and last beat in this range... assuming a sort of
# coarse-grained emotional labeling
if labs[0] != labs[-1]:
# there is an emotional change at this point in the music
changes.append((i, labs[0], labs[-1]))
for change in changes:
print "Found emotional change near changepoint: " +\
change[1] + " -> " + change[2]
# find those emotional changes in the target output
for l in xrange(1, n_target):
target = self.out_labels[l]
prev_target = self.out_labels[l - 1]
if target != prev_target:
for change in changes:
if prev_target == change[1] and target == change[2]:
print "setting change:\t" +\
change[1] + " -> " + change[2]
print "\tat beat " + str(l) + " " +\
str(l * song.analysis[BEAT_DUR_KEY])
# give huge preference to hitting the changepoint here
beat_i = change[0]
penalty[:n_beats, l] += 1.0
n_prev = min(2, beat_i)
n_next = min(2, n_beats - beat_i)
penalty[beat_i - n_prev:beat_i + n_next, l] -= 1.0
return transition_cost, penalty, beat_names
def __repr__(self):
return "NoveltyConstraint"
class NoveltyVAConstraint(Constraint):
def __init__(self, in_va, target_va, penalty):
self.in_va = in_va
self.out_va = target_va
self.penalty = penalty
def apply(self, transition_cost, penalty, song, beat_names):
changepoints = np.array(novelty.novelty(song))
beats = song.analysis["beats"]
n_beats = len(beats)
n_target = penalty.shape[1]
cp_beats_i = [np.argmin(np.abs(beats - cp)) for cp in changepoints]
cp_beats = [beats[i] for i in cp_beats_i]
far_threshold = .2
close_threshold = .1
# find emotional changes at each changepoint
changes = []
for i in cp_beats_i:
# check the previous and next 4 beats
n_prev = min(4, i)
n_next = min(4, n_beats - i)
vas = [self.in_va[j]
for j in range(i - n_prev, i + n_next + 1)]
# check first and last beat in this range... assuming a sort of
# coarse-grained emotional labeling
# before_va = np.mean(vas[:3], axis=0)
# after_va = np.mean(vas[-3:], axis=0)
before_va = vas[0]
after_va = vas[-1]
if np.linalg.norm(before_va - after_va) > far_threshold:
# there is an emotional change at this point in the music
changes.append((i, before_va, after_va))
for change in changes:
print "Found emotional change near changepoint:",\
change[1], "->", change[2]
# find those emotional changes in the target output
for l in xrange(1, n_target):
target = self.out_va[l]
prev_target = self.out_va[l - 1]
if np.linalg.norm(target - prev_target) > far_threshold:
for change in changes:
# print np.linalg.norm(prev_target - change[1]),\
# np.linalg.norm(target - change[2])
if np.linalg.norm(prev_target - change[1]) <\
close_threshold and\
np.linalg.norm(target - change[2]) <\
close_threshold:
print "setting change:\t", change[1], "->", change[2]
print "\tat beat " + str(l) + " " +\
str(l * song.analysis[BEAT_DUR_KEY])
# give huge preference to hitting the changepoint here
beat_i = change[0]
penalty[:n_beats, l] += 1.0
n_prev = min(2, beat_i)
n_next = min(2, n_beats - beat_i)
penalty[beat_i - n_prev:beat_i + n_next, l] -= 1.0
return transition_cost, penalty, beat_names
def __repr__(self):
return "NoveltyConstraint"
class MusicDurationConstraint(Constraint):
def __init__(self, min_length, max_length):
self.minlen = min_length
self.maxlen = max_length
def apply(self, transition_cost, penalty, song, beat_names):
beat_len = song.analysis[BEAT_DUR_KEY]
minlen = int(self.minlen / beat_len)
maxlen = int(self.maxlen / beat_len)
beats = song.analysis["beats"]
n_beats = len(beats)
n_pause_beats = transition_cost.shape[0] - n_beats
# basically infinity.
pen_val = 99999999.0
# Create new transition cost table
# (beat * beat index in max span) x
# (beat * beat index in max span of music)
# Is this too large?
new_tc_size = n_beats * maxlen + n_pause_beats
p0 = n_beats * maxlen
new_tc = np.empty((new_tc_size, new_tc_size))
# tile the tc over this new table
new_tc[:p0, :p0] = np.tile(transition_cost[:n_beats, :n_beats],
(maxlen, maxlen))
# tile the pause information as well
new_tc[:p0, p0:] = np.tile(transition_cost[:n_beats, n_beats:],
(maxlen, 1))
new_tc[p0:, :p0] = np.tile(transition_cost[n_beats:, :n_beats],
(1, maxlen))
new_tc[p0:, p0:] = transition_cost[n_beats:, n_beats:]
# Create new penalty table
# (beat * beat index in max span) x (beats in output)
new_pen = np.empty((new_tc_size, penalty.shape[1]))
# tile the tc over this new table
new_pen[:p0, :] = np.tile(penalty[:n_beats, :],
(maxlen, 1))
new_pen[p0:, :] = penalty[n_beats:, :]
#--- CONSTRAINTS ---#
# * don't start song in segment beat other than first
new_pen[n_beats:(n_beats * maxlen), 0] += pen_val
# * don't go to pause before minimum length music segment
new_tc[:(n_beats * minlen), p0] += pen_val
# * must go to pause if we're at the maxlen-th beat
new_tc[n_beats * (maxlen - 1):n_beats * maxlen, :p0] += pen_val
# * after pause, don't go to non-first segment beat
new_tc[p0:, n_beats:p0] += pen_val
# * don't move between beats that don't follow
# the segment index
new_tc[:p0, :p0] += pen_val
for i in xrange(1, maxlen):
new_tc[(i - 1) * n_beats:i * n_beats,
i * n_beats:(i + 1) * n_beats] -= pen_val
# update beat_names
pause_names = beat_names[n_beats:]
new_beat_names = []
for rep in xrange(maxlen):
new_beat_names.extend(beat_names[:n_beats])
new_beat_names.extend(pause_names)
return new_tc, new_pen, new_beat_names
def __repr__(self):
return "MusicDurationConstraint"
| ucbvislab/radiotool | radiotool/algorithms/constraints.py | Python | isc | 27,809 | 0.000467 |
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from tvtk.tools.tvtk_doc import TVTKFilterChooser, TVTK_FILTERS
# Local imports.
from mayavi.filters.filter_base import FilterBase
from mayavi.core.common import handle_children_state, error
from mayavi.core.pipeline_info import PipelineInfo
################################################################################
# `UserDefined` class.
################################################################################
class UserDefined(FilterBase):
"""
This filter lets the user define their own filter
dynamically/interactively. It is like `FilterBase` but allows a
user to specify the class without writing any code.
"""
# The version of this class. Used for persistence.
__version__ = 0
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
######################################################################
# `object` interface.
######################################################################
def __set_pure_state__(self, state):
# Create and set the filter.
children = [f for f in [self.filter] if f is not None]
handle_children_state(children, [state.filter])
self.filter = children[0]
self.update_pipeline()
# Restore our state.
super(UserDefined, self).__set_pure_state__(state)
######################################################################
# `UserDefined` interface.
######################################################################
def setup_filter(self):
"""Setup the filter if none has been set or check it if it
already has been."""
obj = self.filter
if not self._check_object(obj):
if obj is not None:
cname = obj.__class__.__name__
error('Invalid filter %s chosen! Try again!'%cname)
obj = self._choose_filter()
self.filter = obj
######################################################################
# Non-public interface.
######################################################################
def _choose_filter(self):
chooser = TVTKFilterChooser()
chooser.edit_traits(kind='livemodal')
obj = chooser.object
if obj is None:
error('Invalid filter chosen! Try again!')
return obj
def _check_object(self, obj):
if obj is None:
return False
if obj.__class__.__name__ in TVTK_FILTERS:
return True
return False
def _filter_changed(self, old, new):
self.name = 'UserDefined:%s'%new.__class__.__name__
super(UserDefined, self)._filter_changed(old, new)
| dmsurti/mayavi | mayavi/filters/user_defined.py | Python | bsd-3-clause | 3,082 | 0.001622 |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from shutil import copy
from .host_test_plugins import HostTestPluginBase
from time import sleep
class HostTestPluginCopyMethod_Mbed(HostTestPluginBase):
def generic_mbed_copy(self, image_path, destination_disk):
""" Generic mbed copy method for "mbed enabled" devices.
It uses standard python shuitl function to copy
image_file (target specific binary) to device's disk.
"""
result = True
if not destination_disk.endswith('/') and not destination_disk.endswith('\\'):
destination_disk += '/'
try:
copy(image_path, destination_disk)
except Exception as e:
self.print_plugin_error("shutil.copy('%s', '%s')"% (image_path, destination_disk))
self.print_plugin_error("Error: %s"% str(e))
result = False
return result
# Plugin interface
name = 'HostTestPluginCopyMethod_Mbed'
type = 'CopyMethod'
stable = True
capabilities = ['shutil', 'default']
required_parameters = ['image_path', 'destination_disk', 'program_cycle_s']
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return True
def execute(self, capability, *args, **kwargs):
""" Executes capability by name.
Each capability may directly just call some command line
program or execute building pythonic function
"""
result = False
if self.check_parameters(capability, *args, **kwargs) is True:
# Capability 'default' is a dummy capability
if capability == 'shutil':
image_path = kwargs['image_path']
destination_disk = kwargs['destination_disk']
program_cycle_s = kwargs['program_cycle_s']
# Wait for mount point to be ready
self.check_mount_point_ready(destination_disk) # Blocking
result = self.generic_mbed_copy(image_path, destination_disk)
# Allow mbed to cycle
sleep(program_cycle_s)
return result
def load_plugin():
""" Returns plugin available in this module
"""
return HostTestPluginCopyMethod_Mbed()
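# --- Illustrative usage sketch (added; not part of the original plugin) ---
# How a host-test harness would typically drive this copy plugin; the image
# path, mount point and cycle time below are assumptions.
#
#     plugin = load_plugin()
#     plugin.setup()
#     ok = plugin.execute('shutil',
#                         image_path='BUILD/firmware.bin',
#                         destination_disk='/media/DAPLINK',
#                         program_cycle_s=4)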
| c1728p9/mbed-os | tools/host_tests/host_tests_plugins/module_copy_mbed.py | Python | apache-2.0 | 2,899 | 0.001725 |
# Various utilies for dealing with Neutron and the renaming from Quantum.
from subprocess import check_output
from charmhelpers.core.hookenv import (
config,
log,
ERROR,
)
from charmhelpers.contrib.openstack.utils import os_release
def headers_package():
"""Ensures correct linux-headers for running kernel are installed,
for building DKMS package"""
kver = check_output(['uname', '-r']).strip()
return 'linux-headers-%s' % kver
QUANTUM_CONF_DIR = '/etc/quantum'
def kernel_version():
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
kver = check_output(['uname', '-r']).strip()
kver = kver.split('.')
return (int(kver[0]), int(kver[1]))
def determine_dkms_package():
""" Determine which DKMS package should be used based on kernel version """
# NOTE: 3.13 kernels have support for GRE and VXLAN native
if kernel_version() >= (3, 13):
return []
else:
return ['openvswitch-datapath-dkms']
# legacy
def quantum_plugins():
from charmhelpers.contrib.openstack import context
return {
'ovs': {
'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
'server_services': ['quantum-server']
},
'nvp': {
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
'quantum-plugin-nicira'],
'server_services': ['quantum-server']
}
}
NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins():
from charmhelpers.contrib.openstack import context
release = os_release('nova-common')
plugins = {
'ovs': {
'config': '/etc/neutron/plugins/openvswitch/'
'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
},
'nvp': {
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
},
'nsx': {
'config': '/etc/neutron/plugins/vmware/nsx.ini',
'driver': 'vmware',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-vmware'],
'server_services': ['neutron-server']
},
'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [['neutron-plugin-cisco']],
'server_packages': ['neutron-server',
'neutron-plugin-cisco'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
# NOTE: patch in ml2 plugin for icehouse onwards
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['ovs']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
plugins['nvp'] = plugins['nsx']
return plugins
def neutron_plugin_attribute(plugin, attr, net_manager=None):
manager = net_manager or network_manager()
if manager == 'quantum':
plugins = quantum_plugins()
elif manager == 'neutron':
plugins = neutron_plugins()
else:
log('Error: Network manager does not support plugins.')
raise Exception
try:
_plugin = plugins[plugin]
except KeyError:
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
raise Exception
try:
return _plugin[attr]
except KeyError:
return None
def network_manager():
'''
Deals with the renaming of Quantum to Neutron in H and any situations
that require compatability (eg, deploying H with network-manager=quantum,
upgrading from G).
'''
release = os_release('nova-common')
manager = config('network-manager').lower()
if manager not in ['quantum', 'neutron']:
return manager
if release in ['essex']:
# E does not support neutron
log('Neutron networking not supported in Essex.', level=ERROR)
raise Exception
elif release in ['folsom', 'grizzly']:
# neutron is named quantum in F and G
return 'quantum'
else:
# ensure accurate naming for all releases post-H
return 'neutron'
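# --- Illustrative usage sketch (added; not part of the original helper) ---
# A hedged example of looking up plugin metadata, assuming the charm is
# configured for the Neutron OVS plugin:
#
#     conf_file = neutron_plugin_attribute('ovs', 'config', net_manager='neutron')
#     packages = neutron_plugin_attribute('ovs', 'packages', net_manager='neutron')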
| jiasir/openstack-trove | lib/charmhelpers/contrib/openstack/neutron.py | Python | mit | 7,812 | 0.000256 |
#!/usr/bin/env python
import puka
import sys
client = puka.Client("amqp://localhost/")
promise = client.connect()
client.wait(promise)
promise = client.exchange_declare(exchange='direct_logs', type='direct')
client.wait(promise)
promise = client.queue_declare(exclusive=True)
queue_name = client.wait(promise)['queue']
severities = sys.argv[1:]
if not severities:
print >> sys.stderr, "Usage: %s [info] [warning] [error]" % (sys.argv[0],)
sys.exit(1)
for severity in severities:
promise = client.queue_bind(exchange='direct_logs', queue=queue_name,
routing_key=severity)
client.wait(promise)
print ' [*] Waiting for logs. To exit press CTRL+C'
consume_promise = client.basic_consume(queue=queue_name, no_ack=True)
while True:
msg_result = client.wait(consume_promise)
print " [x] %r:%r" % (msg_result['routing_key'], msg_result['body'])
| fams/rabbitmq-tutorials | python-puka/receive_logs_direct.py | Python | apache-2.0 | 902 | 0 |
"""
Contains the database connection tools and sqlalchemy models for iom database
Created by adam on 11/11/15
In order to use this, import the module and
create a sqlalchemy engine named 'engine' then do:
# connect to db
from sqlalchemy.orm import sessionmaker
# ORM's handle to database at global level
Session = sessionmaker(bind=engine)
Finally when ready to make queries, do:
#connect to db: Local object
session = Session()
The local session object is then used to make queries like:
s = session.query(Testimony).all() # All testimony objects
s1 = session.query(Testimony).order_by(Testimony.quote_id)[0:100] # First 100 vignettes
"""
__author__ = 'adam'
import os
import sys
import xml.etree.ElementTree as ET
# sqlalchemy tools
import sqlalchemy
from sqlalchemy import Table, Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
# connecting to db
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
class Connection(object):
"""
Parent class for creating sqlalchemy engines, session objects,
and other db interaction stuff behind the scenes from a file
holding credentials
Attributes:
engine: sqlalchemy engine instance
session: sqlalchemy local session object. This is the property that should do most work
_credential_file: String path to file with db connection info
_username: String db username
_password: String db password
_server: String db server
_port: db port
_db_name: String name of db
"""
def __init__(self, credential_file=None):
"""
Loads db connection credentials from file and returns a mysql sqlalchemy engine
Args:
:param credential_file: String path to the credential file to use
Returns:
:return: sqlalchemy.create_engine Engine instance
"""
self._credential_file = credential_file
self._load_credentials()
self._make_engine()
def _load_credentials(self):
"""
Opens the credentials file and loads the attributes
"""
if self._credential_file is not None:
credentials = ET.parse(self._credential_file)
self._server = credentials.find('db_host').text
self._port = credentials.find('db_port').text
if self._port is not None:
self._port = int(self._port)
self._username = credentials.find('db_user').text
self._db_name = credentials.find('db_name').text
self._password = credentials.find('db_password').text
def _make_engine(self):
"""
Creates the sqlalchemy engine and stores it in self.engine
"""
raise NotImplementedError
class MySqlConnection(Connection):
"""
    Uses the MySQL Connector/Python driver (pip install mysql-connector-python)
"""
def __init__(self, credential_file):
self._driver = '+mysqlconnector'
super().__init__(credential_file)
def _make_engine(self):
if self._port:
server = "%s:%s" % (self._server, self._port)
else:
server = self._server
self._dsn = "mysql%s://%s:%s@%s/%s" % (self._driver, self._username, self._password, server, self._db_name)
self.engine = create_engine(self._dsn)
class SqliteConnection(Connection):
"""
Makes a connection to an in memory sqlite database.
    Note that this does not actually populate the database. That
    requires a call to: Base.metadata.create_all(connection.engine)
"""
def __init__(self):
super().__init__()
def _make_engine(self):
self.engine = create_engine('sqlite:///:memory:', echo=True)
class BaseDAO(object):
"""
Parent class for database interactions.
The parent will hold the single global connection (i.e. sqlalchemy Session)
to the db.
Instance classes will have their own session instances
Attributes:
global_session: (class attribute) A sqlalchemy configurable sessionmaker factory (sqlalchemy.orm.session.sessionmaker)
bound to the engine. Is not itself a session. Instead, it needs to be instantiated: DAO.global_session()
engine: sqlalchemy.engine.base.Engine instance
"""
global_session = None
def __init__(self, engine):
assert(isinstance(engine, sqlalchemy.engine.base.Engine))
self.engine = engine
if BaseDAO.global_session is None:
BaseDAO._create_session(engine)
@staticmethod
def _create_session(engine):
"""
Instantiates the sessionmaker factory into the global_session attribute
"""
BaseDAO.global_session = sqlalchemy.orm.sessionmaker(bind=engine)
class DAO(BaseDAO):
"""
    Example concrete DAO. A metaclass would be needed to ensure that
    all DAO subclasses create their own session instance like this.
"""
def __init__(self, engine):
assert(isinstance(engine, sqlalchemy.engine.base.Engine))
super().__init__(engine)
self.session = BaseDAO.global_session()
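# Usage sketch (hedged, illustrative): the connection classes and DAO above are
# typically wired together like this. SqliteConnection builds an empty in-memory
# database, so the schema must be created explicitly before querying:
#
#   conn = SqliteConnection()
#   Base.metadata.create_all(conn.engine)
#   dao = DAO(conn.engine)
#   vignettes = dao.session.query(Testimony).all()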
#######################################
# Database models #
#######################################
# Base class that maintains the catalog of tables and classes in db
Base = declarative_base()
condition_testimony_table = Table('iom_conditionsXtestimony', Base.metadata,
Column('quote_id', Integer, ForeignKey('iom_testimony.quote_id')),
Column('condition_id', Integer, ForeignKey('iom_conditions.condition_id'))
)
class Testimony(Base):
"""
Properties:
condition_ids: Tuple of condition ids identified in vignette
condition_names: Tuple of condition names identified in vignette
"""
__tablename__ = "iom_testimony"
quote_id = Column(Integer, primary_key=True)
respondent_id = Column(Integer)
question_number = Column(Integer)
quote_text = Column(String)
# many to many Testimony<->Condition
conditions = relationship('Condition', secondary=condition_testimony_table, backref="iom_testimony")
def get_condition_ids(self):
"""
Returns a tuple of unique condition ids identified for
the vignette
"""
self.condition_ids = []
        [self.condition_ids.append(c.condition_id) for c in self.conditions]
self.condition_ids = tuple(set(self.condition_ids))
return self.condition_ids
def get_condition_names(self):
"""
Returns a tuple of any condition names identified for
the vignette
"""
self.condition_names = []
        [self.condition_names.append(c.condition_name) for c in self.conditions]
self.condition_names = tuple(set(self.condition_names))
return self.condition_names
def get_id(self):
"""
Getter for quote_id
Returns:
Integer representation of the id of the vignette
"""
return self.quote_id
class Condition(Base):
"""
Properties:
quote_ids: List of associated vignette ids
respondent_ids: List of associated respondent ids
"""
__tablename__ = 'iom_conditions'
condition_id = Column(Integer, primary_key=True)
condition_name = Column(String)
# many to many Condition<->Alias
# aliases = relationship('Alias', backref='iom_conditions')
# many to many Testimony<->Condition
testimony = relationship('Testimony', secondary=condition_testimony_table, backref="iom_conditions")
def get_vignette_ids(self):
"""
Returns a tuple of quote ids wherein the condition is mentioned
"""
self.quote_ids = []
[self.quote_ids.append(t.quote_id) for t in self.testimony]
return tuple(self.quote_ids)
def get_respondent_ids(self):
"""
Returns a tuple of ids of respondents who mentioned the condition
Also sets attribute respondent_ids
"""
self.respondent_ids = []
[self.respondent_ids.append(t.respondent_id) for t in self.testimony]
self.respondent_ids = tuple(set(self.respondent_ids))
return self.respondent_ids
#
# class Alias(Base):
# __tablename__ = 'iom_conditionAliases'
# aliasID = Column(Integer, primary_key=True)
# conditionAlias = Column(String)
# conditionID = Column(Integer, ForeignKey('iom_conditions.condition_id'))
# condition = relationship('Condition', backref='iom_conditionAliases')
#
# def get_alias_text(self):
# return self.conditionAlias
#
# def get_condition(self):
# return self.condition
class Person(object):
"""
Parent class for Providers, Respondents, and Patients which contains
methods for retrieving data for those objects to inherit
Properties:
concatenated_text: String of all vignettes attributed to the
person. Only available after get_concatenated_responses called
quote_ids: Tuple of all ids of vignettes attributed to the person. Only
available after get_vignette_ids called
condition_ids: Tuple of all condition ids attributed to the person. Only
available after get_condition_ids called.
condition_names: Tuple of string names of conditions attributed to the person. Only
available after get_condition_names called.
TODO: Consider adding __get__ method which checks if the relevant property has been set
and then calls the relevant method if not.
"""
# def __init__(self):
def get_concatenated_responses(self):
"""
Concatenates all the vignette text for the respondent
and returns it.
"""
self.concatenated_text = ""
def addText(text):
self.concatenated_text += '\n ' + text
[addText(t.quote_text) for t in self.vignettes]
return self.concatenated_text
def get_concatenated_responses_for_html(self):
"""
Concatenates all the vignette text for the respondent
and returns it. Same as regular except adds html tags for line breaks
"""
self.concatenated_text_html = ""
def addText(text, question_number):
self.concatenated_text_html += '<p>[Q%s] %s </p>' % (question_number, text)
[addText(t.quote_text, t.question_number) for t in self.vignettes]
return self.concatenated_text_html
def get_vignette_ids(self):
"""
Returns a tuple of the quote ids belonging to the respondent
"""
self.quote_ids = []
[self.quote_ids.append(t.quote_id) for t in self.vignettes]
return tuple(self.quote_ids)
def get_condition_ids(self):
"""
Returns a tuple of unique condition ids identified for
the person
"""
self.condition_ids = []
for t in self.vignettes:
# iterate through each condition associated with each vignette
[self.condition_ids.append(ci) for ci in t.get_condition_ids()]
self.condition_ids = tuple(set(self.condition_ids))
return self.condition_ids
def get_condition_names(self):
"""
Returns a tuple of any condition names identified for
the person
"""
self.condition_names = []
for t in self.vignettes:
# iterate through each condition associated with each vignette
[self.condition_names.append(cn) for cn in t.get_condition_names()]
self.condition_names = tuple(set(self.condition_names))
return self.condition_names
def get_id(self):
"""
Getter for person's respondent id
:return: int
"""
if hasattr(self, 'respondent_id'):
            return self.respondent_id
elif hasattr(self, 'id'):
return self.id
else:
            raise Exception('Person has neither a respondent_id nor an id attribute')
class Provider(Base, Person):
"""
Properties:
respondent_id: Integer respondent id
"""
__tablename__ = "iom_providers"
respondent_id = Column(Integer, ForeignKey('iom_testimony.respondent_id'), primary_key=True)
# Relation to testimony table
vignettes = relationship('Testimony', backref='iom_testimony', uselist=True)
def __init__(self):
Base.__init__(self)
Person.__init__(self)
# alias for respondent id
self.id = self.respondent_id
class Patient(Base, Person):
"""
Properties:
respondent_id: Integer respondent id
"""
__tablename__ = "iom_patients"
respondent_id = Column(Integer, ForeignKey('iom_testimony.respondent_id'), primary_key=True)
# Relation to testimony table
vignettes = relationship(Testimony, uselist=True)
def __init__(self):
Base.__init__(self)
Person.__init__(self)
# alias for respondent id
self.id = self.respondent_id
class Respondent(Base, Person):
"""
Generic respondent
Properties:
id: Integer respondent id
"""
__tablename__ = "iom_respondents"
id = Column(Integer, ForeignKey('iom_testimony.respondent_id'), primary_key=True)
# Relation to testimony table
vignettes = relationship('Testimony', uselist=True)
def __init__(self):
Base.__init__(self)
Person.__init__(self)
# alias for respondent id so matches others
self.respondent_id = self.id
if __name__ == '__main__':
    # connect to db; 'credentials.xml' is a placeholder path -- point it at an XML
    # file containing db_host, db_port, db_user, db_name and db_password elements
    connection = MySqlConnection('credentials.xml')
    # ORM's handle to database at global level
    Session = sessionmaker(bind=connection.engine) | PainNarrativesLab/IOMNarratives | IomDataModels.py | Python | mit | 13,627 | 0.001614 |
from nanpy import (ArduinoApi, SerialManager)
from time import sleep
#Connect to Arduino. Automatically finds serial port.
connection = SerialManager()
a = ArduinoApi(connection = connection)
sensor = 14 #Analog pin 0
a.pinMode(sensor, a.INPUT) #Setup sensor
while True:
    total = 0 #Each set of readings starts with a total of 0
    readings = [0.0] * 24 #Array to hold the 24 temperature readings
    #Get all the readings:
    for i in range(0, 24):
        reading = a.analogRead(sensor) #get reading
        vol = (reading*(5.0/1024)) #relative voltage (10-bit ADC, 5V reference)
        temp = ((vol-0.5)*100) #find temp in Celsius (TMP36-style conversion)
        readings[i] = temp #Place temp reading in i space of array
        sleep(0.1) #Time between readings
#Add the readings:
for i in range(0, 24):
total += readings[i]
#Find the average and print:
average = total/24
print("The average temp is ")
print(average)
| lcc755/WorkTerm1 | 5Array/2Temp/Code/average.py | Python | apache-2.0 | 897 | 0.020067 |
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import boto.utils
from boto.compat import BytesIO, six, urllib, encodebytes
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type",
"x-robots-tag", "expires"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
# Metadata fields, whether user-settable or not, other than custom
# metadata fields (i.e., those beginning with a provider specific prefix
# like x-amz-meta).
base_fields = (base_user_settable_fields |
set(["last-modified", "content-length", "date", "etag"]))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self._storage_class = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
else:
name = u'<Key: None,%s>' % self.name
# Encode to bytes for Python 2 to prevent display decoding issues
if not isinstance(name, str):
name = name.encode('utf-8')
return name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
    key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
    md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
    base64md5 = property(_get_base64md5, _set_base64md5)
def _get_storage_class(self):
if self._storage_class is None and self.bucket:
# Attempt to fetch storage class
list_items = list(self.bucket.list(self.name.encode('utf-8')))
if len(list_items) and getattr(list_items[0], '_storage_class',
None):
self._storage_class = list_items[0]._storage_class
else:
# Key is not yet saved? Just use default...
self._storage_class = 'STANDARD'
return self._storage_class
def _set_storage_class(self, value):
self._storage_class = value
storage_class = property(_get_storage_class, _set_storage_class)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = encodebytes(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_storage_class_header(self, resp):
provider = self.bucket.connection.provider
if provider.storage_class_header:
self._storage_class = resp.getheader(
provider.storage_class_header, None)
if (self._storage_class is None and
provider.get_provider_name() == 'aws'):
# S3 docs for HEAD object requests say S3 will return this
# header for all objects except Standard storage class objects.
self._storage_class = 'STANDARD'
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
provider = self.bucket.connection.provider
header = response.getheader(provider.restore_header)
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() in Key.base_fields:
self.__dict__[name.lower().replace('-', '_')] = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_restore_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
# Python 3 iterator support
__next__ = next
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
        all metadata and ACL info while changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
bucket_name = dst_bucket or self.bucket.name
if new_storage_class == 'STANDARD':
return self.copy(bucket_name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(bucket_name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
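    # Usage sketch (hedged, illustrative): assuming an existing key `k` fetched
    # from a bucket, moving it to the cheaper storage tier is a single call:
    #
    #   k.change_storage_class('REDUCED_REDUNDANCY')
    #
    # Passing a class other than STANDARD or REDUCED_REDUNDANCY raises
    # BotoClientError, as implemented above.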
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key,
src_version_id=self.version_id)
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self, headers=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name, headers=headers))
def delete(self, headers=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id,
headers=headers)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
# Ensure that metadata that is vital to signing is in the correct
# case. Applies to ``Content-Type`` & ``Content-MD5``.
if name.lower() == 'content-type':
self.metadata['Content-Type'] = value
elif name.lower() == 'content-md5':
self.metadata['Content-MD5'] = value
else:
self.metadata[name] = value
if name.lower() in Key.base_user_settable_fields:
self.__dict__[name.lower().replace('-', '_')] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds.
:type method: string
:param method: The method to use for retrieving the file
(default is GET).
:type headers: dict
:param headers: Any headers to pass along in the request.
:type query_auth: bool
:param query_auth: If True, signs the request in the URL.
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
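    # Usage sketch (hedged, illustrative names): generating a time-limited,
    # signed download URL for an existing object:
    #
    #   conn = boto.connect_s3()
    #   key = conn.get_bucket('my-bucket').get_key('report.csv')
    #   url = key.generate_url(expires_in=3600)   # GET URL valid for one hour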
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
# If the caller explicitly specified host header, tell putrequest
# not to add a second host header. Similarly for accept-encoding.
skips = {}
if boto.utils.find_matching_headers('host', headers):
skips['skip_host'] = 1
if boto.utils.find_matching_headers('accept-encoding', headers):
skips['skip_accept_encoding'] = 1
http_conn.putrequest(method, path, **skips)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
# If storage_class is None, then a user has not explicitly requested
# a storage class, so we can assume STANDARD here
if self._storage_class not in [None, 'STANDARD']:
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
# This is terrible. We need a SHA256 of the body for SigV4, but to do
# the chunked ``sender`` behavior above, the ``fp`` isn't available to
# the auth mechanism (because closures). Detect if it's SigV4 & embelish
# while we can before the auth calculations occur.
if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
if size is not None:
kwargs['size'] = size
headers['_sha256'] = compute_hash(**kwargs)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
md5 = self.md5
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
# If you use customer-provided encryption keys, the ETag value that
# Amazon S3 returns in the response will not be the MD5 of the
# object.
server_side_encryption_customer_algorithm = response.getheader(
'x-amz-server-side-encryption-customer-algorithm', None)
if server_side_encryption_customer_algorithm is None:
if self.etag != '"%s"' % md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5. '
'%s vs. %s' % (self.etag, self.md5))
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
in place into different parts. Less bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
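    # Usage sketch (hedged, illustrative names): pre-computing the MD5 pair once
    # and reusing it so set_contents_from_file does not hash the data again:
    #
    #   with open('data.bin', 'rb') as fp:
    #       md5_pair = key.compute_md5(fp)            # (hexdigest, base64 digest)
    #       key.set_contents_from_file(fp, md5=md5_pair)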
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Size and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
                # TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
# defer md5 calculation to on the fly and
# we don't know anything about size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
# compute_md5() and also set self.size to actual
# size of the bytes read computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
# If md5 is provided, still need to size so
# calculate based on bytes to end of content
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
            redundancy at lower storage cost.
        :type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
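    # Usage sketch (hedged, illustrative names): uploading a local file to a new
    # key and checking how many bytes were written:
    #
    #   bucket = boto.connect_s3().get_bucket('my-bucket')
    #   k = bucket.new_key('backups/db.sql')
    #   bytes_written = k.set_contents_from_filename('/tmp/db.sql')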
def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if not isinstance(string_data, bytes):
string_data = string_data.encode("utf-8")
fp = BytesIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
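# Illustrative usage sketch (not part of boto itself; the bucket and key names
# are hypothetical and AWS credentials are assumed to be configured):
#
#   import boto
#   conn = boto.connect_s3()
#   bucket = conn.get_bucket('example-bucket')
#   key = bucket.new_key('notes/hello.txt')
#   key.set_contents_from_string('hello world')
#   text = key.get_contents_as_string(encoding='utf-8')
#   assert text == 'hello world'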
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
:type headers: dict
:param headers: headers to send when retrieving the files
:type cb: function
:param cb: a callback function that will be called to report
progress on the download. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.parse.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError as e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
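# Worked example of the callback granularity above (illustrative numbers):
# for an 8 MiB object with self.BufferSize = 8192 and num_cb = 10, the loop
# reads 1024 buffers and cb_count = ceil(8388608 / 8192 / 9.0) = 114, so cb()
# fires about every 114 buffers (roughly 0.9 MiB), giving about nine progress
# calls plus the initial call and the final call after the loop.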
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
Get a torrent file (see get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
progress on the download. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: file-like object
:param fp: the file pointer to which the object's contents
will be written
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the download. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
:type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the download. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
try:
with open(filename, 'wb') as fp:
self.get_contents_to_file(fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = email.utils.parsedate_tz(self.last_modified)
modified_stamp = int(email.utils.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
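# Illustrative download sketch (hypothetical key name; assumes 'bucket' is an
# existing boto Bucket object): fetch a key to a local file and print progress.
#
#   def progress(transmitted, total):
#       print('%d of %d bytes received' % (transmitted, total))
#
#   key = bucket.get_key('reports/2016.csv')
#   key.get_contents_to_filename('/tmp/2016.csv', cb=progress, num_cb=10)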
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, encoding=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the download. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
:type encoding: str
:param encoding: The text encoding to use, such as ``utf-8``
or ``iso-8859-1``. If set, then a string will be returned.
Defaults to ``None`` and returns bytes.
:rtype: bytes or str
:returns: The contents of the file as bytes or a string
"""
fp = BytesIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
value = fp.getvalue()
if encoding is not None:
value = value.decode(encoding)
return value
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
account you are granting the permission to.
:type recursive: boolean
:param recursive: A boolean value that controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
account you are granting the permission to.
:type display_name: string
:param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
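# Illustrative ACL sketch (hypothetical identifiers): grant READ access on a
# key by e-mail address and then by canonical user id.
#
#   key.add_email_grant('READ', 'user@example.com')
#   key.add_user_grant('READ', '<canonical-user-id>', display_name='Example User')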
def _normalize_metadata(self, metadata):
if type(metadata) == set:
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
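# Illustrative sketch (hypothetical key name): restore an archived object for
# five days, then re-fetch the key later to check whether the restore has
# completed before reading its contents.
#
#   key = bucket.get_key('archive/2014.tar')
#   key.restore(days=5)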
| akashlevy/Lyff | lyff_lambda/boto/s3/key.py | Python | mit | 83,034 | 0.000313 |
""" Convert an OpenTripPlanner json itinerary response into something that's more suitable for rendering via a webpage
"""
import re
import sys
import math
from decimal import *
import datetime
from datetime import timedelta
import simplejson as json
from ott.utils import object_utils
from ott.utils import date_utils
from ott.utils import json_utils
import logging
log = logging.getLogger(__file__)
def remove_agency_from_id(id):
""" OTP 1.0 has TriMet:1 for trip and route ids
"""
ret_val = id
if id and ":" in id:
v = id.split(":")
if v and len(v) > 1 and len(v[1]) > 0:
ret_val = v[1].strip()
return ret_val
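# e.g., remove_agency_from_id("TriMet:1") returns "1", while an id without a
# colon, such as "90", is returned unchanged.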
class Error(object):
def __init__(self, jsn, params=None):
self.id = jsn['id']
self.msg = jsn['msg']
class DateInfo(object):
def __init__(self, jsn):
# import pdb; pdb.set_trace()
self.start_time_ms = jsn['startTime']
self.end_time_ms = jsn['endTime']
start = datetime.datetime.fromtimestamp(self.start_time_ms / 1000)
end = datetime.datetime.fromtimestamp(self.end_time_ms / 1000)
self.start_date = "{}/{}/{}".format(start.month, start.day, start.year) # 2/29/2012
self.end_date = "{}/{}/{}".format(end.month, end.day, end.year) # 2/29/2012
self.start_time = start.strftime(" %I:%M%p").lower().replace(' 0','') # "3:40pm" -- note, keep pre-space
self.end_time = end.strftime(" %I:%M%p").lower().replace(' 0','') # "3:44pm" -- note, keep pre-space
# service_date is important to link off to proper stop timetables
# in OTP 1.0, we have: <serviceDate>20161123</serviceDate>
# in older versions of OTP, there's no such date so set it to start_date
if 'serviceDate' in jsn and len(jsn['serviceDate']) == 8:
syear = jsn['serviceDate'][0:4]
smonth = jsn['serviceDate'][4:6].lstrip('0')
sday = jsn['serviceDate'][6:].lstrip('0')
self.service_date = "{}/{}/{}".format(smonth, sday, syear) # 2/29/2012
else:
self.service_date = self.estimate_service_date(start)
# OTP 1.0 has seconds not millisecs for duration
durr = int(jsn['duration'])
if durr < 60000:
durr = durr * 1000
self.duration_ms = durr
self.duration = ms_to_minutes(self.duration_ms, is_pretty=True, show_hours=True)
self.date = "%d/%d/%d" % (start.month, start.day, start.year) # 2/29/2012
self.pretty_date = start.strftime("%A, %B %d, %Y").replace(' 0',' ') # "Monday, March 4, 2013"
self.day = start.day
self.month = start.month
self.year = start.year
def estimate_service_date(self, start):
""" in OTP 1.0, we are provided a service_date that's very important to linking to proper schedules, etc...
but in prior versions, we are missing service_date, so this routine is going to calculate service date
this way: if the hour is earlier than 3am, then use 'yesterday' as the service date. This is a hack that
works for agencies like TriMet, which do not have Owl service.
NOTE: there are often instances in parsing OTP 1.0 (non Legs) that also don't have a service_date attribute,
so this routine will also be called. (Service date is mostly used for linking a transit leg
to a stop schedule, so...)
"""
d = start
if start.hour < 3:
""" yesterday calculation for times less than 3am """
d = start - timedelta(days=1)
ret_val = "{}/{}/{}".format(d.month, d.day, d.year) # 2/29/2012
return ret_val
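# e.g., a leg starting at 1:30am on 2/29/2012 gets the previous day's service
# date, "2/28/2012", while a 7:15am start keeps "2/29/2012".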
class DateInfoExtended(DateInfo):
"""
"""
def __init__(self, jsn):
super(DateInfoExtended, self).__init__(jsn)
self.extended = True
# step 1: get data
walk = get_element(jsn, 'walkTime', 0)
tran = get_element(jsn, 'transitTime', 0)
wait = get_element(jsn, 'waitingTime', 0)
tot = walk + tran + wait
# step 2: trip length
h,m = seconds_to_hours_minutes(tot)
self.total_time_hours = h
self.total_time_mins = m
self.duration_min = int(round(tot / 60))
# step 3: transit info
h,m = seconds_to_hours_minutes(tran)
self.transit_time_hours = h
self.transit_time_mins = m
self.start_transit = "TODO"
self.end_transit = "TODO"
# step 4: bike / walk length
self.bike_time_hours = None
self.bike_time_mins = None
self.walk_time_hours = None
self.walk_time_mins = None
if 'mode' in jsn and jsn['mode'] == 'BICYCLE':
h,m = seconds_to_hours_minutes(walk)
self.bike_time_hours = h
self.bike_time_mins = m
else:
h,m = seconds_to_hours_minutes(walk)
self.walk_time_hours = h
self.walk_time_mins = m
# step 5: wait time
h,m = seconds_to_hours_minutes(wait)
self.wait_time_hours = h
self.wait_time_mins = m
# step 5: drive time...unused as of now...
self.drive_time_hours = None
self.drive_time_mins = None
self.text = self.get_text()
def get_text(self):
"""
"""
ret_val = ''
tot = hour_min_string(self.total_time_hours, self.total_time_mins)
walk = hour_min_string(self.walk_time_hours, self.walk_time_mins)
bike = hour_min_string(self.bike_time_hours, self.bike_time_mins)
wait = hour_min_string(self.wait_time_hours, self.wait_time_mins)
return ret_val
class Elevation(object):
def __init__(self, steps):
self.points = None
self.points_array = None
self.distance = None
self.start_ft = None
self.end_ft = None
self.high_ft = None
self.low_ft = None
self.rise_ft = None
self.fall_ft = None
self.grade = None
self.distance = self.make_distance(steps)
self.points_array, self.points = self.make_points(steps)
self.grade = self.find_max_grade(steps)
self.set_marks()
@classmethod
def make_distance(cls, steps):
""" loop through distance
"""
ret_val = None
try:
dist = 0
for s in steps:
dist += s['distance']
ret_val = dist
except Exception as ex:
log.warning(ex)
return ret_val
@classmethod
def make_point_string(cls, points, max_len=50):
"""
"""
points_array = points
if len(points) > (max_len * 1.15):
# reduce the point array down to something around the size of max_len (or smaller)
points_array = []
# slice the array up into chunks
# @see http://stackoverflow.com/questions/1335392/iteration-over-list-slices (thank you Nadia)
slice_size = int(round(len(points) / max_len))
if slice_size == 1:
slice_size = 2
list_of_slices = zip(*(iter(points),) * slice_size)
# average up the slices
for s in list_of_slices:
avg = sum(s) / len(s)
points_array.append(avg)
points_string = ','.join(["{0:.2f}".format(p) for p in points_array])
return points_string
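# Worked example: for 120 elevation points with the default max_len=50,
# 120 > 50 * 1.15, so slice_size = int(round(120 / 50)) = 2; the points are
# averaged in pairs down to 60 values, which are joined into a comma-separated
# string with two decimal places each.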
@classmethod
def make_points(cls, steps):
""" parse leg for list of elevation points and distances
"""
points_array = None
points_string = None
try:
points = []
for s in steps:
for e in s['elevation']:
elev = e['second']
dist = e['first']
points.append(round(elev, 2))
if len(points) > 0:
points_array = points
points_string = cls.make_point_string(points)
except Exception as e:
log.warning(e)
return points_array, points_string
@classmethod
def find_max_grade(cls, steps):
""" parse leg for list of elevation points and distances
"""
r = {'up': 0, 'down': 0, 'ue': 0, 'ud': 0, 'de': 0, 'dd': 0}
ret_val = r
try:
for s in steps:
first = True
going_up = False
for e in s['elevation']:
dist = e['first']
elev = e['second']
if first:
first = False
r['ue'] = elev
r['ud'] = dist
r['de'] = elev
r['dd'] = dist
else:
# going up
if elev > r['ue']:
# set up vals
r['ue'] = elev
r['ud'] = dist
# set down vals
going_up = True
elif elev < r['ue']:
last_elev = elev
except Exception as e:
log.warning(e)
return ret_val
def set_marks(self):
""" finds start / end / high / low
"""
try:
start = self.points_array[0]
end = self.points_array[len(self.points_array) - 1]
high = self.points_array[0]
low = self.points_array[0]
rise = 0.0
fall = 0.0
slope = 0.0
# find high, low and rise, fall points
last = self.points_array[0]
for p in self.points_array:
if p > high:
high = p
if p < low:
low = p
if p > last:
rise += (p - last)
if p < last:
fall += (p - last)
last = p
# end results as strings with 2 decimal places
self.start_ft = "{0:.1f}".format(start)
self.end_ft = "{0:.1f}".format(end)
self.high_ft = "{0:.1f}".format(high)
self.low_ft = "{0:.1f}".format(low)
# find how much of a rise and fall in feet there are from the avg height
self.rise_ft = "{0:.1f}".format(rise)
self.fall_ft = "{0:.1f}".format(fall)
except Exception as e:
log.warning(e)
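# Worked example: for points_array [10, 12, 11, 15] the marks come out as
# start_ft "10.0", end_ft "15.0", high_ft "15.0", low_ft "10.0",
# rise_ft "6.0" (up 2 to 12, then up 4 to 15) and fall_ft "-1.0"
# (the single drop from 12 to 11 accumulates as a negative number).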
class Place(object):
def __init__(self, jsn, name=None):
""" """
self.name = jsn['name']
self.lat = jsn['lat']
self.lon = jsn['lon']
self.stop = Stop.factory(jsn, self.name)
self.map_img = self.make_img_url(lon=self.lon, lat=self.lat, icon=self.endpoint_icon(name))
def endpoint_icon(self, name):
""" """
ret_val = ''
if name:
x='/extraparams/format_options=layout:{0}'
if name in ['to', 'end', 'last']:
ret_val = x.format('end')
elif name in ['from', 'start', 'begin']:
ret_val = x.format('start')
return ret_val
def make_img_url(self, url="//maps.trimet.org/eapi/ws/V1/mapimage/format/png/width/300/height/288/zoom/8/coord/%(lon)s,%(lat)s%(icon)s", **kwargs):
return url % kwargs
def append_url_params(self, route=None, month=None, day=None):
if self.stop:
self.stop.append_params_schedule_url(route, month, day)
self.stop.append_params_info_url(month, day)
@classmethod
def factory(cls, jsn, obj=None, name=None):
""" will create a Place object from json (jsn) data,
optionally assign the resultant object to some other object, as this alleviates the awkward
construct of 'from' that uses a python keyword, (e.g., self.__dict__['from'] = Place(j['from'])
"""
p = Place(jsn, name)
if obj and name:
obj.__dict__[name] = p
return p
class Alert(object):
def __init__(self, jsn, route_id=None):
self.type = 'ROUTE'
self.route_id = route_id
text = url = start_date = None
try:
""" OTP 0.10.x format
"alerts":[
{
"alertDescriptionText":{
"translations":{"":"The westbound stop on NE Dekum at M L King is closed for construction. Use stop at 6th."},
"someTranslation":"The westbound stop on NE Dekum at M L King is closed for construction. Use stop at 6th."},
"alertUrl":{
"translations":{"":"http://trimet.org/alerts/"},
"someTranslation":"http://trimet.org/alerts/"},
},
"effectiveStartDate":1473674400000
}]
"""
text = jsn['alertDescriptionText']['someTranslation']
url = jsn['alertUrl']['someTranslation']
start_date = jsn['effectiveStartDate']
except:
try:
""" OTP 1.0 format
"alerts":[
{
"alertDescriptionText":"The westbound stop on NE Dekum at M L King is closed for construction. Use stop at 6th.",
"effectiveStartDate":1473674400000,
"alertUrl":"http://trimet.org/alerts/"
}
]
"""
text = jsn['alertDescriptionText']
url = jsn['alertUrl']
start_date = jsn['effectiveStartDate']
except:
log.warn("couldn't parse alerts")
self.text = text
self.url = url
# make sure we have a valid start date datetime
try:
dt = datetime.datetime.fromtimestamp(start_date / 1000)
self.start_date = start_date
except Exception as e:
# import pdb; pdb.set_trace()
dt = datetime.datetime.now()
self.start_date = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
self.start_date_pretty = dt.strftime("%B %d").replace(' 0',' ') # "Monday, March 4, 2013"
self.start_time_pretty = dt.strftime(" %I:%M %p").replace(' 0',' ').lower().strip() # "1:22 pm"
self.long_term = True if datetime.datetime.today() - dt > timedelta(days=35) else False
self.future = True if dt > datetime.datetime.today() else False
# TODO: trimet hack (eliminate me)
if "trimet.org" in self.url:
self.url = "http://trimet.org/#alerts/"
if self.route_id:
self.url = "{0}{1}".format(self.url, self.route_id)
@classmethod
def factory(cls, jsn, route_id=None, def_val=None):
""" returns either def_val (when no alerts in the jsn input), or a list of [Alert]s
"""
ret_val = def_val
try:
if jsn and len(jsn) > 0:
ret_val = []
for a in jsn:
alert = Alert(a, route_id)
ret_val.append(alert)
except Exception as e:
log.warning(e)
return ret_val
class Fare(object):
"""
"""
def __init__(self, jsn, fares):
self.adult = self.get_fare(jsn, '$2.50')
if fares:
self.adult_day = fares.query("adult_day", "$5.00")
self.honored = fares.query("honored", "$1.25")
self.honored_day = fares.query("honored_day", "$2.50")
self.youth = fares.query("youth", "$1.25")
self.youth_day = fares.query("youth_day", "$2.50")
self.tram = fares.query("tram", "$4.70")
self.notes = fares.query("notes")
def get_fare(self, jsn, def_val):
""" TODO -- need to figure out exceptions and populate self.note
1) TRAM (GONDOLA) fare
2) $5.00 one-way fare, when the trip lasts longer than the transfer window
"""
ret_val = def_val
try:
c = int(jsn['fare']['fare']['regular']['cents']) * 0.01
s = jsn['fare']['fare']['regular']['currency']['symbol']
ret_val = "%s%.2f" % (s, c)
except Exception as e:
pass
return ret_val
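# e.g., an OTP fare block under jsn['fare']['fare']['regular'] with
# {"cents": 250, "currency": {"symbol": "$"}} yields "$2.50"; if any of those
# keys is missing, the provided def_val is returned unchanged.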
def update_fare_info(self, def_val):
""" read (periodically) a config file containing all fares an agency might present
"""
ret_val = def_val
try:
if datetime.datetime.now() - self.last_update > timedelta(minutes=self.avert_timeout):
log.warning("updating the advert content")
self.last_update = datetime.datetime.now()
except Exception as e:
log.warning("ERROR updating the advert content {0}".format(e))
return ret_val
class Stop(object):
"""
"""
def __init__(self, jsn, name=None):
# OLD OTP: "stop": {"agencyId":"TriMet", "name":"SW Arthur & 1st", "id":"143","info":"stop.html?stop_id=143", "schedule":"stop_schedule.html?stop_id=143"},
# NEW OTP: "from": { "name":"SE 13th & Lambert","stopId":"TriMet:6693","stopCode":"6693","lon":-122.652906,"lat":45.468484,"arrival":1478551773000,"departure":1478551774000,"zoneId":"B","stopIndex":11,"stopSequence":12,"vertexType":"TRANSIT"}
self.name = name
self.agency = None
self.id = None
self.get_id_and_agency(jsn)
self.info = self.make_info_url(id=self.id)
self.schedule = self.make_schedule_url(id=self.id)
def get_id_and_agency(self, jsn):
try:
# *.10.x format -- "stopId":{"agencyId":"TRIMET","id":"10579"}
self.id = jsn['id']
self.agency = jsn['agencyId']
except Exception as e:
# 1.0.x format -- "stopId":"TriMet:10579",
try:
s = jsn.split(':')
self.id = s[1].strip()
self.agency = s[0].strip()
except Exception as e:
log.warn("couldn't parse AGENCY nor ID from stop")
def make_info_url(self, url="stop.html?stop_id=%(id)s", **kwargs):
return url % kwargs
def make_schedule_url(self, url="stop_schedule.html?stop_id=%(id)s", **kwargs):
return url % kwargs
def append_params_schedule_url(self, route, month, day):
if self.schedule:
if route:
self.schedule += "&route={0}".format(route)
if month and day:
self.schedule += "&month={0}&day={1}".format(month, day)
def append_params_info_url(self, month, day):
if self.info:
if month and day:
self.info += "&month={0}&day={1}".format(month, day)
@classmethod
def factory(cls, jsn, name=None):
ret_val = None
stop_jsn = get_element(jsn, 'stopId')
if stop_jsn:
s = Stop(stop_jsn, name)
ret_val = s
return ret_val
class Route(object):
def __init__(self, jsn):
# TODO IMPORTANT
# TODO We should probably use ott.data's DAO objects here ... very confusing to have multiple routes
# TODO I know I wanted otp_to_ott.py to be standalone, but maybe that's a bad idea in terms of maintenance
# TODO IMPORTANT
# TODO this code is part of view.AgencyTemplate ... use a version of util.AgencyTemplate in the FUTURE
self.route_id_cleanup = '\D.*'
self.agency_id = jsn['agencyId']
self.agency_name = get_element(jsn, 'agencyName')
self.id = remove_agency_from_id(jsn['routeId'])
self.name = self.make_name(jsn)
self.headsign = get_element(jsn, 'headsign')
self.trip = remove_agency_from_id(get_element(jsn, 'tripId'))
url = self.url = get_element(jsn, 'url')
if url is None:
url = self.url = get_element(jsn, 'agencyUrl')
self.url = url
self.schedulemap_url = url
# http://www.c-tran.com/routes/2route/map.html
# http://trimet.org/schedules/r008.htm
if self.agency_id.lower() == 'trimet':
self.url = self.make_route_url("http://trimet.org/schedules/r{0}.htm")
self.schedulemap_url = self.make_route_url("http://trimet.org/images/schedulemaps/{0}.gif")
elif self.agency_id.lower() == 'psc':
self.url = self.make_route_url("http://www.portlandstreetcar.org/node/3")
self.schedulemap_url = self.make_route_url("http://www.portlandstreetcar.org/node/4")
elif self.agency_id.lower() == 'c-tran':
self.url = "http://c-tran.com/routes/{0}route/index.html".format(self.id)
self.schedulemap_url = "http://c-tran.com/images/routes/{0}map.png".format(self.id)
# TODO this code is part of view.AgencyTemplate ... use a version of util.AgencyTemplate in the FUTURE
def clean_route_id(self, route_id):
""" cleans the route_id parameter. needed because TriMet started using id.future type route ids for route name changes
"""
ret_val = route_id
if self.route_id_cleanup:
ret_val = re.sub(self.route_id_cleanup, '', route_id)
return ret_val
""" TODO: move to a single class that allows any agency to override & customize """
def make_route_url(self, template):
""" remove trailing x on route id, fill out the id with 3 zeros, pump that id thru the url template
"""
id = self.clean_route_id(self.id)
id = id.zfill(3)
id = template.format(id)
return id
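# e.g., for a TriMet route id of "33.future", clean_route_id() strips
# everything from the first non-digit to give "33", zfill(3) pads it to "033",
# and the template "http://trimet.org/schedules/r{0}.htm" produces
# "http://trimet.org/schedules/r033.htm".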
def make_name(self, jsn, name_sep='-', def_val=''):
""" create a route name based on the returned json and the long & short names
NOTE: we also handle a special case for interline legs
"""
ret_val = def_val
# step 1: interline name will use jsn['route'] in certain circumstances
# NOTE: we get some funky things with interline in the recent OTP code, where the record is the previous route
# not the new interline route. So we'll build a name like MAX Yellow Line from the
# crap data we have; fix this (temporarily)
ln = get_element(jsn, 'routeLongName')
if Leg.is_interline(jsn) and 'route' in jsn and len(jsn['route']) > 0 and not (jsn['route'] in ln or ln in jsn['route']):
ret_val = jsn['route']
else:
# step 2: build up a route name using the short and long name(s) of the route
# step 2a: grab short to go along with long name captured above
sn = get_element(jsn, 'routeShortName')
# step 2b: short name, ala '33' in 33-McLoughlin or '' for MAX Orange Line
if sn and len(sn) > 0:
if len(ret_val) > 0 and name_sep:
ret_val = ret_val + name_sep
ret_val = ret_val + sn
# step 2c: long name name, ala 'McLoughlin' in 33-McLoughlin, 'MAX Orange Line'
if ln and len(ln) > 0:
if len(ret_val) > 0 and name_sep:
ret_val = ret_val + name_sep
ret_val = ret_val + ln
return ret_val
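# e.g., routeShortName "33" and routeLongName "McLoughlin" combine to
# "33-McLoughlin"; a route with no short name, such as MAX Orange Line,
# keeps just its long name.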
class Step(object):
def __init__(self, jsn):
self.name = jsn['streetName']
self.lat = jsn['lat']
self.lon = jsn['lon']
self.distance_meters = jsn['distance']
self.distance_feet = m_to_ft(jsn['distance'])
self.distance = pretty_distance(self.distance_feet)
self.compass_direction = self.get_direction(get_element(jsn, 'absoluteDirection'))
self.relative_direction = self.get_direction(get_element(jsn, 'relativeDirection'))
@classmethod
def get_direction(cls, dir):
""" TODO localize me
"""
ret_val = dir
try:
ret_val = {
'LEFT': dir.lower(),
'RIGHT': dir.lower(),
'HARD_LEFT': dir.lower().replace('_', ' '),
'HARD_RIGHT': dir.lower().replace('_', ' '),
'CONTINUE': dir.lower(),
'NORTH': dir.lower(),
'SOUTH': dir.lower(),
'EAST': dir.lower(),
'WEST': dir.lower(),
'NORTHEAST': dir.lower(),
'NORTHWEST': dir.lower(),
'SOUTHEAST': dir.lower(),
'SOUTHWEST': dir.lower(),
}[dir]
except Exception as e:
pass
return ret_val
@classmethod
def get_relative_direction(cls, dir):
""" """
ret_val = dir
return ret_val
class Leg(object):
"""
"""
def __init__(self, jsn):
self.mode = jsn['mode']
fm = Place.factory(jsn['from'], self, 'from')
to = Place.factory(jsn['to'], self, 'to')
self.steps = self.get_steps(jsn)
self.elevation = None
if self.steps and 'steps' in jsn:
self.elevation = Elevation(jsn['steps'])
self.date_info = DateInfo(jsn)
self.compass_direction = self.get_compass_direction()
self.distance_meters = jsn['distance']
self.distance_feet = m_to_ft(jsn['distance'])
self.distance = pretty_distance(self.distance_feet)
# transit related attributes
self.route = None
self.alerts = None
self.transfer = None
self.interline = None
# mode specific config
route_id = None
if self.is_transit_mode():
self.route = Route(jsn)
route_id = self.route.id
if 'alerts' in jsn:
self.alerts = Alert.factory(jsn['alerts'], route_id=self.route.id)
self.interline = self.is_interline(jsn)
svc_date = date_utils.parse_month_day_year_string(self.date_info.service_date)
fm.append_url_params(route_id, month=svc_date['month'], day=svc_date['day'])
to.append_url_params(route_id, month=svc_date['month'], day=svc_date['day'])
@classmethod
def is_interline(cls, jsn):
ret_val = False
if 'interlineWithPreviousLeg' in jsn:
ret_val = jsn['interlineWithPreviousLeg']
return ret_val
def is_transit_mode(self):
return self.mode in ['BUS', 'TRAM', 'RAIL', 'TRAIN', 'SUBWAY', 'CABLECAR', 'GONDOLA', 'FUNICULAR', 'FERRY']
def is_sea_mode(self):
return self.mode in ['FERRY']
def is_air_mode(self):
return self.mode in ['GONDOLA']
def is_non_transit_mode(self):
return self.mode in ['BIKE', 'BICYCLE', 'WALK', 'CAR', 'AUTO']
def get_steps(self, jsn):
ret_val = None
if 'steps' in jsn and jsn['steps'] and len(jsn['steps']) > 0:
ret_val = []
for s in jsn['steps']:
step = Step(s)
ret_val.append(step)
return ret_val
def get_compass_direction(self):
ret_val = None
if self.steps and len(self.steps) > 0:
v = self.steps[0].compass_direction
if v:
ret_val = v
return ret_val
class Itinerary(object):
"""
"""
def __init__(self, jsn, itin_num, url, fares):
self.dominant_mode = None
self.selected = False
self.has_alerts = False
self.alerts = []
self.url = url
self.itin_num = itin_num
self.transfers = jsn['transfers']
self.fare = Fare(jsn, fares)
self.date_info = DateInfoExtended(jsn)
self.legs = self.parse_legs(jsn['legs'])
def set_dominant_mode(self, leg):
""" dominant transit leg -- rail > bus
"""
if object_utils.has_content(self.dominant_mode) is False:
self.dominant_mode = object_utils.safe_str(leg.mode).lower()
if leg.is_transit_mode() and not leg.is_sea_mode():
if self.dominant_mode != 'rail' and leg.mode == 'BUS':
self.dominant_mode = 'bus'
else:
self.dominant_mode = 'rail'
def parse_legs(self, legs):
"""
"""
ret_val = []
# step 1: build the legs
for l in legs:
leg = Leg(l)
ret_val.append(leg)
# step 2: find transfer legs e.g., this pattern TRANSIT LEG, WALK/BIKE LEG, TRANSIT LEG
num_legs = len(ret_val)
for i, leg in enumerate(ret_val):
self.set_dominant_mode(leg)
if leg.is_transit_mode() and i+2 < num_legs:
if ret_val[i+2].is_transit_mode() and ret_val[i+1].is_non_transit_mode():
self.transfer = True
# step 3: find 'unique' alerts and build an alerts object for the itinerary
alerts_hash = {}
for leg in ret_val:
if leg.alerts:
self.has_alerts = True
try:
for a in leg.alerts:
alerts_hash[a.text] = a
except Exception as e:
pass
self.alerts = []
for v in alerts_hash.values():
self.alerts.append(v)
return ret_val
class Plan(object):
""" top level class of the ott 'plan' object tree
contains these elements:
self.from, self.to, self.params, self.arrive_by, self.optimize (plus other helpers
"""
def __init__(self, jsn, params=None, fares=None, path="planner.html?itin_num={0}"):
""" creates a self.from and self.to element in the Plan object """
Place.factory(jsn['from'], self, 'from')
Place.factory(jsn['to'], self, 'to')
self.itineraries = self.parse_itineraries(jsn['itineraries'], path, params, fares)
self.set_plan_params(params)
def parse_itineraries(self, itineraries, path, params, fares):
""" TODO explain me...
"""
ret_val = []
for i, jsn in enumerate(itineraries):
itin_num = i+1
url_params = None
if params:
url_params = params.ott_url_params()
url = self.make_itin_url(path, url_params, itin_num)
itin = Itinerary(jsn, itin_num, url, fares)
ret_val.append(itin)
# set the selected
selected = self.get_selected_itinerary(params, len(ret_val))
if selected >= 0 and selected < len(ret_val):
ret_val[selected].selected = True
return ret_val
def make_itin_url(self, path, query_string, itin_num):
"""
"""
ret_val = None
try:
ret_val = path.format(itin_num)
if query_string:
ret_val = "{0}&{1}".format(ret_val, query_string)
except Exception as e:
log.warn("make_itin_url exception")
return ret_val
def get_selected_itinerary(self, params, max=3):
""" return list position (index starts at zero) of the 'selected' itinerary
@see ParamParser
"""
ret_val = 0
if params:
ret_val = params.get_itin_num_as_int()
ret_val -= 1 # decrement value because we need an array index, eg: itin #1 == itin[0]
# final check to make sure we don't over-index the list of itineraries
if ret_val < 0 or ret_val >= max:
ret_val = 0
return ret_val
def pretty_mode(self, mode):
""" TOD0 TODO TODO localize
"""
ret_val = 'Transit'
if 'BICYCLE' in mode and ('TRANSIT' in mode or ('RAIL' in mode and 'BUS' in mode)):
ret_val = 'Bike to Transit'
elif 'BICYCLE' in mode and 'RAIL' in mode:
ret_val = 'Bike to Rail'
elif 'BICYCLE' in mode and 'BUS' in mode:
ret_val = 'Bike to Bus'
elif 'TRANSIT' in mode:
ret_val = 'Transit'
elif 'BUS' in mode:
ret_val = 'Bus'
elif 'RAIL' in mode:
ret_val = 'Rail'
elif 'BICYCLE' in mode:
ret_val = 'Bike'
elif 'WALK' in mode:
ret_val = 'Walk'
return ret_val
def dominant_transit_mode(self, i=0):
""" TODO ... make better...parse itin affect adverts (at least) """
ret_val = 'rail'
if len(self.itineraries) < i:
i = len(self.itineraries) - 1
if i >= 0 and self.itineraries:
ret_val = self.itineraries[i].dominant_mode
return ret_val
def set_plan_params(self, params):
""" passed in by a separate routine, rather than parsed from returned itinerary
"""
if params:
self.params = {
"is_arrive_by" : params.arrive_depart,
"optimize" : params.optimize,
"map_planner" : params.map_url_params(),
"edit_trip" : params.ott_url_params(),
"return_trip" : params.ott_url_params_return_trip(),
"modes" : self.pretty_mode(params.mode),
"walk" : pretty_distance_meters(params.walk_meters)
}
else:
self.params = {}
self.max_walk = "1.4"
"""
UTILITY METHODS
"""
def get_element(jsn, name, def_val=None):
"""
"""
ret_val = def_val
try:
v = jsn[name]
if type(def_val) == int:
ret_val = int(v)
else:
ret_val = v
except Exception as e:
log.debug(name + " not an int value in jsn")
return ret_val
def ms_to_minutes(ms, is_pretty=False, show_hours=False):
ret_val = ms / 1000 / 60
# pretty '3 hours & 1 minute' string
if is_pretty:
h_str = ''
m_str = ''
# calculate hours string
m = ret_val
if show_hours and m > 60:
h = int(math.floor(m / 60))
m = int(m % 60)
if h > 0:
hrs = 'hour' if h == 1 else 'hours'
h_str = '%d %s' % (h, hrs)
if m > 0:
h_str = h_str + ' ' + '&' + ' '
# calculate minutes string
if m > 0:
mins = 'minute' if m == 1 else 'minutes'
m_str = '%d %s' % (m, mins)
ret_val = '%s%s' % (h_str, m_str)
return ret_val
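# e.g., ms_to_minutes(3900000) yields 65 (minutes); with is_pretty=True and
# show_hours=True it yields the string '1 hour & 5 minutes'.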
def hour_min_string(h, m, fmt='{0} {1}', sp=', '):
ret_val = None
if h and h > 0:
hr = 'hours' if h > 1 else 'hour'
ret_val = "{0} {1}".format(h, hr)
if m:
min = 'minutes' if m > 1 else 'minute'
pre = '' if ret_val is None else ret_val + sp
ret_val = "{0}{1} {2}".format(pre, m, min)
return ret_val
def seconds_to_hours_minutes(secs, def_val=None, min_secs=60):
"""
"""
min = def_val
hour = def_val
if secs > min_secs:
m = math.floor(secs / 60)
min = m % 60
if m >= 60:
m = m - min
hour = int(math.floor(m / 60))
return hour,min
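# e.g., seconds_to_hours_minutes(5400) yields hour 1 and minutes 30; inputs at
# or below min_secs (60 seconds by default) return (def_val, def_val),
# i.e. (None, None).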
def m_to_ft(m):
ret_val = float(m) * 3.28
return ret_val
def distance_dict(distance, measure):
return {'distance':distance, 'measure':measure}
def pretty_distance(feet):
""" TODO localize
"""
ret_val = ''
if feet <= 1.0:
ret_val = distance_dict(1, 'foot')
elif feet < 1000:
ret_val = distance_dict(int(feet), 'feet')
elif feet < 1500:
ret_val = distance_dict('1/4', 'mile')
elif feet < 2200:
ret_val = distance_dict('1/3', 'mile')
elif feet < 3100:
ret_val = distance_dict('1/2', 'mile')
elif feet < 4800:
ret_val = distance_dict('3/4', 'mile')
elif feet < 5400:
ret_val = distance_dict('1', 'mile')
else:
ret_val = distance_dict(round(feet / 5280, 1), 'miles')
return ret_val
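# e.g., pretty_distance(2000) yields {'distance': '1/3', 'measure': 'mile'},
# while pretty_distance(6000) yields {'distance': 1.1, 'measure': 'miles'}.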
def pretty_distance_meters(m):
"""
"""
ret_val = m
try:
d = pretty_distance(float(m) * 3.28)
ret_val = "{distance} {measure}".format(**d)
except Exception as e:
log.warn("pretty distance meters")
return ret_val
def main():
argv = sys.argv
if argv and len(argv) > 1 and ('new' in argv or 'n' in argv):
file = './ott/otp_client/tests/data/new/pdx2ohsu.json'
elif argv and len(argv) > 1 and not ('pretty' in argv or 'p' in argv):
file = argv[1]
else:
file = './ott/otp_client/tests/data/old/pdx2ohsu.json'
try:
f = open(file)
except Exception as e:
path = "{0}/{1}".format('ott/otp_client/tests', file)
f = open(path)
j = json.load(f)
p = Plan(j['plan'])
pretty = False
if argv:
pretty = 'pretty' in argv or 'p' in argv
y = json_utils.json_repr(p, pretty)
print(y)
if __name__ == '__main__':
main()
| OpenTransitTools/otp_client_py | ott/otp_client/otp_to_ott.py | Python | mpl-2.0 | 36,580 | 0.004265 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((1187.5, 11664.8, 3272.4), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((1770.89, 9961.76, 3299.8), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((3687.55, 9634.71, 3057.58), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2648.01, 11291.3, 1826.19), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((4044.92, 11971.5, 1372.47), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((6062.7, 11004.3, 2417.93), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((7263.91, 10793.8, 3615.86), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((6953.99, 11521.2, 3125.77), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((7907.5, 9894.78, 5077.31), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((8303.41, 10606.5, 6543.54), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((8648.15, 8917.55, 7280.2), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((7188.16, 8586, 7361.06), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((5928.71, 7709.66, 7700.76), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((5062.69, 8985.9, 7445.77), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((3197.44, 8804, 8744.82), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((744.469, 6958.43, 8463.76), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((1208.16, 5977.81, 6850.56), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((2088.62, 5295.02, 7713.93), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((3550.58, 6112.9, 7907.4), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((4377.06, 6492.76, 9085.32), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((6007.56, 6982.35, 7340.01), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((4495.74, 6025.06, 8338.37), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((4671.59, 5400.4, 7990.14), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((4094.31, 4289.1, 7702.68), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((2855.56, 4857.38, 7343.13), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((1278.3, 4876.89, 7731.96), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((2784.44, 5323.8, 7802.94), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4288.76, 6780.35, 7064.9), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((5238.01, 5659.52, 7410.52), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((6351.12, 6173.15, 7332.57), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((6084.66, 5685.12, 6880.48), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((6675.68, 7281.04, 7241.44), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((6487.97, 5769.1, 8168.44), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((5420.47, 5029.67, 7394.7), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((4189.38, 5203.47, 7849.05), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((3124.99, 5159.96, 8750.75), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((5052.75, 6178.53, 7549.89), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((4064.07, 4737.44, 8226.59), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((3833.23, 5082.31, 6984.27), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((4487.35, 4521.45, 8300.26), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((5927, 4848.97, 7503.21), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((7588.07, 5205.25, 8242.55), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((6911.4, 2835.15, 9192.4), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((7970.05, 4288.58, 8636.29), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((6413.73, 4426.41, 8070.59), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((7214.37, 5564.23, 6660.88), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((7333.85, 3953.02, 5536.58), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((7847.66, 2636.1, 7026.92), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((8231.3, 3643.36, 5471.2), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((8264.34, 5327.63, 4627.34), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((9266.18, 4749.42, 5212.85), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((8599.69, 6298.64, 5172.18), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((7541.83, 7704.31, 5048.15), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((8696.33, 8716.08, 4830.14), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((9254.2, 8212, 4670.03), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((8255.53, 6423.4, 4339.42), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((7313.23, 6698.44, 2391.04), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((6243.93, 5211.98, 662.652), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((5988.92, 4800.58, 314.706), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((6151.11, 5464.52, -173.152), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((6462.85, 5206.47, 778.133), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((7135.27, 5172.51, -99.6635), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((6842.76, 5973.3, 1604.97), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((4989.32, 5358.9, 1444.2), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((3339.67, 4380.71, 1167.86), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((4380.76, 3822.73, 2539.33), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((4141.94, 2593.1, 1441.84), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((5301.31, 4578.6, 1837.14), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((4139.42, 4231.63, -172.164), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((3009.74, 4275.26, -397.018), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((3865.91, 4091.7, -415.898), (0.7, 0.7, 0.7), 764.488)
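# Finally, register every generated surface model with Chimera's open models.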
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| batxes/4Cin | SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models39351.py | Python | gpl-3.0 | 17,562 | 0.025111 |
import redis
import psycopg2
from tornado.options import options
import tornadobase.handlers
class BaseHandler(tornadobase.handlers.BaseHandler):
def initialize(self):
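        # Per-handler setup: a PostgreSQL connection in autocommit mode plus a
        # Redis client that backs the cookie-based sessions.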
self.dbref = psycopg2.connect(dbname=options.dbname,
user=options.dbuser,
password=options.dbpass)
self.dbref.autocommit = True
self.redis = redis.StrictRedis(host='localhost',
port=6379,
db=0,
decode_responses=True)
@property
def session(self):
session_id = self.get_secure_cookie('session')
if session_id is not None:
return self.redis.hgetall(session_id)
return None
def get_current_user(self):
session_id = self.get_secure_cookie('__id')
if session_id is not None:
self.redis.expire(session_id, options.session_timeout)
return self.redis.hget(session_id, 'userid')
return None
def write_error(self, status_code, **kwargs):
(http_error, error, stacktrace) = kwargs['exc_info']
if not hasattr(error, 'reason'):
reason = 'Something went wrong.'
else:
reason = error.reason
self.render('errors/general.html',
status_code=status_code,
reason=reason)
def on_finish(self):
self.dbref.close()
| coyote240/zeroecks.com | zeroecks/handlers/base_handler.py | Python | mit | 1,495 | 0 |
from pygame import Rect
class AfflictionBox():
def __init__(self, affliction, font, rectPosition = (0, 0)):
self.affliction = affliction
self.rectPosition = rectPosition
self.name = self.affliction.name
self.font = font
self.textSize = self.font.size(self.name)
self.textRect = Rect(self.rectPosition, self.textSize)
def update(self, rectPosition):
self.rectPosition = rectPosition
self.textRect.centerx = rectPosition[0] + self.textSize[0]
self.textRect.centery = rectPosition[1] + self.textSize[1] | ZakDoesGaming/OregonTrail | lib/afflictionBox.py | Python | mit | 523 | 0.032505 |
#!/usr/bin/env python
# coding=utf-8
import requests
import time
import json
"""
ansible 运行结果回调
"""
class CallbackModule(object):
def v2_runner_item_on_ok(self, *args, **kwargs):
# time.sleep(10)
# print args
for i in dir(args[0]):
if not i.startswith('__'):
print i
print '======'
# print args[0]._result
print json.dumps(args[0]._result, indent=4)
print args[0]._task
print 'runner item on ok'
def v2_runner_item_on_failed(self, *args, **kwargs):
# print args
print dir(args[0])
print 'runner item on failed'
# print args[0]._result
print json.dumps(args[0]._result, indent=4)
print args[0]._task
print '======'
def v2_runner_item_on_skipped(self, *args, **kwargs):
# print args
print dir(args[0])
print 'runner item on skipped'
def v2_runner_retry(self, *args, **kwargs):
# print args
print dir(args[0])
print 'runner on retry'
def v2_runner_on_ok(self, *args, **kwargs):
print 'runner on ok'
# # print args
# print dir(args[0])
for i in dir(args[0]):
if not i.startswith('__'):
print i
print json.dumps(args[0]._result, indent=4)
print args[0]._task
requests.post('http://127.0.0.1:9999/api/callback/test', args[0]._result)
# print type(args[0]._task), 'task type'
# print args[0]._host
# print kwargs
def v2_runner_on_unreachable(self, *args, **kwargs):
        print 'runner on unreachable'
# # print args
print dir(args[0])
# print args[0]._result
# print args[0]._task
# print args[0]._host
# print kwargs
def v2_runner_on_failed(self, *args, **kwargs):
# # print args
print dir(args[0])
# print args[0]._result
# print args[0]._task
# print args[0]._host
# print kwargs
print 'runner on failed'
print json.dumps(args[0]._result, indent=4)
print args[0]._task
requests.post('http://127.0.0.1:9999/api/callback/test', args[0]._result)
requests.post('http://127.0.0.1:9999/api/callback/test', args[0]._task)
print args[0].is_failed(), '-*/***********'
print '------'
def v2_runner_on_skipped(self, *args, **kwargs):
print 'runner on skipped'
def v2_playbook_on_stats(self, *args, **kwargs):
# print args
# print dir(args[0])
for i in dir(args[0]):
if not i.startswith('__'):
print i
# print args[0].changed, 'changed'
# print args[0].ok, 'ok'
# print args[0].dark, 'dark'
print args[0].failures, 'failures'
# print args[0].increment, 'increment'
# print args[0].processed, 'processed'
# print args[0].skipped, 'skipped'
# print args[0].summarize, 'summarize'
# print kwargs
print 'on stats'
if __name__ == '__main__':
print 'callback'
| tao12345666333/Talk-Is-Cheap | ansible/plugins/callback/test.py | Python | mit | 3,110 | 0.000646 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_Quantization/trend_PolyTrend/cycle_30/ar_/test_artificial_1024_Quantization_PolyTrend_30__20.py | Python | bsd-3-clause | 269 | 0.085502 |
import operator
import unittest
import numpy
import six
from cupy import testing
@testing.gpu
class TestArrayElementwiseOp(unittest.TestCase):
_multiprocess_can_split_ = True
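    # Every check_* helper below is executed by the decorators for all supported
    # dtypes and twice per dtype -- once with xp=numpy and once with xp=cupy --
    # and the two results are compared for closeness.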
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_scalar_op(self, op, xp, dtype, swap=False):
a = testing.shaped_arange((2, 3), xp, dtype)
if swap:
return op(dtype(2), a)
else:
return op(a, dtype(2))
def test_add_scalar(self):
self.check_array_scalar_op(operator.add)
def test_radd_scalar(self):
self.check_array_scalar_op(operator.add, swap=True)
def test_iadd_scalar(self):
self.check_array_scalar_op(operator.iadd)
def test_sub_scalar(self):
self.check_array_scalar_op(operator.sub)
def test_rsub_scalar(self):
self.check_array_scalar_op(operator.sub, swap=True)
def test_isub_scalar(self):
self.check_array_scalar_op(operator.isub)
def test_mul_scalar(self):
self.check_array_scalar_op(operator.mul)
def test_rmul_scalar(self):
self.check_array_scalar_op(operator.mul, swap=True)
def test_imul_scalar(self):
self.check_array_scalar_op(operator.imul)
def test_truediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.truediv)
def test_rtruediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.truediv, swap=True)
def test_itruediv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.itruediv)
def test_div_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.div)
def test_rdiv_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.div, swap=True)
def test_idiv_scalar(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.idiv)
def test_floordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.floordiv)
def test_rfloordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.floordiv, swap=True)
def test_ifloordiv_scalar(self):
numpy.seterr(divide='ignore')
self.check_array_scalar_op(operator.ifloordiv)
def test_pow_scalar(self):
self.check_array_scalar_op(operator.pow)
def test_rpow_scalar(self):
self.check_array_scalar_op(operator.pow, swap=True)
def test_ipow_scalar(self):
self.check_array_scalar_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_array_op(self, op, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
b = testing.shaped_reverse_arange((2, 3), xp, dtype)
return op(a, b)
    def test_add_array(self):
        self.check_array_array_op(operator.add)
    def test_iadd_array(self):
        self.check_array_array_op(operator.iadd)
    def test_sub_array(self):
        self.check_array_array_op(operator.sub)
    def test_isub_array(self):
        self.check_array_array_op(operator.isub)
    def test_mul_array(self):
        self.check_array_array_op(operator.mul)
    def test_imul_array(self):
        self.check_array_array_op(operator.imul)
    def test_truediv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.truediv)
    def test_itruediv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.itruediv)
    def test_div_array(self):
        if six.PY3:
            return
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.div)
    def test_idiv_array(self):
        if six.PY3:
            return
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.idiv)
    def test_floordiv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.floordiv)
    def test_ifloordiv_array(self):
        numpy.seterr(divide='ignore')
        self.check_array_array_op(operator.ifloordiv)
    def test_pow_array(self):
        self.check_array_array_op(operator.pow)
    def test_ipow_array(self):
        self.check_array_array_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_broadcasted_op(self, op, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        b = testing.shaped_arange((2, 1), xp, dtype)
return op(a, b)
def test_broadcasted_add(self):
self.check_array_broadcasted_op(operator.add)
def test_broadcasted_iadd(self):
self.check_array_broadcasted_op(operator.iadd)
def test_broadcasted_sub(self):
self.check_array_broadcasted_op(operator.sub)
def test_broadcasted_isub(self):
self.check_array_broadcasted_op(operator.isub)
def test_broadcasted_mul(self):
self.check_array_broadcasted_op(operator.mul)
def test_broadcasted_imul(self):
self.check_array_broadcasted_op(operator.imul)
def test_broadcasted_truediv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.truediv)
def test_broadcasted_itruediv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.itruediv)
def test_broadcasted_div(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.div)
def test_broadcasted_idiv(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.idiv)
def test_broadcasted_floordiv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.floordiv)
def test_broadcasted_ifloordiv(self):
numpy.seterr(divide='ignore')
self.check_array_broadcasted_op(operator.ifloordiv)
def test_broadcasted_pow(self):
self.check_array_broadcasted_op(operator.pow)
def test_broadcasted_ipow(self):
self.check_array_broadcasted_op(operator.ipow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_doubly_broadcasted_op(self, op, xp, dtype):
a = testing.shaped_arange((2, 1, 3), xp, dtype)
b = testing.shaped_arange((3, 1), xp, dtype)
return op(a, b)
def test_doubly_broadcasted_add(self):
self.check_array_doubly_broadcasted_op(operator.add)
def test_doubly_broadcasted_sub(self):
self.check_array_doubly_broadcasted_op(operator.sub)
def test_doubly_broadcasted_mul(self):
self.check_array_doubly_broadcasted_op(operator.mul)
def test_doubly_broadcasted_truediv(self):
numpy.seterr(divide='ignore', invalid='ignore')
self.check_array_doubly_broadcasted_op(operator.truediv)
def test_doubly_broadcasted_floordiv(self):
numpy.seterr(divide='ignore')
self.check_array_doubly_broadcasted_op(operator.floordiv)
def test_doubly_broadcasted_div(self):
if six.PY3:
return
numpy.seterr(divide='ignore')
self.check_array_doubly_broadcasted_op(operator.div)
def test_doubly_broadcasted_pow(self):
self.check_array_doubly_broadcasted_op(operator.pow)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose()
def check_array_reversed_op(self, op, xp, dtype):
a = testing.shaped_arange((5,), xp, dtype)
return op(a, a[::-1])
def test_array_reversed_add(self):
self.check_array_reversed_op(operator.add)
def test_array_reversed_sub(self):
self.check_array_reversed_op(operator.sub)
def test_array_reversed_mul(self):
self.check_array_reversed_op(operator.mul)
| ikasumi/chainer | tests/cupy_tests/test_ndarray_elementwise_op.py | Python | mit | 8,059 | 0 |
from omni_api.base import ClientBase, DataItem
class HackpadClient(ClientBase):
def __init__(self, client_id, secret):
self.auth = self.get_oauth_token(client_id, secret)
def get_url(self, url, **kwargs):
kwargs['auth'] = self.auth
return super(HackpadClient, self).get_url(
url,
load_json=True,
**kwargs
)
def search(self, query):
url = 'https://hackpad.com/api/1.0/search'
params = {
'q': query,
}
result = self.get_url(url, params=params)
return [HackPad(i) for i in result]
def all_pads(self):
"""Returns list of pad ids."""
# Stupid hack until the necessary endpoint exists
return self.search('a')
class HackPad(DataItem):
@property
def id(self):
return self.data['id']
@property
def creator_id(self):
return self.data['creatorId']
@property
def domain_id(self):
return self.data['domainId']
@property
def last_edited(self):
return self.parse_date(self.data['lastEditedDate'])
@property
def last_editor_id(self):
return self.data['lastEditorId']
@property
def snippet(self):
"""Markup"""
return self.data['snippet']
@property
def title(self):
return self.data['title']
| harveyr/omni-api | omni_api/hackpad.py | Python | mit | 1,372 | 0 |
class ListViewTestsMixin(object):
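    # Role-based checks for the admin list views. The concrete TestCase that
    # mixes this in is expected to provide login_as() and _test_table_view().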
def test_admin_users_can_access_all_tables(self):
for username in ('super', 'follow'):
with self.login_as(username):
for model_name in ('news', 'tip', 'crawl', 'user', 'schedule'):
self._test_table_view(username, model_name, can_access=True)
def test_admin_users_can_crawl_news_and_tips(self):
for username in ('super', 'follow'):
with self.login_as(username):
for model_name in ('news', 'tip'):
self._test_table_view(username, model_name, can_crawl=True)
def test_admin_users_can_delete_rows_in_tables(self):
for username in ('super', 'follow'):
with self.login_as(username):
for model_name in ('news', 'tip', 'crawl', 'user', 'schedule'):
self._test_table_view(username, model_name, can_use_row_actions=True)
def test_admin_users_can_export_news_and_tips(self):
for username in ('super', 'follow'):
with self.login_as(username):
for model_name in ('news', 'tip'):
self._test_table_view(username, model_name, can_export=True)
def test_admin_users_cannot_export_other_models(self):
for username in ('super', 'follow'):
with self.login_as(username):
for model_name in ('crawl', 'user', 'schedule'):
self._test_table_view(username, model_name, can_export=False)
def test_simple_users_can_export_news_and_tips(self):
for username in ('simple',):
with self.login_as(username):
for model_name in ('news', 'tip'):
self._test_table_view(username, model_name, can_export=True)
def test_simple_users_cannot_crawl_news_and_tips(self):
for username in ('simple',):
with self.login_as(username):
for model_name in ('news', 'tip'):
self._test_table_view(username, model_name, can_crawl=False)
def test_simple_users_cannot_delete_news_and_tips(self):
for username in ('simple',):
with self.login_as(username):
for model_name in ('news', 'tip'):
self._test_table_view(username, model_name, can_use_row_actions=False)
def test_simple_users_cannot_access_privileged_tables(self):
for username in ('simple',):
with self.login_as(username):
for model_name in ('crawl', 'user', 'schedule'):
self._test_table_view(username, model_name, can_access=False)
def _page_should_load_custom_js_css(self, response, info, target, skin):
msg = 'the page must load custom js/css' + info
for bundle in ('common', skin):
if target == 'prod':
bundle += '.min'
link_css = ('<link type="text/css" href="/static/%s/dz-%s.css?hash='
% (target, bundle))
self.assertContains(response, link_css, msg_prefix=msg)
link_js = ('<script type="text/javascript" src="/static/%s/dz-%s.js?hash='
% (target, bundle))
self.assertContains(response, link_js, msg_prefix=msg)
| ivandeex/dz | dz/tests/views.py | Python | mit | 3,249 | 0.003078 |
# BinGrep, version 1.0.0
# Copyright 2017 Hiroki Hada
# coding:UTF-8
import sys, os, time, argparse
import re
import pprint
#import pydot
import math
import cPickle
import ged_node
from idautils import *
from idc import *
import idaapi
def idascript_exit(code=0):
idc.Exit(code)
def get_short_function_name(function):
return function.replace("?", "")[:100]
def mkdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def cPickle_dump(filename, data):
with open(filename, "wb") as f:
cPickle.dump(data, f)
def print_cfg(cfg):
for block in cfg:
print "[%02d]" % block.id,
print hex(block.startEA),
succs = list(block.succs())
print "(succs(%d): " % len(succs),
for i in range(len(succs)):
sys.stdout.write(hex(succs[i].startEA))
if i < len(succs) - 1:
sys.stdout.write(", ")
print ")"
def output_cfg_as_png_rec(g, block, memo):
functions1, dummy = get_marks(block, 0)
hashed_label1 = hash_label(functions1)
label1 = hex(block.startEA) + ("\n%08x" % hashed_label1)
g.add_node(pydot.Node(label1, fontcolor='#FFFFFF', color='#333399'))
for b in list(block.succs()):
functions2, dummy = get_marks(b, 0)
hashed_label2 = hash_label(functions2)
label2 = hex(b.startEA) + ("\n%08x" % hashed_label2)
if b.startEA not in memo:
memo.append(b.startEA)
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold'))
output_cfg_as_png_rec(g, b, memo)
else:
g.add_edge(pydot.Edge(label1, label2, color='#333399', style='bold, dotted'))
def output_cfg_as_png(cfg, filename, overwrite_flag):
blocks_src = {}
blocks_dst = {}
block = cfg[0]
f_name = GetFunctionName(block.startEA)
if not overwrite_flag and os.path.exists(filename):
return
g = pydot.Dot(graph_type='digraph', bgcolor="#F0E0FF")
size = "21"
g.set_rankdir('TB')
g.set_size(size)
g.add_node(pydot.Node('node', shape='ellipse', margin='0.05', fontcolor='#FFFFFF', fontsize=size, color='#333399', style='filled', fontname='Consolas Bold'))
g.add_node(pydot.Node('edge', color='lightgrey'))
memo = []
output_cfg_as_png_rec(g, block, memo)
g.write_png(filename)
def get_cfg(function_start, function_end):
f_name = GetFunctionName(function_start)
cfg = idaapi.FlowChart(idaapi.get_func(function_start))
return list(cfg)
def get_cfgs():
cfgs = []
for ea in Segments():
functions = list(Functions(SegStart(ea), SegEnd(ea)))
functions.append(SegEnd(ea))
for i in range(len(functions) - 1):
function_start = functions[i]
function_end = functions[i+1]
cfg = get_cfg(function_start, function_end)
cfgs.append(cfg)
return cfgs
def hash_label(marks):
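    # Order-insensitive 32-bit hash of the marks: sort, de-duplicate and
    # upper-case them, then fold the characters through a rot13-style rolling hash.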
tmp = sorted(set(marks))
tmp = "".join(tmp)
tmp = tmp.upper()
def rot13(string):
return reduce(lambda h,c: ((h>>13 | h<<19)+ord(c)) & 0xFFFFFFFF, [0]+list(string))
hashed_label = rot13(tmp)
hashed_label = hashed_label & 0xFFFFFFFF
return hashed_label
def get_marks(block, gamma):
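    # Collect "marks" from one basic block: interesting immediate constants,
    # string offsets and call targets. gamma counts the call instructions seen.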
marks = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
if mnem not in ["call"]:
for buf in (opnd[1], opnd[2]):
if buf:
match = re.search("([\dA-F]+)h", buf)
if match:
magic = int(match.group(1), 16)
if 0x00001000 <= magic <= 0xffffffff:
marks.append(hex(magic))
for buf in (opnd[0], opnd[1], opnd[2]):
if buf:
match = re.search("offset (a[\S]+)", buf)
if match:
offset_a = match.group(1)
if offset_a[:4] == "asc_": continue
marks.append(offset_a)
continue
else:
gamma += 1
if opnd[0][:4] == "sub_": continue
if opnd[0][0] in ["?", "$"]: continue
if opnd[0] in ["eax", "ebx", "ecx", "edx", "esi", "edi"]: continue
if opnd[0] in ["__SEH_prolog4", "__SEH_epilog4", "__EH_prolog3_catch"]: continue
if opnd[0].find("cookie") >= 0: continue
marks.append(opnd[0])
continue
return marks, gamma
def get_mnems(block):
mnems = []
for head in Heads(block.startEA, block.endEA):
mnem = GetMnem(head)
opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
buf = " "
for o in opnd:
if not o: break
elif o in ["eax", "ebx", "ecx", "edx", "ax", "bx", "cx", "dx", "al", "bl", "cl", "dl", "ah", "bh", "ch", "dh", "esi", "edi", "si", "di", "esp", "ebp"]:
buf += "reg "
elif o[:3] == "xmm": buf += "reg "
elif o.find("[") >= 0: buf += "mem "
elif o[:6] == "offset": buf += "off "
elif o[:4] == "loc_": buf += "loc "
elif o[:4] == "sub_": buf += "sub "
elif o.isdigit(): buf += "num "
elif re.match("[\da-fA-F]+h", o): buf += "num "
elif o[:6] == "dword_": buf += "dwd "
else: buf += "lbl "
mnems.append(mnem + buf)
return mnems
def cfg_to_cft_rec(block, memo, abr):
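    # Depth-first walk that turns the CFG into a ged_node tree of hashed block
    # labels; alpha counts blocks, beta successor edges, gamma call instructions.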
(alpha, beta, gamma) = abr
alpha += 1
marks, gamma = get_marks(block, gamma)
hashed_label = hash_label(marks)
mnems = get_mnems(block)
tree = ged_node.Node(hashed_label)
for b in list(block.succs()):
beta += 1
if b.startEA not in memo:
memo.append(b.startEA)
tmp, (alpha, beta, gamma), tmp2 = cfg_to_cft_rec(b, memo, (alpha, beta, gamma))
tree = tree.addkid(tmp)
mnems += tmp2
return tree, (alpha, beta, gamma), mnems
def cfg_to_cft(cfg):
block = cfg[0]
memo = []
memo.append(block.startEA)
return cfg_to_cft_rec(block, memo, (0, 0, 0))
def dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite):
function_num = len(cfgs)
dump_data_list = {}
for cfg in cfgs:
function_name = GetFunctionName(cfg[0].startEA)
(cft, abr, mnems) = cfg_to_cft(cfg)
dump_data_list[function_name] = {}
dump_data_list[function_name]["FUNCTION_NAME"] = function_name
dump_data_list[function_name]["CFT"] = cft
dump_data_list[function_name]["ABR"] = abr
dump_data_list[function_name]["MNEMS"] = mnems
def dump_pickle(dump_data_list, program, function, f_overwrite):
function_name_short = get_short_function_name(function)
filename_pickle = os.path.join(function_name_short + ".pickle")
if f_overwrite or not os.path.exists(filename_pickle):
cPickle_dump(filename_pickle, dump_data_list[function])
cPickle_dump(program + ".dmp", dump_data_list)
def main(function, f_image, f_all, f_overwrite):
sys.setrecursionlimit(3000)
program = idaapi.get_root_filename()
start_time = time.time()
cfgs = get_cfgs()
dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite)
result_time = time.time() - start_time
print "Dump finished."
print "result_time: " + str(result_time) + " sec."
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="")
parser.add_argument('-f', dest='function', default=None, type=str, help='')
parser.add_argument('-a', dest='f_all', default=False, action='store_true', help='')
parser.add_argument('-i', dest='f_image', default=False, action='store_true', help='Image Flag (Output as PNG)')
parser.add_argument('-o', dest='f_overwrite', default=False, action='store_true', help='Overwrite file')
args = parser.parse_args()
function = args.function
f_image = args.f_image
f_all = args.f_all
f_overwrite = args.f_overwrite
main(function, f_image, f_all, f_overwrite)
#idascript_exit()
| hada2/bingrep | bingrep_dump.py | Python | bsd-3-clause | 8,471 | 0.007909 |
# -*- coding: utf-8 -*-
# Taboot - Client utility for performing deployments with Func.
# Copyright © 2009, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""TODO: Decide what to do with this file"""
| tbielawa/Taboot | taboot/tasks/httpd.py | Python | gpl-3.0 | 805 | 0 |
# Copyright (C) 2007 - 2009 Khronos Group
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
#
# This sample judging object does the following:
#
# JudgeBaseline: verifies that app did not crash, the required steps have been performed,
# the rendered images match, and the required element(s) has been preserved
# JudgeExemplary: returns Baseline status.
# JudgeSuperior: returns Baseline status.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = [['library_geometries', 'geometry', 'mesh', 'triangles'], ['library_geometries', 'geometry', 'mesh', 'polygons'], ['library_geometries', 'geometry', 'mesh', 'polylist']]
attrName = 'count'
attrVal = '6'
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should not crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
if (self.__assistant.GetResults() == False):
self.status_baseline = False
return False
# Compare the rendered images
self.__assistant.CompareRenderedImages(context)
# Check for preservation of element
self.__assistant.ElementTransformed(context, self.tagList)
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
self.status_exemplary = False
if (self.__assistant.ElementPreserved(context, self.tagList[1], False)):
context.Log("PASSED: Geometry preserved as " + self.tagList[1][len(self.tagList[1])-1] + ".")
if (self.__assistant.AttributeCheck(context, self.tagList[1], self.attrName, self.attrVal)):
self.status_exemplary = True
elif (self.__assistant.ElementPreserved(context, self.tagList[2], False)):
context.Log("PASSED: Geometry preserved as " + self.tagList[2][len(self.tagList[2])-1] + ".")
if (self.__assistant.AttributeCheck(context, self.tagList[2], self.attrName, self.attrVal)):
self.status_exemplary = True
else:
context.Log("FAILED: Geometry is not preserved as " + self.tagList[1][len(self.tagList[1])-1] + " or " + self.tagList[2][len(self.tagList[2])-1] + ".")
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck); | KhronosGroup/COLLADA-CTS | StandardDataSets/collada/library_geometries/geometry/mesh/polygons/one_geometry_one_polygons/one_geometry_one_polygons.py | Python | mit | 5,053 | 0.008114 |
# -*- coding: utf-8 -*-
from .dev import * # noqa
INSTALLED_APPS += (
'django_extensions',
)
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'healthsites_dev',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
# Set to empty string for default.
'PORT': '',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
# define output formats
'verbose': {
'format': (
'%(levelname)s %(name)s %(asctime)s %(module)s %(process)d '
'%(thread)d %(message)s')
},
'simple': {
'format': (
'%(name)s %(levelname)s %(filename)s L%(lineno)s: '
'%(message)s')
},
},
'handlers': {
# console output
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
'level': 'DEBUG',
},
# 'logfile': {
# 'class': 'logging.FileHandler',
# 'filename': '/tmp/app-dev.log',
# 'formatter': 'simple',
# 'level': 'DEBUG',
# }
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'level': 'INFO', # switch to DEBUG to show actual SQL
},
# example app logger
'localities': {
'level': 'DEBUG',
'handlers': ['console'],
# propagate is True by default, which proppagates logs upstream
'propagate': False
}
},
# root logger
# non handled logs will propagate to the root logger
'root': {
'handlers': ['console'],
'level': 'WARNING'
}
}
| ismailsunni/healthsites | django_project/core/settings/dev_dodobas.py | Python | bsd-2-clause | 1,782 | 0 |
from bson.json_util import dumps, loads
from bson.objectid import ObjectId
from flask import Blueprint, request, Response
from app.commons import build_response
from app.commons.utils import update_document
from app.entities.models import Entity
entities_blueprint = Blueprint('entities_blueprint', __name__,
url_prefix='/entities')
@entities_blueprint.route('/', methods=['POST'])
def create_entity():
"""
    Create an entity from the provided json
:return:
"""
content = request.get_json(silent=True)
entity = Entity()
entity.name = content.get("name")
entity.entity_values = []
try:
entity_id = entity.save()
except Exception as e:
return build_response.build_json({"error": str(e)})
return build_response.build_json({
"_id": str(entity_id.id)
})
@entities_blueprint.route('/')
def read_entities():
"""
find list of entities
:return:
"""
    entities = Entity.objects.only('name', 'id')
    return build_response.sent_json(entities.to_json())
@entities_blueprint.route('/<id>')
def read_entity(id):
"""
    Find details for the given entity id
:param id:
:return:
"""
return Response(
response=dumps(Entity.objects.get(
id=ObjectId(id)).to_mongo().to_dict()),
status=200, mimetype="application/json")
@entities_blueprint.route('/<id>', methods=['PUT'])
def update_entity(id):
"""
    Update an entity from the provided json
:param id:
:return:
"""
json_data = loads(request.get_data())
entity = Entity.objects.get(id=ObjectId(id))
entity = update_document(entity, json_data)
entity.save()
return build_response.sent_ok()
@entities_blueprint.route('/<id>', methods=['DELETE'])
def delete_entity(id):
"""
    Delete an entity
:param id:
:return:
"""
Entity.objects.get(id=ObjectId(id)).delete()
return build_response.sent_ok()
| alfredfrancis/ai-chatbot-framework | app/entities/controllers.py | Python | mit | 1,963 | 0 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Densnet handler.
Adapted from
https://github.com/pytorch/vision/blob/master/torchvision/models/densenet.py
"""
import functools
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from cascaded_networks.models import custom_ops
from cascaded_networks.models import dense_blocks
from cascaded_networks.models import model_utils
class DenseNet(nn.Module):
"""Densenet."""
def __init__(self,
name,
block,
block_arch,
growth_rate=12,
reduction=0.5,
num_classes=10,
**kwargs):
"""Initialize dense net."""
super(DenseNet, self).__init__()
self.name = name
self.growth_rate = growth_rate
self._cascaded = kwargs['cascaded']
self.block_arch = block_arch
self._norm_layer_op = self._setup_bn_op(**kwargs)
self._build_net(block, block_arch, growth_rate,
reduction, num_classes, **kwargs)
def _build_net(self,
block,
block_arch,
growth_rate,
reduction,
num_classes,
**kwargs):
self.layers = []
num_planes = 2 * growth_rate
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes,
block_arch[0], **kwargs)
num_planes += block_arch[0] * growth_rate
out_planes = int(np.floor(num_planes * reduction))
self.trans1 = dense_blocks.Transition(num_planes,
out_planes,
norm_layer=self._norm_layer_op,
**kwargs)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes,
block_arch[1], **kwargs)
num_planes += block_arch[1] * growth_rate
out_planes = int(np.floor(num_planes * reduction))
self.trans2 = dense_blocks.Transition(num_planes,
out_planes,
norm_layer=self._norm_layer_op,
**kwargs)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes,
block_arch[2], **kwargs)
num_planes += block_arch[2] * growth_rate
out_planes = int(np.floor(num_planes * reduction))
self.trans3 = dense_blocks.Transition(num_planes,
out_planes,
norm_layer=self._norm_layer_op,
**kwargs)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes,
block_arch[3], **kwargs)
num_planes += block_arch[3] * growth_rate
self.bn = self._norm_layer_op(num_planes)
self.fc = nn.Linear(num_planes, num_classes)
self.layers.append(self.trans1)
self.layers.append(self.trans2)
self.layers.append(self.trans3)
def _make_dense_layers(self, block, in_planes, n_blocks, **kwargs):
layers = []
for _ in range(n_blocks):
block_i = block(in_planes,
self.growth_rate,
norm_layer=self._norm_layer_op,
**kwargs)
self.layers.append(block_i)
layers.append(block_i)
in_planes += self.growth_rate
return nn.Sequential(*layers)
@property
def timesteps(self):
return sum(self.block_arch) + 1
def _setup_bn_op(self, **kwargs):
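    # Cascaded models use a custom BatchNorm parameterised by the number of
    # timesteps; the non-cascaded model falls back to the standard nn.BatchNorm2d.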
if self._cascaded:
self._norm_layer = custom_ops.BatchNorm2d
# Setup batchnorm opts
self.bn_opts = kwargs.get('bn_opts', {
'affine': False,
'standardize': False
})
self.bn_opts['n_timesteps'] = self.timesteps
norm_layer_op = functools.partial(self._norm_layer, self.bn_opts)
else:
self._norm_layer = nn.BatchNorm2d
norm_layer_op = self._norm_layer
return norm_layer_op
def _set_time(self, t):
for block in self.layers:
block.set_time(t)
def forward(self, x, t=0):
# Set time on all blocks
if self._cascaded:
self._set_time(t)
# Feature extraction
out = self.conv1(x)
out = self.dense1(out)
out = self.trans1(out)
out = self.dense2(out)
out = self.trans2(out)
out = self.dense3(out)
out = self.trans3(out)
out = self.dense4(out)
# Classifier
out = self.bn(out) if not self._cascaded else self.bn(out, t)
out = F.avg_pool2d(F.relu(out), 4)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
def make_densenet(name, block, layers, pretrained, growth_rate, **kwargs):
model = DenseNet(name, block, layers, growth_rate=growth_rate, **kwargs)
if pretrained:
kwargs['model_name'] = name
model = model_utils.load_model(model, kwargs)
return model
def densenet121(pretrained=False, **kwargs):
return make_densenet('densenet121', dense_blocks.Bottleneck, [6, 12, 24, 16],
pretrained, growth_rate=32, **kwargs)
def densenet161(pretrained=False, **kwargs):
return make_densenet('densenet161', dense_blocks.Bottleneck, [6, 12, 36, 24],
pretrained, growth_rate=48, **kwargs)
def densenet169(pretrained=False, **kwargs):
return make_densenet('densenet169', dense_blocks.Bottleneck, [6, 12, 32, 32],
pretrained, growth_rate=32, **kwargs)
def densenet201(pretrained=False, **kwargs):
return make_densenet('densenet201', dense_blocks.Bottleneck, [6, 12, 48, 32],
pretrained, growth_rate=32, **kwargs)
def densenet_cifar(pretrained=False, **kwargs):
block_arch = [6, 12, 24, 16]
growth_rate = 16
return make_densenet('densenet121_cifar', dense_blocks.Bottleneck, block_arch,
pretrained, growth_rate=growth_rate, **kwargs)
| google-research/google-research | cascaded_networks/models/densenet.py | Python | apache-2.0 | 6,633 | 0.004975 |
#http://informatics.mccme.ru/mod/statements/view3.php?id=22783&chapterid=113362#1
n = int(input())
def sum_kv_cifr(x):
su = 0
for i in str(x):
su += int(i)*int(i)
return su
maxi_power = 0
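# Brute force over the products i*k with i <= n//2 and i*k <= n, keeping the
# largest sum of squared digits found.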
for i in range(1, n//2+1):
print('______',i)
for k in range(n//i, 0, -1):
power = sum_kv_cifr(i * k)
print('_', k, power)
if power > maxi_power:
maxi_power = power
print(maxi_power)
| dichenko/kpk2016 | Diff/dra.py | Python | gpl-3.0 | 442 | 0.011312 |
# -*- coding: utf-8 -*-
# GMate - Plugin Based Programmer's Text Editor
# Copyright © 2008-2009 Alexandre da Silva
#
# This file is part of Gmate.
#
# See LICENTE.TXT for licence information
import gtk
from GMATE import i18n as i
from GMATE.status_widget import StatusWidget
class StatusPosition(StatusWidget):
"""
This box holds the current line number
"""
def initialize(self):
        self.buff = None
self.document = None
self.__changed_id = None
self.__mark_set_id = None
self.line_title_label = gtk.Label(i.status_line)
self.line_position_number = gtk.Label('0')
self.line_position_number.set_size_request(40, -1)
self.line_position_number.set_alignment(0.01, 0.5)
self.column_title_label = gtk.Label(i.status_column)
self.column_position_number = gtk.Label('0')
self.column_position_number.set_size_request(25,-1)
self.column_position_number.set_alignment(0.01, 0.5)
self.pack_start(self.line_title_label, False, False)
self.pack_start(self.line_position_number, False, False)
sep = gtk.VSeparator()
self.pack_start(sep,False, False, 5)
self.pack_start(self.column_title_label, False, False)
self.pack_start(self.column_position_number, False, False)
self.show_all()
def on_disconnect(self):
        if self.buff:
if self.__changed_id:
self.buff.disconnect(self.__changed_id)
self.__changed_id = None
if self.__mark_set_id:
self.buff.disconnect(self.__mark_set_id)
self.__mark_set_id = None
def on_set_document(self, doc):
self.on_disconnect()
self.buff = doc.View.get_buffer()
self.document = doc.View
self.__changed_id = self.buff.connect("changed", self.__changed_cb)
self.__mark_set_id = self.buff.connect("mark-set", self.__mark_set_cb)
self.__changed_cb(self.buff)
def __changed_cb(self, buff):
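        # Recompute the caret position: row is the 1-based line number and col is
        # the visual column, with tab characters expanded to the view's tab width.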
tabwidth = self.document.get_tab_width()
iter = buff.get_iter_at_mark(buff.get_insert())
row = iter.get_line() + 1
col_offset = iter.get_line_offset()
iter.set_line_offset(0)
col = 0
while not col_offset == iter.get_line_offset():
if iter.get_char() == '\t':
col += (tabwidth - (col % tabwidth))
else:
col += 1
iter.forward_char()
self.line_position_number.set_text(str(row))
self.column_position_number.set_text(str(col+1))
return False
def __mark_set_cb(self, buff, cursoriter, mark):
self.__changed_cb(buff)
return False
| lexrupy/gmate-editor | GMATE/status_position.py | Python | mit | 2,729 | 0.002566 |
#!/usr/bin/env python3
from linker import Linker
import htmlPage
import content.index,content.db,content.fincom
# TODO put into config
spbBudgetXlsPath='../spb-budget-xls'
if __name__=='__main__':
linker=Linker('filelists',{
'csv':['csv'],
'xls':['xls'],
'db':['zip','sql','xlsx'],
})
htmlPage.HtmlPage('index.html','Данные бюджета Санкт-Петербурга',content.index.content,linker).write('output/index.html')
htmlPage.HtmlPage('xls.html','Ведомственная структура расходов бюджета Санкт-Петербурга в csv и xls',htmlPage.importContent(spbBudgetXlsPath+'/index.html'),linker).write('output/xls.html')
htmlPage.HtmlPage('db.html','БД и таблицы расходов бюджета Санкт-Петербурга из разных источников',content.db.content,linker).write('output/db.html')
htmlPage.HtmlPage('fincom.html','Что можно найти на сайте Комитета финансов',content.fincom.content,linker).write('output/fincom.html')
| AntonKhorev/BudgetSpb | main.py | Python | bsd-2-clause | 1,072 | 0.041295 |
#!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
# When creating the sdist, make sure the django.mo file also exists:
if 'sdist' in sys.argv or 'develop' in sys.argv:
os.chdir('fluent_blogs')
try:
from django.core import management
management.call_command('compilemessages', stdout=sys.stderr, verbosity=1)
except ImportError:
if 'sdist' in sys.argv:
raise
finally:
os.chdir('..')
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-fluent-blogs',
version=find_version('fluent_blogs', '__init__.py'),
license='Apache 2.0',
install_requires=[
'django-fluent-contents>=2.0.2',
'django-fluent-utils>=2.0',
'django-categories-i18n>=1.1',
'django-parler>=1.9.1',
'django-slug-preview>=1.0.4',
'django-tag-parser>=3.1',
],
requires=[
'Django (>=1.10)',
],
extras_require = {
'tests': [
'django-fluent-pages>=2.0.1',
'django-wysiwyg>=0.7.1',
],
'blogpage': ['django-fluent-pages>=2.0.1'],
'taggit': ['taggit', 'taggit-autosuggest'],
},
description='A blog engine with flexible block contents (based on django-fluent-contents).',
long_description=read('README.rst'),
author='Diederik van der Boor',
author_email='opensource@edoburu.nl',
url='https://github.com/edoburu/django-fluent-blogs',
download_url='https://github.com/edoburu/django-fluent-blogs/zipball/master',
packages=find_packages(),
include_package_data=True,
test_suite = 'runtests',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Framework :: Django',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| edoburu/django-fluent-blogs | setup.py | Python | apache-2.0 | 3,060 | 0.002614 |
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import sys
#NOTE: if you want to develop askbot
#you might want to install django-debug-toolbar as well
import askbot
setup(
name = "askbot",
version = askbot.get_version(),#remember to manually set this correctly
description = 'Exercise and Problem forum, like StackOverflow, written in python and Django',
packages = find_packages(),
author = 'Evgeny.Fadeev',
author_email = 'evgeny.fadeev@gmail.com',
license = 'GPLv3',
keywords = 'forum, community, wiki, Q&A',
entry_points = {
'console_scripts' : [
'askbot-setup = askbot.deployment:askbot_setup',
]
},
url = 'http://askbot.org',
include_package_data = True,
install_requires = askbot.REQUIREMENTS.values(),
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Natural Language :: Finnish',
'Natural Language :: German',
'Natural Language :: Russian',
'Natural Language :: Serbian',
'Natural Language :: Turkish',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: JavaScript',
'Topic :: Communications :: Usenet News',
'Topic :: Communications :: Email :: Mailing List Servers',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
],
    long_description = """Askbot will work alone or with other django apps (with some limitations, please see below), Django 1.1.1 - 1.2.3(*), MySQL(**) and PostgreSQL(recommended) (>=8.3).
Exercises? Suggestions? Found a bug? -> please post at http://askbot.org/
Features
========
* standard Q&A functionalities including votes, reputation system, etc.
* user levels: admin, moderator, regular, suspended, blocked
* per-user inbox for responses & flagged items (for moderators)
* email alerts - instant and delayed, optionally tag filtered
* search by full text and a set of tags simultaneously
* can import data from stackexchange database file
Installation
============
The general steps are:
* install the code
* if there is no database yet - create one
* create a new or configure existing django site for askbot
* create/update the database tables
Methods to install code
-----------------------
* **pip install askbot**
* **easy_install askbot**
* **download .tar.gz** file from the bottom of this page, then run **python setup.py install**
* clone code from the github **git clone git://github.com/ASKBOT/askbot-devel.git**, and then **python setup.py develop**
Create/configure django site
----------------------------
Either run command **askbot-setup** or merge contents of directory **askbot/setup_templates** in the source code into your project directory.
Create/update database tables
-----------------------------
Back up your database if it is not blank, then two commands:
* python manage.py syncdb
* python manage.py migrate
There are two apps to migrate - askbot and django_authopenid (a forked version of the original, included within askbot), so you can as well migrate them separately
Limitations
===========
There are some limitations that will be removed in the future. If any of these cause issues - please do not hesitate to contact admin@askbot.org.
Askbot patches `auth_user` table. The migration script will automatically add missing columns, however it will not overwrite any existing columns. Please do back up your database before adding askbot to an existing site.
Included into askbot there are two forked apps: `django_authopenid` and `livesettings`. If you have these apps on your site, you may have trouble installing askbot.
User registration and login system is bundled with Askbot. It is quite good: it allows logging in with a password or via many authentication service providers, including popular social services, and supports account recovery by email.
If there are any other collisions, askbot will simply fail to install, it will not damage your data.
Background Information
======================
Askbot is based on the CNPROG project by Mike Chen and Sailing Cai, a project originally inspired by StackOverflow and Yahoo Answers.
Footnotes
=========
(*) - If you want to install with django 1.2.x a dependency "Coffin-0.3" needs to be replaced with "Coffin-0.3.3" - this will be automated in the future versions of the setup script.
(**) - With MySQL you have to use MyISAM data backend, because it's the only one that supports Full Text Search."""
)
print """**************************************************************
* *
* Thanks for installing Askbot. *
* *
* To start deploying type: askbot-setup *
* Please take a look at the manual askbot/doc/INSTALL *
* And please do not hesitate to ask your questions at *
* at http://askbot.org *
* *
**************************************************************"""
| maxwward/SCOPEBak | setup.py | Python | gpl-3.0 | 5,541 | 0.009024 |
'''
Created on Apr 20, 2011
@author: michel
'''
import os
from lxml import etree
from FeatureServer.WebFeatureService.FilterEncoding.Operator import Operator
class LogicalOperator(Operator):
def __init__(self, node):
super(LogicalOperator, self).__init__(node)
self.type = 'LogicalOperator'
def createStatement(self, datasource, operatorList):
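        # Wrap the already-built child statements in an XML element and run it
        # through logical_operators.xsl to produce the datasource-specific
        # statement for this logical operator.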
logical = self.addOperators(operatorList)
xslt = etree.parse(os.path.dirname(os.path.abspath(__file__))+"/../../../../resources/filterencoding/logical_operators.xsl")
transform = etree.XSLT(xslt)
result = transform(logical, datasource="'"+datasource.type+"'", operationType="'"+str(self.node.xpath('local-name()'))+"'")
elements = result.xpath("//Statement")
if len(elements) > 0:
self.setStatement(str(elements[0].text).strip())
return
self.setStatement(None)
def addOperators(self, operatorList):
logical = etree.Element(self.node.tag)
for operator in operatorList:
element = etree.Element("Operator")
element.text = operator.stmt
logical.append(element)
return logical
| guolivar/totus-niwa | service/thirdparty/featureserver/FeatureServer/WebFeatureService/FilterEncoding/LogicalOperators/LogicalOperator.py | Python | gpl-3.0 | 1,243 | 0.008045 |
import os
import ROOT
from __common__ import *
OUTPUT_FILE_PATH = 'beta_sanity_check.root'
print 'Opening output file %s...' % OUTPUT_FILE_PATH
outputFile = ROOT.TFile(OUTPUT_FILE_PATH, 'RECREATE')
for station in STATIONS:
filePath = os.path.join(DATA_FOLDER, '%s_full_upgoingmu.root' % station)
f = ROOT.TFile(filePath)
t = f.Get('Events')
outputFile.cd()
# Beta (and beta^-1) distributions.
hname = 'hbeta_%s' % station
hbeta = ROOT.TH1F(hname, station, 200, 0, 3)
hbeta.SetXTitle('#beta')
hbeta.SetYTitle('Entries/bin')
t.Project(hname, BETA_EXPR, CUT_EXPR)
print 'Writing %s...' % hname
hbeta.Write()
print 'Done.'
hname = 'hbetainv_%s' % station
hbetainv = ROOT.TH1F(hname, station, 200, 0, 3)
hbetainv.SetXTitle('1/#beta')
hbetainv.SetYTitle('Entries/bin')
t.Project(hname, '1./(%s)' % BETA_EXPR, CUT_EXPR)
print 'Writing %s...' % hname
hbetainv.Write()
print 'Done.'
# Beta (and beta^-1) distributions for straight tracks.
hname = 'hbetastraight_%s' % station
hbeta = ROOT.TH1F(hname, station, 200, 0, 3)
hbeta.SetXTitle('#beta')
hbeta.SetYTitle('Entries/bin')
    t.Project(hname, BETA_EXPR, STRAIGHT_CUT_EXPR)
print 'Writing %s...' % hname
hbeta.Write()
print 'Done.'
hname = 'hbetastraightinv_%s' % station
hbetainv = ROOT.TH1F(hname, station, 200, 0, 3)
hbetainv.SetXTitle('1/#beta')
hbetainv.SetYTitle('Entries/bin')
t.Project(hname, '1./(%s)' % BETA_EXPR, STRAIGHT_CUT_EXPR)
print 'Writing %s...' % hname
hbetainv.Write()
print 'Done.'
# Beta (and beta^-1) distributions as a function of theta.
hname = 'hbetazdir_%s' % station
hbetazdir = ROOT.TH2F(hname, station, 100, 0, 1, 100, 0, 3)
hbetazdir.SetXTitle('cos(#theta)')
hbetazdir.SetYTitle('#beta')
t.Project(hname, '(%s):ZDir' % BETA_EXPR, CUT_EXPR)
print 'Writing %s...' % hname
hbetazdir.Write()
print 'Done.'
hname = 'hbetazdirinv_%s' % station
hbetazdirinv = ROOT.TH2F(hname, station, 100, 0, 1, 100, 0, 3)
hbetazdirinv.SetXTitle('cos(#theta)')
hbetazdirinv.SetYTitle('1/#beta')
t.Project(hname, '1./(%s):ZDir' % BETA_EXPR, CUT_EXPR)
print 'Writing %s...' % hname
hbetazdirinv.Write()
print 'Done.'
# Beta (and beta^-1) distributions for upward-going particles from muon
# decay.
hname = 'hbetadaughters_%s' % station
hbetadaughters = ROOT.TH1F(hname, station, 200, -3, 0)
hbetadaughters.SetXTitle('#beta')
hbetadaughters.SetYTitle('Entries/bin')
t.Project(hname, BETA_EXPR, DAUGHTER_CUT_EXPR)
print 'Writing %s...' % hname
hbetadaughters.Write()
print 'Done.'
hname = 'hbetadaughtersinv_%s' % station
hbetadaughtersinv = ROOT.TH1F(hname, station, 200, -3, 0)
hbetadaughtersinv.SetXTitle('1/#beta')
hbetadaughtersinv.SetYTitle('Entries/bin')
t.Project(hname, '1./(%s)' % BETA_EXPR, DAUGHTER_CUT_EXPR)
print 'Writing %s...' % hname
hbetadaughtersinv.Write()
print 'Done.'
f.Close()
print 'Closing output file...'
outputFile.Close()
print 'Done, bye.'
| centrofermi/e3analysis | upgoingmu/beta_sanity_check.py | Python | lgpl-3.0 | 3,115 | 0.000642 |
"""
Copyright 2014
This file is part of Phase.
Phase is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Phase is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Phase. If not, see <http://www.gnu.org/licenses/>.
"""
from libphase.tabs import tab
import plugins.vulnerabilities
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import GdkPixbuf
from libphase import gtk
import pkgutil
import importlib
import traceback
import sys
import xml.etree.cElementTree as ET
import os
import threading
import Queue
import time
class Vulnerabilities(tab.Tab):
plugins=[]
def __init__(self,shared_objects):
tab.Tab.__init__(self,shared_objects)
icon_dir=os.path.abspath(os.path.dirname(sys.argv[0]))+os.sep+"resources"+os.sep+"icons"+os.sep
self.info_icon=GdkPixbuf.Pixbuf.new_from_file_at_size(icon_dir+"info.png", 15, 15)
self.low_icon=GdkPixbuf.Pixbuf.new_from_file_at_size(icon_dir+"low.png", 15, 15)
self.medium_icon=GdkPixbuf.Pixbuf.new_from_file_at_size(icon_dir+"medium.png", 15, 15)
self.high_icon=GdkPixbuf.Pixbuf.new_from_file_at_size(icon_dir+"high.png", 15, 15)
self.critical_icon=GdkPixbuf.Pixbuf.new_from_file_at_size(icon_dir+"critical.png", 15, 15)
self.view=self.builder.get_object("treeviewVulnerabilities")
treeview_vulnerabilities_cell_1 = Gtk.CellRendererPixbuf()
treeview_vulnerabilities_column_1 = Gtk.TreeViewColumn("Risk", treeview_vulnerabilities_cell_1)
treeview_vulnerabilities_column_1.set_cell_data_func(treeview_vulnerabilities_cell_1,self.data_function)
self.view.append_column(treeview_vulnerabilities_column_1)
treeview_vulnerabilities_cell_2 = Gtk.CellRendererText()
treeview_vulnerabilities_column_2 = Gtk.TreeViewColumn("Title", treeview_vulnerabilities_cell_2, text=0)
self.view.append_column(treeview_vulnerabilities_column_2)
self.store=gtk.TreeStore(str,int,str,str,str)
self.view.set_model(self.store)
self.view.connect("cursor-changed",self.handler_treeview_vulnerabilities_cursor_changed)
for importer, modname, ispkg in pkgutil.iter_modules(plugins.vulnerabilities.__path__):
if modname != "base":
try:
module = importlib.import_module("plugins.vulnerabilities."+modname)
plugin=module.Plugin()
self.plugins.append(plugin)
except:
print "Failed to import ",modname
print traceback.format_exc()
self.processing_queue=Queue.Queue()
self.finish_processing=False
self.processing_thread=threading.Thread(target=self.process_thread)
self.processing_thread.start()
def data_function(self,column,cell,model,iter,user_data):
cell_contents=model.get_value(iter,1)
if cell_contents==5:
cell.set_property('pixbuf',self.critical_icon)
if cell_contents==4:
cell.set_property('pixbuf',self.high_icon)
if cell_contents==3:
cell.set_property('pixbuf',self.medium_icon)
if cell_contents==2:
cell.set_property('pixbuf',self.low_icon)
if cell_contents==1:
cell.set_property('pixbuf',self.info_icon)
def stop(self):
self.finish_processing=True
def process(self,flow):
if self.builder.get_object("checkbuttonVulnerabilitesDetect").get_active():
self.processing_queue.put(flow)
def process_thread(self):
while not self.finish_processing:
try:
flow=self.processing_queue.get_nowait()
for plugin in self.plugins:
vulnerabilities=plugin.process(flow.copy())
self.add(vulnerabilities)
except Queue.Empty:
time.sleep(0.1)
def load(self,xml):
for parent_node in xml.phase.vulnerabilities.children:
parent=self.store.append(None,[parent_node["title"],int(parent_node["risk"]),parent_node["description"],parent_node["recommendation"],""])
for child_node in parent_node.children:
self.store.append(parent,[child_node["url"],int(parent_node["risk"]),parent_node["description"],parent_node["recommendation"],child_node["value"]])
def save(self,root):
vulnerabilities_node = ET.SubElement(root, "vulnerabilities")
for parent in self.store.get_children(None):
vuln_node=ET.SubElement(vulnerabilities_node, "vulnerability")
vuln_node.set("title",self.store.get_value(parent,0))
vuln_node.set("risk",str(self.store.get_value(parent,1)))
vuln_node.set("description",self.store.get_value(parent,2))
vuln_node.set("recommendation",self.store.get_value(parent,3))
for child in self.store.get_children(parent):
child_node=ET.SubElement(vuln_node, "affected_url")
child_node.set("url",self.store.get_value(child,0))
child_node.set("value",self.store.get_value(child,4))
def clear(self):
self.store.clear()
def handler_treeview_vulnerabilities_cursor_changed(self,treeview):
model,iter=self.view.get_selection().get_selected()
path=model.get_path(iter)
if len(path) == 1:
self.builder.get_object("textviewVulnerabilitiesDescription").get_buffer().set_text(treeview.get_model().get_value(iter,2))
self.builder.get_object("textviewVulnerabilitiesRecommendation").get_buffer().set_text(treeview.get_model().get_value(iter,3))
self.builder.get_object("textviewVulnerabilitiesValue").get_buffer().set_text("")
else:
self.builder.get_object("textviewVulnerabilitiesDescription").get_buffer().set_text(treeview.get_model().get_value(iter,2))
self.builder.get_object("textviewVulnerabilitiesRecommendation").get_buffer().set_text(treeview.get_model().get_value(iter,3))
self.builder.get_object("textviewVulnerabilitiesValue").get_buffer().set_text(treeview.get_model().get_value(iter,4))
def add(self,vulnerabilities):
for vulnerability in vulnerabilities:
parent=self.store.contains(None,[(vulnerability.title,0)])
if parent == None:
parent=self.store.append(None,[vulnerability.title,vulnerability.risk,vulnerability.description,vulnerability.recommendation,""])
self.store.append(parent,[vulnerability.url,vulnerability.risk,vulnerability.description,vulnerability.recommendation,vulnerability.value])
else:
if self.store.contains(parent,[(vulnerability.url,0)]) == None:
self.store.append(parent,[vulnerability.url,vulnerability.risk,vulnerability.description,vulnerability.recommendation,vulnerability.value])
| phase-dev/phase | libphase/tabs/vulnerabilities.py | Python | gpl-3.0 | 6,578 | 0.03907 |
import logging
from tornado.web import RequestHandler
from NXTornadoWServer.ControlSocketHandler import ControlSocketHandler
class ClientsHandler(RequestHandler):
def get(self):
try:
controller_i = ControlSocketHandler.clients.index(ControlSocketHandler.client_controller)
except ValueError:
controller_i = 0
return self.render('../static/clients.html', clients=ControlSocketHandler.clients, controller_i=controller_i)
def post(self):
args = {k: ''.join(v) for k, v in self.request.arguments.iteritems()}
try:
ControlSocketHandler.client_controller = ControlSocketHandler.clients[int(args['i'])]
except IndexError:
pass
ControlSocketHandler.refresh_clients()
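# A minimal sketch (assumed, not taken from this repository) of how a handler
# like this is typically wired into a tornado Application; the route path and
# port below are illustrative only:
#
#     import tornado.web
#     import tornado.ioloop
#     app = tornado.web.Application([(r"/clients", ClientsHandler)])
#     app.listen(8888)
#     tornado.ioloop.IOLoop.instance().start()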
| spseol/DOD-2014 | NXTornadoWServer/NXTornadoWServer/ClientsHandler.py | Python | lgpl-2.1 | 775 | 0.003871 |
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Test of the mlp/linear layer
'''
import itertools as itt
import numpy as np
from neon import NervanaObject
from neon.initializers.initializer import Uniform
from neon.layers.layer import Linear
def pytest_generate_tests(metafunc):
if metafunc.config.option.all:
bsz_rng = [16, 32, 64]
else:
bsz_rng = [128]
if 'basic_linargs' in metafunc.fixturenames:
fargs = []
if metafunc.config.option.all:
nin_rng = [1, 2, 1023, 1024, 1025]
nout_rng = [1, 4, 1023, 1024, 1025]
else:
nin_rng = [4, 32]
nout_rng = [3, 33]
fargs = itt.product(nin_rng, nout_rng, bsz_rng)
metafunc.parametrize('basic_linargs', fargs)
if 'allrand_args' in metafunc.fixturenames:
fargs = []
eps = np.finfo(np.float32).eps
# weight ranges
w_rng = [[0.0, 1.0], [-1.0, 0.0], [-1.0, 1.0]]
if metafunc.config.option.all:
rng_max = [eps, eps*10, 1.0, 2048.0, 1.0e6, 1.0e10]
else:
rng_max = [eps, 1.0, 1.0e10]
fargs = itt.product(w_rng, rng_max)
metafunc.parametrize('allrand_args', fargs)
def test_linear_zeros(backend, basic_linargs):
# basic sanity check with 0 weights random inputs
nin, nout, batch_size = basic_linargs
NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
dtypeu = np.float32
init_unif = Uniform(low=0.0, high=0.0)
layer = Linear(nout=nout, init=init_unif)
inp = layer.be.array(dtypeu(np.random.random((nin, batch_size))))
out = layer.fprop(inp).get()
assert np.min(out) == 0.0 and np.max(out) == 0.0
err = dtypeu(np.zeros((nout, batch_size)))
deltas = layer.bprop(layer.be.array(err)).asnumpyarray()
assert np.min(deltas) == 0.0 and np.max(deltas) == 0.0
dw = layer.dW.asnumpyarray()
assert np.min(dw) == 0.0 and np.max(dw) == 0.0
return
def test_linear_ones(backend, basic_linargs):
# basic sanity check with all ones on the inputs
# and weights, check that each row in output
# is the sum of the weights for that output
# this check will confirm that the correct number
# of operations is being run
nin, nout, batch_size = basic_linargs
NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
dtypeu = np.float32
init_unif = Uniform(low=1.0, high=1.0)
layer = Linear(nout=nout, init=init_unif)
inp = layer.be.array(dtypeu(np.ones((nin, batch_size))))
out = layer.fprop(inp).asnumpyarray()
w = layer.W.asnumpyarray()
sums = np.sum(w, 1).reshape((nout, 1))*np.ones((1, batch_size))
# for larger layers need to estimate numerical precision
# atol = est_mm_prec(w, inp.asnumpyarray())
    assert np.allclose(sums, out, atol=0.0, rtol=0.0), \
        '%e' % np.max(np.abs(out-sums))
return
def test_all_rand(backend, allrand_args):
# test with random weights and random inputs
dtypeu = np.float32
w_rng, rngmax = allrand_args
inp_rng = [0.0, rngmax]
nin = 1024
nout = 2048
batch_size = 16
NervanaObject.be.bsz = NervanaObject.be.bs = batch_size
init_unif = Uniform(low=w_rng[0], high=w_rng[1])
layer = Linear(nout=nout, init=init_unif)
inp = np.random.random((nin, batch_size))
inp *= inp_rng[1] - inp_rng[0]
inp += inp_rng[0]
inp = inp.astype(dtypeu)
out = layer.fprop(layer.be.array(inp)).asnumpyarray()
w = layer.W.asnumpyarray()
# the expected output using numpy
out_exp = np.dot(w, inp)
# for larger layers need to estimate numerical precision
atol = 2 * est_mm_prec(w, inp, ntrials=1)
    assert np.allclose(out_exp, out, atol=atol, rtol=0.0), \
        '%e %e' % (np.max(np.abs(out - out_exp)), atol)
err = np.random.random((nout, batch_size))
err = err * (inp_rng[1] - inp_rng[0]) + inp_rng[0]
err = err.astype(dtypeu)
deltas = layer.bprop(layer.be.array(err)).asnumpyarray()
dw = layer.dW.asnumpyarray()
deltas_exp = np.dot(w.T, err)
atol = 2 * est_mm_prec(w.T, err, ntrials=1)
    assert np.allclose(deltas_exp, deltas, atol=atol, rtol=0.0), \
        '%e %e' % (np.max(np.abs(deltas_exp - deltas)), atol)
dw_exp = np.dot(err, inp.T)
atol = 2 * est_mm_prec(err, inp.T, ntrials=1)
    assert np.allclose(dw_exp, dw, atol=atol, rtol=0.0), \
        '%e %e' % (np.max(np.abs(dw_exp - dw)), atol)
return
# permute mm indices to change the order of computations
# to estimate numerical precision
# this is a rough estimate
def est_mm_prec(A, B, ntrials=1):
A64 = np.float64(A)
B64 = np.float64(B)
gt = np.dot(A64, B64)
max_err = -1.0
for trial in range(ntrials):
inds = np.random.permutation(A.shape[1])
# this method gives better estimate of precision tolerances
# but takes too long to run
# for i in range(A.shape[0]):
# for j in range(B.shape[1]):
# c = np.sum(np.multiply(A[i,inds], B[inds,j]))
# max_err = max( max_err, np.abs(c-gt[i,j]))
# need to scale this by 10 for comparison
C = np.dot(A[:, inds], B[inds, :])
dd = np.float32(gt - C)
# just save the worst case from each iteration
max_err = max(max_err, np.max(np.abs(dd)))
# need to scale the np.dot results by 10 to
# match the np.sum(np.multiply()) values
max_err *= 10.0
return max_err
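# Hedged usage sketch (illustrative shapes only, not part of the test suite):
# estimate a float32 matmul tolerance for given operands and use it as the
# atol of an np.allclose comparison against a float64 reference, mirroring
# the tests above.
#
#     A = np.random.random((64, 128)).astype(np.float32)
#     B = np.random.random((128, 32)).astype(np.float32)
#     atol = 2 * est_mm_prec(A, B, ntrials=1)
#     ref = np.dot(np.float64(A), np.float64(B))
#     assert np.allclose(ref, np.dot(A, B), atol=atol, rtol=0.0)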
| SalemAmeen/neon | tests/test_linear_layer.py | Python | apache-2.0 | 6,132 | 0 |
# import matplotlib.pyplot as plt
# import numpy as np
# from scipy import stats
# import sys
#
# c = list()
# b = list()
# for line in sys.stdin:
# linesp = list(map(float, line.strip().split(",")))
# if(linesp[0] < 4):
# c.append(linesp[0])
# b.append(linesp[1])
#
# carr = np.array(c)
# barr = np.array(b)
#
# slope, intercept, r_value, p_value, std_err = stats.linregress(c,b)
#
# plt.figure()
# plt.plot(carr,barr, 's')
# plt.show()
# print(slope,intercept,r_value,p_value,std_err)
# 2.0 0.0 1.0 0.0 0.0
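# Note: the fit above (slope 2.0, intercept 0.0, r = 1.0) was computed only on
# the points with x < 4; the solution below encodes that line for x < 4 and the
# saturation value 8 (= 2*4) for everything else.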
import sys
for line in sys.stdin:
x = float(line.strip())
    # conditional expression instead of the and/or idiom, which breaks when 2*x == 0
    y = 2*x if x < 4 else 8
print(y)
| marcos-sb/hacker-rank | artificial-intelligence/statistics-and-machine-learning/battery/Solution.py | Python | apache-2.0 | 639 | 0 |
import os
from hanziconv import HanziConv  # simplified/traditional Chinese conversion
from app.ResultParse import ResultParse
from app.statistics import analysis
from interface.api import LookUp, Search, Association
from tools.parse_and_filter import parse_sentence
from tools.translate import Translate
class Query:
def __init__(self, debug=False):
self.translator = Translate()
self.analysis = analysis()
self.conceptions = []
self.related_conceptions = list(tuple())
self.commonsense = set()
self.debug = debug
@staticmethod
def base_lookup(conception, Type='c', lang='zh', limit=100, s_to_t=True):
lookup = LookUp(lang=lang, Type=Type, limit=limit)
if s_to_t:
data = lookup.search_concept(HanziConv.toTraditional(conception))
else:
data = lookup.search_concept(conception)
r = ResultParse(data)
if r.get_num_found() > 0:
return [(edge.surfaceText, edge.start, edge.end, edge.rel)
for edge in r.parse_all_edges()]
return None
def concept_lookup(self):
        print('found only one concept, so fetch at most 10 of its commonsense facts')
        # first look up using the Chinese (traditional) term
local_commonsense = Query.base_lookup(HanziConv.toTraditional(self.conceptions))
if not local_commonsense:
            # if nothing is found, translate to English and look up again
local_commonsense = Query.base_lookup(self.translator.zh_to_en(self.conceptions))
self.commonsense = set(local_commonsense)
@staticmethod
def base_search(conceptions, lang='zh'):
res = list()
for i in range(len(conceptions)):
conception = '/c/' + lang + '/' + conceptions[i]
s = Search(node=conception) # can add more key-value
data = s.search()
r = ResultParse(data)
if r.get_num_found() > 0:
tmp = [(edge.surfaceText, edge.start.split('/')[3], edge.end.split('/')[3],
edge.rel)
for edge in r.parse_all_edges()]
res.extend(tmp)
return res
def concept_search(self, to_traditional=True):
# print('looking for conceptions all related commonsense')
if not self.conceptions:
return
if to_traditional:
translated_conceptions = HanziConv.toTraditional(' '.join(self.conceptions))
conceptions = translated_conceptions.split()
else:
conceptions = self.conceptions
if self.debug:
print("关键词:" + ''.join(conceptions))
data = Query.base_search(conceptions)
# if not data:
# translated_conceptions = Translate.zh_to_en(self.conceptions)
#
# data = Query.base_search(translated_conceptions, lang='en')
if data:
self.commonsense = self.commonsense.union(set(data))
@staticmethod
def base_association(terms, lang='zh', limit=100):
a = Association(lang=lang, limit=limit)
raw_data = a.get_similar_concepts_by_term_list(terms)
r = ResultParse(raw_data)
return r.parse_all_similarity()
def conception_association(self):
translated_conception = HanziConv.toTraditional(' '.join(self.conceptions))
if self.debug:
print(translated_conception)
self.related_conceptions = Query.base_association(translated_conception.split())
def tranlate_to_simple(self):
for item in self.commonsense.copy():
text = HanziConv.toSimplified(item[0]) if item[0] else 'No text'
s = HanziConv.toSimplified(item[1])
e = HanziConv.toSimplified(item[2])
self.commonsense.remove(item)
self.commonsense.add((text, s, e, item[3]))
def commonsense_query(self, sentences):
self.conceptions = parse_sentence(sentences)
self.concept_search(False)
# self.conception_association()
# self.tranlate_to_simple()
return self.commonsense
def stastics(self):
len_conceptiton = len(self.conceptions)
self.analysis.write_commonsense(self.commonsense)
self.analysis.write_all_relations()
self.analysis.print_comparation(len_conceptiton)
if __name__ == "__main__":
query = Query(debug=False)
# sentences = ["找寻新的利润增长点成为摆在各行面前的一大课题。在资产荒的背景下,个人房贷成为各家银行争抢的“香饽饽”,但随着多地推出楼市调控政策,按揭贷款或将从11月开始有所回落。",
# "精准医疗的目的是进行个体化定制治疗。因为每个人都存在着个体化差异,就算患上同一种疾病,在病理表现上也是不同的,可以表现为基因水平和蛋白水平上的差异",
# "全国人大常委会表决通过网络安全法,特别增加了惩治网络诈骗的有关规定,对个人信息保护做出规定,要求网络运营者应当采取技术措施和其他必要措施,确保其收集的个人信息安全,防止信息泄露、毁损、丢失"]
files = ["../data/" + f for f in os.listdir("../data/")]
for file in files:
print(file)
with open(file, encoding='utf-8') as f:
data = f.readlines()
data_filtered = [s.replace(' ', '') for s in data if not s.isspace() and '---' not in s]
sentences = ''.join(data_filtered).split("。")
for sentence in sentences:
# print("句子是"+sentence)
query.commonsense_query(sentence)
query.stastics()
| zhouhoo/conceptNet_55_client | app/query.py | Python | apache-2.0 | 5,682 | 0.001562 |
from dataingestion.initial_input import InitialInput
from const.constants import Constants
from dataingestion.preprocessing import preprocess_basic
from dataingestion.window import get_windows
from dataingestion.cache_control import *
from analysis.preparation import permutate
from analysis.preparation import split_test_train
from analysis.feature_selection import feature_selection
from utils.header_tools import create_headers
def main():
const = Constants()
init_input = InitialInput(const)
data = None
if not has_preprocess_basic_cache(const):
data = init_input.read_all_data_init()
const.remove_stripped_headers()
data = preprocess_basic(data, const)
data, labels = get_windows(data, const)
create_headers(const)
print("flex const index trace info / main:")
print(len(const.feature_indices['flex']['row_1']))
print(len(const.feature_indices['flex']['row_2']))
r1 = []
for i in const.feature_indices['flex']['row_1']:
r1.append(const.feature_headers[i])
print(r1)
# permutate the data
data, labels = permutate(data, labels)
# split train and testset
train_data,train_labels,test_data,test_labels = split_test_train(data,labels,0.7)
feature_selection(train_data,train_labels,const)
if __name__ == '__main__':
main() | joergsimon/gesture-analysis | main.py | Python | apache-2.0 | 1,322 | 0.008321 |
# encoding: utf-8
"""calculator.py - module for choosing a calculator."""
import gtk
from gettext import gettext as _
import os
import numpy as np
from copy import copy
from ase.gui.setupwindow import SetupWindow
from ase.gui.progress import DefaultProgressIndicator, GpawProgressIndicator
from ase.gui.widgets import pack, oops, cancel_apply_ok
from ase import Atoms
from ase.data import chemical_symbols
import ase
# Asap and GPAW may be imported if selected.
introtext = _("""\
To make most calculations on the atoms, a Calculator object must first
be associated with them. ASE supports a number of calculators, covering
different elements and implementing different physical models for the
interatomic interactions.\
""")
# Informational text about the calculators
lj_info_txt = _("""\
The Lennard-Jones pair potential is one of the simplest
possible models for interatomic interactions, mostly
suitable for noble gases and model systems.
Interactions are described by an interaction length and an
interaction strength.\
""")
emt_info_txt = _("""\
The EMT potential is a many-body potential, giving a
good description of the late transition metals crystallizing
in the FCC crystal structure. The elements described by the
main set of EMT parameters are Al, Ni, Cu, Pd, Ag, Pt, and
Au, the Al potential is however not suitable for materials
science application, as the stacking fault energy is wrong.
A number of parameter sets are provided.
<b>Default parameters:</b>
The default EMT parameters, as published in K. W. Jacobsen,
P. Stoltze and J. K. Nørskov, <i>Surf. Sci.</i> <b>366</b>, 394 (1996).
<b>Alternative Cu, Ag and Au:</b>
An alternative set of parameters for Cu, Ag and Au,
reoptimized to experimental data including the stacking
fault energies by Torben Rasmussen (partly unpublished).
<b>Ruthenium:</b>
Parameters for Ruthenium, as published in J. Gavnholt and
J. Schiøtz, <i>Phys. Rev. B</i> <b>77</b>, 035404 (2008).
<b>Metallic glasses:</b>
Parameters for MgCu and CuZr metallic glasses. MgCu
parameters are in N. P. Bailey, J. Schiøtz and
K. W. Jacobsen, <i>Phys. Rev. B</i> <b>69</b>, 144205 (2004).
CuZr in A. Paduraru, A. Kenoufi, N. P. Bailey and
J. Schiøtz, <i>Adv. Eng. Mater.</i> <b>9</b>, 505 (2007).
""")
aseemt_info_txt = _("""\
The EMT potential is a many-body potential, giving a
good description of the late transition metals crystalling
in the FCC crystal structure. The elements described by the
main set of EMT parameters are Al, Ni, Cu, Pd, Ag, Pt, and
Au. In addition, this implementation allows for the use of
H, N, O and C adatoms, although the description of these is
most likely not very good.
<b>This is the ASE implementation of EMT.</b> For large
simulations the ASAP implementation is more suitable; this
implementation is mainly to make EMT available when ASAP is
not installed.
""")
eam_info_txt = _("""\
The EAM/ADP potential is a many-body potential
implementing the Embedded Atom Method (EAM)
plus the Angular Dependent Potential (ADP),
which is an extension of the EAM to include
directional bonds. EAM is suited for FCC metallic
bonding while the ADP is suited for metallic bonds
with some degree of directionality.
For EAM see M.S. Daw and M.I. Baskes,
Phys. Rev. Letters 50 (1983) 1285.
For ADP see Y. Mishin, M.J. Mehl, and
D.A. Papaconstantopoulos, Acta Materialia 53 2005
4029--4041.
Data for the potential is contained in a file in either LAMMPS Alloy
or ADP format which need to be loaded before use. The Interatomic
Potentials Repository Project at http://www.ctcms.nist.gov/potentials/
contains many suitable potential files.
For large simulations the LAMMPS calculator is more
suitable; this implementation is mainly to make EAM
available when LAMMPS is not installed or to develop
new EAM/ADP potentials by matching results to ab
initio calculations.
""")
brenner_info_txt = _("""\
The Brenner potential is a reactive bond-order potential for
carbon and hydrocarbons. As a bond-order potential, it takes
into account that carbon orbitals can hybridize in different
ways, and that carbon can form single, double and triple
bonds. That the potential is reactive means that it can
handle gradual changes in the bond order as chemical bonds
are formed or broken.
The Brenner potential is implemented in Asap, based on a
C implementation published at http://www.rahul.net/pcm/brenner/ .
The potential is documented here:
Donald W Brenner, Olga A Shenderova, Judith A Harrison,
Steven J Stuart, Boris Ni and Susan B Sinnott:
"A second-generation reactive empirical bond order (REBO)
potential energy expression for hydrocarbons",
J. Phys.: Condens. Matter 14 (2002) 783-802.
doi: 10.1088/0953-8984/14/4/312
""")
gpaw_info_txt = _("""\
GPAW implements Density Functional Theory using a
<b>G</b>rid-based real-space representation of the wave
functions, and the <b>P</b>rojector <b>A</b>ugmented <b>W</b>ave
method for handling the core regions.
""")
aims_info_txt = _("""\
FHI-aims is an external package implementing density
functional theory and quantum chemical methods using
all-electron methods and a numeric local orbital basis set.
For full details, see http://www.fhi-berlin.mpg.de/aims/
or Comp. Phys. Comm. v180 2175 (2009). The ASE
documentation contains information on the keywords and
functionalities available within this interface.
""")
aims_pbc_warning_text = _("""\
WARNING:
Your system seems to have more than zero but less than
three periodic dimensions. Please check that this is
really what you want to compute. Assuming full
3D periodicity for this calculator.""")
vasp_info_txt = _("""\
VASP is an external package implementing density
functional theory using pseudopotentials
or the projector-augmented wave method together
with a plane wave basis set. For full details, see
http://cms.mpi.univie.ac.at/vasp/vasp/
""")
emt_parameters = (
(_("Default (Al, Ni, Cu, Pd, Ag, Pt, Au)"), None),
(_("Alternative Cu, Ag and Au"), "EMTRasmussenParameters"),
(_("Ruthenium"), "EMThcpParameters"),
(_("CuMg and CuZr metallic glass"), "EMTMetalGlassParameters")
)
class SetCalculator(SetupWindow):
"Window for selecting a calculator."
# List the names of the radio button attributes
radios = ("none", "lj", "emt", "aseemt", "eam", "brenner",
"gpaw", "aims", "vasp")
# List the names of the parameter dictionaries
paramdicts = ("lj_parameters", "eam_parameters", "gpaw_parameters",
"aims_parameters",)
# The name used to store parameters on the gui object
classname = "SetCalculator"
def __init__(self, gui):
SetupWindow.__init__(self)
self.set_title(_("Select calculator"))
vbox = gtk.VBox()
# Intoductory text
self.packtext(vbox, introtext)
pack(vbox, [gtk.Label(_("Calculator:"))])
# No calculator (the default)
self.none_radio = gtk.RadioButton(None, _("None"))
pack(vbox, [self.none_radio])
# Lennard-Jones
self.lj_radio = gtk.RadioButton(self.none_radio,
_("Lennard-Jones (ASAP)"))
self.lj_setup = gtk.Button(_("Setup"))
self.lj_info = InfoButton(lj_info_txt)
self.lj_setup.connect("clicked", self.lj_setup_window)
self.pack_line(vbox, self.lj_radio, self.lj_setup, self.lj_info)
# EMT
self.emt_radio = gtk.RadioButton(
self.none_radio, _("EMT - Effective Medium Theory (ASAP)"))
self.emt_setup = gtk.combo_box_new_text()
self.emt_param_info = {}
for p in emt_parameters:
self.emt_setup.append_text(p[0])
self.emt_param_info[p[0]] = p[1]
self.emt_setup.set_active(0)
self.emt_info = InfoButton(emt_info_txt)
self.pack_line(vbox, self.emt_radio, self.emt_setup, self.emt_info)
# EMT (ASE implementation)
self.aseemt_radio = gtk.RadioButton(
self.none_radio, _("EMT - Effective Medium Theory (ASE)"))
self.aseemt_info = InfoButton(aseemt_info_txt)
self.pack_line(vbox, self.aseemt_radio, None, self.aseemt_info)
# EAM
self.eam_radio = gtk.RadioButton(
self.none_radio,
_("EAM - Embedded Atom Method/Angular Dependent Potential (ASE)"))
self.eam_setup = gtk.Button(_("Setup"))
self.eam_setup.connect("clicked", self.eam_setup_window)
self.eam_info = InfoButton(eam_info_txt)
self.pack_line(vbox, self.eam_radio, self.eam_setup, self.eam_info)
# Brenner potential
self.brenner_radio = gtk.RadioButton(
self.none_radio, _("Brenner Potential (ASAP)"))
self.brenner_info = InfoButton(brenner_info_txt)
self.pack_line(vbox, self.brenner_radio, None, self.brenner_info)
# GPAW
self.gpaw_radio = gtk.RadioButton(self.none_radio,
_("Density Functional Theory (GPAW)")
)
self.gpaw_setup = gtk.Button(_("Setup"))
self.gpaw_info = InfoButton(gpaw_info_txt)
self.gpaw_setup.connect("clicked", self.gpaw_setup_window)
self.pack_line(vbox, self.gpaw_radio, self.gpaw_setup, self.gpaw_info)
# FHI-aims
self.aims_radio = gtk.RadioButton(self.none_radio,
_("Density Functional Theory "
"(FHI-aims)"))
self.aims_setup = gtk.Button(_("Setup"))
self.aims_info = InfoButton(aims_info_txt)
self.aims_setup.connect("clicked", self.aims_setup_window)
self.pack_line(vbox, self.aims_radio, self.aims_setup, self.aims_info)
# VASP
self.vasp_radio = gtk.RadioButton(self.none_radio,
_("Density Functional Theory "
"(VASP)"))
self.vasp_setup = gtk.Button(_("Setup"))
self.vasp_info = InfoButton(vasp_info_txt)
self.vasp_setup.connect("clicked", self.vasp_setup_window)
self.pack_line(vbox, self.vasp_radio, self.vasp_setup, self.vasp_info)
# Buttons etc.
pack(vbox, gtk.Label(""))
buts = cancel_apply_ok(cancel=lambda widget: self.destroy(),
apply=self.apply,
ok=self.ok)
pack(vbox, [buts], end=True, bottom=True)
self.check = gtk.CheckButton(_("Check that the calculator is "
"reasonable."))
self.check.set_active(True)
fr = gtk.Frame()
fr.add(self.check)
fr.show_all()
pack(vbox, [fr], end=True, bottom=True)
# Finalize setup
self.add(vbox)
vbox.show()
self.show()
self.gui = gui
self.load_state()
def pack_line(self, box, radio, setup, info):
hbox = gtk.HBox()
hbox.pack_start(radio, 0, 0)
hbox.pack_start(gtk.Label(" "), 0, 0)
hbox.pack_end(info, 0, 0)
if setup is not None:
radio.connect("toggled", self.radio_toggled, setup)
setup.set_sensitive(False)
hbox.pack_end(setup, 0, 0)
hbox.show_all()
box.pack_start(hbox, 0, 0)
def radio_toggled(self, radio, button):
button.set_sensitive(radio.get_active())
def lj_setup_window(self, widget):
if not self.get_atoms():
return
lj_param = getattr(self, "lj_parameters", None)
LJ_Window(self, lj_param, "lj_parameters")
        # When control is returned, self.lj_parameters has been set.
def eam_setup_window(self, widget):
if not self.get_atoms():
return
eam_param = getattr(self, "eam_parameters", None)
EAM_Window(self, eam_param, "eam_parameters")
        # When control is returned, self.eam_parameters has been set.
def gpaw_setup_window(self, widget):
if not self.get_atoms():
return
gpaw_param = getattr(self, "gpaw_parameters", None)
GPAW_Window(self, gpaw_param, "gpaw_parameters")
        # When control is returned, self.gpaw_parameters has been set.
def aims_setup_window(self, widget):
if not self.get_atoms():
return
aims_param = getattr(self, "aims_parameters", None)
AIMS_Window(self, aims_param, "aims_parameters")
        # When control is returned, self.aims_parameters has been set.
def vasp_setup_window(self, widget):
if not self.get_atoms():
return
vasp_param = getattr(self, "vasp_parameters", None)
VASP_Window(self, vasp_param, "vasp_parameters")
        # When control is returned, self.vasp_parameters has been set.
def get_atoms(self):
"Make an atoms object from the active frame"
images = self.gui.images
frame = self.gui.frame
if images.natoms < 1:
oops(_("No atoms present"))
return False
self.atoms = Atoms(positions=images.P[frame],
symbols=images.Z,
cell=images.A[frame],
pbc=images.pbc,
magmoms=images.M[frame])
if not images.dynamic.all():
from ase.constraints import FixAtoms
self.atoms.set_constraint(FixAtoms(mask=1 - images.dynamic))
return True
def apply(self, *widget):
if self.do_apply():
self.save_state()
return True
else:
return False
def do_apply(self):
nochk = not self.check.get_active()
self.gui.simulation["progress"] = DefaultProgressIndicator()
if self.none_radio.get_active():
self.gui.simulation['calc'] = None
return True
elif self.lj_radio.get_active():
if nochk or self.lj_check():
self.choose_lj()
return True
elif self.emt_radio.get_active():
if nochk or self.emt_check():
self.choose_emt()
return True
elif self.aseemt_radio.get_active():
if nochk or self.aseemt_check():
self.choose_aseemt()
return True
elif self.eam_radio.get_active():
if nochk or self.eam_check():
self.choose_eam()
return True
elif self.brenner_radio.get_active():
if nochk or self.brenner_check():
self.choose_brenner()
return True
elif self.gpaw_radio.get_active():
if nochk or self.gpaw_check():
self.choose_gpaw()
return True
elif self.aims_radio.get_active():
if nochk or self.aims_check():
self.choose_aims()
return True
elif self.vasp_radio.get_active():
if nochk or self.vasp_check():
self.choose_vasp()
return True
return False
def ok(self, *widget):
if self.apply():
self.destroy()
def save_state(self):
state = {}
for r in self.radios:
radiobutton = getattr(self, r + "_radio")
if radiobutton.get_active():
state["radio"] = r
state["emtsetup"] = self.emt_setup.get_active()
state["check"] = self.check.get_active()
for p in self.paramdicts:
if hasattr(self, p):
state[p] = getattr(self, p)
self.gui.module_state[self.classname] = state
def load_state(self):
try:
state = self.gui.module_state[self.classname]
except KeyError:
return
r = state["radio"]
radiobutton = getattr(self, r + "_radio")
radiobutton.set_active(True)
self.emt_setup.set_active(state["emtsetup"])
self.check.set_active(state["check"])
for p in self.paramdicts:
if state.has_key(p):
setattr(self, p, state[p])
def lj_check(self):
try:
import asap3
except ImportError:
oops(_("ASAP is not installed. (Failed to import asap3)"))
return False
if not hasattr(self, "lj_parameters"):
oops(_("You must set up the Lennard-Jones parameters"))
return False
try:
self.atoms.set_calculator(asap3.LennardJones(**self.lj_parameters))
except (asap3.AsapError, TypeError, ValueError), e:
oops(_("Could not create useful Lennard-Jones calculator."),
str(e))
return False
return True
def choose_lj(self):
# Define a function on the fly!
import asap3
def lj_factory(p=self.lj_parameters, lj=asap3.LennardJones):
return lj(**p)
self.gui.simulation["calc"] = lj_factory
def emt_get(self):
import asap3
provider_name = self.emt_setup.get_active_text()
provider = self.emt_param_info[provider_name]
if provider is not None:
provider = getattr(asap3, provider)
return (asap3.EMT, provider, asap3)
def emt_check(self):
if not self.get_atoms():
return False
try:
emt, provider, asap3 = self.emt_get()
except ImportError:
oops(_("ASAP is not installed. (Failed to import asap3)"))
return False
try:
if provider is not None:
self.atoms.set_calculator(emt(provider()))
else:
self.atoms.set_calculator(emt())
except (asap3.AsapError, TypeError, ValueError), e:
oops(_("Could not attach EMT calculator to the atoms."),
str(e))
return False
return True
def choose_emt(self):
emt, provider, asap3 = self.emt_get()
if provider is None:
emt_factory = emt
else:
def emt_factory(emt=emt, prov=provider):
return emt(prov())
self.gui.simulation["calc"] = emt_factory
def aseemt_check(self):
return self.element_check("ASE EMT", ['H', 'Al', 'Cu', 'Ag', 'Au',
'Ni', 'Pd', 'Pt', 'C', 'N', 'O'])
def eam_check(self):
from ase.calculators.eam import EAM
if not hasattr(self, "eam_parameters"):
oops(_("You must set up the EAM parameters"))
return False
self.atoms.set_calculator(EAM(**self.eam_parameters))
return self.element_check("EAM", self.atoms.get_calculator().elements)
def choose_eam(self):
from ase.calculators.eam import EAM
def eam_factory(p=self.eam_parameters):
calc = EAM(**p)
return calc
self.gui.simulation["calc"] = eam_factory
def brenner_check(self):
try:
import asap3
except ImportError:
oops(_("ASAP is not installed. (Failed to import asap3)"))
return False
return self.element_check("Brenner potential", ['H', 'C', 'Si'])
def choose_brenner(self):
import asap3
self.gui.simulation["calc"] = asap3.BrennerPotential
def choose_aseemt(self):
import ase.calculators.emt
self.gui.simulation["calc"] = ase.calculators.emt.EMT
# In case Asap has been imported
ase.calculators.emt.EMT.disabled = False
def gpaw_check(self):
try:
import gpaw
except ImportError:
oops(_("GPAW is not installed. (Failed to import gpaw)"))
return False
if not hasattr(self, "gpaw_parameters"):
oops(_("You must set up the GPAW parameters"))
return False
return True
def choose_gpaw(self):
# This reuses the same GPAW object.
try:
import gpaw
except ImportError:
oops(_("GPAW is not installed. (Failed to import gpaw)"))
return False
p = self.gpaw_parameters
use = ["xc", "kpts", "mode"]
if p["use_h"]:
use.append("h")
else:
use.append("gpts")
if p["mode"] == "lcao":
use.append("basis")
gpaw_param = {}
for s in use:
gpaw_param[s] = p[s]
if p["use mixer"]:
mx = getattr(gpaw, p["mixer"])
mx_args = {}
mx_arg_n = ["beta", "nmaxold", "weight"]
if p["mixer"] == "MixerDiff":
mx_arg_n.extend(["beta_m", "nmaxold_m", "weight_m"])
for s in mx_arg_n:
mx_args[s] = p[s]
gpaw_param["mixer"] = mx(**mx_args)
progress = GpawProgressIndicator()
self.gui.simulation["progress"] = progress
gpaw_param["txt"] = progress.get_gpaw_stream()
gpaw_calc = gpaw.GPAW(**gpaw_param)
def gpaw_factory(calc=gpaw_calc):
return calc
self.gui.simulation["calc"] = gpaw_factory
def aims_check(self):
if not hasattr(self, "aims_parameters"):
oops(_("You must set up the FHI-aims parameters"))
return False
return True
def choose_aims(self):
param = self.aims_parameters
from ase.calculators.aims import Aims
calc_aims = Aims(**param)
def aims_factory(calc=calc_aims):
return calc
self.gui.simulation["calc"] = aims_factory
def vasp_check(self):
if not hasattr(self, "vasp_parameters"):
oops(_("You must set up the VASP parameters"))
return False
return True
def choose_vasp(self):
param = self.vasp_parameters
from ase.calculators.vasp import Vasp
calc_vasp = Vasp(**param)
def vasp_factory(calc=calc_vasp):
return calc
self.gui.simulation["calc"] = vasp_factory
def element_check(self, name, elements):
"Check that all atoms are allowed"
elements = [ase.data.atomic_numbers[s] for s in elements]
elements_dict = {}
for e in elements:
elements_dict[e] = True
if not self.get_atoms():
return False
try:
for e in self.atoms.get_atomic_numbers():
elements_dict[e]
except KeyError:
oops(_("Element %(sym)s not allowed by the '%(name)s' calculator")
% dict(sym=ase.data.chemical_symbols[e], name=name))
return False
return True
class InfoButton(gtk.Button):
def __init__(self, txt):
gtk.Button.__init__(self, _("Info"))
self.txt = txt
self.connect('clicked', self.run)
def run(self, widget):
dialog = gtk.MessageDialog(flags=gtk.DIALOG_MODAL,
type=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_CLOSE)
dialog.set_markup(self.txt)
dialog.connect('response', lambda x, y: dialog.destroy())
dialog.show()
class LJ_Window(gtk.Window):
def __init__(self, owner, param, attrname):
gtk.Window.__init__(self)
self.set_title(_("Lennard-Jones parameters"))
self.owner = owner
self.attrname = attrname
atoms = owner.atoms
atnos = atoms.get_atomic_numbers()
found = {}
for z in atnos:
found[z] = True
self.present = found.keys()
self.present.sort() # Sorted list of atomic numbers
nelem = len(self.present)
vbox = gtk.VBox()
label = gtk.Label(_("Specify the Lennard-Jones parameters here"))
pack(vbox, [label])
pack(vbox, gtk.Label(""))
pack(vbox, [gtk.Label(_("Epsilon (eV):"))])
tbl, self.epsilon_adj = self.makematrix(self.present)
pack(vbox, [tbl])
pack(vbox, gtk.Label(""))
pack(vbox, [gtk.Label(_(u"Sigma (Å):"))])
tbl, self.sigma_adj = self.makematrix(self.present)
pack(vbox, [tbl])
# TRANSLATORS: Shift roughly means adjust (about a potential)
self.modif = gtk.CheckButton(_("Shift to make smooth at cutoff"))
self.modif.set_active(True)
pack(vbox, gtk.Label(""))
pack(vbox, self.modif)
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
vbox.show()
self.add(vbox)
# Now, set the parameters
if param and param['elements'] == self.present:
self.set_param(self.epsilon_adj, param["epsilon"], nelem)
self.set_param(self.sigma_adj, param["sigma"], nelem)
self.modif.set_active(param["modified"])
self.show()
self.grab_add() # Lock all other windows
def makematrix(self, present):
nelem = len(present)
adjdict = {}
tbl = gtk.Table(2 + nelem, 2 + nelem)
for i in range(nelem):
s = chemical_symbols[present[i]]
tbl.attach(gtk.Label(" " + str(present[i])), 0, 1, i, i + 1)
tbl.attach(gtk.Label(" " + s + " "), 1, 2, i, i + 1)
tbl.attach(gtk.Label(str(present[i])),
i + 2, i + 3, 1 + nelem, 2 + nelem)
tbl.attach(gtk.Label(s), i + 2, i + 3, nelem, 1 + nelem)
for j in range(i + 1):
adj = gtk.Adjustment(1.0, 0.0, 100.0, 0.1)
spin = gtk.SpinButton(adj, 0.1, 3)
tbl.attach(spin, 2 + j, 3 + j, i, i + 1)
adjdict[(i, j)] = adj
tbl.show_all()
return tbl, adjdict
def set_param(self, adj, params, n):
for i in range(n):
for j in range(n):
if j <= i:
adj[(i, j)].value = params[i, j]
def get_param(self, adj, params, n):
for i in range(n):
for j in range(n):
if j <= i:
params[i, j] = params[j, i] = adj[(i, j)].value
def destroy(self):
self.grab_remove()
gtk.Window.destroy(self)
def ok(self, *args):
params = {}
params["elements"] = copy(self.present)
n = len(self.present)
eps = np.zeros((n, n))
self.get_param(self.epsilon_adj, eps, n)
sigma = np.zeros((n, n))
self.get_param(self.sigma_adj, sigma, n)
params["epsilon"] = eps
params["sigma"] = sigma
params["modified"] = self.modif.get_active()
setattr(self.owner, self.attrname, params)
self.destroy()
class EAM_Window(gtk.Window):
def __init__(self, owner, param, attrname):
gtk.Window.__init__(self)
self.set_title(_("EAM parameters"))
self.owner = owner
self.attrname = attrname
self.owner = owner
atoms = owner.atoms
self.natoms = len(atoms)
vbox = gtk.VBox()
vbox.show()
self.add(vbox)
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
import_potential_but = gtk.Button(_("Import Potential"))
import_potential_but.connect("clicked", self.import_potential)
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(import_potential_but, 0, 0)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
vbox.show()
# Now, set the parameters
if param:
self.eam_file = param['fileobj']
self.show()
self.grab_add() # Lock all other windows
def ok(self, *args):
if not hasattr(self.owner, "eam_parameters"):
oops(_("You need to import the potential file"))
self.destroy()
def import_potential(self, *args):
dirname = "."
filename = "Al99.eam.alloy"
chooser = gtk.FileChooserDialog(
_('Import .alloy or .adp potential file ... '),
None, gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        chooser.set_filename(os.path.join(dirname, filename))
openr = chooser.run()
if openr == gtk.RESPONSE_OK:
param = {}
filename = chooser.get_filename()
param['fileobj'] = filename
setattr(self.owner, self.attrname, param)
chooser.destroy()
class GPAW_Window(gtk.Window):
gpaw_xc_list = ['LDA', 'PBE', 'RPBE', 'revPBE']
gpaw_xc_default = 'PBE'
def __init__(self, owner, param, attrname):
gtk.Window.__init__(self)
self.set_title(_("GPAW parameters"))
self.owner = owner
self.attrname = attrname
atoms = owner.atoms
self.ucell = atoms.get_cell()
self.size = tuple([self.ucell[i, i] for i in range(3)])
self.pbc = atoms.get_pbc()
self.orthogonal = self.isorthogonal(self.ucell)
self.natoms = len(atoms)
vbox = gtk.VBox()
#label = gtk.Label("Specify the GPAW parameters here")
#pack(vbox, [label])
# Print some info
txt = _("%i atoms.\n") % (self.natoms,)
if self.orthogonal:
txt += _(u"Orthogonal unit cell: %.2f x %.2f x %.2f Å.") % self.size
else:
txt += _("Non-orthogonal unit cell:\n")
txt += str(self.ucell)
pack(vbox, [gtk.Label(txt)])
# XC potential
self.xc = gtk.combo_box_new_text()
for i, x in enumerate(self.gpaw_xc_list):
self.xc.append_text(x)
if x == self.gpaw_xc_default:
self.xc.set_active(i)
pack(vbox, [gtk.Label(_("Exchange-correlation functional: ")),
self.xc])
# Grid spacing
self.radio_h = gtk.RadioButton(None, _("Grid spacing"))
self.h = gtk.Adjustment(0.18, 0.0, 1.0, 0.01)
self.h_spin = gtk.SpinButton(self.h, 0, 2)
pack(vbox, [self.radio_h, gtk.Label(" h = "), self.h_spin,
gtk.Label(_(u"Å"))])
self.radio_gpts = gtk.RadioButton(self.radio_h, _("Grid points"))
self.gpts = []
self.gpts_spin = []
for i in range(3):
g = gtk.Adjustment(4, 4, 1000, 4)
s = gtk.SpinButton(g, 0, 0)
self.gpts.append(g)
self.gpts_spin.append(s)
self.gpts_hlabel = gtk.Label("")
self.gpts_hlabel_format = _(u"h<sub>eff</sub> = (%.3f, %.3f, %.3f) Å")
pack(vbox, [self.radio_gpts, gtk.Label(" gpts = ("), self.gpts_spin[0],
gtk.Label(", "), self.gpts_spin[1], gtk.Label(", "),
self.gpts_spin[2], gtk.Label(") "), self.gpts_hlabel])
self.radio_h.connect("toggled", self.radio_grid_toggled)
self.radio_gpts.connect("toggled", self.radio_grid_toggled)
self.radio_grid_toggled(None)
for g in self.gpts:
g.connect("value-changed", self.gpts_changed)
self.h.connect("value-changed", self.h_changed)
# K-points
self.kpts = []
self.kpts_spin = []
for i in range(3):
if self.pbc[i] and self.orthogonal:
default = np.ceil(20.0 / self.size[i])
else:
default = 1
g = gtk.Adjustment(default, 1, 100, 1)
s = gtk.SpinButton(g, 0, 0)
self.kpts.append(g)
self.kpts_spin.append(s)
if not self.pbc[i]:
s.set_sensitive(False)
g.connect("value-changed", self.k_changed)
pack(vbox, [gtk.Label(_("k-points k = (")), self.kpts_spin[0],
gtk.Label(", "), self.kpts_spin[1], gtk.Label(", "),
self.kpts_spin[2], gtk.Label(")")])
self.kpts_label = gtk.Label("")
self.kpts_label_format = _(u"k-points x size: (%.1f, %.1f, %.1f) Å")
pack(vbox, [self.kpts_label])
self.k_changed()
# Spin polarized
self.spinpol = gtk.CheckButton(_("Spin polarized"))
pack(vbox, [self.spinpol])
pack(vbox, gtk.Label(""))
# Mode and basis functions
self.mode = gtk.combo_box_new_text()
self.mode.append_text(_("FD - Finite Difference (grid) mode"))
self.mode.append_text(_("LCAO - Linear Combination of Atomic "
"Orbitals"))
self.mode.set_active(0)
pack(vbox, [gtk.Label(_("Mode: ")), self.mode])
self.basis = gtk.combo_box_new_text()
self.basis.append_text(_("sz - Single Zeta"))
self.basis.append_text(_("szp - Single Zeta polarized"))
self.basis.append_text(_("dzp - Double Zeta polarized"))
self.basis.set_active(2) # dzp
pack(vbox, [gtk.Label(_("Basis functions: ")), self.basis])
pack(vbox, gtk.Label(""))
self.mode.connect("changed", self.mode_changed)
self.mode_changed()
# Mixer
self.use_mixer = gtk.CheckButton(_("Non-standard mixer parameters"))
pack(vbox, [self.use_mixer])
self.radio_mixer = gtk.RadioButton(None, "Mixer ")
self.radio_mixersum = gtk.RadioButton(self.radio_mixer, "MixerSum ")
self.radio_mixerdiff = gtk.RadioButton(self.radio_mixer, "MixerDiff")
pack(vbox, [self.radio_mixer, self.radio_mixersum,
self.radio_mixerdiff])
self.beta_adj = gtk.Adjustment(0.25, 0.0, 1.0, 0.05)
self.beta_spin = gtk.SpinButton(self.beta_adj, 0, 2)
self.nmaxold_adj = gtk.Adjustment(3, 1, 10, 1)
self.nmaxold_spin = gtk.SpinButton(self.nmaxold_adj, 0, 0)
self.weight_adj = gtk.Adjustment(50, 1, 500, 1)
self.weight_spin = gtk.SpinButton(self.weight_adj, 0, 0)
pack(vbox, [gtk.Label("beta = "), self.beta_spin,
gtk.Label(" nmaxold = "), self.nmaxold_spin,
gtk.Label(" weight = "), self.weight_spin])
self.beta_m_adj = gtk.Adjustment(0.70, 0.0, 1.0, 0.05)
self.beta_m_spin = gtk.SpinButton(self.beta_m_adj, 0, 2)
self.nmaxold_m_adj = gtk.Adjustment(2, 1, 10, 1)
self.nmaxold_m_spin = gtk.SpinButton(self.nmaxold_m_adj, 0, 0)
self.weight_m_adj = gtk.Adjustment(10, 1, 500, 1)
self.weight_m_spin = gtk.SpinButton(self.weight_m_adj, 0, 0)
pack(vbox, [gtk.Label("beta_m = "), self.beta_m_spin,
gtk.Label(" nmaxold_m = "), self.nmaxold_m_spin,
gtk.Label(" weight_m = "), self.weight_m_spin])
for but in (self.spinpol, self.use_mixer, self.radio_mixer,
self.radio_mixersum, self.radio_mixerdiff):
but.connect("clicked", self.mixer_changed)
self.mixer_changed()
# Eigensolver
# Poisson-solver
vbox.show()
self.add(vbox)
# Buttons at the bottom
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
# Set stored parameters
if param:
self.xc.set_active(param["xc#"])
if param["use_h"]:
self.radio_h.set_active(True)
else:
self.radio_gpts.set_active(True)
for i in range(3):
self.gpts[i].value = param["gpts"][i]
self.kpts[i].value = param["kpts"][i]
self.spinpol.set_active(param["spinpol"])
self.mode.set_active(param["mode#"])
self.basis.set_active(param["basis#"])
self.use_mixer.set_active(param["use mixer"])
getattr(self, "radio_" + param["mixer"].lower()).set_active(True)
for t in ("beta", "nmaxold", "weight", "beta_m", "nmaxold_m",
"weight_m"):
getattr(self, t + "_adj").value = param[t]
self.show()
self.grab_add() # Lock all other windows
def radio_grid_toggled(self, widget):
hmode = self.radio_h.get_active()
self.h_spin.set_sensitive(hmode)
for s in self.gpts_spin:
s.set_sensitive(not hmode)
self.gpts_changed()
def gpts_changed(self, *args):
if self.radio_gpts.get_active():
g = np.array([int(g.value) for g in self.gpts])
size = np.array([self.ucell[i, i] for i in range(3)])
txt = self.gpts_hlabel_format % tuple(size / g)
self.gpts_hlabel.set_markup(txt)
else:
self.gpts_hlabel.set_markup("")
def h_changed(self, *args):
h = self.h.value
for i in range(3):
g = 4 * round(self.ucell[i, i] / (4 * h))
self.gpts[i].value = g
def k_changed(self, *args):
size = [self.kpts[i].value * np.sqrt(np.vdot(self.ucell[i],
self.ucell[i]))
for i in range(3)]
self.kpts_label.set_text(self.kpts_label_format % tuple(size))
def mode_changed(self, *args):
self.basis.set_sensitive(self.mode.get_active() == 1)
def mixer_changed(self, *args):
radios = (self.radio_mixer, self.radio_mixersum, self.radio_mixerdiff)
spin1 = (self.beta_spin, self.nmaxold_spin, self.weight_spin)
spin2 = (self.beta_m_spin, self.nmaxold_m_spin, self.weight_m_spin)
if self.use_mixer.get_active():
# Mixer parameters can be specified.
if self.spinpol.get_active():
self.radio_mixer.set_sensitive(False)
self.radio_mixersum.set_sensitive(True)
self.radio_mixerdiff.set_sensitive(True)
if self.radio_mixer.get_active():
self.radio_mixersum.set_active(True)
else:
self.radio_mixer.set_sensitive(True)
self.radio_mixersum.set_sensitive(False)
self.radio_mixerdiff.set_sensitive(False)
self.radio_mixer.set_active(True)
if self.radio_mixerdiff.get_active():
active = spin1 + spin2
passive = ()
else:
active = spin1
passive = spin2
for widget in active:
widget.set_sensitive(True)
for widget in passive:
widget.set_sensitive(False)
else:
# No mixer parameters
for widget in radios + spin1 + spin2:
widget.set_sensitive(False)
def isorthogonal(self, matrix):
ortho = True
for i in range(3):
for j in range(3):
if i != j and matrix[i][j] != 0.0:
ortho = False
return ortho
def ok(self, *args):
param = {}
param["xc"] = self.xc.get_active_text()
param["xc#"] = self.xc.get_active()
param["use_h"] = self.radio_h.get_active()
param["h"] = self.h.value
param["gpts"] = [int(g.value) for g in self.gpts]
param["kpts"] = [int(k.value) for k in self.kpts]
param["spinpol"] = self.spinpol.get_active()
param["mode"] = self.mode.get_active_text().split()[0].lower()
param["mode#"] = self.mode.get_active()
param["basis"] = self.basis.get_active_text().split()[0].lower()
param["basis#"] = self.basis.get_active()
param["use mixer"] = self.use_mixer.get_active()
if self.radio_mixer.get_active():
m = "Mixer"
elif self.radio_mixersum.get_active():
m = "MixerSum"
else:
assert self.radio_mixerdiff.get_active()
m = "MixerDiff"
param["mixer"] = m
for t in ("beta", "nmaxold", "weight", "beta_m", "nmaxold_m",
"weight_m"):
param[t] = getattr(self, t + "_adj").value
setattr(self.owner, self.attrname, param)
self.destroy()
class AIMS_Window(gtk.Window):
aims_xc_cluster = ['pw-lda','pz-lda','pbe','pbesol','rpbe','revpbe',
'blyp','am05','b3lyp','hse03','hse06','pbe0','pbesol0',
'hf','mp2']
aims_xc_periodic = ['pw-lda','pz-lda','pbe','pbesol','rpbe','revpbe',
'blyp','am05']
aims_xc_default = 'pbe'
aims_relativity_list = ['none','atomic_zora','zora']
aims_keyword_gui_list = ['xc','vdw_correction_hirshfeld','k_grid','spin','charge','relativistic',
'sc_accuracy_etot','sc_accuracy_eev','sc_accuracy_rho','sc_accuracy_forces',
'compute_forces','run_command','species_dir','default_initial_moment']
def __init__(self, owner, param, attrname):
self.owner = owner
self.attrname = attrname
atoms = owner.atoms
self.periodic = atoms.get_pbc().all()
if not self.periodic and atoms.get_pbc().any():
aims_periodic_warning = True
self.periodic = True
else:
aims_periodic_warning = False
from ase.calculators.aims import float_keys,exp_keys,string_keys,int_keys,bool_keys,list_keys,input_keys
self.aims_keyword_list =float_keys+exp_keys+string_keys+int_keys+bool_keys+list_keys+input_keys
self.expert_keywords = []
natoms = len(atoms)
gtk.Window.__init__(self)
self.set_title(_("FHI-aims parameters"))
vbox = gtk.VBox()
vbox.set_border_width(5)
# Print some info
txt = _("%i atoms.\n") % (natoms)
if self.periodic:
self.ucell = atoms.get_cell()
txt += _("Periodic geometry, unit cell is:\n")
for i in range(3):
txt += "(%8.3f %8.3f %8.3f)\n" % (self.ucell[i][0], self.ucell[i][1], self.ucell[i][2])
self.xc_list = self.aims_xc_periodic
else:
txt += _("Non-periodic geometry.\n")
self.xc_list = self.aims_xc_cluster
pack(vbox, [gtk.Label(txt)])
# XC functional & dispersion correction
self.xc = gtk.combo_box_new_text()
self.xc_setup = False
self.TS = gtk.CheckButton(_("Hirshfeld-based dispersion correction"))
pack(vbox, [gtk.Label(_("Exchange-correlation functional: ")),self.xc])
pack(vbox, [self.TS])
pack(vbox, [gtk.Label("")])
# k-grid?
if self.periodic:
self.kpts = []
self.kpts_spin = []
for i in range(3):
default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],self.ucell[i])))
g = gtk.Adjustment(default, 1, 100, 1)
s = gtk.SpinButton(g, 0, 0)
self.kpts.append(g)
self.kpts_spin.append(s)
g.connect("value-changed", self.k_changed)
pack(vbox, [gtk.Label(_("k-points k = (")), self.kpts_spin[0],
gtk.Label(", "), self.kpts_spin[1], gtk.Label(", "),
self.kpts_spin[2], gtk.Label(")")])
self.kpts_label = gtk.Label("")
self.kpts_label_format = _(u"k-points x size: (%.1f, %.1f, %.1f) Å")
pack(vbox, [self.kpts_label])
self.k_changed()
pack(vbox, gtk.Label(""))
# Spin polarized, charge, relativity
self.spinpol = gtk.CheckButton(_("Spin / initial moment "))
self.spinpol.connect('toggled',self.spinpol_changed)
self.moment = gtk.Adjustment(0,-100,100,0.1)
self.moment_spin = gtk.SpinButton(self.moment, 0, 0)
self.moment_spin.set_digits(2)
self.moment_spin.set_sensitive(False)
self.charge = gtk.Adjustment(0,-100,100,0.1)
self.charge_spin = gtk.SpinButton(self.charge, 0, 0)
self.charge_spin.set_digits(2)
self.relativity_type = gtk.combo_box_new_text()
for i, x in enumerate(self.aims_relativity_list):
self.relativity_type.append_text(x)
self.relativity_type.connect('changed',self.relativity_changed)
self.relativity_threshold = gtk.Entry(max=8)
self.relativity_threshold.set_text('1.00e-12')
self.relativity_threshold.set_sensitive(False)
pack(vbox, [self.spinpol,
self.moment_spin,
gtk.Label(_(" Charge")),
self.charge_spin,
gtk.Label(_(" Relativity")),
self.relativity_type,
gtk.Label(_(" Threshold")),
self.relativity_threshold])
pack(vbox, gtk.Label(""))
# self-consistency criteria
pack(vbox,[gtk.Label(_("Self-consistency convergence:"))])
self.sc_tot_energy = gtk.Adjustment(1e-6, 1e-6, 1e0, 1e-6)
self.sc_tot_energy_spin = gtk.SpinButton(self.sc_tot_energy, 0, 0)
self.sc_tot_energy_spin.set_digits(6)
self.sc_tot_energy_spin.set_numeric(True)
self.sc_sum_eigenvalue = gtk.Adjustment(1e-3, 1e-6, 1e0, 1e-6)
self.sc_sum_eigenvalue_spin = gtk.SpinButton(self.sc_sum_eigenvalue, 0, 0)
self.sc_sum_eigenvalue_spin.set_digits(6)
self.sc_sum_eigenvalue_spin.set_numeric(True)
self.sc_density = gtk.Adjustment(1e-4, 1e-6, 1e0, 1e-6)
self.sc_density_spin = gtk.SpinButton(self.sc_density, 0, 0)
self.sc_density_spin.set_digits(6)
self.sc_density_spin.set_numeric(True)
self.compute_forces = gtk.CheckButton(_("Compute forces"))
self.compute_forces.set_active(True)
self.compute_forces.connect("toggled", self.compute_forces_toggled,"")
self.sc_forces = gtk.Adjustment(1e-4, 1e-6, 1e0, 1e-6)
self.sc_forces_spin = gtk.SpinButton(self.sc_forces, 0, 0)
self.sc_forces_spin.set_numeric(True)
self.sc_forces_spin.set_digits(6)
# XXX: use gtk table for layout. Spaces will not work well otherwise
# (depend on fonts, widget style, ...)
# TRANSLATORS: Don't care too much about these, just get approximately
# the same string lengths
pack(vbox, [gtk.Label(_("Energy: ")),
self.sc_tot_energy_spin,
gtk.Label(_(" eV Sum of eigenvalues: ")),
self.sc_sum_eigenvalue_spin,
gtk.Label(_(" eV"))])
pack(vbox, [gtk.Label(_("Electron density: ")),
self.sc_density_spin,
gtk.Label(_(" Force convergence: ")),
self.sc_forces_spin,
gtk.Label(_(" eV/Ang "))])
pack(vbox, [self.compute_forces])
pack(vbox, gtk.Label(""))
swin = gtk.ScrolledWindow()
swin.set_border_width(0)
swin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.expert_keyword_set = gtk.Entry(max=55)
self.expert_keyword_add = gtk.Button(stock=gtk.STOCK_ADD)
self.expert_keyword_add.connect("clicked", self.expert_keyword_import)
self.expert_keyword_set.connect("activate", self.expert_keyword_import)
pack(vbox,[gtk.Label(_("Additional keywords: ")),
self.expert_keyword_set,
self.expert_keyword_add])
self.expert_vbox = gtk.VBox()
vbox.pack_start(swin, True, True, 0)
swin.add_with_viewport(self.expert_vbox)
self.expert_vbox.get_parent().set_shadow_type(gtk.SHADOW_NONE)
self.expert_vbox.get_parent().set_size_request(-1, 100)
swin.show()
self.expert_vbox.show()
pack(vbox, gtk.Label(""))
# run command and species defaults:
pack(vbox, gtk.Label(_('FHI-aims execution command: ')))
self.run_command = pack(vbox, gtk.Entry(max=0))
pack(vbox, gtk.Label(_('Directory for species defaults: ')))
self.species_defaults = pack(vbox, gtk.Entry(max=0))
# set defaults from previous instance of the calculator, if applicable:
if param is not None:
self.set_param(param)
else:
self.set_defaults()
# Buttons at the bottom
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
default_but = gtk.Button(_("Set Defaults"))
default_but.connect("clicked",self.set_defaults)
import_control_but = gtk.Button(_("Import control.in"))
import_control_but.connect("clicked",self.import_control)
export_control_but = gtk.Button(_("Export control.in"))
export_control_but.connect("clicked", self.export_control)
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(default_but, 0, 0)
butbox.pack_start(import_control_but, 0, 0)
butbox.pack_start(export_control_but, 0, 0)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
self.expert_vbox.show()
vbox.show()
self.add(vbox)
self.show()
self.grab_add()
if aims_periodic_warning:
oops(aims_pbc_warning_text)
def set_defaults(self, *args):
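        """Reset all FHI-aims dialog fields to their default values.

        Builds the XC-functional combo box on first use and clears any
        previously added expert keywords.
        """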
atoms = self.owner.atoms.copy()
if not self.xc_setup:
self.xc_setup = True
for i, x in enumerate(self.xc_list):
self.xc.append_text(x)
for i, x in enumerate(self.xc_list):
if x == self.aims_xc_default:
self.xc.set_active(i)
self.TS.set_active(False)
if self.periodic:
self.ucell = atoms.get_cell()
for i in range(3):
default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],self.ucell[i])))
self.kpts_spin[i].set_value(default)
self.spinpol.set_active(False)
self.moment.set_value(0)
self.moment_spin.set_sensitive(False)
self.charge.set_value(0)
aims_relativity_default = 'none'
for a in atoms:
if a.number > 20:
aims_relativity_default = 'atomic_zora'
for i, x in enumerate(self.aims_relativity_list):
if x == aims_relativity_default:
self.relativity_type.set_active(i)
self.sc_tot_energy.set_value(1e-6)
self.sc_sum_eigenvalue.set_value(1e-3)
self.sc_density.set_value(1e-4)
self.sc_forces.set_value(1e-4)
for key in self.expert_keywords:
key[0].destroy()
key[1].destroy()
key[2].destroy()
key[3] = False
for child in self.expert_vbox.children():
self.expert_vbox.remove(child)
if os.environ.has_key('AIMS_COMMAND'):
text = os.environ['AIMS_COMMAND']
else:
text = ""
self.run_command.set_text(text)
if os.environ.has_key('AIMS_SPECIES_DIR'):
text = os.environ['AIMS_SPECIES_DIR']
else:
text = ""
self.species_defaults.set_text(text)
def set_attributes(self, *args):
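        """Collect the current dialog settings into a parameter dictionary
        and store it on the owner under the attribute name self.attrname."""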
param = {}
param["xc"] = self.xc.get_active_text()
if self.periodic:
param["k_grid"] = (int(self.kpts[0].value),
int(self.kpts[1].value),
int(self.kpts[2].value))
if self.spinpol.get_active():
param["spin"] = "collinear"
param["default_initial_moment"] = self.moment.get_value()
else:
param["spin"] = "none"
param["default_initial_moment"] = None
param["vdw_correction_hirshfeld"] = self.TS.get_active()
param["charge"] = self.charge.value
param["relativistic"] = self.relativity_type.get_active_text()
if param["relativistic"] == 'atomic_zora':
param["relativistic"] += " scalar "
if param["relativistic"] == 'zora':
param["relativistic"] += " scalar "+self.relativity_threshold.get_text()
param["sc_accuracy_etot"] = self.sc_tot_energy.value
param["sc_accuracy_eev"] = self.sc_sum_eigenvalue.value
param["sc_accuracy_rho"] = self.sc_density.value
param["compute_forces"] = self.compute_forces.get_active()
param["sc_accuracy_forces"] = self.sc_forces.value
param["run_command"] = self.run_command.get_text()
param["species_dir"] = self.species_defaults.get_text()
from ase.calculators.aims import float_keys,exp_keys,string_keys,int_keys,bool_keys,list_keys,input_keys
for option in self.expert_keywords:
if option[3]: # set type of parameter according to which list it is in
key = option[0].get_text().strip()
val = option[1].get_text().strip()
if key == 'output':
if param.has_key('output'):
param[key] += [val]
else:
param[key] = [val]
elif key in float_keys or key in exp_keys:
param[key] = float(val)
elif key in list_keys or key in string_keys or key in input_keys:
param[key] = val
elif key in int_keys:
param[key] = int(val)
elif key in bool_keys:
param[key] = bool(val)
setattr(self.owner, self.attrname, param)
def set_param(self, param):
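        """Populate the dialog fields from a previously stored parameter
        dictionary, e.g. from an earlier instance of the calculator."""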
if param["xc"] is not None:
for i, x in enumerate(self.xc_list):
if x == param["xc"]:
self.xc.set_active(i)
if isinstance(param["vdw_correction_hirshfeld"],bool):
self.TS.set_active(param["vdw_correction_hirshfeld"])
if self.periodic and param["k_grid"] is not None:
self.kpts[0].value = int(param["k_grid"][0])
self.kpts[1].value = int(param["k_grid"][1])
self.kpts[2].value = int(param["k_grid"][2])
if param["spin"] is not None:
self.spinpol.set_active(param["spin"] == "collinear")
self.moment_spin.set_sensitive(param["spin"] == "collinear")
if param["default_initial_moment"] is not None:
self.moment.value = param["default_initial_moment"]
if param["charge"] is not None:
self.charge.value = param["charge"]
if param["relativistic"] is not None:
if isinstance(param["relativistic"],(tuple,list)):
rel = param["relativistic"]
else:
rel = param["relativistic"].split()
for i, x in enumerate(self.aims_relativity_list):
if x == rel[0]:
self.relativity_type.set_active(i)
if x == 'zora':
self.relativity_threshold.set_text(rel[2])
self.relativity_threshold.set_sensitive(True)
if param["sc_accuracy_etot"] is not None:
self.sc_tot_energy.value = param["sc_accuracy_etot"]
if param["sc_accuracy_eev"] is not None:
self.sc_sum_eigenvalue.value = param["sc_accuracy_eev"]
if param["sc_accuracy_rho"] is not None:
self.sc_density.value = param["sc_accuracy_rho"]
if param["compute_forces"] is not None:
if param["compute_forces"]:
if param["sc_accuracy_forces"] is not None:
self.sc_forces.value = param["sc_accuracy_forces"]
self.compute_forces.set_active(param["compute_forces"])
else:
self.compute_forces.set_active(False)
if param["run_command"] is not None:
self.run_command.set_text(param["run_command"])
if param["species_dir"] is not None:
self.species_defaults.set_text(param["species_dir"])
for (key,val) in param.items():
if key in self.aims_keyword_list and key not in self.aims_keyword_gui_list:
if val is not None: # = existing "expert keyword"
if key == 'output': # 'output' can be used more than once
options = val
if isinstance(options,str):
options = [options]
for arg in options:
self.expert_keyword_create([key]+[arg])
else:
if isinstance(val,str):
arg = [key]+val.split()
elif isinstance(val,(tuple,list)):
arg = [key]+[str(a) for a in val]
else:
arg = [key]+[str(val)]
self.expert_keyword_create(arg)
def ok(self, *args):
self.set_attributes(*args)
self.destroy()
def export_control(self, *args):
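        """Write the current settings to a control.in file chosen in a file
        dialog, using a temporary Aims calculator to do the writing."""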
filename = "control.in"
chooser = gtk.FileChooserDialog(
_('Export parameters ... '), None, gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
chooser.set_filename(filename)
save = chooser.run()
if save == gtk.RESPONSE_OK or save == gtk.RESPONSE_SAVE:
filename = chooser.get_filename()
self.set_attributes(*args)
param = getattr(self.owner, "aims_parameters")
from ase.calculators.aims import Aims
calc_temp = Aims(**param)
atoms_temp = self.owner.atoms.copy()
atoms_temp.set_calculator(calc_temp)
atoms_temp.calc.write_control(file=filename)
atoms_temp.calc.write_species(file=filename)
chooser.destroy()
def import_control(self, *args):
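        """Read parameters back from a control.in file previously written by
        ASE (identified by its calculator header) and apply them to the
        dialog."""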
filename = "control.in"
chooser = gtk.FileChooserDialog(
_('Import control.in file ... '), None,
gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
chooser.set_filename(filename)
save = chooser.run()
if save == gtk.RESPONSE_OK:
self.set_defaults()
            filename = chooser.get_filename()
            found_aims_calculator = False
            control = open(filename,'r')
while True:
line = control.readline()
if not line:
break
if "List of parameters used to initialize the calculator:" in line:
control.readline()
from ase.io.aims import read_aims_calculator
calc = read_aims_calculator(control)
found_aims_calculator = True
control.close()
if found_aims_calculator:
param = calc.float_params
for key in calc.exp_params:
param[key] = calc.exp_params[key]
for key in calc.string_params:
param[key] = calc.string_params[key]
for key in calc.int_params:
param[key] = calc.int_params[key]
for key in calc.bool_params:
param[key] = calc.bool_params[key]
for key in calc.list_params:
param[key] = calc.list_params[key]
for key in calc.input_parameters:
param[key] = calc.input_parameters[key]
self.set_defaults()
self.set_param(param)
chooser.destroy()
def k_changed(self, *args):
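        """Update the label showing the k-point grid times the unit cell
        size."""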
size = [self.kpts[i].value * np.sqrt(np.vdot(self.ucell[i],self.ucell[i])) for i in range(3)]
self.kpts_label.set_text(self.kpts_label_format % tuple(size))
def compute_forces_toggled(self, *args):
self.sc_forces_spin.set_sensitive(self.compute_forces.get_active())
def relativity_changed(self, *args):
self.relativity_threshold.set_sensitive(self.relativity_type.get_active() == 2)
def spinpol_changed(self, *args):
self.moment_spin.set_sensitive(self.spinpol.get_active())
def expert_keyword_import(self, *args):
command = self.expert_keyword_set.get_text().split()
if len(command) > 0 and command[0] in self.aims_keyword_list and not command[0] in self.aims_keyword_gui_list:
self.expert_keyword_create(command)
        elif len(command) > 0 and command[0] in self.aims_keyword_gui_list:
oops(_("Please use the facilities provided in this window to "
"manipulate the keyword: %s!") % command[0])
        elif len(command) > 0:
oops(_("Don't know this keyword: %s\n"
"\nPlease check!\n\n"
"If you really think it should be available, "
"please add it to the top of ase/calculators/aims.py.")
% command[0])
self.expert_keyword_set.set_text("")
def expert_keyword_create(self, command):
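        """Append one expert-keyword row (label, value entry, delete button)
        to the scrolled keyword table; command is a [key, arg, ...] list."""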
key = command[0]
argument = command[1]
if len(command) > 2:
for a in command[2:]:
argument += ' '+a
index = len(self.expert_keywords)
self.expert_keywords += [[gtk.Label(" " +key+" "),
gtk.Entry(max=45),
ExpertDeleteButton(index),
True]]
self.expert_keywords[index][1].set_text(argument)
self.expert_keywords[index][2].connect('clicked',self.expert_keyword_delete)
if not self.expert_vbox.get_children():
table = gtk.Table(1, 3)
table.attach(self.expert_keywords[index][0], 0, 1, 0, 1, 0)
table.attach(self.expert_keywords[index][1], 1, 2, 0, 1, 0)
table.attach(self.expert_keywords[index][2], 2, 3, 0, 1, 0)
table.show_all()
pack(self.expert_vbox, table)
else:
table = self.expert_vbox.get_children()[0]
nrows = table.get_property('n-rows')
table.resize(nrows + 1, 3)
table.attach(self.expert_keywords[index][0], 0, 1, nrows, nrows + 1, 0)
table.attach(self.expert_keywords[index][1], 1, 2, nrows, nrows + 1, 0)
table.attach(self.expert_keywords[index][2], 2, 3, nrows, nrows + 1, 0)
table.show_all()
def expert_keyword_delete(self, button, *args):
index = button.index # which one to kill
for i in [0,1,2]:
self.expert_keywords[index][i].destroy()
table = self.expert_vbox.get_children()[0]
nrows = table.get_property('n-rows')
table.resize(nrows-1, 3)
self.expert_keywords[index][3] = False
class ExpertDeleteButton(gtk.Button):
def __init__(self, index):
gtk.Button.__init__(self, stock=gtk.STOCK_DELETE)
alignment = self.get_children()[0]
hbox = alignment.get_children()[0]
#self.set_size_request(1, 3)
image, label = hbox.get_children()
if image is not None:
label.set_text('Del')
self.index = index
class VASP_Window(gtk.Window):
vasp_xc_list = ['PW91', 'PBE', 'LDA']
vasp_xc_default = 'PBE'
vasp_prec_default = 'Normal'
def __init__(self, owner, param, attrname):
self.owner = owner
self.attrname = attrname
atoms = owner.atoms
self.periodic = atoms.get_pbc().all()
self.vasp_keyword_gui_list = ['ediff','encut', 'ismear', 'ispin', 'prec', 'sigma']
from ase.calculators.vasp import float_keys,exp_keys,string_keys,int_keys,bool_keys,list_keys,special_keys
self.vasp_keyword_list = float_keys+exp_keys+string_keys+int_keys+bool_keys+list_keys+special_keys
self.expert_keywords = []
natoms = len(atoms)
gtk.Window.__init__(self)
self.set_title(_("VASP parameters"))
vbox = gtk.VBox()
vbox.set_border_width(5)
# Print some info
txt = _("%i atoms.\n") % natoms
self.ucell = atoms.get_cell()
txt += _("Periodic geometry, unit cell is: \n")
for i in range(3):
txt += "(%8.3f %8.3f %8.3f)\n" % (self.ucell[i][0], self.ucell[i][1], self.ucell[i][2])
pack(vbox, [gtk.Label(txt)])
# XC functional ()
self.xc = gtk.combo_box_new_text()
for i, x in enumerate(self.vasp_xc_list):
self.xc.append_text(x)
# Spin polarized
self.spinpol = gtk.CheckButton(_("Spin polarized"))
pack(vbox, [gtk.Label(_("Exchange-correlation functional: ")),
self.xc,
gtk.Label(" "),
self.spinpol])
pack(vbox, gtk.Label(""))
# k-grid
self.kpts = []
self.kpts_spin = []
for i in range(3):
default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],self.ucell[i])))
g = gtk.Adjustment(default, 1, 100, 1)
s = gtk.SpinButton(g, 0, 0)
self.kpts.append(g)
self.kpts_spin.append(s)
g.connect("value-changed", self.k_changed)
# Precision of calculation
self.prec = gtk.combo_box_new_text()
for i, x in enumerate(['Low', 'Normal', 'Accurate']):
self.prec.append_text(x)
if x == self.vasp_prec_default:
self.prec.set_active(i)
# cutoff energy
if os.environ.has_key('VASP_PP_PATH'):
self.encut_min_default, self.encut_max_default = self.get_min_max_cutoff()
else:
self.encut_max_default = 400.0
self.encut_min_default = 100.0
self.encut = gtk.Adjustment(self.encut_max_default, 0, 9999, 10)
self.encut_spin = gtk.SpinButton(self.encut, 0, 0)
self.encut_spin.set_digits(2)
self.encut_spin.connect("value-changed",self.check_encut_warning)
self.encut_warning = gtk.Label("")
pack(vbox, [gtk.Label(_("k-points k = (")), self.kpts_spin[0],
gtk.Label(", "), self.kpts_spin[1], gtk.Label(", "),
self.kpts_spin[2],
gtk.Label(_(") Cutoff: ")),self.encut_spin,
gtk.Label(_(" Precision: ")),self.prec])
self.kpts_label = gtk.Label("")
self.kpts_label_format = _(u"k-points x size: (%.1f, %.1f, %.1f) Å ")
pack(vbox, [self.kpts_label, self.encut_warning])
self.k_changed()
pack(vbox, gtk.Label(""))
self.ismear = gtk.combo_box_new_text()
for x in ['Fermi', 'Gauss', 'Methfessel-Paxton']:
self.ismear.append_text(x)
self.ismear.set_active(2)
self.smearing_order = gtk.Adjustment(2,0,9,1)
self.smearing_order_spin = gtk.SpinButton(self.smearing_order,0,0)
self.smearing_order_spin.set_digits(0)
self.ismear.connect("changed", self.check_ismear_changed)
self.sigma = gtk.Adjustment(0.1, 0.001, 9.0, 0.1)
self.sigma_spin = gtk.SpinButton(self.sigma,0,0)
self.sigma_spin.set_digits(3)
pack(vbox, [gtk.Label(_("Smearing: ")),
self.ismear,
gtk.Label(_(" order: ")),
self.smearing_order_spin,
gtk.Label(_(" width: ")),
self.sigma_spin])
pack(vbox, gtk.Label(""))
self.ediff = gtk.Adjustment(1e-4, 1e-6, 1e0, 1e-4)
self.ediff_spin = gtk.SpinButton(self.ediff, 0, 0)
self.ediff_spin.set_digits(6)
pack(vbox,[gtk.Label(_("Self-consistency convergence: ")),
self.ediff_spin,
gtk.Label(_(" eV"))])
pack(vbox,gtk.Label(""))
swin = gtk.ScrolledWindow()
swin.set_border_width(0)
swin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.expert_keyword_set = gtk.Entry(max=55)
self.expert_keyword_add = gtk.Button(stock=gtk.STOCK_ADD)
self.expert_keyword_add.connect("clicked", self.expert_keyword_import)
self.expert_keyword_set.connect("activate", self.expert_keyword_import)
pack(vbox,[gtk.Label(_("Additional keywords: ")),
self.expert_keyword_set,
self.expert_keyword_add])
self.expert_vbox = gtk.VBox()
vbox.pack_start(swin, True, True, 0)
swin.add_with_viewport(self.expert_vbox)
self.expert_vbox.get_parent().set_shadow_type(gtk.SHADOW_NONE)
self.expert_vbox.get_parent().set_size_request(-1, 100)
swin.show()
self.expert_vbox.show()
pack(vbox, gtk.Label(""))
# run command and location of POTCAR files:
pack(vbox, gtk.Label(_('VASP execution command: ')))
self.run_command = pack(vbox, gtk.Entry(max=0))
if os.environ.has_key('VASP_COMMAND'):
self.run_command.set_text(os.environ['VASP_COMMAND'])
pack(vbox, gtk.Label(_('Directory for species defaults: ')))
self.pp_path = pack(vbox, gtk.Entry(max=0))
if os.environ.has_key('VASP_PP_PATH'):
self.pp_path.set_text(os.environ['VASP_PP_PATH'])
# Buttons at the bottom
pack(vbox, gtk.Label(""))
butbox = gtk.HButtonBox()
set_default_but = gtk.Button(_("Set Defaults"))
set_default_but.connect("clicked", self.set_defaults)
import_vasp_but = gtk.Button(_("Import VASP files"))
import_vasp_but.connect("clicked", self.import_vasp_files)
export_vasp_but = gtk.Button(_("Export VASP files"))
export_vasp_but.connect("clicked", self.export_vasp_files)
cancel_but = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel_but.connect('clicked', lambda widget: self.destroy())
ok_but = gtk.Button(stock=gtk.STOCK_OK)
ok_but.connect('clicked', self.ok)
butbox.pack_start(set_default_but, 0, 0)
butbox.pack_start(import_vasp_but, 0, 0)
butbox.pack_start(export_vasp_but, 0, 0)
butbox.pack_start(cancel_but, 0, 0)
butbox.pack_start(ok_but, 0, 0)
butbox.show_all()
pack(vbox, [butbox], end=True, bottom=True)
vbox.show()
self.add(vbox)
self.show()
self.grab_add() # Lock all other windows
self.load_attributes()
def load_attributes(self, directory="."):
"""Sets values of fields of the window according to the values
set inside the INCAR, KPOINTS and POTCAR file in 'directory'."""
from os import chdir
chdir(directory)
# Try and load INCAR, in the current directory
from ase.calculators.vasp import Vasp
calc_temp = Vasp()
try:
calc_temp.read_incar("INCAR")
except IOError:
pass
else:
if calc_temp.spinpol:
self.spinpol.set_active(True)
else:
self.spinpol.set_active(False)
if calc_temp.float_params['encut']:
self.encut.set_value(calc_temp.float_params['encut'])
if calc_temp.int_params['ismear'] == -1: # Fermi
vasp_ismear_default = 'Fermi'
elif calc_temp.int_params['ismear'] == 0: # Gauss
vasp_ismear_default = 'Gauss'
elif calc_temp.int_params['ismear'] > 0: # Methfessel-Paxton
vasp_ismear_default = 'Methfessel-Paxton'
else:
vasp_ismear_default = None
for i, x in enumerate(['Fermi', 'Gauss', 'Methfessel-Paxton']):
if vasp_ismear_default == x:
self.ismear.set_active(i)
if calc_temp.exp_params['ediff']:
self.ediff.set_value(calc_temp.exp_params['ediff'])
for i, x in enumerate(['Low', 'Normal', 'Accurate']):
if x == calc_temp.string_params['prec']:
self.prec.set_active(i)
if calc_temp.float_params['sigma']:
self.sigma.set_value(calc_temp.float_params['sigma'])
import copy
all_params = copy.deepcopy(calc_temp.float_params)
all_params.update(calc_temp.exp_params)
all_params.update(calc_temp.string_params)
all_params.update(calc_temp.int_params)
all_params.update(calc_temp.bool_params)
all_params.update(calc_temp.special_params)
for (key, value) in all_params.items():
if key in self.vasp_keyword_list \
and key not in self.vasp_keyword_gui_list \
and value is not None:
command = key + " " + str(value)
self.expert_keyword_create(command.split())
for (key, value) in calc_temp.list_params.items():
if key == "magmom" and value is not None:
command = key + " "
rep = 1
previous = value[0]
for v in value[1:]:
if v == previous:
rep += 1
else:
if rep > 1:
command += "%d*%f " % (rep, previous)
else:
command += "%f " % previous
rep = 1
previous = v
if rep > 1:
command += "%d*%f " % (rep, previous)
else:
command += "%f" % previous
self.expert_keyword_create(command.split())
elif value is not None:
command = key + " "
for v in value:
command += str(v) + " "
self.expert_keyword_create(command.split())
# Try and load POTCAR, in the current directory
try:
calc_temp.read_potcar()
except IOError:
pass
else:
#Set xc read from POTCAR
for i, x in enumerate(self.vasp_xc_list):
if x == calc_temp.input_params['xc']:
self.xc.set_active(i)
# Try and load KPOINTS, in the current directory
try:
calc_temp.read_kpoints("KPOINTS")
except IOError:
pass
else:
# Set KPOINTS grid dimensions
for i in range(3):
self.kpts_spin[i].set_value(calc_temp.input_params['kpts'][i])
def set_attributes(self, *args):
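        """Collect the current dialog settings into self.param, store it on
        the owner and export the run command and POTCAR path to the
        environment."""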
self.param = {}
self.param["xc"] = self.xc.get_active_text()
self.param["prec"] = self.prec.get_active_text()
self.param["kpts"] = (int(self.kpts[0].value),
int(self.kpts[1].value),
int(self.kpts[2].value))
self.param["encut"] = self.encut.value
self.param["ediff"] = self.ediff.value
self.param["ismear"] = self.get_ismear()
self.param["sigma"] = self.sigma.value
if self.spinpol.get_active():
self.param["ispin"] = 2
else:
self.param["ispin"] = 1
from ase.calculators.vasp import float_keys,exp_keys,string_keys,int_keys,bool_keys,list_keys,special_keys
for option in self.expert_keywords:
            if option[3]: # set type of parameter according to which list it is in
key = option[0].get_text().split()[0].strip()
val = option[1].get_text().strip()
if key in float_keys or key in exp_keys:
self.param[key] = float(val)
elif key == "magmom":
val = val.replace("*", " * ")
c = val.split()
val = []
i = 0
while i < len(c):
if c[i] == "*":
b = val.pop()
i += 1
for j in range(int(b)):
val.append(float(c[i]))
else:
val.append(float(c[i]))
i += 1
self.param[key] = val
elif key in list_keys:
c = val.split()
val = []
for i in c:
val.append(float(i))
self.param[key] = val
elif key in string_keys or key in special_keys:
self.param[key] = val
elif key in int_keys:
self.param[key] = int(val)
elif key in bool_keys:
self.param[key] = bool(val)
setattr(self.owner, self.attrname, self.param)
os.environ['VASP_COMMAND'] = self.run_command.get_text()
os.environ['VASP_PP_PATH'] = self.pp_path.get_text()
def ok(self, *args):
self.set_attributes(*args)
self.destroy()
def get_min_max_cutoff(self, *args):
# determine the recommended energy cutoff limits
from ase.calculators.vasp import Vasp
calc_temp = Vasp()
atoms_temp = self.owner.atoms.copy()
calc_temp.initialize(atoms_temp)
calc_temp.write_potcar(suffix='.check_energy_cutoff')
enmin = -1e6
enmax = -1e6
for line in open("POTCAR.check_energy_cutoff",'r').readlines():
if "ENMIN" in line:
enmax = max(enmax,float(line.split()[2].split(';')[0]))
enmin = max(enmin,float(line.split()[5]))
from os import system
system("rm POTCAR.check_energy_cutoff")
return enmin, enmax
def k_changed(self, *args):
size = [self.kpts[i].value * np.sqrt(np.vdot(self.ucell[i],self.ucell[i])) for i in range(3)]
self.kpts_label.set_text(self.kpts_label_format % tuple(size))
def check_encut_warning(self,*args):
if self.encut.value < self.encut_min_default:
self.encut_warning.set_markup(_("<b>WARNING:</b> cutoff energy is lower than recommended minimum!"))
else:
self.encut_warning.set_markup("")
def check_ismear_changed(self,*args):
if self.ismear.get_active_text() == 'Methfessel-Paxton':
self.smearing_order_spin.set_sensitive(True)
else:
self.smearing_order_spin.set_sensitive(False)
def get_ismear(self,*args):
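        """Translate the smearing selection into VASP's ISMEAR value:
        -1 for Fermi, 0 for Gauss, N > 0 for Methfessel-Paxton of order N."""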
type = self.ismear.get_active_text()
if type == 'Methfessel-Paxton':
ismear_value = self.smearing_order.value
elif type == 'Fermi':
ismear_value = -1
else:
ismear_value = 0
return ismear_value
def destroy(self):
self.grab_remove()
gtk.Window.destroy(self)
def set_defaults(self, *args):
# Reset fields to what they were
self.spinpol.set_active(False)
for i, x in enumerate(['Low', 'Normal', 'Accurate']):
if x == self.vasp_prec_default:
self.prec.set_active(i)
self.encut_spin.set_value(self.encut_max_default)
self.ismear.set_active(2)
self.smearing_order.set_value(2)
self.ediff.set_value(1e-4)
for child in self.expert_vbox.children():
self.expert_vbox.remove(child)
for i, x in enumerate(self.vasp_xc_list):
if x == self.vasp_xc_default:
self.xc.set_active(i)
        for i in range(3):
            default = np.ceil(20.0 / np.sqrt(np.vdot(self.ucell[i],self.ucell[i])))
            self.kpts_spin[i].set_value(default)
def import_vasp_files(self, *args):
dirname = ""
chooser = gtk.FileChooserDialog(
_('Import VASP input files: choose directory ... '),
None, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_filename(dirname)
openr = chooser.run()
if openr == gtk.RESPONSE_OK or openr == gtk.RESPONSE_SAVE:
dirname = chooser.get_filename()
self.load_attributes(dirname)
chooser.destroy()
def export_vasp_files(self, *args):
filename = ""
chooser = gtk.FileChooserDialog(
_('Export VASP input files: choose directory ... '),
None, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
chooser.set_filename(filename)
save = chooser.run()
if save == gtk.RESPONSE_OK or save == gtk.RESPONSE_SAVE:
filename = chooser.get_filename()
from os import chdir
chdir(filename)
self.set_attributes(*args)
param = getattr(self.owner, "vasp_parameters")
from ase.calculators.vasp import Vasp
calc_temp = Vasp(**param)
atoms_temp = self.owner.atoms.copy()
atoms_temp.set_calculator(calc_temp)
calc_temp.initialize(atoms_temp)
calc_temp.write_incar(atoms_temp)
calc_temp.write_potcar()
calc_temp.write_kpoints()
calc_temp.write_sort_file()
from ase.io.vasp import write_vasp
write_vasp('POSCAR', calc_temp.atoms_sorted, symbol_count=calc_temp.symbol_count)
chooser.destroy()
def expert_keyword_import(self, *args):
command = self.expert_keyword_set.get_text().split()
if len(command) > 0 and command[0] in self.vasp_keyword_list and not command[0] in self.vasp_keyword_gui_list:
self.expert_keyword_create(command)
        elif len(command) > 0 and command[0] in self.vasp_keyword_gui_list:
oops(_("Please use the facilities provided in this window to "
"manipulate the keyword: %s!") % command[0])
        elif len(command) > 0:
oops(_("Don't know this keyword: %s"
"\nPlease check!\n\n"
"If you really think it should be available, "
"please add it to the top of ase/calculators/vasp.py.")
% command[0])
self.expert_keyword_set.set_text("")
def expert_keyword_create(self, command):
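        """Append one INCAR-style 'key = value' row to the expert keyword
        table; a literal '=' token in command is tolerated and stripped."""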
key = command[0]
if command[1] == "=":
command.remove("=")
argument = command[1]
if len(command) > 2:
for a in command[2:]:
argument += ' '+a
index = len(self.expert_keywords)
self.expert_keywords += [[gtk.Label(" " +key+" = "),
gtk.Entry(max=55),
ExpertDeleteButton(index),
True]]
self.expert_keywords[index][1].set_text(argument)
self.expert_keywords[index][2].connect('clicked',self.expert_keyword_delete)
if not self.expert_vbox.get_children():
table = gtk.Table(1, 3)
table.attach(self.expert_keywords[index][0], 0, 1, 0, 1, 0)
table.attach(self.expert_keywords[index][1], 1, 2, 0, 1, 0)
table.attach(self.expert_keywords[index][2], 2, 3, 0, 1, 0)
table.show_all()
pack(self.expert_vbox, table)
else:
table = self.expert_vbox.get_children()[0]
nrows = table.get_property('n-rows')
table.resize(nrows + 1, 3)
table.attach(self.expert_keywords[index][0], 0, 1, nrows, nrows + 1, 0)
table.attach(self.expert_keywords[index][1], 1, 2, nrows, nrows + 1, 0)
table.attach(self.expert_keywords[index][2], 2, 3, nrows, nrows + 1, 0)
table.show_all()
def expert_keyword_delete(self, button, *args):
index = button.index # which one to kill
for i in [0,1,2]:
self.expert_keywords[index][i].destroy()
table = self.expert_vbox.get_children()[0]
nrows = table.get_property('n-rows')
table.resize(nrows-1, 3)
self.expert_keywords[index][3] = False
| alexei-matveev/ase-local | ase/gui/calculator.py | Python | gpl-2.0 | 84,895 | 0.003028 |
from warnings import warn
warn("IPython.utils.daemonize has moved to ipyparallel.apps.daemonize", stacklevel=2)
from ipyparallel.apps.daemonize import daemonize
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/utils/daemonize.py | Python | bsd-2-clause | 162 | 0.012346 |
# -*- coding: UTF-8 -*-
from freesms import FreeClient
from base import BaseProvider, MissingConfigParameter, ServerError
class FreeProvider(BaseProvider):
"""
This is a provider class for the French telco 'Free'.
>>> f = FreeProvider({'api_id': '12345678', 'api_key':'xyz'})
>>> f.send('Hello, World!')
True
"""
def required_keys(self):
return ['api_id', 'api_key']
def send(self, msg):
params = {
'user': self.params['api_id'],
'passwd': self.params['api_key']
}
f = FreeClient(**params)
res = f.send_sms(msg)
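        # Map the HTTP status of the Free API response onto the provider's
        # contract: 200 means the SMS was accepted, 400 signals missing or
        # invalid parameters, 500 a server-side failure.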
if res.status_code == 200:
return True
if res.status_code == 400:
raise MissingConfigParameter()
if res.status_code == 500:
raise ServerError()
return False
| bfontaine/RemindMe | remindme/providers/free.py | Python | mit | 841 | 0 |
import sys
import xbmc,xbmcaddon,xbmcvfs
import sqlite3
from subprocess import Popen
import datetime,time
channel = sys.argv[1]
start = sys.argv[2]
ADDON = xbmcaddon.Addon(id='script.tvguide.Vader')
def adapt_datetime(ts):
# http://docs.python.org/2/library/sqlite3.html#registering-an-adapter-callable
return time.mktime(ts.timetuple())
def convert_datetime(ts):
try:
return datetime.datetime.fromtimestamp(float(ts))
except ValueError:
return None
sqlite3.register_adapter(datetime.datetime, adapt_datetime)
sqlite3.register_converter('timestamp', convert_datetime)
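# With the adapter/converter pair registered above, datetime objects passed to
# the queries below are stored as POSIX timestamps and columns declared as
# "timestamp" come back as datetime objects (this relies on the
# detect_types=sqlite3.PARSE_DECLTYPES flag used when connecting).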
path = xbmc.translatePath('special://profile/addon_data/script.tvguide.Vader/source.db')
try:
conn = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
except Exception as detail:
xbmc.log("EXCEPTION: (script.tvguide.Vader) %s" % detail, xbmc.LOGERROR)
c = conn.cursor()
startDate = datetime.datetime.fromtimestamp(float(start))
c.execute('SELECT DISTINCT * FROM programs WHERE channel=? AND start_date = ?', [channel,startDate])
for row in c:
title = row["title"]
endDate = row["end_date"]
duration = endDate - startDate
before = int(ADDON.getSetting('autoplaywiths.before'))
after = int(ADDON.getSetting('autoplaywiths.after'))
extra = (before + after) * 60
#TODO start from now
#seconds = duration.seconds + extra
#if seconds > (3600*4):
seconds = 3600*4
break
# Find the channel's stream url
c.execute('SELECT stream_url FROM custom_stream_url WHERE channel=?', [channel])
row = c.fetchone()
url = ""
if row:
url = row[0]
if not url:
quit()
# Find the actual url used to play the stream
#core = "dummy"
#xbmc.executebuiltin('PlayWith(%s)' % core)
player = xbmc.Player()
player.play(url)
count = 30
url = ""
while count:
count = count - 1
time.sleep(1)
if player.isPlaying():
url = player.getPlayingFile()
break
player.stop()
# Play with your own preferred player and paths
if url:
name = "%s = %s = %s" % (start,channel,title)
name = name.encode("cp1252")
filename = xbmc.translatePath("special://temp/%s.ts" % name)
#filename = "/storage/recordings/%s.ts" % name
ffmpeg = r"c:\utils\ffmpeg.exe"
ffmpeg = r"/usr/bin/ffmpeg"
cmd = [ffmpeg, "-y", "-i", url, "-c", "copy", "-t", str(seconds), filename]
p = Popen(cmd,shell=True)
#p = Popen(cmd,shell=False)
| ledtvavs/repository.ledtv | script.tvguide.Vader/resources/playwith/playwithchannel.py | Python | gpl-3.0 | 2,435 | 0.009035 |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
import logging
def setup_logging(name="cct", level=logging.DEBUG):
# create logger
logger = logging.getLogger(name)
logger.handlers = []
logger.setLevel(level)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
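# Illustrative usage (not part of the original module): configure the package
# logger once at startup, then fetch it by name wherever it is needed, e.g.
#
#     setup_logging("cct", logging.INFO)
#     logging.getLogger("cct").info("modules loaded")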
| goldmann/cct | cct/__init__.py | Python | mit | 696 | 0.001437 |
#!/usr/bin/env python
# Execute with
# $ python webvulnscan/__main__.py (2.6+)
# $ python -m webvulnscan (2.7+)
import sys
if __package__ is None and not hasattr(sys, "frozen"):
# direct call of __main__.py
import os.path
path = os.path.realpath(os.path.abspath(__file__))
sys.path.append(os.path.dirname(os.path.dirname(path)))
import webvulnscan
if __name__ == '__main__':
webvulnscan.main()
| hhucn/webvulnscan | webvulnscan/__main__.py | Python | mit | 428 | 0 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class SyncListTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"created_by": "created_by",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"links": {
"items": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items",
"permissions": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
},
"revision": "revision",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"unique_name": "unique_name",
"url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists.create()
self.holodeck.assert_has_request(Request(
'post',
'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists',
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"created_by": "created_by",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"links": {
"items": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items",
"permissions": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
},
"revision": "revision",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"unique_name": "unique_name",
"url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists.create()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists.list()
self.holodeck.assert_has_request(Request(
'get',
'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"lists": [],
"meta": {
"first_page_url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0",
"key": "lists",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0"
}
}
'''
))
actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"lists": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"created_by": "created_by",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"links": {
"items": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items",
"permissions": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
},
"revision": "revision",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"unique_name": "unique_name",
"url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"meta": {
"first_page_url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0",
"key": "lists",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0"
}
}
'''
))
actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_lists.list()
self.assertIsNotNone(actual)
| twilio/twilio-python | tests/integration/preview/sync/service/test_sync_list.py | Python | mit | 8,183 | 0.003666 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
bibauthorid_tables_utils
Bibauthorid's DB handler
"""
import sys
import re
import random
import bibauthorid_config as bconfig
import bibauthorid_structs as dat
from search_engine import get_record
from bibrank_citation_searcher import get_citation_dict
from dbquery import run_sql
from dbquery import OperationalError, ProgrammingError
from bibauthorid_utils import split_name_parts, create_normalized_name
from bibauthorid_utils import clean_name_string
from bibauthorid_authorname_utils import update_doclist
def get_papers_recently_modified(date=''):
'''
Returns the bibrecs with modification date more recent then date, or all
the bibrecs if no date is specified.
@param date: date
'''
papers = run_sql("select id from bibrec where modification_date > %s",
(str(date),))
if papers:
bibrecs = [i[0] for i in papers]
bibrecs.append(-1)
min_date = run_sql("select max(modification_date) from bibrec where "
"id in %s", (tuple(bibrecs),))
else:
min_date = run_sql("select now()")
return papers, min_date
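# Illustrative use of the helper above (not part of the original module):
#
#     recs, last_mod = get_papers_recently_modified('2011-01-01 00:00:00')
#     touched_bibrecs = [r[0] for r in recs]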
def populate_authornames_bibrefs_from_authornames():
'''
Populates aidAUTHORNAMESBIBREFS.
For each entry in aidAUTHORNAMES creates a corresponding entry in aidA.B. so it's possible to search
by bibrec/bibref at a reasonable speed as well and not only by name.
'''
nids = run_sql("select id,bibrefs from aidAUTHORNAMES")
for nid in nids:
for bibref in nid[1].split(','):
if bconfig.TABLES_UTILS_DEBUG:
print ('populate_authornames_bibrefs_from_authornames: Adding: '
' %s %s' % (str(nid[0]), str(bibref)))
run_sql("insert into aidAUTHORNAMESBIBREFS (Name_id, bibref) "
"values (%s,%s)", (str(nid[0]), str(bibref)))
def authornames_tables_gc(bunch_size=50):
'''
Performs garbage collecting on the authornames tables.
Potentially really slow.
'''
bunch_start = run_sql("select min(id) from aidAUTHORNAMESBIBREFS")
if len(bunch_start) >= 1:
bunch_start = int(bunch_start[0][0])
else:
return
abfs_ids_bunch = run_sql("select id,Name_id,bibref from aidAUTHORNAMESBIBREFS limit %s, %s"
, (str(bunch_start - 1), str(bunch_size)))
bunch_start += bunch_size
while len(abfs_ids_bunch) >= 1:
bib100list = []
bib700list = []
for i in abfs_ids_bunch:
if i[2].split(':')[0] == '100':
bib100list.append(i[2].split(':')[1])
elif i[2].split(':')[0] == '700':
bib700list.append(i[2].split(':')[1])
bib100liststr = '( '
for i in bib100list:
bib100liststr += "'" + str(i) + "',"
bib100liststr = bib100liststr[0:len(bib100liststr) - 1] + " )"
bib700liststr = '( '
for i in bib700list:
bib700liststr += "'" + str(i) + "',"
bib700liststr = bib700liststr[0:len(bib700liststr) - 1] + " )"
if len(bib100list) >= 1:
bib10xids = run_sql("select id from bib10x where id in %s"
% bib100liststr)
else:
bib10xids = []
if len(bib700list) >= 1:
bib70xids = run_sql("select id from bib70x where id in %s"
% bib700liststr)
else:
bib70xids = []
bib10xlist = []
bib70xlist = []
for i in bib10xids:
bib10xlist.append(str(i[0]))
for i in bib70xids:
bib70xlist.append(str(i[0]))
bib100junk = set(bib100list).difference(set(bib10xlist))
bib700junk = set(bib700list).difference(set(bib70xlist))
idsdict = {}
for i in abfs_ids_bunch:
idsdict[i[2]] = [i[0], i[1]]
junklist = []
for i in bib100junk:
junklist.append('100:' + i)
for i in bib700junk:
junklist.append('700:' + i)
for junkref in junklist:
try:
id_to_remove = idsdict[junkref]
run_sql("delete from aidAUTHORNAMESBIBREFS where id=%s",
(str(id_to_remove[0]),))
if bconfig.TABLES_UTILS_DEBUG:
print "authornames_tables_gc: idAUTHORNAMESBIBREFS deleting row " + str(id_to_remove)
authrow = run_sql("select id,Name,bibrefs,db_name from aidAUTHORNAMES where id=%s", (str(id_to_remove[1]),))
if len(authrow[0][2].split(',')) == 1:
run_sql("delete from aidAUTHORNAMES where id=%s", (str(id_to_remove[1]),))
if bconfig.TABLES_UTILS_DEBUG:
print "authornames_tables_gc: aidAUTHORNAMES deleting " + str(authrow)
else:
bibreflist = ''
for ref in authrow[0][2].split(','):
if ref != junkref:
bibreflist += ref + ','
bibreflist = bibreflist[0:len(bibreflist) - 1]
run_sql("update aidAUTHORNAMES set bibrefs=%s where id=%s",
(bibreflist, id_to_remove[1]))
if bconfig.TABLES_UTILS_DEBUG:
print "authornames_tables_gc: aidAUTHORNAMES updating " + str(authrow) + ' with ' + str(bibreflist)
except:
pass
abfs_ids_bunch = run_sql("select id,Name_id,bibref from aidAUTHORNAMESBIBREFS limit %s,%s" ,
(str(bunch_start - 1), str(bunch_size)))
bunch_start += bunch_size
def update_authornames_tables_from_paper(papers_list=[]):
"""
Updates the authornames tables with the names on the given papers list
@param papers_list: list of the papers which have been updated (bibrecs) ((1,),)
For each paper of the list gathers all names, bibrefs and bibrecs to be added to aidAUTHORNAMES
table, taking care of updating aidA.B. as well
NOTE: update_authornames_tables_from_paper: just to remember: get record would be faster but
we don't have the bibref there,
maybe there is a way to rethink everything not to use bibrefs? How to address
authors then?
"""
def update_authornames_tables(name, bibref):
'''
Update the tables for one bibref,name touple
'''
authornames_row = run_sql("select id,Name,bibrefs,db_name from aidAUTHORNAMES where db_name like %s",
(str(name),))
authornames_bibrefs_row = run_sql("select id,Name_id,bibref from aidAUTHORNAMESBIBREFS "
"where bibref like %s", (str(bibref),))
#@XXX: update_authornames_tables: if i'm not wrong there should always be only one result; will be checked further on
if ((len(authornames_row) > 1) or (len(authornames_bibrefs_row) > 1) or
(len(authornames_row) < len(authornames_bibrefs_row))):
if bconfig.TABLES_UTILS_DEBUG:
print "update_authornames_tables: More then one result or missing authornames?? Something is wrong, not updating" + str(authornames_row) + str(authornames_bibrefs_row)
return
if len(authornames_row) == 1:
# we have an hit for the name string; check if there is the 'new' bibref associated,
# if yes there is nothing to do, otherwise shold add it here and in the ANbibrefs table
if authornames_row[0][2].count(bibref) < 1:
if bconfig.TABLES_UTILS_DEBUG:
print 'update_authornames_tables: Adding new bibref to ' + str(authornames_row) + ' ' + str(name) + ' ' + str(bibref)
run_sql("update aidAUTHORNAMES set bibrefs=%s where id=%s",
(authornames_row[0][2] + ',' + str(bibref), authornames_row[0][0]))
if len(authornames_bibrefs_row) < 1:
# we have to add the bibref to the name, would be strange if it would be already there
run_sql("insert into aidAUTHORNAMESBIBREFS (Name_id,bibref) values (%s,%s)",
(authornames_row[0][0], str(bibref)))
else:
if bconfig.TABLES_UTILS_DEBUG:
print 'update_authornames_tables: Nothing to add to ' + str(authornames_row) + ' ' + str(name) + ' ' + str(bibref)
else:
#@NOTE: update_authornames_tables: we don't have the name string in the db: the name associated to the bibref is changed
# or this is a new name? Experimenting with bibulpload looks like if a name on a paper changes a new bibref is created;
#
if len(authornames_bibrefs_row) == 1:
# If len(authornames_row) is zero but we have a row in authornames_bibrefs_row means that
# the name string is changed, somehow!
# @FIXME: update_authornames_tables: this case should really be considered?
if bconfig.TABLES_UTILS_DEBUG:
print 'update_authornames_tables: The name associated to the bibref is changed?? ' + str(name) + ' ' + str(bibref)
else:
artifact_removal = re.compile("[^a-zA-Z0-9]")
raw_name = artifact_removal.sub("", name)
if len(raw_name) > 1:
dbname = name
else:
dbname = 'Error in name parsing!'
clean_name = create_normalized_name(split_name_parts(name))
authornamesid = run_sql("insert into aidAUTHORNAMES (Name,bibrefs,db_name) values (%s,%s,%s)",
(clean_name, str(bibref), dbname))
run_sql("insert into aidAUTHORNAMESBIBREFS (Name_id,bibref) values (%s,%s)",
(authornamesid, str(bibref)))
if bconfig.TABLES_UTILS_DEBUG:
print 'update_authornames_tables: Created new name ' + str(authornamesid) + ' ' + str(name) + ' ' + str(bibref)
tables = [['bibrec_bib10x', 'bib10x', '100__a', '100'], ['bibrec_bib70x', 'bib70x', '700__a', '700']]
for paper in papers_list:
for table in tables:
sqlstr = "select id_bibxxx from %s where id_bibrec=" % table[0]
bibrefs = run_sql(sqlstr+"%s", (str(paper[0]),))
for ref in bibrefs:
sqlstr = "select value from %s where tag='%s' and id=" % (table[1], table[2])
name = run_sql(sqlstr+"%s", (str(ref[0]),))
if len(name) >= 1:
update_authornames_tables(name[0][0], table[3] + ':' + str(ref[0]))
def populate_authornames():
"""
Author names table population from bib10x and bib70x
Average Runtime: 376.61 sec (6.27 min) for 327k entries
Should be called only with empty table, then use
update_authornames_tables_from_paper with the new papers which
are coming in or modified.
"""
max_rows_per_run = bconfig.TABLE_POPULATION_BUNCH_SIZE
if max_rows_per_run == -1:
max_rows_per_run = 5000
max100 = run_sql("SELECT COUNT(id) FROM bib10x WHERE tag = '100__a'")
max700 = run_sql("SELECT COUNT(id) FROM bib70x WHERE tag = '700__a'")
tables = "bib10x", "bib70x"
authornames_is_empty_checked = 0
authornames_is_empty = 1
# Bring author names from bib10x and bib70x to authornames table
for table in tables:
if table == "bib10x":
table_number = "100"
else:
table_number = "700"
querylimiter_start = 0
querylimiter_max = eval('max' + str(table_number) + '[0][0]')
if bconfig.TABLES_UTILS_DEBUG:
print "\nProcessing %s (%s entries):" % (table, querylimiter_max)
sys.stdout.write("0% ")
sys.stdout.flush()
while querylimiter_start <= querylimiter_max:
sys.stdout.write(".")
sys.stdout.flush()
percentage = int(((querylimiter_start + max_rows_per_run) * 100)
/ querylimiter_max)
sys.stdout.write(".%s%%." % (percentage))
sys.stdout.flush()
# Query the Database for a list of authors from the correspondent
# tables--several thousands at a time
bib = run_sql("SELECT id, value FROM %s WHERE tag = '%s__a' "
"LIMIT %s, %s" % (table, table_number,
querylimiter_start, max_rows_per_run))
authorexists = None
querylimiter_start += max_rows_per_run
for i in bib:
# For mental sanity, exclude things that are not names...
# Yes, I know that there are strange names out there!
# Yes, I read the 40 misconceptions about names.
# Yes, I know!
# However, these statistical outlaws are harmful.
artifact_removal = re.compile("[^a-zA-Z0-9]")
authorname = ""
if not i[1]:
continue
raw_name = artifact_removal.sub("", i[1])
if len(raw_name) > 1:
authorname = i[1]
if not authorname:
continue
if not authornames_is_empty_checked:
authornames_is_empty = run_sql("SELECT COUNT(id) "
"FROM aidAUTHORNAMES")
if authornames_is_empty[0][0] == 0:
authornames_is_empty_checked = 1
authornames_is_empty = 1
if not authornames_is_empty:
# Find duplicates in the database and append id if
# duplicate is found
authorexists = run_sql("SELECT id,Name,bibrefs,db_name FROM aidAUTHORNAMES "
"WHERE db_name = %s", (authorname,))
bibrefs = "%s:%s" % (table_number, i[0])
if not authorexists:
insert_name = ""
if len(authorname) > 240:
bconfig.LOGGER.warn("\nName too long, truncated to 240"
" chars: %s" % (authorname))
insert_name = authorname[0:254]
else:
insert_name = authorname
run_sql("INSERT INTO aidAUTHORNAMES VALUES"
" (NULL, %s, %s, %s)",
(create_normalized_name(
split_name_parts(insert_name)),
bibrefs, insert_name))
if authornames_is_empty:
authornames_is_empty = 0
else:
if authorexists[0][2].count(bibrefs) >= 0:
upd_bibrefs = "%s,%s" % (authorexists[0][2], bibrefs)
run_sql("UPDATE aidAUTHORNAMES SET bibrefs = "
"%s WHERE id = %s",
(upd_bibrefs, authorexists[0][0]))
if bconfig.TABLES_UTILS_DEBUG:
sys.stdout.write(" Done.")
sys.stdout.flush()
def get_diff_marc10x70x_to_anames():
'''
Determines the difference between the union of bib10x and bib70x and the
aidAUTHORNAMES table.
It will return the entries which are present in bib10x and bib70x but not
in aidAUTHORNAMES. Meant to be run periodically.
@todo: get_diff_marc10x70x_to_anames: find meaningful use for the
returned results.
@return: a list of the author names not contained in the authornames table
@rtype: list
'''
run_sql("DROP VIEW authors")
run_sql("create view authors AS \
(SELECT value FROM bib10x WHERE tag =\"100__a\") \
UNION \
(SELECT value FROM bib70x WHERE tag =\"700__a\")")
diff = run_sql("SELECT value from authors LEFT JOIN aidAUTHORNAMES as b"
+ " ON (authors.value = b.Name) WHERE b.name IS NULL")
return diff
def populate_doclist_for_author_surname(surname):
"""
Searches for all the documents containing a given surname and processes
them: creates the virtual author for each author on a document.
@param surname: The search is based on this last name.
@type surname: string
"""
if not dat.CITES_DICT:
cites = get_citation_dict("citationdict")
for key in cites:
dat.CITES_DICT[key] = cites[key]
if not dat.CITED_BY_DICT:
cited_by = get_citation_dict("reversedict")
for key in cited_by:
dat.CITED_BY_DICT[key] = cited_by[key]
bconfig.LOGGER.log(25, "Populating document list for %s" % (surname))
init_authornames(surname)
authors = [row for row in dat.AUTHOR_NAMES if not row['processed']]
for author in authors:
marc_100 = []
marc_700 = []
temp_marc = author['bibrefs'].split(',')
for j in temp_marc:
marcfield, internalid = j.split(':')
if marcfield == '100':
marc_100.append(internalid)
elif marcfield == '700':
marc_700.append(internalid)
else:
bconfig.LOGGER.error("Wrong MARC field. How did you do"
" that?!--This should never happen! boo!")
bibrecs = []
if marc_100:
bibrecs_100 = run_sql("SELECT id_bibrec FROM bibrec_bib10x"
+ " WHERE id_bibxxx = %s"
% (" OR id_bibxxx = ".join(marc_100)))
for j in bibrecs_100:
bibrecs.append(j[0])
if marc_700:
bibrecs_700 = run_sql("SELECT id_bibrec FROM bibrec_bib70x"
+ " WHERE id_bibxxx = %s"
% (" OR id_bibxxx = ".join(marc_700)))
for j in bibrecs_700:
bibrecs.append(j[0])
if load_records_to_mem_cache(bibrecs):
for bibrec in bibrecs:
update_doclist(bibrec, authorname_id=author['id'])
def load_records_to_mem_cache(bibrec_ids):
'''
Loads all the records specified in the list into the memory storage
facility. It will try to attach citation information to each record in
the process.
@param bibrec_ids: list of bibrec IDs to load to memory
@type bibrec_ids: list
@return: Success (True) or failure (False) of the process
@rtype: boolean
'''
if not bibrec_ids:
return False
for bibrec in bibrec_ids:
if not bibrec in dat.RELEVANT_RECORDS:
rec = get_record(bibrec)
if bconfig.LIMIT_AUTHORS_PER_DOCUMENT:
is_collaboration = False
authors = 0
try:
for field in rec['710'][0][0]:
if field[0] == 'g':
is_collaboration = True
break
except KeyError:
pass
if is_collaboration:
# If experimentalists shall be excluded uncomment
# the following line
#continue
pass
else:
try:
for field in rec['100'][0][0]:
if field[0] == 'a':
authors += 1
break
except KeyError:
pass
try:
for coauthor in rec['700']:
if coauthor[0][0][0] == 'a':
authors += 1
except KeyError:
pass
if authors > bconfig.MAX_AUTHORS_PER_DOCUMENT:
continue
dat.RELEVANT_RECORDS[bibrec] = rec
cites = []
cited_by = []
try:
cites = dat.CITES_DICT[bibrec]
except KeyError:
pass
try:
cited_by = dat.CITED_BY_DICT[bibrec]
except KeyError:
pass
dat.RELEVANT_RECORDS[bibrec]['cites'] = cites
dat.RELEVANT_RECORDS[bibrec]['cited_by'] = cited_by
return True
def init_authornames(surname):
'''
Initializes the AUTHOR_NAMES memory storage
@param surname: The surname to search for
@type surname: string
'''
if len(dat.AUTHOR_NAMES) > 0:
existing = [row for row in dat.AUTHOR_NAMES
if row['name'].split(",")[0] == surname]
if existing:
bconfig.LOGGER.log(25, "AUTHOR_NAMES already holds the "
"correct data.")
else:
bconfig.LOGGER.debug("AUTHOR_NAMES will have additional content")
for updated in [row for row in dat.AUTHOR_NAMES
if not row['processed']]:
updated['processed'] = True
_perform_authornames_init(surname)
else:
_perform_authornames_init(surname)
def _perform_authornames_init(surname):
'''
Performs the actual AUTHOR_NAMES memory storage init by reading values
from the database
@param surname: The surname to search for
@type surname: string
'''
# instead of replacing with ' ', this will construct the regex for the
# SQL query as well as the next if statement.
surname = clean_name_string(surname,
replacement=".{0,3}",
keep_whitespace=False)
if not surname.startswith(".{0,3}"):
surname = "^['`-]*%s" % (surname)
sql_query = ("SELECT id, name, bibrefs, db_name "
"FROM aidAUTHORNAMES WHERE name REGEXP \"%s\""
% (surname))
for author in run_sql(sql_query):
dat.AUTHOR_NAMES.append({'id': author[0],
'name': author[1],
'bibrefs': author[2],
'db_name': author[3],
'processed': False})
def find_all_last_names():
'''
Filters out all last names from all names in the database.
@return: a list of last names
@rtype: list of strings
'''
all_names = run_sql("SELECT Name FROM aidAUTHORNAMES")
last_names = set()
for name in all_names:
last_name = split_name_parts(name[0])[0]
# For mental sanity, exclude things that are not names...
# - Single letter names
# - Single number names
# - Names containing only numbers and/or symbols
# Yes, I know that there are strange names out there!
# Yes, I read the 40 misconceptions about names.
# Yes, I know!
# However, these statistical outlaws are harmful to the data set.
artifact_removal = re.compile("[^a-zA-Z0-9]")
last_name_test = artifact_removal.sub("", last_name)
if len(last_name_test) > 1:
last_names.add("%s," % (last_name,))
# for name in all_names:
# last_names.add([split_name_parts(name[0]), name[0]])
return list(last_names)
def write_mem_cache_to_tables(sanity_checks=False):
'''
Reads every memory cache and writes its contents to the appropriate
table in the database.
@param sanity_checks: Perform sanity checks before inserting (i.e. is the
data already present in the db?) and after the insertion (i.e. is the
data entered correctly?)
@type sanity_checks: boolean
'''
ra_id_offset = run_sql("SELECT max(realauthorID) FROM"
+ " aidREALAUTHORS")[0][0]
va_id_offset = run_sql("SELECT max(virtualauthorID) FROM"
+ " aidVIRTUALAUTHORS")[0][0]
cluster_id_offset = run_sql("SELECT max(id) FROM"
" aidVIRTUALAUTHORSCLUSTERS")[0][0]
if not ra_id_offset:
ra_id_offset = 0
if not va_id_offset:
va_id_offset = 0
if not cluster_id_offset:
cluster_id_offset = 0
max_va_id = dat.ID_TRACKER["va_id_counter"]
if max_va_id <= 1:
max_va_id = 2
random_va_id = random.randint(1, max_va_id - 1)
va_mem_data = [row['value'] for row in dat.VIRTUALAUTHOR_DATA
if (row["virtualauthorid"] == random_va_id
and row['tag'] == "orig_authorname_id")][0]
if sanity_checks:
if va_mem_data:
check_on_va = run_sql("SELECT id,virtualauthorID,tag,value FROM aidVIRTUALAUTHORSDATA "
"WHERE tag='orig_authorname_id' AND "
"value=%s" , (va_mem_data,))
if check_on_va:
bconfig.LOGGER.error("Sanity check reported that the data "
"exists. We'll skip this record for now. "
"Please check the data set manually.")
return False
bconfig.LOGGER.log(25, "Writing to persistence layer")
bconfig.LOGGER.log(25, "Offsets...RA: %s; VA: %s; CL: %s" % (ra_id_offset,
va_id_offset,
cluster_id_offset))
batch_max = bconfig.TABLE_POPULATION_BUNCH_SIZE
query = []
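    # INSERT statements are accumulated in 'query' and flushed in batches of
    # TABLE_POPULATION_BUNCH_SIZE statements to keep the number of run_sql()
    # round trips and the size of each multi-statement string bounded.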
for va_cluster in dat.VIRTUALAUTHOR_CLUSTERS:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"cluster table failed")
return False
query = []
query.append("INSERT INTO aidVIRTUALAUTHORSCLUSTERS (cluster_name) "
"VALUES (\"%s\"); "
% (va_cluster['clustername'],))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"cluster table failed")
return False
query = []
for va_data in dat.VIRTUALAUTHOR_DATA:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"data table failed")
return False
query = []
query.append("INSERT INTO aidVIRTUALAUTHORSDATA "
"(virtualauthorID, tag, value) VALUES "
"(%d, \"%s\", \"%s\"); "
% (va_data['virtualauthorid'] + va_id_offset,
va_data['tag'], va_data['value']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"data table failed")
return False
query = []
for va_entry in dat.VIRTUALAUTHORS:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"table failed")
return False
query = []
query.append("INSERT INTO aidVIRTUALAUTHORS "
"(virtualauthorID, authornamesID, p, clusterID) VALUES "
"(%d, %d, \"%s\", %d); "
% (va_entry['virtualauthorid'] + va_id_offset,
va_entry['authornamesid'], va_entry['p'],
va_entry['clusterid'] + cluster_id_offset))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"table failed")
return False
query = []
for ra_data in dat.REALAUTHOR_DATA:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"data table failed")
return False
query = []
if not ra_data['tag'] == 'outgoing_citation':
query.append("INSERT INTO aidREALAUTHORDATA "
"(realauthorID, tag, value, va_count, "
"va_names_p, va_p) VALUES "
"(%d, \"%s\", \"%s\", %d, "
"%f, %f); "
% (ra_data['realauthorid'] + ra_id_offset,
ra_data['tag'], ra_data['value'],
ra_data['va_count'], ra_data['va_np'],
ra_data['va_p']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"data table failed")
return False
query = []
for ra_entry in dat.REALAUTHORS:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"table failed")
return False
query = []
query.append("INSERT INTO aidREALAUTHORS "
"(realauthorID, virtualauthorID, p) VALUES "
"(%d, %d, %f); "
% (ra_entry['realauthorid'] + ra_id_offset,
ra_entry['virtualauthorid'] + va_id_offset,
ra_entry['p']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"table failed")
return False
query = []
for doc in dat.DOC_LIST:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into doc list "
"table failed")
return False
query = []
for processed_author in doc['authornameids']:
query.append("INSERT INTO aidDOCLIST "
"(bibrecID, processed_author) VALUES "
"(%d, %d); "
% (doc['bibrecid'], processed_author))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into doc list "
"table failed")
return False
query = []
if sanity_checks:
if va_mem_data:
check_on_va = run_sql("SELECT id,virtualauthorID,tag,value FROM aidVIRTUALAUTHORSDATA "
"WHERE tag='orig_authorname_id' AND "
"value=%s" , (va_mem_data,))
if not check_on_va:
bconfig.LOGGER.error("Sanity check reported that no data "
" exists in the database after writing "
" to it.")
return False
bconfig.LOGGER.log(25, "Everything is now written to the database. "
"Thanks. Bye.")
return True
def get_existing_last_names():
'''
Find all authors that have been processed and written to the database.
Extract all last names from this list and return these last names.
Especially helpful to exclude these clusters (last names) from a run.
@return: list of last names
@rtype: list of strings
'''
bconfig.LOGGER.log(25, "Reading info about existing authors from database")
db_lnames = set()
db_names = run_sql("select value from aidVIRTUALAUTHORSDATA where"
+ " tag='orig_name_string'")
for i in db_names:
db_lnames.add(i[0].split(',')[0])
return list(db_lnames)
def get_len_authornames_bibrefs():
'''
Reads the lengths of authornames and bibrefs.
    Used to determine if essential tables already exist.
@return: dict({'names': -1, 'bibrefs': -1})
@rtype: dict
'''
lengths = {'names':-1,
'bibrefs':-1}
if check_and_create_aid_tables():
authornames_len = run_sql("SELECT count(id) from aidAUTHORNAMES")
bibrefs_len = run_sql("SELECT count(id) from aidAUTHORNAMESBIBREFS")
try:
lengths['names'] = int(authornames_len[0][0])
lengths['bibrefs'] = int(bibrefs_len[0][0])
except (ValueError, TypeError):
lengths['names'] = -1
lengths['bibrefs'] = -1
return lengths
def check_and_create_aid_tables():
'''
    Checks if the database tables for Bibauthorid exist.
    @return: True if the tables exist, False if they do not or the check failed
@rtype: boolean
'''
try:
if not run_sql("show tables like 'aidAUTHORNAMES';"):
return False
except (ProgrammingError, OperationalError):
return False
return True
def load_mem_cache_from_tables():
'''
Loads database content for an author's last name cluster into the
memory storage facility.
@precondition: memory storage facility needs to be loaded with respective
authornames data (init_authornames(lastname))
@return: Success (True) or failure (False) of the loading process
@rtype: boolean
'''
# print "check for authornames mem table"
if not dat.AUTHOR_NAMES:
return False
authornames_ids = [row['id'] for row in dat.AUTHOR_NAMES]
if not authornames_ids:
return False
# print "Building offsets"
ra_id_offset = run_sql("SELECT max(realauthorID) FROM"
" aidREALAUTHORS")[0][0]
va_id_offset = run_sql("SELECT max(virtualauthorID) FROM"
" aidVIRTUALAUTHORS")[0][0]
cluster_id_offset = run_sql("SELECT max(id) FROM"
" aidVIRTUALAUTHORSCLUSTERS")[0][0]
dat.set_tracker("raid_counter", ra_id_offset + 1)
dat.set_tracker("va_id_counter", va_id_offset + 1)
dat.set_tracker("cluster_id", cluster_id_offset + 1)
# print "working on authornames ids..."
for authornames_id in authornames_ids:
db_vas = run_sql("SELECT virtualauthorid, authornamesid, p, clusterid "
"from aidVIRTUALAUTHORS WHERE authornamesid = %s",
(authornames_id,))
# print "loading VAs for authid %s" % authornames_id
db_vas_set = set([row[0] for row in db_vas])
if not db_vas_set:
db_vas_set = (-1, -1)
else:
db_vas_set.add(-1)
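        # Padding with -1 (an id that never occurs) keeps the tuple used with
        # "in %s" at two or more elements, so the expanded SQL list stays
        # syntactically valid even when no real ids were found.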
db_vas_tuple = tuple(db_vas_set)
db_ras = run_sql("SELECT realauthorid FROM "
"aidREALAUTHORS WHERE virtualauthorid in %s"
, (tuple(db_vas_tuple),))
if db_ras:
db_ras_set = set([row[0] for row in db_ras])
db_ras_set.add(-1)
db_ras_tuple = tuple(db_ras_set)
db_ra_vas = run_sql("SELECT virtualauthorid FROM aidREALAUTHORS "
"WHERE realauthorid in %s", (db_ras_tuple,))
db_ra_vas_set = set([row[0] for row in db_ra_vas])
db_ra_vas_set.add(-1)
db_ras_tuple = tuple(db_ra_vas_set)
db_vas_all = run_sql("SELECT virtualauthorid, authornamesid, p, "
"clusterid FROM aidVIRTUALAUTHORS WHERE "
"virtualauthorid in %s",
(db_ras_tuple,))
else:
db_vas_all = db_vas
for db_va in db_vas_all:
dat.VIRTUALAUTHORS.append({'virtualauthorid': db_va[0],
'authornamesid': db_va[1],
'p': db_va[2],
'clusterid': db_va[3]})
if not dat.VIRTUALAUTHORS:
# print "No Virtual Authors loaded. None created before."
return True
# print "Loading clusters"
cluster_ids = set([row['clusterid'] for row in dat.VIRTUALAUTHORS])
if not cluster_ids:
cluster_ids = (-1, -1)
else:
cluster_ids.add(-1)
db_va_clusters = run_sql("SELECT id, cluster_name FROM "
"aidVIRTUALAUTHORSCLUSTERS WHERE id in %s"
, (tuple(cluster_ids),))
# print "Storing clusters"
for db_va_cluster in db_va_clusters:
dat.VIRTUALAUTHOR_CLUSTERS.append({'clusterid': db_va_cluster[0],
'clustername': db_va_cluster[1]})
# print "Loading VA data"
va_ids = set([row['virtualauthorid'] for row in dat.VIRTUALAUTHORS])
if not va_ids:
va_ids = (-1, -1)
else:
va_ids.add(-1)
# print "Storing VA data"
db_va_data = run_sql("SELECT virtualauthorid, tag, value FROM "
"aidVIRTUALAUTHORSDATA WHERE virtualauthorid in %s"
, (tuple(va_ids),))
for db_va_dat in db_va_data:
dat.VIRTUALAUTHOR_DATA.append({'virtualauthorid' : db_va_dat[0],
'tag': db_va_dat[1],
'value': db_va_dat[2]})
# print "Loading RAs"
db_ras = run_sql("SELECT realauthorid, virtualauthorid, p FROM "
"aidREALAUTHORS WHERE virtualauthorid in %s"
, (tuple(va_ids),))
# print "Storing RAs"
for db_ra in db_ras:
dat.REALAUTHORS.append({'realauthorid': db_ra[0],
'virtualauthorid': db_ra[1],
'p': db_ra[2]})
# print "Loading RA data"
ra_ids = set([row['realauthorid'] for row in dat.REALAUTHORS])
if not ra_ids:
ra_ids = (-1, -1)
else:
ra_ids.add(-1)
db_ra_data = run_sql("SELECT realauthorid, tag, value, va_count, "
"va_names_p, va_p FROM aidREALAUTHORDATA WHERE "
"realauthorid in %s", (tuple(ra_ids),))
# print "Storing RA data"
for db_ra_dat in db_ra_data:
dat.REALAUTHOR_DATA.append({'realauthorid': db_ra_dat[0],
'tag': db_ra_dat[1],
'value': db_ra_dat[2],
'va_count': db_ra_dat[3],
'va_np': db_ra_dat[4],
'va_p': db_ra_dat[5]})
# print "Loading doclist entries"
bibrec_ids = set([int(row['value']) for row in dat.REALAUTHOR_DATA
if row['tag'] == "bibrec_id"])
if not bibrec_ids:
bibrec_ids = (-1, -1)
else:
bibrec_ids.add(-1)
db_doclist = run_sql("SELECT bibrecid, processed_author FROM aidDOCLIST "
"WHERE bibrecid in %s", (tuple(bibrec_ids),))
# print "Storing doclist entries"
for db_doc in db_doclist:
existing_item = [row for row in dat.DOC_LIST
if row['bibrecid'] == db_doc[0]]
if existing_item:
for update in [row for row in dat.DOC_LIST
if row['bibrecid'] == db_doc[0]]:
if not db_doc[1] in update['authornameids']:
update['authornameids'].append(db_doc[1])
else:
dat.DOC_LIST.append({'bibrecid': db_doc[0],
'authornameids': [db_doc[1]]})
    bibrec_ids = set(bibrec_ids)
    bibrec_ids.discard(-1)
    if bibrec_ids:
        # print "will load recs"
        if not load_records_to_mem_cache(list(bibrec_ids)):
            # print" FAILED loading records"
            return False
return True
def update_tables_from_mem_cache(sanity_checks=False, return_ra_updates=False):
'''
Updates the tables in the database with the information in the memory
storage while taking into account only changed data to optimize the time
needed for the update.
@param sanity_checks: Perform sanity checks while updating--slows down the
process but might detect mistakes and prevent harm. Default: False
@type sanity_checks: boolean
@param return_ra_updates: Will force the method to return a list of real
author ids that have been updated. Default: False
@type return_ra_updates: boolean
@return: Either True if update went through without trouble or False if it
did not and a list of updated real authors or an empty list
@rtype: tuple of (boolean, list)
'''
del_ra_ids = set([-1])
del_va_ids = dat.UPDATES_LOG['deleted_vas'].union(
dat.UPDATES_LOG['touched_vas'])
if del_va_ids:
del_va_ids.add(-1)
del_ra_ids_db = run_sql("SELECT realauthorid FROM aidREALAUTHORS "
"WHERE virtualauthorid in %s"
, (tuple(del_va_ids),))
for ra_id in del_ra_ids_db:
del_ra_ids.add(ra_id[0])
if sanity_checks:
va_count_db = run_sql("SELECT COUNT(DISTINCT virtualauthorid) "
"FROM aidVIRTUALAUTHORS WHERE "
"virtualauthorid in %s"
, (tuple(del_va_ids),))
try:
va_count_db = int(va_count_db[0][0])
except (ValueError, IndexError, TypeError):
bconfig.LOGGER.exception("Error while reading number of "
"virtual authors in database")
va_count_db = -1
if not (va_count_db == len(del_va_ids)):
bconfig.LOGGER.error("Sanity checks reported that the number "
"of virtual authors in the memory "
"storage is not equal to the number of "
"virtual authors in the database. "
"Aborting update mission.")
return (False, [])
bconfig.LOGGER.log(25, "Removing updated entries from "
"persistence layer")
run_sql("DELETE FROM aidVIRTUALAUTHORSDATA "
"WHERE virtualauthorid in %s", (tuple(del_va_ids),))
run_sql("DELETE FROM aidVIRTUALAUTHORS "
"WHERE virtualauthorid in %s", (tuple(del_va_ids),))
if len(tuple(del_ra_ids)) > 1:
run_sql("DELETE FROM aidREALAUTHORDATA "
"WHERE realauthorid in %s", (tuple(del_ra_ids),))
run_sql("DELETE FROM aidREALAUTHORS "
"WHERE realauthorid in %s", (tuple(del_ra_ids),))
insert_ra_ids = dat.UPDATES_LOG['new_ras'].union(del_ra_ids)
insert_va_ids = dat.UPDATES_LOG['new_vas'].union(
dat.UPDATES_LOG['touched_vas'])
bconfig.LOGGER.log(25, "Writing to persistence layer")
batch_max = bconfig.TABLE_POPULATION_BUNCH_SIZE
ra_id_db_max = run_sql("SELECT max(realauthorID) FROM"
" aidREALAUTHORS")[0][0]
va_id_db_max = run_sql("SELECT max(virtualauthorID) FROM"
" aidVIRTUALAUTHORS")[0][0]
cluster_id_db_max = run_sql("SELECT max(id) FROM"
" aidVIRTUALAUTHORSCLUSTERS")[0][0]
if not ra_id_db_max or not va_id_db_max or not cluster_id_db_max:
return (False, [])
new_clusters = [row for row in dat.VIRTUALAUTHOR_CLUSTERS
if row['clusterid'] > cluster_id_db_max]
query = []
if not insert_ra_ids or not insert_va_ids:
bconfig.LOGGER.log(25, "Saving update to persistence layer finished "
"with success! (There was nothing to do.)")
return (True, [])
for va_cluster in new_clusters:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"cluster table failed")
return (False, [])
query = []
query.append("INSERT INTO aidVIRTUALAUTHORSCLUSTERS (cluster_name) "
"VALUES (\"%s\"); "
% (va_cluster['clustername'],))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"cluster table failed")
return (False, [])
query = []
va_data_to_insert = [row for row in dat.VIRTUALAUTHOR_DATA
if row['virtualauthorid'] in insert_va_ids]
if sanity_checks:
db_existing_va_ids = run_sql("SELECT COUNT(DISTINCT virtualauthorid) "
"WHERE virtualauthorid in %s"
, (tuple(insert_va_ids),))
try:
db_existing_va_ids = int(va_count_db[0][0])
except (ValueError, IndexError, TypeError):
bconfig.LOGGER.exception("Error while reading number of "
"virtual authors in database")
db_existing_va_ids = -1
if not (db_existing_va_ids == 0):
bconfig.LOGGER.error("Sanity checks reported that the "
"virtual authors in the memory storage "
"that shall be inserted already exist "
"in the database. Aborting update mission.")
return (False, [])
for va_data in va_data_to_insert:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"data table failed")
return (False, [])
query = []
query.append("INSERT INTO aidVIRTUALAUTHORSDATA "
"(virtualauthorID, tag, value) VALUES "
"(%d, \"%s\", \"%s\"); "
% (va_data['virtualauthorid'],
va_data['tag'], va_data['value']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"data table failed")
return (False, [])
query = []
vas_to_insert = [row for row in dat.VIRTUALAUTHORS
if row['virtualauthorid'] in insert_va_ids]
for va_entry in vas_to_insert:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"table failed")
return (False, [])
query = []
query.append("INSERT INTO aidVIRTUALAUTHORS "
"(virtualauthorID, authornamesID, p, clusterID) VALUES "
"(%d, %d, \"%s\", %d); "
% (va_entry['virtualauthorid'],
va_entry['authornamesid'], va_entry['p'],
va_entry['clusterid']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into virtual author "
"table failed")
return (False, [])
query = []
if sanity_checks:
db_existing_ra_ids = run_sql("SELECT COUNT(DISTINCT realauthorid) "
"WHERE realauthorid in %s"
, (tuple(insert_ra_ids),))
try:
db_existing_ra_ids = int(db_existing_ra_ids[0][0])
except (ValueError, IndexError, TypeError):
bconfig.LOGGER.exception("Error while reading number of "
"real authors in database")
            db_existing_ra_ids = -1
if not (db_existing_ra_ids == 0):
bconfig.LOGGER.error("Sanity checks reported that the "
"real authors in the memory storage "
"that shall be inserted already exist "
"in the database. Aborting update mission.")
return (False, [])
ra_data_to_insert = [row for row in dat.REALAUTHOR_DATA
if row['realauthorid'] in insert_ra_ids]
for ra_data in ra_data_to_insert:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"data table failed")
return (False, [])
query = []
if not ra_data['tag'] == 'outgoing_citation':
query.append("INSERT INTO aidREALAUTHORDATA "
"(realauthorID, tag, value, va_count, "
"va_names_p, va_p) VALUES "
"(%d, \"%s\", \"%s\", %d, "
"%f, %f); "
% (ra_data['realauthorid'],
ra_data['tag'], ra_data['value'],
ra_data['va_count'], ra_data['va_np'],
ra_data['va_p']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"data table failed")
return (False, [])
query = []
ras_to_insert = [row for row in dat.REALAUTHORS
if row['realauthorid'] in insert_ra_ids]
for ra_entry in ras_to_insert:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"table failed")
return (False, [])
query = []
query.append("INSERT INTO aidREALAUTHORS "
"(realauthorID, virtualauthorID, p) VALUES "
"(%d, %d, %f); "
% (ra_entry['realauthorid'],
ra_entry['virtualauthorid'],
ra_entry['p']))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into real author "
"table failed")
return (False, [])
query = []
if sanity_checks:
db_existing_ra_ids = run_sql("SELECT COUNT(DISTINCT realauthorid) "
"WHERE realauthorid in %s"
, (tuple(insert_ra_ids),))
try:
db_existing_ra_ids = int(db_existing_ra_ids[0][0])
except (ValueError, IndexError, TypeError):
bconfig.LOGGER.exception("Error while reading number of "
"real authors in database")
db_existing_ra_ids = -1
if not (db_existing_ra_ids == len(insert_ra_ids)):
bconfig.LOGGER.error("Sanity checks reported that the number of"
"real authors in the memory storage "
"that shall be inserted is not equal to "
"the number of real authors now "
"in the database. Aborting update mission.")
return (False, [])
recid_updates = dat.UPDATES_LOG["rec_updates"]
if recid_updates:
recid_updates.add(-1)
run_sql("DELETE FROM aidDOCLIST WHERE bibrecid in %s"
, (tuple(recid_updates),))
doclist_insert = [row for row in dat.DOC_LIST
if row['bibrecid'] in dat.UPDATES_LOG["rec_updates"]]
for doc in doclist_insert:
if len(query) >= batch_max:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into doc list "
"table failed")
return (False, [])
query = []
for processed_author in doc['authornameids']:
query.append("INSERT INTO aidDOCLIST "
"(bibrecID, processed_author) VALUES "
"(%d, %d); "
% (doc['bibrecid'], processed_author))
if query:
try:
run_sql(''.join(query))
except:
bconfig.LOGGER.critical("Inserting into doc list "
"table failed")
return (False, [])
query = []
bconfig.LOGGER.log(25, "Saving update to persistence layer finished "
"with success!")
if return_ra_updates:
ra_ids = [[row['realauthorid']] for row in ras_to_insert]
return (True, ra_ids)
else:
return (True, [])
| valkyriesavage/invenio | modules/bibauthorid/lib/bibauthorid_tables_utils.py | Python | gpl-2.0 | 54,551 | 0.00275 |
import random
import time
from dot3k.menu import MenuOption
class Debris(MenuOption):
def __init__(self, backlight=None):
if backlight is None:
import dot3k.backlight
self.backlight = dot3k.backlight
else:
self.backlight = backlight
self.debug = False
self.star_seed = 'thestarsmydestination'
self.debris_seed = 'piratemonkeyrobotninja'
self.debris = []
self.stars = []
self.running = False
self.max_debris = 10
self.max_stars = 10
self.last_update = 0
self.time_start = 0
self.sprites = [
[14, 31, 31, 14, 0, 0, 0, 0], # 0: Debris top of char
[0, 0, 0, 0, 14, 31, 31, 14], # 1: Debris bottom of char
[30, 5, 7, 30, 0, 0, 0, 0], # 2: Ship top of char
[0, 0, 0, 0, 30, 5, 7, 30], # 3: Ship bottom of char
[30, 5, 7, 30, 14, 31, 31, 14], # 4: Ship above debris
[14, 31, 31, 14, 30, 5, 7, 30], # 5: Ship below debris
[0, 14, 31, 31, 31, 31, 14, 0] # 6: Giant debris
]
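    # Each sprite above is one custom LCD character: 8 rows, each an integer
    # whose low 5 bits are that row's pixels (assuming the usual 5x8 character
    # cells of the dot3k display). Two game rows share one character cell,
    # hence the separate "top of char" / "bottom of char" variants.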
self.width = 16
self.height = 5 # Two rows per char
self.player_x = 1 # Player horizontal position
self.player_y = 3 # Player vertical position
self.current_player_x = None
self.current_player_y = None
self.current_player_pos = None
self.fill_debris()
MenuOption.__init__(self)
def begin(self):
self.running = False
self.reset()
self.backlight.hue(0.0)
def reset(self):
self.player_x = 1
self.player_y = 3
self.fill_debris()
self.fill_stars()
self.running = True
self.time_start = 0
self.last_update = 0
def fill_stars(self):
random.seed(self.star_seed)
self.stars = []
while len(self.stars) < self.max_stars:
new = (random.randint(0, 15), random.randint(0, 2))
if not new in self.stars:
self.stars.append(new)
def fill_debris(self):
random.seed(self.debris_seed)
self.debris = []
while len(self.debris) < self.max_debris:
new = (random.randint(5, 15), random.randint(0, self.height))
if not new in self.debris:
self.debris.append(new)
print(self.debris)
def left(self):
if not self.running:
r = int(self.get_option('Backlight', 'r'))
g = int(self.get_option('Backlight', 'g'))
b = int(self.get_option('Backlight', 'b'))
self.backlight.rgb(r, g, b)
return False
self.player_x -= 1
if self.player_x < 0:
self.player_x = 0
return True
def right(self):
if not self.running:
self.reset()
return True
self.player_x += 1
if self.player_x > 15:
self.player_x = 15
return True
def up(self):
self.player_y -= 1
if self.player_y < 0:
self.player_y = 0
if self.debug:
print("Player up", self.player_y)
return True
def down(self):
self.player_y += 1
if self.player_y > self.height:
self.player_y = self.height - 1
if self.debug:
print("Player down", self.player_y)
return True
def update(self, menu):
if self.time_start == 0:
for idx, sprite in enumerate(self.sprites):
menu.lcd.create_char(idx, sprite)
menu.clear_row(0)
menu.clear_row(1)
menu.clear_row(2)
for x in range(3):
menu.lcd.set_cursor_position(5, 1)
menu.lcd.write(' 0' + str(3 - x) + '! ')
time.sleep(0.5)
self.backlight.hue(0.5)
self.time_start = self.millis()
# Move all stars left
for idx, star in enumerate(self.stars):
self.stars[idx] = (star[0] - 0.5, star[1])
# Move all debris left 1 place
for idx, rock in enumerate(self.debris):
self.debris[idx] = (rock[0] - 1, rock[1])
debris_x = int(rock[0])
debris_y = int(rock[1])
if debris_x < 0:
continue
if debris_x == self.player_x and debris_y == self.player_y:
# Boom!
menu.lcd.set_cursor_position(5, 1)
menu.lcd.write(' BOOM!')
if self.debug:
print(debris_x, debris_y)
print(self.player_x,
self.player_y)
exit()
self.running = False
return False
# Remove off-screen debris
self.debris = list(filter(lambda x: x[0] > -1, self.debris))
# Remove off-screen stars
self.stars = list(filter(lambda x: x[0] > -1, self.stars))
# Create new debris to replace the removed ones
while len(self.debris) < self.max_debris:
self.debris.append((15, random.randint(0, self.height)))
while len(self.stars) < self.max_stars:
self.stars.append((15, random.randint(0, 2)))
return True
def redraw(self, menu):
if not self.running:
return False
if self.millis() - self.last_update >= 250:
if not self.update(menu):
return False
self.last_update = self.millis()
game_time = str(int((self.millis() - self.time_start) / 1000)).zfill(3)
self.backlight.sweep(((self.millis() - self.time_start) / 500 % 360) / 359.0)
buffer = []
for i in range(3):
buffer.append([' '] * 16)
for idx, rock in enumerate(self.stars):
buffer[rock[1]][int(rock[0])] = '.'
player_v = (self.player_y % 2)
buffer[int(self.player_y / 2)][self.player_x] = chr(2 + player_v)
for idx, rock in enumerate(self.debris):
debris_x = int(rock[0])
debris_y = int(rock[1])
debris_v = (debris_y % 2)
debris_sprite = debris_v
if int(debris_y / 2) == int(self.player_y / 2) and debris_x == self.player_x and debris_v != player_v:
debris_sprite = 4 + player_v
current = buffer[int(debris_y / 2)][debris_x]
if current == chr(0) or current == chr(1):
debris_sprite = 6 # Giant Debris!
buffer[int(debris_y / 2)][debris_x] = chr(debris_sprite)
# Draw elapsed seconds
    buffer[0][16 - len(game_time):] = game_time
for idx, row in enumerate(buffer):
menu.write_row(idx, ''.join(row))
| teamllamauk/ScopeDriver | plugins/debris.py | Python | gpl-3.0 | 6,763 | 0.000591 |
import json
from aiohttp import ClientSession
from tribler_core.restapi import get_param
from tribler_core.tests.tools.test_as_server import TestAsServer
from tribler_core.utilities.path_util import Path
from tribler_core.version import version_id
def path_to_str(obj):
if isinstance(obj, dict):
return {path_to_str(k):path_to_str(v) for k, v in obj.items()}
if isinstance(obj, list):
return [path_to_str(i) for i in obj]
if isinstance(obj, Path):
return str(obj)
return obj
class AbstractBaseApiTest(TestAsServer):
"""
Tests for the Tribler HTTP API should create a subclass of this class.
"""
def setUpPreSession(self):
super(AbstractBaseApiTest, self).setUpPreSession()
self.config.set_http_api_enabled(True)
self.config.set_http_api_retry_port(True)
self.config.set_tunnel_community_enabled(False)
self.config.set_trustchain_enabled(False)
# Make sure we select a random port for the HTTP API
self.config.set_http_api_port(self.get_port())
async def do_request(self, endpoint, req_type, data, headers, json_response):
url = 'http://localhost:%d/%s' % (self.session.config.get_http_api_port(), endpoint)
headers = headers or {'User-Agent': 'Tribler ' + version_id}
async with ClientSession() as session:
async with session.request(req_type, url, data=data, headers=headers) as response:
return response.status, (await response.json(content_type=None)
if json_response else await response.read())
class AbstractApiTest(AbstractBaseApiTest):
"""
    This class contains some helper methods to perform requests and to check
    that the expected response code and response JSON are returned.
"""
async def do_request(self, endpoint, expected_code=200, expected_json=None,
request_type='GET', post_data={}, headers=None, json_response=True):
data = json.dumps(path_to_str(post_data)) if isinstance(post_data, (dict, list)) else post_data
status, response = await super(AbstractApiTest, self).do_request(endpoint, request_type,
data, headers, json_response)
self.assertEqual(status, expected_code, response)
if response is not None and expected_json is not None:
self.assertDictEqual(expected_json, response)
return response
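# Illustrative sketch of a subclass (the 'settings' endpoint name is only an
# example, not taken from this test suite):
#
#     class TestSettingsEndpoint(AbstractApiTest):
#         async def test_get_settings(self):
#             response = await self.do_request('settings', expected_code=200)
#             self.assertIn('settings', response)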
class TestBaseApi(TestAsServer):
"""
Test some basic functionality of the restful API
"""
def test_get_parameters(self):
"""
Test the get_parameters method
"""
parameters = {'abc': [3]}
self.assertIsNone(get_param(parameters, 'abcd'))
self.assertIsNotNone(get_param(parameters, 'abc'))
| hbiyik/tribler | src/tribler-core/tribler_core/restapi/base_api_test.py | Python | lgpl-3.0 | 2,844 | 0.003516 |
class DeferredForeignKey(object):
def __init__(self, *args, **kwargs):
self.name = kwargs.pop('name', None)
self.args = args
self.kwargs = kwargs
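# DeferredForeignKey is only a placeholder descriptor: a model declares a field
# with it (optionally passing a name), and the concrete ForeignKey target is
# supplied later by dfk's repointing machinery. This is a sketch of the intent,
# not of the exact repointing API.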
| danfairs/django-dfk | dfk/models.py | Python | bsd-3-clause | 176 | 0 |
import logging, os, json
import search.archive
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
logging.info('**** new Singleton instance')
return cls._instances[cls]
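# With this metaclass, constructing Archives() more than once returns the same
# shared object, so the archive loading in __init__ runs only for the first
# call; e.g. (sketch) Archives('/data/archives') is Archives() evaluates True.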
class Archives(metaclass=Singleton):
def __init__(self, archives_dir=None):
        if archives_dir is None:
from www import config
self.archives_dir = config.ARCHIVES_PATH
else:
self.archives_dir = archives_dir
self.data = {}
self.loaded = False
logging.info('loading archives...')
self.load()
logging.info('done.')
def load(self):
if self.loaded:
return
if not os.path.isdir(self.archives_dir):
logging.error("Archives:: the path - " + self.archives_dir + " - is not a valid directory. Aborting.")
logging.error(" -- current cwd is: " + os.getcwd())
return
arch = [d for d in os.listdir(self.archives_dir) if os.path.isdir(os.path.join(self.archives_dir, d))]
self.data = {}
for a in arch:
logging.info("loading " + a)
# archive_path = os.path.join(self.archives_dir, a)
self.data[a] = self.load_archive(self.archives_dir, a)
logging.info("done.")
self.loaded = True
def load_archive(self, archive_dir, archive_name):
if not os.path.isdir(archive_dir):
logging.error("Archives:: the path - " + archive_dir + " - is not a valid directory. Aborting.")
return
archive = search.archive.Archive(archive_dir)
archive.load(archive_name)
return archive
        # # -- should use Archive in search module here....
# files = [f for f in os.listdir(archive_dir) if f.endswith('.json')]
# arch = {}
# for f in files:
# file_path = os.path.join(archive_dir, f)
# with open(file_path) as fdata:
# arch[f.replace('.json', '')] = json.load(fdata)
# return arch
| gauthiier/mailinglists | www/archives.py | Python | gpl-3.0 | 1,896 | 0.030063 |
#!/usr/bin/env python
from ecl.summary import EclSum
OIL_PRICES = {
"2010-01-01": 78.33,
"2010-02-01": 76.39,
"2010-03-01": 81.20,
"2010-04-01": 84.29,
"2010-05-01": 73.74,
"2010-06-01": 75.34,
"2010-07-01": 76.32,
"2010-08-01": 76.60,
"2010-09-01": 75.24,
"2010-10-01": 81.89,
"2010-11-01": 84.25,
"2010-12-01": 89.15,
"2011-01-01": 89.17,
"2011-02-01": 88.58,
"2011-03-01": 102.86,
"2011-04-01": 109.53,
"2011-05-01": 100.90,
"2011-06-01": 96.26,
"2011-07-01": 97.30,
"2011-08-01": 86.33,
"2011-09-01": 85.52,
"2011-10-01": 86.32,
"2011-11-01": 97.16,
"2011-12-01": 98.56,
"2012-01-01": 100.27,
"2012-02-01": 102.20,
"2012-03-01": 106.16,
"2012-04-01": 103.32,
"2012-05-01": 94.65,
"2012-06-01": 82.30,
"2012-07-01": 87.90,
"2012-08-01": 94.13,
"2012-09-01": 94.51,
"2012-10-01": 89.49,
"2012-11-01": 86.53,
"2012-12-01": 87.86,
"2013-01-01": 94.76,
"2013-02-01": 95.31,
"2013-03-01": 92.94,
"2013-04-01": 92.02,
"2013-05-01": 94.51,
"2013-06-01": 95.77,
"2013-07-01": 104.67,
"2013-08-01": 106.57,
"2013-09-01": 106.29,
"2013-10-01": 100.54,
"2013-11-01": 93.86,
"2013-12-01": 97.63,
"2014-01-01": 94.62,
"2014-02-01": 100.82,
"2014-03-01": 100.80,
"2014-04-01": 102.07,
"2014-05-01": 102.18,
"2014-06-01": 105.79,
"2014-07-01": 103.59,
"2014-08-01": 96.54,
"2014-09-01": 93.21,
"2014-10-01": 84.40,
"2014-11-01": 75.79,
"2014-12-01": 59.29,
"2015-01-01": 47.22,
"2015-02-01": 50.58,
"2015-03-01": 47.82,
"2015-04-01": 54.45,
"2015-05-01": 59.27,
"2015-06-01": 59.82,
"2015-07-01": 50.90,
"2015-08-01": 42.87,
"2015-09-01": 45.48,
}
if __name__ == "__main__":
ecl_sum = EclSum("SNAKE_OIL_FIELD")
start_time = ecl_sum.getStartTime()
date_ranges = ecl_sum.timeRange(start_time, interval="1M")
production_sums = ecl_sum.blockedProduction("FOPT", date_ranges)
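    # blockedProduction is assumed to return one value per interval between
    # consecutive entries of date_ranges: the growth of the cumulative FOPT
    # total over that interval. Hence production_sums holds
    # len(date_ranges) - 1 monthly volumes, each priced individually below.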
npv = 0.0
for index in range(0, len(date_ranges) - 1):
date = date_ranges[index + 1] # end of period
production_sum = production_sums[index]
oil_price = OIL_PRICES[date.date().strftime("%Y-%m-%d")]
production_value = oil_price * production_sum
npv += production_value
with open("snake_oil_npv.txt", "w") as output_file:
output_file.write("NPV %s\n" % npv)
if npv < 80000:
rating = "POOR"
elif 80000 <= npv < 100000:
rating = "AVERAGE"
elif 100000 <= npv < 120000:
rating = "GOOD"
else:
rating = "EXCELLENT"
output_file.write("RATING %s\n" % rating)
| joakim-hove/ert | test-data/local/snake_oil_structure/snake_oil/jobs/snake_oil_npv.py | Python | gpl-3.0 | 2,757 | 0 |
#!/usr/bin/env python
##########################################################################
#
# Copyright (c) 2020, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Cinesite VFX Ltd. nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import datetime
import github
import os
import re
import sys
import json
# GitHub Action workflow variables can be populated at run-time by echoing special
# strings to an env file. The following allows vars to be set:
#
# https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#environment-files
#
# echo var=value >> $GITHUB_ENV
#
# We make use of this mechanism to allow custom logic to define the build name
# as well as determine the correct commit hash depending on the nature of the
# trigger. These variables can be referenced in a pipeline yaml file downstream
# of the step that runs this script.
# Actions is somewhat sparse in what information is available via the GITHUB_*
# env vars (from the github context). There is however a veritable treasure
# trove in the .json file pointed to by GITHUB_EVENT_PATH. "The Internets" seem
# to suggest this is the most reliable way of determining information about the
# triggering commit. Some of the official context vars may vary after a retry,
# etc. too.
#
# The contents of this file is based on the webhook payload so should be
# relatively stable as it is part of that public API.
with open( os.environ["GITHUB_EVENT_PATH"] ) as f :
eventData = json.load( f )
## Source Branches
buildBranch = os.environ.get( "GITHUB_BASE_REF", "" )
sourceBranch = os.environ.get( "GITHUB_HEAD_REF", buildBranch )
## Source Commit Hash
commit = os.environ["GITHUB_SHA"]
## Pull Request builds
# Actions merges the branch into its target in PR build, so GITHUB_SHA isn't
# correct as it references the ephemeral merge. We also want to extract the
# pull request number for the build name.
pullRequest = ""
if os.environ.get( "GITHUB_EVENT_NAME" ) == "pull_request" :
commit = eventData["pull_request"]["head"]["sha"]
pullRequest = eventData["pull_request"]["number"]
## Tag builds
# A variety of build types may be in service of a tag (i.e. a release publish
# or manual retry for a specific tag).
tag = ""
if "/tags" in os.environ["GITHUB_REF"] :
tag = os.environ["GITHUB_REF"].replace( "refs/tags/", "" )
## Release ID
# To allow builds to be published to a release, we need to lookup the ID of any
# release that matches the tag we're building, if there is one.
releaseId = ""
if tag :
githubClient = github.Github( os.environ.get( 'GITHUB_ACCESS_TOKEN' ) )
repo = githubClient.get_repo( os.environ.get( 'GITHUB_REPOSITORY' ) )
for r in repo.get_releases() :
if r.tag_name == tag :
releaseId = r.id
break
if releaseId :
# Check that the version specified by the SConstruct matches the
# version in the tag.
versions = {}
versionRe = re.compile( r"^gaffer(.*)Version = (\d+)")
with open( "SConstruct" ) as sconstruct :
for line in sconstruct.readlines() :
versionMatch = versionRe.match( line )
if versionMatch :
versions[versionMatch.group( 1 )] = versionMatch.group( 2 )
version = "{Milestone}.{Major}.{Minor}.{Patch}".format( **versions )
if version != tag :
sys.stderr.write( "Tag \"{}\" does not match SConstruct version \"{}\"\n".format( tag, version ) )
sys.exit( 1 )
## Build Name
# We have a couple of naming conventions for builds, depending on the nature of the trigger.
formatVars = {
"variant" : os.environ["GAFFER_BUILD_VARIANT"],
"timestamp" : datetime.datetime.now().strftime( "%Y_%m_%d_%H%M" ),
"pullRequest" : pullRequest,
"shortCommit" : commit[:8],
"tag" : tag,
"branch" : re.sub( r"[^a-zA-Z0-9_]", "", sourceBranch )
}
nameFormats = {
"default" : "gaffer-{timestamp}-{shortCommit}-{variant}",
"pull_request" : "gaffer-pr{pullRequest}-{branch}-{timestamp}-{shortCommit}-{variant}",
"release" : "gaffer-{tag}-{variant}"
}
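# For illustration (all values hypothetical): a pull_request trigger could
# yield "gaffer-pr1234-fixCrash-2020_11_05_0930-0123abcd-linux-gcc9", while a
# release build of tag 0.58.0.0 would yield "gaffer-0.58.0.0-linux-gcc9".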
trigger = os.environ.get( 'GITHUB_EVENT_NAME', '' )
# If we have a releaseID (and tag) then we always use release naming convention
# to allow manual re-runs of release builds that fail for <reasons>.
if tag and releaseId :
print( "Have Release ID %s for tag %s, using release naming." % ( releaseId, tag ) )
trigger = "release"
buildName = nameFormats.get( trigger, nameFormats['default'] ).format( **formatVars )
## Set vars in the downstream workflow environment
with open( os.environ["GITHUB_ENV"], "a" ) as f :
print( "Setting $GAFFER_BUILD_NAME to '%s'" % buildName )
f.write( 'GAFFER_BUILD_NAME=%s\n' % buildName )
print( "Setting $GAFFER_SOURCE_COMMIT to '%s'" % commit )
f.write( 'GAFFER_SOURCE_COMMIT=%s\n' % commit )
print( "Setting $GAFFER_GITHUB_RELEASEID to '%s'" % releaseId )
f.write( 'GAFFER_GITHUB_RELEASEID=%s\n' % releaseId )
| GafferHQ/gaffer | .github/workflows/main/setBuildVars.py | Python | bsd-3-clause | 6,394 | 0.021896 |
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
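    For example (illustrative values only), such a response could carry headers
    like `Total: 100`, `Per-Page: 5`, `Page: 2` and a `Link` header pointing at
    the neighbouring pages as described in RFC-5988.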
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.models.county_bulk import CountyBulk
class TestCountyBulk(unittest.TestCase):
""" CountyBulk unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCountyBulk(self):
"""
Test CountyBulk
"""
model = vericred_client.models.county_bulk.CountyBulk()
if __name__ == '__main__':
unittest.main()
| vericred/vericred-python | test/test_county_bulk.py | Python | apache-2.0 | 9,989 | 0.002903 |
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=no-member,too-few-public-methods
import datetime
from sqlalchemy.orm import relationship, backref
from sqlalchemy import Column, String, ForeignKey, Integer, DateTime, Boolean, Text, Enum
from rapid.lib import get_declarative_base
from rapid.master.data.database.models.base.base_model import BaseModel
from rapid.lib.constants import VcsReleaseStepType
Base = get_declarative_base()
class Release(BaseModel, Base):
name = Column(String(255), nullable=False, index=True)
date_created = Column(DateTime(), nullable=False, default=datetime.datetime.utcnow, index=True)
status_id = Column(Integer, ForeignKey('statuses.id'), nullable=False, index=True)
commit_id = Column(Integer, ForeignKey('commits.id'), nullable=False, index=True)
integration_id = Column(Integer, ForeignKey('integrations.id'), index=True)
status = relationship('Status')
integration = relationship('Integration')
commit = relationship('Commit', backref=backref('release', uselist=False))
details = relationship('ReleaseDetail', uselist=False, backref=backref('release'))
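    # As declared here, 'commit' (backref with uselist=False) and 'details'
    # (uselist=False) both behave as one-to-one links: one Release per Commit
    # and at most one ReleaseDetail per Release, though this is not enforced
    # by a unique constraint.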
class ReleaseDetail(BaseModel, Base):
release_id = Column(Integer, ForeignKey('releases.id'), nullable=False, index=True)
details = Column(Text)
class StepIntegration(BaseModel, Base):
step_id = Column(Integer, ForeignKey('steps.id'), nullable=False, index=True)
integration_id = Column(Integer, ForeignKey('integrations.id'), nullable=False, index=True)
class Step(BaseModel, Base):
name = Column(String(100), nullable=False)
custom_id = Column(String(25), nullable=False)
status_id = Column(Integer, ForeignKey('statuses.id'), nullable=False, index=True)
user_required = Column(Boolean, default=False, nullable=False)
release_id = Column(Integer, ForeignKey('releases.id'), nullable=False, index=True)
sort_order = Column(Integer, default=0)
release = relationship("Release", lazy='subquery', backref="steps")
status = relationship('Status')
integrations = relationship("Integration", secondary="step_integrations")
class StepUser(BaseModel, Base):
step_id = Column(Integer, ForeignKey('steps.id'), nullable=False, index=True)
user_id = Column(Integer, ForeignKey('users.id'), nullable=False, index=True)
date_created = Column(DateTime(), nullable=False, default=datetime.datetime.utcnow)
class StepUserComment(BaseModel, Base):
step_user_id = Column(Integer, ForeignKey('step_users.id'), nullable=False)
comment = Column(Text)
class User(BaseModel, Base):
name = Column(String(150), nullable=False)
username = Column(String(150), nullable=False)
active = Column(Boolean, default=True, nullable=False)
class VcsRelease(BaseModel, Base):
search_filter = Column(String(500), nullable=False)
notification_id = Column(String(250), nullable=False)
vcs_id = Column(Integer, ForeignKey('vcs.id'), nullable=False, index=True)
auto_release = Column(Boolean, nullable=False, default=False)
vcs = relationship('Vcs', lazy='subquery', backref='product_release')
steps = relationship("VcsReleaseStep", backref='vcs_release')
class VcsReleaseStep(BaseModel, Base):
name = Column(String(250), nullable=False)
custom_id = Column(String(250), nullable=False)
user_required = Column(Boolean, default=False, nullable=False)
sort_order = Column(Integer, default=0)
type = Column(Enum(*list(map(lambda x: x.name, VcsReleaseStepType))), nullable=False, default='PRE')
vcs_release_id = Column(Integer, ForeignKey('vcs_releases.id'), nullable=False, index=True)
__all__ = ['Release', 'StepIntegration', 'Step', 'StepUser', 'StepUserComment', 'StepIntegration', 'User', 'VcsRelease', 'VcsReleaseStep']
| BambooHR/rapid | rapid/release/data/models.py | Python | apache-2.0 | 4,322 | 0.003702 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_business_cards.py
DESCRIPTION:
This sample demonstrates how to recognize fields on business cards.
See fields found on a business card here:
https://aka.ms/formrecognizer/businesscardfields
USAGE:
python sample_recognize_business_cards.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
class RecognizeBusinessCardSample(object):
def recognize_business_card(self):
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
"..", "..", "./sample_forms/business_cards/business-card-english.jpg"))
# [START recognize_business_cards]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_business_cards(business_card=f, locale="en-US")
business_cards = poller.result()
for idx, business_card in enumerate(business_cards):
print("--------Recognizing business card #{}--------".format(idx+1))
contact_names = business_card.fields.get("ContactNames")
if contact_names:
for contact_name in contact_names.value:
print("Contact First Name: {} has confidence: {}".format(
contact_name.value["FirstName"].value, contact_name.value["FirstName"].confidence
))
print("Contact Last Name: {} has confidence: {}".format(
contact_name.value["LastName"].value, contact_name.value["LastName"].confidence
))
company_names = business_card.fields.get("CompanyNames")
if company_names:
for company_name in company_names.value:
print("Company Name: {} has confidence: {}".format(company_name.value, company_name.confidence))
departments = business_card.fields.get("Departments")
if departments:
for department in departments.value:
print("Department: {} has confidence: {}".format(department.value, department.confidence))
job_titles = business_card.fields.get("JobTitles")
if job_titles:
for job_title in job_titles.value:
print("Job Title: {} has confidence: {}".format(job_title.value, job_title.confidence))
emails = business_card.fields.get("Emails")
if emails:
for email in emails.value:
print("Email: {} has confidence: {}".format(email.value, email.confidence))
websites = business_card.fields.get("Websites")
if websites:
for website in websites.value:
print("Website: {} has confidence: {}".format(website.value, website.confidence))
addresses = business_card.fields.get("Addresses")
if addresses:
for address in addresses.value:
print("Address: {} has confidence: {}".format(address.value, address.confidence))
mobile_phones = business_card.fields.get("MobilePhones")
if mobile_phones:
for phone in mobile_phones.value:
print("Mobile phone number: {} has confidence: {}".format(phone.value, phone.confidence))
faxes = business_card.fields.get("Faxes")
if faxes:
for fax in faxes.value:
print("Fax number: {} has confidence: {}".format(fax.value, fax.confidence))
work_phones = business_card.fields.get("WorkPhones")
if work_phones:
for work_phone in work_phones.value:
print("Work phone number: {} has confidence: {}".format(work_phone.value, work_phone.confidence))
other_phones = business_card.fields.get("OtherPhones")
if other_phones:
for other_phone in other_phones.value:
print("Other phone number: {} has confidence: {}".format(other_phone.value, other_phone.confidence))
# [END recognize_business_cards]
if __name__ == '__main__':
sample = RecognizeBusinessCardSample()
sample.recognize_business_card()
| Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/sample_recognize_business_cards.py | Python | mit | 5,129 | 0.003509 |
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
transform(CKernelImplementations(), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
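# pykit's transform() is assumed to dispatch each IR operation to a matching
# op_<opname> method on the visitor below (op_kernel) and to replace the
# operation with the returned value whenever one is provided.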
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def op_kernel(self, op):
function = op.metadata['kernel']
overload = op.metadata['overload']
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = monosig.argtypes
if function.matches('ckernel', argtypes):
overload = function.best_match('ckernel', argtypes)
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
return op
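# A minimal usage sketch (illustrative; `func` is a pykit IR function and `env` the
# compilation environment, both normally provided by the surrounding pass pipeline,
# and the import path below assumes the package layout matches this file's location):
#
#     from blaze.compute.air.passes import ckernel_impls
#     ckernel_impls.run(func, env)   # rewrites matching 'kernel' ops into 'ckernel' ops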
| zeeshanali/blaze | blaze/compute/air/passes/ckernel_impls.py | Python | bsd-3-clause | 1,537 | 0.003904 |
"""Common objects used throughout the project."""
import atexit
import functools
import logging
import os
import shutil
import tempfile
import weakref
import click
class Config(object):
"""The global configuration and state of the running program."""
def __init__(self):
"""Constructor."""
self._already_set = set()
self._program_state = dict()
# Booleans.
self.banner_greatest_tag = False
self.banner_recent_tag = False
self.greatest_tag = False
self.invert = False
self.no_colors = False
self.no_local_conf = False
self.recent_tag = False
self.show_banner = False
# Strings.
self.banner_main_ref = 'master'
self.chdir = None
self.git_root = None
self.local_conf = None
self.priority = None
self.push_remote = 'origin'
self.root_ref = 'master'
# Tuples.
self.grm_exclude = tuple()
self.overflow = tuple()
self.sort = tuple()
self.whitelist_branches = tuple()
self.whitelist_tags = tuple()
# Integers.
self.verbose = 0
def __contains__(self, item):
"""Implement 'key in Config'.
:param str item: Key to search for.
:return: If item in self._program_state.
:rtype: bool
"""
return item in self._program_state
def __iter__(self):
"""Yield names and current values of attributes that can be set from Sphinx config files."""
for name in (n for n in dir(self) if not n.startswith('_') and not callable(getattr(self, n))):
yield name, getattr(self, name)
def __repr__(self):
"""Class representation."""
attributes = ('_program_state', 'verbose', 'root_ref', 'overflow')
key_value_attrs = ', '.join('{}={}'.format(a, repr(getattr(self, a))) for a in attributes)
return '<{}.{} {}>'.format(self.__class__.__module__, self.__class__.__name__, key_value_attrs)
def __setitem__(self, key, value):
"""Implement Config[key] = value, updates self._program_state.
:param str key: Key to set in self._program_state.
:param value: Value to set in self._program_state.
"""
self._program_state[key] = value
@classmethod
def from_context(cls):
"""Retrieve this class' instance from the current Click context.
:return: Instance of this class.
:rtype: Config
"""
try:
ctx = click.get_current_context()
except RuntimeError:
return cls()
return ctx.find_object(cls)
def pop(self, *args):
"""Pop item from self._program_state.
:param iter args: Passed to self._program_state.
:return: Object from self._program_state.pop().
"""
return self._program_state.pop(*args)
def update(self, params, ignore_set=False, overwrite=False):
"""Set instance values from dictionary.
:param dict params: Click context params.
:param bool ignore_set: Skip already-set values instead of raising AttributeError.
:param bool overwrite: Allow overwriting already-set values.
"""
log = logging.getLogger(__name__)
valid = {i[0] for i in self}
for key, value in params.items():
if not hasattr(self, key):
raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, key))
if key not in valid:
message = "'{}' object does not support item assignment on '{}'"
raise AttributeError(message.format(self.__class__.__name__, key))
if key in self._already_set:
if ignore_set:
log.debug('%s already set in config, skipping.', key)
continue
if not overwrite:
message = "'{}' object does not support item re-assignment on '{}'"
raise AttributeError(message.format(self.__class__.__name__, key))
setattr(self, key, value)
self._already_set.add(key)
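# A rough usage sketch (values below are hypothetical; in practice Click commands and
# the Sphinx config hooks drive this object):
#
#     config = Config.from_context()                  # fresh Config when outside a Click context
#     config.update({'root_ref': 'main', 'verbose': 1})
#     config.update({'root_ref': 'master'})           # AttributeError: 'root_ref' already set
#     config['scv_is_root'] = True                    # arbitrary program state via __setitem__
#     settings = dict(config)                         # __iter__ yields (name, value) pairs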
class HandledError(click.ClickException):
"""Abort the program."""
def __init__(self):
"""Constructor."""
super(HandledError, self).__init__(None)
def show(self, **_):
"""Error messages should be logged before raising this exception."""
logging.critical('Failure.')
class TempDir(object):
"""Similar to TemporaryDirectory in Python 3.x but with tuned weakref implementation."""
def __init__(self, defer_atexit=False):
"""Constructor.
        :param bool defer_atexit: Register cleanup() with atexit instead of running it when the object is garbage collected.
"""
self.name = tempfile.mkdtemp('sphinxcontrib_versioning')
if defer_atexit:
atexit.register(shutil.rmtree, self.name, True)
return
try:
weakref.finalize(self, shutil.rmtree, self.name, True)
except AttributeError:
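            # weakref.finalize is only available on newer Pythons (3.4+); fall back to a
            # proxy callback on older versions.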
weakref.proxy(self, functools.partial(shutil.rmtree, self.name, True))
def __enter__(self):
"""Return directory path."""
return self.name
def __exit__(self, *_):
"""Cleanup when exiting context."""
self.cleanup()
def cleanup(self):
"""Recursively delete directory."""
shutil.rmtree(self.name, onerror=lambda *a: os.chmod(a[1], __import__('stat').S_IWRITE) or os.unlink(a[1]))
if os.path.exists(self.name):
raise IOError(17, "File exists: '{}'".format(self.name))
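# Typical usage, sketched; the temporary directory is deleted when the block exits:
#
#     with TempDir() as temp_path:
#         ...  # write build output under temp_path
#
# With defer_atexit=True the removal is registered with atexit instead, which keeps the
# directory alive until interpreter shutdown rather than until garbage collection.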
| Robpol86/sphinxcontrib-versioning | sphinxcontrib/versioning/lib.py | Python | mit | 5,599 | 0.0025 |
# -*- coding: utf-8 -*-
""" Projy template for PythonPackage. """
# system
from datetime import date
from os import mkdir, rmdir
from shutil import move
from subprocess import call
# parent class
from projy.templates.ProjyTemplate import ProjyTemplate
# collectors
from projy.collectors.AuthorCollector import AuthorCollector
from projy.collectors.AuthorMailCollector import AuthorMailCollector
class DjangoProjectTemplate(ProjyTemplate):
""" Projy template class for PythonPackage. """
def __init__(self):
ProjyTemplate.__init__(self)
def directories(self):
""" Return the names of directories to be created. """
directories_description = [
self.project_name,
self.project_name + '/conf',
self.project_name + '/static',
]
return directories_description
def files(self):
""" Return the names of files to be created. """
files_description = [
# configuration
[ self.project_name,
'Makefile',
'DjangoMakefileTemplate' ],
[ self.project_name + '/conf',
'requirements_base.txt',
'DjangoRequirementsBaseTemplate' ],
[ self.project_name + '/conf',
'requirements_dev.txt',
'DjangoRequirementsDevTemplate' ],
[ self.project_name + '/conf',
'requirements_production.txt',
'DjangoRequirementsProdTemplate' ],
[ self.project_name + '/conf',
'nginx.conf',
'DjangoNginxConfTemplate' ],
[ self.project_name + '/conf',
'supervisord.conf',
'DjangoSupervisorConfTemplate' ],
[ self.project_name,
'fabfile.py',
'DjangoFabfileTemplate' ],
[ self.project_name,
'CHANGES.txt',
'PythonPackageCHANGESFileTemplate' ],
[ self.project_name,
'LICENSE.txt',
'GPL3FileTemplate' ],
[ self.project_name,
'README.txt',
'READMEReSTFileTemplate' ],
[ self.project_name,
'.gitignore',
'DjangoGitignoreTemplate' ],
# django files
[ self.project_name,
'dev.py',
'DjangoSettingsDevTemplate' ],
[ self.project_name,
'prod.py',
'DjangoSettingsProdTemplate' ],
]
return files_description
def substitutes(self):
""" Return the substitutions for the templating replacements. """
author_collector = AuthorCollector()
mail_collector = AuthorMailCollector()
substitute_dict = {
'project': self.project_name,
'project_lower': self.project_name.lower(),
'date': date.today().isoformat(),
'author': author_collector.collect(),
'author_email': mail_collector.collect(),
}
return substitute_dict
def posthook(self):
# build the virtualenv
call(['make'])
# create the Django project
call(['./venv/bin/django-admin.py', 'startproject', self.project_name])
# transform original settings files into 3 files for different env
mkdir('{p}/settings'.format(p=self.project_name))
self.touch('{p}/settings/__init__.py'.format(p=self.project_name))
move('dev.py', '{p}/settings'.format(p=self.project_name))
move('prod.py', '{p}/settings'.format(p=self.project_name))
move('{p}/{p}/settings.py'.format(p=self.project_name), '{p}/settings/base.py'.format(p=self.project_name))
# organize files nicely
mkdir('{p}/templates'.format(p=self.project_name))
move('{p}/manage.py'.format(p=self.project_name), 'manage.py')
move('{p}/{p}/__init__.py'.format(p=self.project_name), '{p}/'.format(p=self.project_name))
move('{p}/{p}/urls.py'.format(p=self.project_name), '{p}/'.format(p=self.project_name))
move('{p}/{p}/wsgi.py'.format(p=self.project_name), '{p}/'.format(p=self.project_name))
rmdir('{p}/{p}'.format(p=self.project_name))
# create empty git repo
call(['git', 'init'])
# replace some lines
self.replace_in_file('{p}/wsgi.py'.format(p=self.project_name),
'"{p}.settings"'.format(p=self.project_name),
'"{p}.settings.production"'.format(p=self.project_name))
self.replace_in_file('{p}/settings/base.py'.format(p=self.project_name),
u" # ('Your Name', 'your_email@example.com'),",
u" ('{}', '{}'),".format(self.substitutes()['author'],
self.substitutes()['author_email']))
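# The hooks above (directories, files, substitutes, posthook) are the whole contract a
# Projy template implements.  A stripped-down template would look roughly like this
# (class and file names below are illustrative only):
#
#     class MinimalProjectTemplate(ProjyTemplate):
#         def directories(self):
#             return [self.project_name]
#         def files(self):
#             # (target directory, file name, template class used to render it)
#             return [[self.project_name, 'README.txt', 'READMEReSTFileTemplate']]
#         def substitutes(self):
#             return {'project': self.project_name, 'date': date.today().isoformat()}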
| stephanepechard/projy | projy/templates/DjangoProjectTemplate.py | Python | gpl-3.0 | 4,869 | 0.00801 |
"""
Field classes.
"""
import datetime
import os
import re
import time
import urlparse
import warnings
from decimal import Decimal, DecimalException
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core.exceptions import ValidationError
from django.core import validators
import django.utils.copycompat as copy
from django.utils import formats
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, smart_str
from django.utils.functional import lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES
from util import ErrorList
from widgets import TextInput, PasswordInput, HiddenInput, MultipleHiddenInput, \
ClearableFileInput, CheckboxInput, Select, NullBooleanSelect, SelectMultiple, \
DateInput, DateTimeInput, TimeInput, SplitDateTimeWidget, SplitHiddenDateTimeWidget, \
FILE_INPUT_CONTRADICTION
__all__ = (
'Field', 'CharField', 'IntegerField',
'DEFAULT_DATE_INPUT_FORMATS', 'DateField',
'DEFAULT_TIME_INPUT_FORMATS', 'TimeField',
    'DEFAULT_DATETIME_INPUT_FORMATS', 'DateTimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'FilePathField', 'SlugField',
'TypedChoiceField'
)
def en_format(name):
"""
Helper function to stay backward compatible.
"""
from django.conf.locale.en import formats
warnings.warn(
"`django.forms.fields.DEFAULT_%s` is deprecated; use `django.utils.formats.get_format('%s')` instead." % (name, name),
DeprecationWarning
)
return getattr(formats, name)
DEFAULT_DATE_INPUT_FORMATS = lazy(lambda: en_format('DATE_INPUT_FORMATS'), tuple, list)()
DEFAULT_TIME_INPUT_FORMATS = lazy(lambda: en_format('TIME_INPUT_FORMATS'), tuple, list)()
DEFAULT_DATETIME_INPUT_FORMATS = lazy(lambda: en_format('DATETIME_INPUT_FORMATS'), tuple, list)()
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
default_error_messages = {
'required': _(u'This field is required.'),
'invalid': _(u'Enter a valid value.'),
}
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, error_messages=None, show_hidden_initial=False,
validators=[], localize=False):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of addtional validators to use
# localize -- Boolean that specifies if the field should be localized.
if label is not None:
label = smart_unicode(label)
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
if help_text is None:
self.help_text = u''
else:
self.help_text = smart_unicode(help_text)
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in validators.EMPTY_VALUES and self.required:
raise ValidationError(self.error_messages['required'])
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
return result
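# The clean() pipeline above (to_python -> validate -> run_validators) is the hook
# surface for custom fields.  A minimal illustrative subclass (not part of Django):
#
#     class CommaSeparatedField(Field):
#         def to_python(self, value):
#             if value in validators.EMPTY_VALUES:
#                 return []
#             return [item.strip() for item in value.split(',')]
#         def validate(self, value):
#             if self.required and not value:
#                 raise ValidationError(self.error_messages['required'])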
class CharField(Field):
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(min_length))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(max_length))
def to_python(self, value):
"Returns a Unicode object."
if value in validators.EMPTY_VALUES:
return u''
return smart_unicode(value)
def widget_attrs(self, widget):
if self.max_length is not None and isinstance(widget, (TextInput, PasswordInput)):
# The HTML attribute is maxlength, not max_length.
return {'maxlength': str(self.max_length)}
class IntegerField(Field):
default_error_messages = {
'invalid': _(u'Enter a whole number.'),
'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = int(str(value))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
return value
class FloatField(IntegerField):
default_error_messages = {
'invalid': _(u'Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
return value
class DecimalField(Field):
default_error_messages = {
'invalid': _(u'Enter a number.'),
'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
'max_digits': _('Ensure that there are no more than %s digits in total.'),
'max_decimal_places': _('Ensure that there are no more than %s decimal places.'),
'max_whole_digits': _('Ensure that there are no more than %s digits before the decimal point.')
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, *args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_str(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'])
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in validators.EMPTY_VALUES:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
# isn't equal to itself, so we can use this to identify NaN
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'])
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(self.error_messages['max_digits'] % self.max_digits)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(self.error_messages['max_decimal_places'] % self.decimal_places)
if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):
raise ValidationError(self.error_messages['max_whole_digits'] % (self.max_digits - self.decimal_places))
return value
class DateField(Field):
widget = DateInput
default_error_messages = {
'invalid': _(u'Enter a valid date.'),
}
def __init__(self, input_formats=None, *args, **kwargs):
super(DateField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
for format in self.input_formats or formats.get_format('DATE_INPUT_FORMATS'):
try:
return datetime.date(*time.strptime(value, format)[:3])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class TimeField(Field):
widget = TimeInput
default_error_messages = {
'invalid': _(u'Enter a valid time.')
}
def __init__(self, input_formats=None, *args, **kwargs):
super(TimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.time):
return value
for format in self.input_formats or formats.get_format('TIME_INPUT_FORMATS'):
try:
return datetime.time(*time.strptime(value, format)[3:6])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class DateTimeField(Field):
widget = DateTimeInput
default_error_messages = {
'invalid': _(u'Enter a valid date/time.'),
}
def __init__(self, input_formats=None, *args, **kwargs):
super(DateTimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
if isinstance(value, list):
# Input comes from a SplitDateTimeWidget, for example. So, it's two
# components: date and time.
if len(value) != 2:
raise ValidationError(self.error_messages['invalid'])
if value[0] in validators.EMPTY_VALUES and value[1] in validators.EMPTY_VALUES:
return None
value = '%s %s' % tuple(value)
for format in self.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'):
try:
return datetime.datetime(*time.strptime(value, format)[:6])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
# error_message is just kept for backwards compatibility:
if error_message:
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
if isinstance(regex, basestring):
regex = re.compile(regex)
self.regex = regex
self.validators.append(validators.RegexValidator(regex=regex))
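# Example (illustrative): RegexField(regex=r'^[0-9]{4}$') accepts '1234' and raises a
# ValidationError with the 'invalid' message for input that does not match the pattern.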
class EmailField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid e-mail address.'),
}
default_validators = [validators.validate_email]
def clean(self, value):
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _(u"No file was submitted. Check the encoding type on the form."),
'missing': _(u"No file was submitted."),
'empty': _(u"The submitted file is empty."),
'max_length': _(u'Ensure this filename has at most %(max)d characters (it has %(length)d).'),
'contradiction': _(u'Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in validators.EMPTY_VALUES:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'])
if self.max_length is not None and len(file_name) > self.max_length:
error_values = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'] % error_values)
if not file_name:
raise ValidationError(self.error_messages['invalid'])
if not file_size:
raise ValidationError(self.error_messages['empty'])
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'])
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
# in validators.EMPTY_VALUES; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(u"Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
import Image
# We need to get a file object for PIL. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = StringIO(data.read())
else:
file = StringIO(data['content'])
try:
# load() is the only method that can spot a truncated JPEG,
# but it cannot be called sanely after verify()
trial_image = Image.open(file)
trial_image.load()
# Since we're about to use the file again we have to reset the
# file object if possible.
if hasattr(file, 'reset'):
file.reset()
# verify() is the only method that can spot a corrupt PNG,
# but it must be called immediately after the constructor
trial_image = Image.open(file)
trial_image.verify()
except ImportError:
# Under PyPy, it is possible to import PIL. However, the underlying
# _imaging C module isn't available, so an ImportError will be
# raised. Catch and re-raise.
raise
except Exception: # Python Imaging Library doesn't recognize it as an image
raise ValidationError(self.error_messages['invalid_image'])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid URL.'),
'invalid_link': _(u'This URL appears to be a broken link.'),
}
def __init__(self, max_length=None, min_length=None, verify_exists=False,
validator_user_agent=validators.URL_VALIDATOR_USER_AGENT, *args, **kwargs):
super(URLField, self).__init__(max_length, min_length, *args,
**kwargs)
self.validators.append(validators.URLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent))
def to_python(self, value):
if value:
if '://' not in value:
# If no URL scheme given, assume http://
value = u'http://%s' % value
url_fields = list(urlparse.urlsplit(value))
if not url_fields[2]:
# the path portion may need to be added before query params
url_fields[2] = '/'
value = urlparse.urlunsplit(url_fields)
return super(URLField, self).to_python(value)
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if value in ('False', '0'):
value = False
else:
value = bool(value)
value = super(BooleanField, self).to_python(value)
if not value and self.required:
raise ValidationError(self.error_messages['required'])
return value
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
Explicitly checks for the string 'True' and 'False', which is what a
hidden field will submit for True and False, and for '1' and '0', which
        is what a RadioField will submit. Unlike BooleanField, we need to
        explicitly check for True because we are not using the bool() function.
"""
if value in (True, 'True', '1'):
return True
elif value in (False, 'False', '0'):
return False
else:
return None
def validate(self, value):
pass
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text=None, *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
self._choices = self.widget.choices = list(value)
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in validators.EMPTY_VALUES:
return u''
return smart_unicode(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == smart_unicode(k2):
return True
else:
if value == smart_unicode(k):
return True
return False
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def to_python(self, value):
"""
Validate that the value is in self.choices and can be coerced to the
right type.
"""
value = super(TypedChoiceField, self).to_python(value)
super(TypedChoiceField, self).validate(value)
if value == self.empty_value or value in validators.EMPTY_VALUES:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
return value
def validate(self, value):
pass
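# Example (illustrative):
#
#     field = TypedChoiceField(choices=[(1, '+1'), (-1, '-1')], coerce=int, empty_value=None)
#
# field.clean('1') returns the integer 1, and empty input cleans to None (the configured
# empty_value) instead of the default empty string.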
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _(u'Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'])
return [smart_unicode(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'])
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': val})
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
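# Example (illustrative): ComboField(fields=[CharField(max_length=20), EmailField()])
# cleans a value first with CharField.clean() and then with EmailField.clean(), so it
# only accepts e-mail addresses that are at most 20 characters long.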
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _(u'Enter a list of values.'),
}
def __init__(self, fields=(), *args, **kwargs):
super(MultiValueField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = ErrorList()
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in validators.EMPTY_VALUES]:
if self.required:
raise ValidationError(self.error_messages['required'])
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'])
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if self.required and field_value in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['required'])
try:
clean_data.append(field.clean(field_value))
except ValidationError, e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter.
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
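# SplitDateTimeField further down in this module is the in-tree example.  A minimal
# illustrative subclass (not part of Django; a matching MultiWidget would normally be
# supplied as the widget) might look like:
#
#     class USPhoneField(MultiValueField):
#         def __init__(self, *args, **kwargs):
#             fields = (CharField(max_length=3), CharField(max_length=7))
#             super(USPhoneField, self).__init__(fields, *args, **kwargs)
#         def compress(self, data_list):
#             return '-'.join(data_list) if data_list else None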
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, required=True,
widget=None, label=None, initial=None, help_text=None,
*args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in os.walk(self.path):
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in os.listdir(self.path):
full_file = os.path.join(self.path, f)
if os.path.isfile(full_file) and (self.match is None or self.match_re.search(f)):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _(u'Enter a valid date.'),
'invalid_time': _(u'Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['invalid_date'])
if data_list[1] in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['invalid_time'])
return datetime.datetime.combine(*data_list)
return None
class IPAddressField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid IPv4 address.'),
}
default_validators = [validators.validate_ipv4_address]
class SlugField(CharField):
default_error_messages = {
'invalid': _(u"Enter a valid 'slug' consisting of letters, numbers,"
u" underscores or hyphens."),
}
default_validators = [validators.validate_slug]
| tjsavage/rototutor_djangononrel | django/forms/fields.py | Python | bsd-3-clause | 31,759 | 0.026575 |
import warnings
from django.contrib.localflavor.cz.forms import (CZPostalCodeField,
CZRegionSelect, CZBirthNumberField, CZICNumberField)
from django.core.exceptions import ValidationError
from utils import LocalFlavorTestCase
class CZLocalFlavorTests(LocalFlavorTestCase):
def setUp(self):
self.save_warnings_state()
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
module='django.contrib.localflavor.cz.forms'
)
def tearDown(self):
self.restore_warnings_state()
def test_CZRegionSelect(self):
f = CZRegionSelect()
out = u'''<select name="regions">
<option value="PR">Prague</option>
<option value="CE">Central Bohemian Region</option>
<option value="SO">South Bohemian Region</option>
<option value="PI">Pilsen Region</option>
<option value="CA">Carlsbad Region</option>
<option value="US">Usti Region</option>
<option value="LB">Liberec Region</option>
<option value="HK">Hradec Region</option>
<option value="PA">Pardubice Region</option>
<option value="VY">Vysocina Region</option>
<option value="SM">South Moravian Region</option>
<option value="OL">Olomouc Region</option>
<option value="ZL">Zlin Region</option>
<option value="MS">Moravian-Silesian Region</option>
</select>'''
self.assertEqual(f.render('regions', 'TT'), out)
def test_CZPostalCodeField(self):
error_format = [u'Enter a postal code in the format XXXXX or XXX XX.']
valid = {
'91909': '91909',
'917 01': '91701',
'12345': '12345',
}
invalid = {
'84545x': error_format,
'123456': error_format,
'1234': error_format,
'123 4': error_format,
}
self.assertFieldOutput(CZPostalCodeField, valid, invalid)
def test_CZBirthNumberField(self):
error_format = [u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
error_invalid = [u'Enter a valid birth number.']
valid = {
'880523/1237': '880523/1237',
'8805231237': '8805231237',
'880523/000': '880523/000',
'880523000': '880523000',
'882101/0011': '882101/0011',
}
invalid = {
'123456/12': error_format,
'123456/12345': error_format,
'12345612': error_format,
'12345612345': error_format,
'880523/1239': error_invalid,
'8805231239': error_invalid,
'990101/0011': error_invalid,
}
self.assertFieldOutput(CZBirthNumberField, valid, invalid)
# These tests should go away in 1.4.
# http://code.djangoproject.com/ticket/14593
f = CZBirthNumberField()
        self.assertEqual(f.clean('880523/1237', 'm'), '880523/1237')
self.assertEqual(f.clean('885523/1231', 'f'), '885523/1231')
self.assertRaisesRegexp(ValidationError, unicode(error_invalid),
f.clean, '881523/0000', 'm')
self.assertRaisesRegexp(ValidationError, unicode(error_invalid),
f.clean, '885223/0000', 'm')
self.assertRaisesRegexp(ValidationError, unicode(error_invalid),
f.clean, '881523/0000', 'f')
self.assertRaisesRegexp(ValidationError, unicode(error_invalid),
f.clean, '885223/0000', 'f')
def test_CZICNumberField(self):
error_invalid = [u'Enter a valid IC number.']
        valid = {
'12345679': '12345679',
'12345601': '12345601',
'12345661': '12345661',
'12345610': '12345610',
}
invalid = {
'1234567': error_invalid,
'12345660': error_invalid,
'12345600': error_invalid,
}
self.assertFieldOutput(CZICNumberField, valid, invalid)
| mitsuhiko/django | tests/regressiontests/forms/localflavor/cz.py | Python | bsd-3-clause | 3,835 | 0.001825 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for information about the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['library_kinematics_models', 'kinematics_model', 'asset', 'unit']
attrName = 'meter'
attrVal = '1'
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeKinematicsBaseline(self, context):
        # No step should crash.
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeKinematicsSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeKinematicsExemplary(self, context):
# if superior fails, no point in further checking
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
# IS UNIT ALLOWED TO TRANSFORM? IF SO, WHAT TO TEST FOR?
self.__assistant.AttributeCheck(context, self.tagList, self.attrName, self.attrVal, True, False)
self.status_exemplary = self.__assistant.DeferJudgement(context)
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck)
| KhronosGroup/COLLADA-CTS | StandardDataSets/1_5/collada/library_kinematics_model/kinematics_model/asset/unit/unit.py | Python | mit | 3,954 | 0.006829 |
# -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_func op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import re
import numpy as np
from six.moves import queue
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
def np_func(x, y):
return np.sinh(x) + np.cosh(y)
def matmul(x, y):
return math_ops.matmul(x, y)
class PyFuncTest(test.TestCase):
"""Encapsulates tests for py_func and eager_py_func."""
# ----- Tests for py_func -----
def testRealDataTypes(self):
def sum_func(x, y):
return x + y
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.uint8, dtypes.int8, dtypes.uint16, dtypes.int16,
dtypes.int32, dtypes.int64]:
with self.cached_session():
x = constant_op.constant(1, dtype=dtype)
y = constant_op.constant(2, dtype=dtype)
z = self.evaluate(script_ops.py_func(sum_func, [x, y], dtype))
self.assertEqual(z, 3)
def testComplexDataTypes(self):
def sub_func(x, y):
return x - y
for dtype in [dtypes.complex64, dtypes.complex128]:
with self.cached_session():
x = constant_op.constant(1 + 1j, dtype=dtype)
y = constant_op.constant(2 - 2j, dtype=dtype)
z = self.evaluate(script_ops.py_func(sub_func, [x, y], dtype))
self.assertEqual(z, -1 + 3j)
def testBoolDataTypes(self):
def and_func(x, y):
return x and y
dtype = dtypes.bool
with self.cached_session():
x = constant_op.constant(True, dtype=dtype)
y = constant_op.constant(False, dtype=dtype)
z = self.evaluate(script_ops.py_func(and_func, [x, y], dtype))
self.assertEqual(z, False)
def testSingleType(self):
with self.cached_session():
x = constant_op.constant(1.0, dtypes.float32)
y = constant_op.constant(2.0, dtypes.float32)
z = self.evaluate(script_ops.py_func(np_func, [x, y], dtypes.float32))
self.assertEqual(z, np_func(1.0, 2.0).astype(np.float32))
def testScalar(self):
with self.cached_session():
x = constant_op.constant(1.0, dtypes.float32)
y = constant_op.constant(2.0, dtypes.float32)
z = self.evaluate(
script_ops.eager_py_func(np_func, [x, y], [dtypes.float32]))
self.assertEqual(z[0], np_func(1.0, 2.0).astype(np.float32))
def testArray(self):
with self.cached_session():
x = constant_op.constant([1.0, 2.0], dtypes.float64)
y = constant_op.constant([2.0, 3.0], dtypes.float64)
z = self.evaluate(script_ops.py_func(np_func, [x, y], [dtypes.float64]))
self.assertAllEqual(z[0],
np_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
def testComplexType(self):
with self.cached_session():
x = constant_op.constant(1 + 2j, dtypes.complex64)
y = constant_op.constant(3 + 4j, dtypes.complex64)
z = self.evaluate(script_ops.py_func(np_func, [x, y], dtypes.complex64))
self.assertAllClose(z, np_func(1 + 2j, 3 + 4j))
def testRFFT(self):
with self.cached_session():
x = constant_op.constant([1., 2., 3., 4.], dtypes.float32)
def rfft(x):
return np.fft.rfft(x).astype(np.complex64)
y = self.evaluate(script_ops.py_func(rfft, [x], dtypes.complex64))
self.assertAllClose(y, np.fft.rfft([1., 2., 3., 4.]))
def testPythonLiteral(self):
with self.cached_session():
def literal(x):
return 1.0 if float(x) == 0.0 else 0.0
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(script_ops.py_func(literal, [x], dtypes.float64))
self.assertAllClose(y, 1.0)
def testList(self):
with self.cached_session():
def list_func(x):
return [x, x + 1]
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(list_func, [x], [dtypes.float64] * 2))
self.assertAllClose(y, [0.0, 1.0])
def testTuple(self):
# returns a tuple
with self.cached_session():
def tuple_func(x):
return x, x + 1
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(tuple_func, [x], [dtypes.float64] * 2))
self.assertAllClose(y, [0.0, 1.0])
# returns a tuple, Tout and inp a tuple
with self.cached_session():
x = constant_op.constant(0.0, dtypes.float64)
y = self.evaluate(
script_ops.py_func(tuple_func, (x,),
(dtypes.float64, dtypes.float64)))
self.assertAllClose(y, [0.0, 1.0])
def testStrings(self):
def read_fixed_length_numpy_strings():
return np.array([b" there"])
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant([b"hello", b"hi"], dtypes.string)
y = self.evaluate(
script_ops.py_func(read_fixed_length_numpy_strings, [],
dtypes.string))
z = self.evaluate(
script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
self.assertAllEqual(z, [b"hello there", b"hi there"])
def testStringsAreConvertedToBytes(self):
def read_fixed_length_numpy_strings():
return np.array([" there"])
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant(["hello", "hi"], dtypes.string)
y = self.evaluate(
script_ops.py_func(read_fixed_length_numpy_strings, [],
dtypes.string))
z = self.evaluate(
script_ops.py_func(read_and_return_strings, [x, y], dtypes.string))
self.assertAllEqual(z, [b"hello there", b"hi there"])
def testObjectArraysAreConvertedToBytes(self):
def read_object_array():
return np.array([b" there", u" ya"], dtype=np.object)
def read_and_return_strings(x, y):
return x + y
with self.cached_session():
x = constant_op.constant(["hello", "hi"], dtypes.string)
y, = script_ops.py_func(read_object_array, [],
[dtypes.string])
z, = script_ops.py_func(read_and_return_strings, [x, y], [dtypes.string])
self.assertListEqual(list(z.eval()), [b"hello there", b"hi ya"])
def testStringPadding(self):
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [correct], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
def testStringPaddingAreConvertedToBytes(self):
inp = ["this", "is", "a", "test"]
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
def testLarge(self):
with self.cached_session() as sess:
x = array_ops.zeros([1000000], dtype=np.float32)
y = script_ops.py_func(lambda x: x + 1, [x], [dtypes.float32])
z = script_ops.py_func(lambda x: x * 2, [x], [dtypes.float32])
for _ in xrange(100):
sess.run([y[0].op, z[0].op])
def testNoInput(self):
with self.cached_session():
x = self.evaluate(script_ops.py_func(lambda: 42.0, [], dtypes.float64))
self.assertAllClose(x, 42.0)
def testAlias(self):
with self.cached_session():
np_array = np.array([1.0, 2.0], dtype=np.float32)
tf_array = script_ops.py_func(lambda: np_array, [], [dtypes.float32])
value = tf_array + constant_op.constant([2.0, 3.0], dtype=dtypes.float32)
value.op.run()
self.assertAllEqual(np_array, [1.0, 2.0])
def testReturnUnicodeString(self):
with self.cached_session():
correct = u"你好 世界"
def unicode_string():
return correct
z, = script_ops.py_func(unicode_string, [], [dtypes.string])
self.assertEqual(z.eval(), correct.encode("utf8"))
def testBadNumpyReturnType(self):
with self.cached_session():
def bad():
# Structured numpy arrays aren't supported.
return np.array([], dtype=[("foo", np.float32)])
y, = script_ops.py_func(bad, [], [dtypes.float32])
with self.assertRaisesRegexp(errors.UnimplementedError,
"Unsupported numpy type"):
y.eval()
def testBadReturnType(self):
with self.cached_session():
def bad():
# Non-string python objects aren't supported.
return {"foo": dtypes.float32}
z, = script_ops.py_func(bad, [], [dtypes.int64])
with self.assertRaisesRegexp(errors.UnimplementedError,
"Unsupported object type"):
z.eval()
def testReturnInput(self):
with self.cached_session():
def ident(x):
return x[0]
p = array_ops.placeholder(dtypes.float32)
# Create a numpy array aliasing a tensor and a tensor aliasing this array
z, = script_ops.py_func(ident, [p], [dtypes.float32])
z += 0.0 # Makes sure we release the tensor aliasing the numpy array x[0]
# above instead of using its memory as the return value of
# session.run
self.assertEqual(0.0, z.eval(feed_dict={p: [0.0]}))
def testStateful(self):
# Not using self.cached_session(), which disables optimization.
with session_lib.Session() as sess:
producer = iter(range(3))
x, = script_ops.py_func(lambda: next(producer), [], [dtypes.int64])
self.assertEqual(sess.run(x), 0)
self.assertEqual(sess.run(x), 1)
self.assertEqual(sess.run(x), 2)
def testStateless(self):
# Not using self.cached_session(), which disables optimization.
with session_lib.Session() as sess:
producer = iter(range(3))
x, = script_ops.py_func(
lambda: next(producer), [], [dtypes.int64], stateful=False)
self.assertEqual(sess.run(x), 0)
self.assertEqual(sess.run(x), 0)
self.assertEqual(sess.run(x), 0)
def testGradientFunction(self):
    # Input to tf.py_func is necessary, otherwise get_gradient_function()
    # returns None by default.
a = constant_op.constant(0)
x, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64])
y, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64], stateful=False)
self.assertEqual(None, ops.get_gradient_function(x.op))
self.assertEqual(None, ops.get_gradient_function(y.op))
def testCOrder(self):
with self.cached_session():
val = [[1, 2], [3, 4]]
x, = script_ops.py_func(lambda: np.array(val, order="F"), [],
[dtypes.int64])
self.assertAllEqual(val, x.eval())
def testParallel(self):
    # Tests that tf.py_func ops can run in parallel if they release the GIL.
with self.cached_session() as session:
q = queue.Queue(1)
def blocking_put():
q.put(42)
q.join() # Wait for task_done().
return 42
def blocking_get():
v = q.get(block=True) # Wait for put().
q.task_done()
return v
x, = script_ops.py_func(blocking_put, [], [dtypes.int64])
y, = script_ops.py_func(blocking_get, [], [dtypes.int64])
# This will result in a deadlock if the py_func's don't run in parallel.
session.run([x, y])
def testNoReturnValueStateful(self):
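    # A stateful py_func with no outputs should still run for its side effect and evaluate to None.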
class State(object):
def __init__(self):
self._value = np.array([1], np.int64)
def _increment(self, diff):
self._value += diff
def increment(self, diff):
return script_ops.py_func(self._increment, [diff], [], stateful=True)
@property
def value(self):
return self._value
with self.cached_session():
s = State()
op = s.increment(constant_op.constant(2, dtypes.int64))
ret = self.evaluate(op)
self.assertIsNone(ret)
self.assertAllEqual([3], s.value)
def testNoReturnValueStateless(self):
def do_nothing(unused_x):
pass
f = script_ops.py_func(
do_nothing, [constant_op.constant(3, dtypes.int64)], [], stateful=False)
with self.cached_session() as sess:
self.assertEqual(sess.run(f), [])
def _testExceptionHandling(self, py_exp, tf_exp, eager=False):
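    # Raises py_exp inside a py_func and checks that it surfaces as tf_exp with the Python stack trace.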
def inner_exception():
raise py_exp("blah") # pylint: disable=not-callable
def raise_exception():
inner_exception()
expected_regexp = r": blah.*" # Error at the top
expected_regexp += r"in raise_exception.*" # Stacktrace outer
expected_regexp += r"in inner_exception.*" # Stacktrace inner
expected_regexp += r": blah" # Stacktrace of raise
def expected_error_check(exception):
return re.search(expected_regexp, str(exception), re.DOTALL)
if eager:
if context.executing_eagerly():
with self.assertRaisesWithPredicateMatch(tf_exp, expected_error_check):
f = script_ops.eager_py_func(raise_exception, [], [])
return
else:
f = script_ops.eager_py_func(raise_exception, [], [])
else:
f = script_ops.py_func(raise_exception, [], [])
with self.assertRaisesWithPredicateMatch(tf_exp, expected_error_check):
self.evaluate(f)
def testExceptionHandling(self):
with self.cached_session():
self._testExceptionHandling(ValueError, errors.InvalidArgumentError)
self._testExceptionHandling(TypeError, errors.InvalidArgumentError)
self._testExceptionHandling(StopIteration, errors.OutOfRangeError)
self._testExceptionHandling(MemoryError, errors.ResourceExhaustedError)
self._testExceptionHandling(NotImplementedError,
errors.UnimplementedError)
class WeirdError(Exception):
pass
self._testExceptionHandling(WeirdError, errors.UnknownError)
# ----- Tests shared by py_func and eager_py_func -----
def testCleanup(self):
# Delete everything created by previous tests to avoid side effects.
ops.reset_default_graph()
gc.collect()
initial_size = script_ops._py_funcs.size()
# Encapsulate the graph generation, so locals can be deleted.
def make_graphs():
for _ in xrange(1000):
g = ops.Graph()
with g.as_default():
c = constant_op.constant([1.], dtypes.float32)
_ = script_ops.py_func(lambda x: x + 1, [c], [dtypes.float32])
_ = script_ops.eager_py_func(lambda x: x + 1, [c], [dtypes.float32])
# These ops have a reference to 'c' which has a reference to the graph.
        # Checks that the py_funcs are deleted even though the graph is
        # still referenced from them.
# (see #18292)
_ = script_ops.py_func(lambda x: x + c.shape[0], [c], [dtypes.float32])
_ = script_ops.eager_py_func(lambda x: x + c.shape[0], [c], [dtypes.float32])
# Call garbage collector to enforce deletion.
make_graphs()
ops.reset_default_graph()
gc.collect()
self.assertEqual(initial_size, script_ops._py_funcs.size())
# ----- Tests for eager_py_func -----
@test_util.run_in_graph_and_eager_modes
def testEagerSingleOutputInt32(self):
a = array_ops.ones((3, 3), dtype=dtypes.int32)
x = array_ops.ones((3, 1), dtype=dtypes.int32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.int32)
ret = self.evaluate(output)
self.assertAllEqual(ret, [[3], [3], [3]])
@test_util.run_in_graph_and_eager_modes
def testEagerSingleOutputFloat32(self):
with test_util.device(use_gpu=True):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
ret = self.evaluate(output)
self.assertAllClose(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
def testEagerArrayOutput(self):
with test_util.device(use_gpu=True):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(
lambda a, x: [matmul(a, x)], inp=[a, x], Tout=[dtypes.float32])
ret = self.evaluate(output)
self.assertAllEqual(ret, [[[3.0], [3.0], [3.0]]])
@test_util.run_in_graph_and_eager_modes
def testEagerReturnNone(self):
with test_util.device(use_gpu=True):
def no_return_value():
return
output = script_ops.eager_py_func(no_return_value, inp=[], Tout=[])
ret = self.evaluate(output)
if context.executing_eagerly():
        self.assertEqual(len(ret), 0)
else:
self.assertIsNone(ret)
@test_util.run_in_graph_and_eager_modes
def testEagerPyFuncInDefun(self):
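    # eager_py_func should also work when called from inside a defun-compiled function.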
with test_util.device(use_gpu=True):
def wrapper():
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
return script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
wrapped = function.defun(wrapper)
ret = self.evaluate(wrapped())
self.assertAllEqual(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
def testEagerExceptionHandling(self):
with test_util.device(use_gpu=True):
self._testExceptionHandling(
ValueError, errors.InvalidArgumentError, eager=True)
self._testExceptionHandling(
TypeError, errors.InvalidArgumentError, eager=True)
self._testExceptionHandling(
StopIteration, errors.OutOfRangeError, eager=True)
self._testExceptionHandling(
MemoryError, errors.ResourceExhaustedError, eager=True)
self._testExceptionHandling(
NotImplementedError, errors.UnimplementedError, eager=True)
class WeirdError(Exception):
pass
self._testExceptionHandling(WeirdError, errors.UnknownError, eager=True)
@test_util.run_in_graph_and_eager_modes
def testEagerReturningVariableRaisesError(self):
def return_variable():
return resource_variable_ops.ResourceVariable(0.0)
with self.assertRaisesRegexp(errors.UnknownError,
"Attempting to return a variable"):
output = script_ops.eager_py_func(
return_variable, inp=[], Tout=dtypes.float32)
self.evaluate(output)
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTape(self):
def f(x):
return x**2
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(x)
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.float32)
dy_dx = tape.gradient(y, x)
self.assertEqual(self.evaluate(dy_dx), 6.0)
def testEagerGradientGraph(self):
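    # Gradients of eager_py_func should also be available via graph-mode gradients_impl.gradients.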
def f(x):
return x**2
x = constant_op.constant(3.0)
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.float32)
dy_dx = gradients_impl.gradients(y, x)[0]
self.assertEqual(self.evaluate(dy_dx), 6.0)
def testEagerGradientGraphTwoOutputs(self):
def f(x, y):
return x * y, x / y
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
fa, fb = script_ops.eager_py_func(f, inp=[x, y],
Tout=[dtypes.float32, dtypes.float32])
dy_dx = gradients_impl.gradients(fa + fb, x)[0]
self.assertEqual(self.evaluate(dy_dx), 2.5)
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTapeMultipleArgs(self):
def f(x, y):
return x**2 + y**2
x = constant_op.constant(3.0)
y = constant_op.constant(4.0)
with backprop.GradientTape() as tape:
tape.watch(x)
tape.watch(y)
z = script_ops.eager_py_func(f, inp=[x, y], Tout=dtypes.float32)
dz_dx, dz_dy = tape.gradient(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 6.0)
self.assertEqual(self.evaluate(dz_dy), 8.0)
def testEagerGradientGraphMultipleArgs(self):
def f(x, y):
return x**2 + y**2
x = constant_op.constant(3.0)
y = constant_op.constant(4.0)
z = script_ops.eager_py_func(f, inp=[x, y], Tout=dtypes.float32)
dz_dx, dz_dy = gradients_impl.gradients(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 6.0)
self.assertEqual(self.evaluate(dz_dy), 8.0)
def testEagerGradientGraphLogHuber(self):
def log_huber(x, m):
if math_ops.abs(x) <= m:
return x**2
else:
return m**2 * (1 - 2 * math_ops.log(m) + math_ops.log(x**2))
x = array_ops.placeholder(dtypes.float32)
m = array_ops.placeholder(dtypes.float32)
y = script_ops.eager_py_func(
func=log_huber, inp=[x, m], Tout=dtypes.float32)
dy_dx = gradients_impl.gradients(y, x)[0]
with self.cached_session() as sess:
# Takes the first branch of log_huber.
y, dy_dx = sess.run([y, dy_dx], feed_dict={x: 1.0, m: 2.0})
self.assertEqual(y, 1.0)
self.assertEqual(dy_dx, 2.0)
  def testEagerRespectsDevicePlacementOfOp(self):
def f(x):
return math_ops.square(x)
def g(x):
return math_ops.add(x, x)
with ops.device("/CPU:0"):
# Explicitly ask for the py_funcs to execute on CPU, even if
# a GPU is available.
x = array_ops.placeholder(dtypes.float32)
y = script_ops.eager_py_func(func=f, inp=[x], Tout=dtypes.float32)
z = script_ops.eager_py_func(func=g, inp=[y], Tout=dtypes.float32)
with self.session(use_gpu=True) as sess:
output = sess.run(z, feed_dict={x: 3.0})
self.assertEqual(output, 18.0)
if __name__ == "__main__":
test.main()
| alshedivat/tensorflow | tensorflow/python/kernel_tests/py_func_test.py | Python | apache-2.0 | 22,732 | 0.012234 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-01 05:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('booker', '0025_event_wedding_options'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='expected_guests',
),
]
| luckiestlindy/osproject | booker/migrations/0026_remove_event_expected_guests.py | Python | gpl-3.0 | 403 | 0 |
from django.db.models import FieldDoesNotExist, Avg, Max, Min, Count, Sum
from django.utils.translation import ugettext as _
from nadmin.sites import site
from nadmin.views import BaseAdminPlugin, ListAdminView
from nadmin.views.list import ResultRow, ResultItem
from nadmin.util import display_for_field
AGGREGATE_METHODS = {
'min': Min, 'max': Max, 'avg': Avg, 'sum': Sum, 'count': Count
}
AGGREGATE_TITLE = {
'min': _('Min'), 'max': _('Max'), 'avg': _('Avg'), 'sum': _('Sum'), 'count': _('Count')
}
class AggregationPlugin(BaseAdminPlugin):
aggregate_fields = {}
def init_request(self, *args, **kwargs):
return bool(self.aggregate_fields)
def _get_field_aggregate(self, field_name, obj, row):
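        # Build the aggregate cell for a single column; columns without a configured method are left blank.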
item = ResultItem(field_name, row)
item.classes = ['aggregate', ]
if field_name not in self.aggregate_fields:
item.text = ""
else:
try:
f = self.opts.get_field(field_name)
agg_method = self.aggregate_fields[field_name]
key = '%s__%s' % (field_name, agg_method)
if key not in obj:
item.text = ""
else:
item.text = display_for_field(obj[key], f)
item.wraps.append('%%s<span class="aggregate_title label label-info">%s</span>' % AGGREGATE_TITLE[agg_method])
item.classes.append(agg_method)
except FieldDoesNotExist:
item.text = ""
return item
def _get_aggregate_row(self):
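        # Aggregate the filtered queryset in a single query and render the results as an extra list row.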
queryset = self.admin_view.list_queryset._clone()
obj = queryset.aggregate(*[AGGREGATE_METHODS[method](field_name) for field_name, method in
self.aggregate_fields.items() if method in AGGREGATE_METHODS])
row = ResultRow()
row['is_display_first'] = False
row.cells = [self._get_field_aggregate(field_name, obj, row) for field_name in self.admin_view.list_display]
row.css_class = 'info aggregate'
return row
def results(self, rows):
if rows:
rows.append(self._get_aggregate_row())
return rows
# Media
def get_media(self, media):
media.add_css({'screen': [self.static(
'nadmin/css/nadmin.plugin.aggregation.css'), ]})
return media
site.register_plugin(AggregationPlugin, ListAdminView)
| A425/django-nadmin | nadmin/plugins/aggregation.py | Python | mit | 2,408 | 0.002076 |
# lint-amnesty, pylint: disable=missing-module-docstring
from xmodule.contentstore.content import StaticContent
from .django import contentstore
def empty_asset_trashcan(course_locs):
'''
    This method will hard delete all assets belonging to the given course locations from the trashcan
'''
store = contentstore('trashcan')
for course_loc in course_locs:
# first delete all of the thumbnails
thumbs = store.get_all_content_thumbnails_for_course(course_loc)
for thumb in thumbs:
print(f"Deleting {thumb}...")
store.delete(thumb['asset_key'])
# then delete all of the assets
assets, __ = store.get_all_content_for_course(course_loc)
for asset in assets:
print(f"Deleting {asset}...")
store.delete(asset['asset_key'])
def restore_asset_from_trashcan(location):
'''
    This method will restore an asset which was soft deleted, putting it back in its original course
'''
trash = contentstore('trashcan')
store = contentstore()
loc = StaticContent.get_location_from_path(location)
content = trash.find(loc)
# ok, save the content into the courseware
store.save(content)
# see if there is a thumbnail as well, if so move that as well
if content.thumbnail_location is not None:
try:
thumbnail_content = trash.find(content.thumbnail_location)
store.save(thumbnail_content)
except Exception: # lint-amnesty, pylint: disable=broad-except
pass # OK if this is left dangling
| edx/edx-platform | common/lib/xmodule/xmodule/contentstore/utils.py | Python | agpl-3.0 | 1,571 | 0.001273 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Create CSV from AWR text reports
#
# Tested Versions
# 10.2.0.1 RAC
# 10.2.0.4 RAC
#
# Note
#   extracts values from the column positions marked by "===" sequences.
#
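# Usage (example):
#   python awrtext2csv102.py awrrpt*.txt
#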
######################################################################################################
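# section name -> (output csv filename, csv header, '=' mask marking the fixed-width columns to extract)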
t = {}
t['Load Profile'] = ('load_profile.csv', 'Name,Per Second,Per Transaction', '=========================== =============== ===============')
t['Buffer_Hit'] = ('buffer_hit.csv', 'Buffer Hit %', ' =======')
t['Events_TopN'] = ('events_topn.csv', 'Event,Waits,Time(s),Avg Wait(ms),% Total Call Time,Wait Class', '============================== ============ =========== ====== ====== ==========')
t['Inst_Stats'] = ('inst_stats.csv', 'Statistic,Total,per Second,per Trans', '================================ ================== ============== =============')
t['PGA_Aggr'] = ('pga_aggr_stats.csv', 'B or E,PGA Aggr Target(M),Auto PGA Target(M),PGA Mem Alloc(M),W/A PGA Used(M),%PGA W/A Mem,%Auto W/A Mem,%Man W/A Mem,Global Mem Bound(K)', '= ========== ========== ========== ========== ====== ====== ====== ==========')
t['GlobalLP'] = ('load_profile_g.csv', 'Name,Per Second,Per Transaction', '============================== =============== ===============')
t['GlobalEP'] = ('efficiency_g.csv', 'Name,Value', '============================== =======')
t['SQL_Elapsed'] = ('sql_elapsed.csv', 'Elapsed Time (s),CPU Time (s),Executions,Elap per Exec (s),%Total,SQL Id,SQL Module', '========== ========== ============ ========== ======= =============')
t['SQL_CPU'] = ('sql_cpu.csv', 'CPU Time (s),Elapsed Time (s),Executions,CPU per Exec (s),%Total,SQL Id,SQL Module', '========== ========== ============ =========== ======= =============')
t['SQL_Gets'] = ('sql_gets.csv', 'Buffer Gets,Executions,Gets per Exec,%Total,CPU Time (s),Elapsed Time (s),SQL Id,SQL Module', '============== ============ ============ ====== ======== ========= =============')
t['SQL_Reads'] = ('sql_reads.csv', 'Physical Reads,Executions,Reads per Exec,%Total,CPU Time (s),Elapsed Time (s),SQL Id,SQL Module', '============== =========== ============= ====== ======== ========= =============')
t['SQL_Cluster'] = ('sql_cluster.csv', 'Cluster Wait Time (s),CWT % of Elapsed Time,Elapsed Time (s),CPU Time (s),Executions,SQL Id,SQL Module', '============ =========== =========== =========== ============== =============')
#####################################################################################################
import codecs
import glob
import os
import re
import sys
from datetime import datetime
##### extract
##### ===== ====== ======
##### aaaaa 123 12.3 4,567 -> ['aaaaa', '12.3', '4567']
def line2list(line, mask):
ret = []
re_eq = re.compile(r'=+')
for x in re_eq.finditer(mask):
(b, e) = x.span()
text = line[b:e].strip().replace(',', '')
text = re.sub(r'\s+', ' ', text)
ret.append(text)
return ret
##### parse files
def parse(filelist):
##### common header
h_base = 'DB_NAME,DB_ID,INSTANCE_NAME,INST_NUM,B_Y,B_MO,B_D,B_H,B_MI,B_S,E_Y,E_MO,E_D,E_H,E_MI,E_S,'
##### DB name, Snaptime, SQL Module extract helper
m_dbname = '============ =========== ============ ======== ==========='
m_snaptm = ' ==================='
m_module = ' ========================================================================'
##### output
output = {}
for section in t:
(csvname, header, mask) = t[section]
output[csvname] = [h_base + header]
##### iterate over files
for filename in filelist:
print('Processing {0}...'.format(filename))
        db_ver  = ''     # DB Version
section = '' # section Name
l_base = [] # report-specific info (list)
d_base = '' # report-specific info (string)
b_data = False # begin data
l_data = [] # section-specific data (list)
##### iterate over lines
for line in open(filename, 'r'):
if section in t:
(csvname, header, mask) = t[section]
##### DB Name
# ============ =========== ============ ======== ===========
# DB Name DB Id Instance Inst Num Release RAC Host
# ------------ ----------- ------------ -------- ----------- --- ------------
# DB0 9901230123 DB01 1 10.2.0.1.0 YES host1
#
if line.startswith('DB Name'):
section = 'DB Name'
elif section == 'DB Name':
if not line.startswith('---'):
l_line = line2list(line, m_dbname)
l_base = l_line[:4]
db_ver = l_line[4]
print(' DB Version: ' + db_ver)
section = ''
##### Snap Time
# ===================
# Snap Id Snap Time Sessions Curs/Sess
# --------- ------------------- -------- ---------
# Begin Snap: 3726 16-2月 -13 05:00:50 640 .1
# End Snap: 3727 16-2月 -13 06:00:16 672 .2
# Elapsed: 59.43 (mins)
# DB Time: 25.21 (mins)
#
elif line.startswith('Begin Snap:') or line.startswith(' End Snap:'):
dt = datetime.strptime(line2list(line, m_snaptm)[0], '%d-%b-%y %H:%M:%S')
l_base.extend(str(x) for x in (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second))
d_base = ','.join(l_base) + ','
##### Load Profile
#
# =========================== =============== ===============
# Load Profile
# ~~~~~~~~~~~~ Per Second Per Transaction
# --------------- ---------------
# Redo size: 68,225.00 12,794.53
# Logical reads: 19,994.77 3,749.71
# Block changes: 222.80 41.78
# Physical reads: 11.35 2.13
#
# <EOS>
#
#
elif line.startswith('Load Profile'):
section = 'Load Profile'
elif section == 'Load Profile':
##### blank line => section end
if len(line.strip()) == 0:
section = ''
b_data = False
l_data = []
##### begin data
elif line.startswith(' ---------------'):
b_data = True
##### extract data
elif b_data:
l_data = line2list(line, mask)
output[csvname].append(d_base + ','.join(l_data))
##### Instance Efficiency Percentages
#
# =======
# Instance Efficiency Percentages (Target 100%)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Buffer Nowait %: 100.00 Redo NoWait %: 100.00
# Buffer Hit %: 48.43 In-memory Sort %: 100.00
# Library Hit %: 97.00 Soft Parse %: 94.66
# Execute to Parse %: 78.61 Latch Hit %: 99.99
# Parse CPU to Parse Elapsd %: 26.97 % Non-Parse CPU: 97.16
#
elif line.startswith(' Buffer Hit %'):
section = 'Buffer_Hit'
elif section in ['Buffer_Hit']:
l_data = line2list(line, mask)
output[csvname].append(d_base + ','.join(l_data))
section = ''
##### Top N Events
#
# ============================== ============ =========== ====== ====== ==========
# Top 5 Timed Events Avg %Total
# ~~~~~~~~~~~~~~~~~~ wait Call
# Event Waits Time (s) (ms) Time Wait Class
# ------------------------------ ------------ ----------- ------ ------ ----------
# CPU time 1,405 92.9
# db file sequential read 40,533 181 4 12.0 User I/O
# log file sync 36,722 56 2 3.7 Commit
# log file parallel write 36,838 50 1 3.3 System I/O
# SQL*Net more data from client 294,888 21 0 1.4 Network
# -------------------------------------------------------------
# <^L>
# <EOS>
#
elif line.startswith('Top 5 Timed Events'):
section = 'Events_TopN'
elif section == 'Events_TopN':
##### " " => section end
if line.startswith(' '):
section = ''
b_data = False
l_data = []
##### begin data
elif line.startswith('---'):
b_data = True
##### extract data
elif b_data:
l_data = line2list(line, mask)
output[csvname].append(d_base + ','.join(l_data))
##### SQL ordered by x
# ========== ========== ============ ========== ======= =============
# ========================================================================
# <^L>SQL ordered by Elapsed Time DB/Inst: DB/db1 Snaps: 914-938
# -> Resources reported for PL/SQL code includes the resources used by all SQL
# statements called by the code.
# -> :
#
# Elapsed CPU Elap per % Total
# Time (s) Time (s) Executions Exec (s) DB Time SQL Id
# ---------- ---------- ------------ ---------- ------- -------------
# 1,001 989 9,238 0.1 66.2 0gu4sdfsdfsz4
# Module: xxxxxxx
# SQL...
#
# -------------------------------------------------------------
# <EOS>
#
# ========== ========== ============ =========== ======= =============
# <^L>SQL ordered by CPU Time DB/Inst: DB/db1 Snaps: 914-938
# :
# CPU Elapsed CPU per % Total
# Time (s) Time (s) Executions Exec (s) DB Time SQL Id
# ---------- ---------- ------------ ----------- ------- -------------
# 989 1,001 9,238 0.11 66.2 0gu4sdfsdfsz4
# -------------------------------------------------------------
# <EOS>
#
# ============== ============ ============ ====== ======== ========= =============
# <^L>SQL ordered by Gets DB/Inst: DB/db1 Snaps: 936-937
# :
# Gets CPU Elapsed
# Buffer Gets Executions per Exec %Total Time (s) Time (s) SQL Id
# -------------- ------------ ------------ ------ -------- --------- -------------
# 64,090,911 9,238 6,937.7 89.9 989.48 1000.92 0gu4sdfsdfsz4
# -------------------------------------------------------------
# <EOS>
#
# ============== =========== ============= ====== ======== ========= =============
# <^L>SQL ordered by Reads DB/Inst: DB/db1 Snaps: 914-938
# :
# Reads CPU Elapsed
# Physical Reads Executions per Exec %Total Time (s) Time (s) SQL Id
# -------------- ----------- ------------- ------ -------- --------- -------------
# 9,501 18,521 0.5 23.5 27.68 51.66 0gu4sdfsdfsz4
# -------------------------------------------------------------
# <EOS>
#
# ============ =========== =========== =========== ============== =============
# <^L>SQL ordered by Cluster Wait Time DB/Inst: DB1/db1 Snaps: 726-727
#
# Cluster CWT % of Elapsed CPU
# Wait Time (s) Elapsd Tim Time(s) Time(s) Executions SQL Id
# ------------- ---------- ----------- ----------- -------------- -------------
# -------------------------------------------------------------
# <EOS>
#
elif line.startswith(chr(12) + 'SQL ordered by Elapsed Time'):
section = 'SQL_Elapsed'
elif line.startswith(chr(12) + 'SQL ordered by CPU Time'):
section = 'SQL_CPU'
elif line.startswith(chr(12) + 'SQL ordered by Gets'):
section = 'SQL_Gets'
elif line.startswith(chr(12) + 'SQL ordered by Reads'):
section = 'SQL_Reads'
elif line.startswith(chr(12) + 'SQL ordered by Cluster Wait Time'):
section = 'SQL_Cluster'
elif section.startswith('SQL_'):
##### something like " -----" => section end
if re.match(r'^ +\-+', line):
##### if missing a "Module:", extract now
if len(l_data) > 0:
output[csvname].append(d_base + ','.join(l_data + ['']))
section = ''
l_data = []
b_data = False
##### "Module:" => SQL end
elif line.startswith('Module:'):
l_data.append(line2list(line, m_module)[0])
output[csvname].append(d_base + ','.join(l_data))
##### 2 numbers with spaces in between => data
elif re.match(r'^ +[\d\.\,]+ +[\d\.\,]+', line):
##### if missing a "Module:", extract now
if len(l_data) > 0:
output[csvname].append(d_base + ','.join(l_data + ['']))
##### extract
l_data = line2list(line, mask)
##### Others (no random section headers within data)
# ================================ ================== ============== =============
# <^LInstance Activity Stats DB/Inst: DB0/DB01 Snaps: 3726-3727
#
# Statistic Total per Second per Trans
# -------------------------------- ------------------ -------------- -------------
# CPU used by this session 130,788 36.7 6.9
# CPU used when call started 128,989 36.2 6.8
# CR blocks created 8,951 2.5 0.5
# Cached Commit SCN referenced 18,654 5.2 1.0
# Commit SCN cached 2 0.0 0.0 :
# -------------------------------------------------------------
# <EOS>
#
# = ========== ========== ========== ========== ====== ====== ====== ==========
# PGA Aggr Target Stats DB/Inst: DB/db1 Snaps: 936-937
            # -> B: Begin Snap E: End Snap (rows identified with B or E contain data
# which is absolute i.e. not diffed over the interval)
# -> :
#
# %PGA %Auto %Man
# PGA Aggr Auto PGA PGA Mem W/A PGA W/A W/A W/A Global Mem
# Target(M) Target(M) Alloc(M) Used(M) Mem Mem Mem Bound(K)
# - ---------- ---------- ---------- ---------- ------ ------ ------ ----------
# B 4,096 3,536 360.8 0.0 .0 .0 .0 419,430
# E 4,096 3,530 358.4 0.0 .0 .0 .0 419,430
# --------------------------------------------------------------
# <EOS>
#
elif line.startswith(chr(12) + 'Instance Activity Stats '):
section = 'Inst_Stats'
b_data = False
l_data = []
elif line.startswith('PGA Aggr Target Stats'):
section = 'PGA_Aggr'
elif section in ['Inst_Stats', 'PGA_Aggr']:
##### something like " -----" => section end
if re.match(r'^ +\-+', line):
section = ''
b_data = False
l_data = []
##### begin data
elif line.startswith('---') or line.startswith('- -'):
b_data = True
##### extract data
elif b_data:
l_data = line2list(line, mask)
output[csvname].append(d_base + ','.join(l_data))
##### Global Cache
# ============================== =============== ===============
# Global Cache Load Profile
# ~~~~~~~~~~~~~~~~~~~~~~~~~ Per Second Per Transaction
# --------------- ---------------
# Global Cache blocks received: 0.34 0.06
# Global Cache blocks served: 2.37 0.44
# GCS/GES messages received: 14.01 2.63
# GCS/GES messages sent: 20.06 3.76
# DBWR Fusion writes: 0.06 0.01
# Estd Interconnect traffic (KB) 28.34
#
# <EOS>
#
# ============================== =======
# Global Cache Efficiency Percentages (Target local+remote 100%)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Buffer access - local cache %: 99.94
# Buffer access - remote cache %: 0.00
# Buffer access - disk %: 0.06
#
# <EOS>
#
elif line.startswith('Global Cache Load Profile'):
section = 'GlobalLP'
elif line.startswith('Global Cache Efficiency Percentages'):
section = 'GlobalEP'
elif section in ['GlobalLP', 'GlobalEP']:
##### blank line => section end
if len(line.strip()) == 0:
section = ''
b_data = False
l_data = []
##### begin data
elif line.strip()[:3] in ['~~~', '---']:
b_data = True
##### extract data
elif b_data:
l_data = line2list(line, mask)
output[csvname].append(d_base + ','.join(l_data))
##### return output
return output
if __name__ == '__main__':
##### sys.argv[] filelist does not work in Windows, use glob
filelist = sys.argv[1:]
if filelist[0].find('*') >= 0:
filelist = glob.glob(filelist[0])
##### parse & write to files
output = parse(filelist)
for csvname in output:
print(' Created: ' + csvname)
f = codecs.open(csvname, 'w', encoding='utf-8')
for line in output[csvname]:
try:
f.write(line + '\n')
            except UnicodeDecodeError:
print("Skipped:" + line)
f.close()
| yasushiyy/awr2csv | awrtext2csv102.py | Python | mit | 21,131 | 0.007668 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Page, Site
from wagtail.wagtailredirects import models
@override_settings(ALLOWED_HOSTS=['testserver', 'localhost', 'test.example.com', 'other.example.com'])
class TestRedirects(TestCase):
fixtures = ['test.json']
def test_path_normalisation(self):
# Shortcut to normalise function (to keep things tidy)
normalise_path = models.Redirect.normalise_path
# Create a path
path = normalise_path('/Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2')
        # Test against equivalent paths
self.assertEqual(path, normalise_path( # The exact same URL
'/Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Scheme, hostname and port ignored
'http://mywebsite.com:8000/Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Leading slash can be omitted
'Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Trailing slashes are ignored
'Hello/world.html/;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Fragments are ignored
'/Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2#cool'
))
self.assertEqual(path, normalise_path( # Order of query string parameters is ignored
'/Hello/world.html;fizz=three;buzz=five?Baz=quux2&foo=Bar'
))
self.assertEqual(path, normalise_path( # Order of parameters is ignored
'/Hello/world.html;buzz=five;fizz=three?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Leading whitespace
' /Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual(path, normalise_path( # Trailing whitespace
'/Hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2 '
))
# Test against different paths
self.assertNotEqual(path, normalise_path( # 'hello' is lowercase
'/hello/world.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # No '.html'
'/Hello/world;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # Query string parameter value has wrong case
'/Hello/world.html;fizz=three;buzz=five?foo=bar&Baz=Quux2'
))
self.assertNotEqual(path, normalise_path( # Query string parameter name has wrong case
'/Hello/world.html;fizz=three;buzz=five?foo=Bar&baz=quux2'
))
self.assertNotEqual(path, normalise_path( # Parameter value has wrong case
'/Hello/world.html;fizz=three;buzz=Five?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # Parameter name has wrong case
'/Hello/world.html;Fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # Missing params
'/Hello/world.html?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # 'WORLD' is uppercase
'/Hello/WORLD.html;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertNotEqual(path, normalise_path( # '.htm' is not the same as '.html'
'/Hello/world.htm;fizz=three;buzz=five?foo=Bar&Baz=quux2'
))
self.assertEqual('/', normalise_path('/')) # '/' should stay '/'
# Normalise some rubbish to make sure it doesn't crash
normalise_path('This is not a URL')
normalise_path('//////hello/world')
normalise_path('!#@%$*')
normalise_path('C:\\Program Files (x86)\\Some random program\\file.txt')
def test_unicode_path_normalisation(self):
normalise_path = models.Redirect.normalise_path
self.assertEqual(
'/here/tésting-ünicode', # stays the same
normalise_path('/here/tésting-ünicode')
)
self.assertNotEqual( # Doesn't remove unicode characters
'/here/testing-unicode',
normalise_path('/here/tésting-ünicode')
)
def test_basic_redirect(self):
# Create a redirect
redirect = models.Redirect(old_path='/redirectme', redirect_link='/redirectto')
redirect.save()
# Navigate to it
response = self.client.get('/redirectme/')
# Check that we were redirected
self.assertRedirects(response, '/redirectto', status_code=301, fetch_redirect_response=False)
def test_temporary_redirect(self):
# Create a redirect
redirect = models.Redirect(old_path='/redirectme', redirect_link='/redirectto', is_permanent=False)
redirect.save()
# Navigate to it
response = self.client.get('/redirectme/')
# Check that we were redirected temporarily
self.assertRedirects(response, '/redirectto', status_code=302, fetch_redirect_response=False)
def test_redirect_stripping_query_string(self):
# Create a redirect which includes a query string
redirect_with_query_string = models.Redirect(
old_path='/redirectme?foo=Bar', redirect_link='/with-query-string-only'
)
redirect_with_query_string.save()
# ... and another redirect without the query string
redirect_without_query_string = models.Redirect(old_path='/redirectme', redirect_link='/without-query-string')
redirect_without_query_string.save()
# Navigate to the redirect with the query string
r_matching_qs = self.client.get('/redirectme/?foo=Bar')
self.assertRedirects(r_matching_qs, '/with-query-string-only', status_code=301, fetch_redirect_response=False)
# Navigate to the redirect with a different query string
# This should strip out the query string and match redirect_without_query_string
r_no_qs = self.client.get('/redirectme/?utm_source=irrelevant')
self.assertRedirects(r_no_qs, '/without-query-string', status_code=301, fetch_redirect_response=False)
def test_redirect_to_page(self):
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
models.Redirect.objects.create(old_path='/xmas', redirect_page=christmas_page)
response = self.client.get('/xmas/', HTTP_HOST='test.example.com')
self.assertRedirects(response, 'http://test.example.com/events/christmas/', status_code=301, fetch_redirect_response=False)
def test_redirect_from_any_site(self):
contact_page = Page.objects.get(url_path='/home/contact-us/')
Site.objects.create(hostname='other.example.com', port=80, root_page=contact_page)
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
models.Redirect.objects.create(old_path='/xmas', redirect_page=christmas_page)
# no site was specified on the redirect, so it should redirect regardless of hostname
response = self.client.get('/xmas/', HTTP_HOST='localhost')
self.assertRedirects(response, 'http://localhost/events/christmas/', status_code=301, fetch_redirect_response=False)
response = self.client.get('/xmas/', HTTP_HOST='other.example.com')
self.assertRedirects(response, 'http://localhost/events/christmas/', status_code=301, fetch_redirect_response=False)
def test_redirect_from_specific_site(self):
contact_page = Page.objects.get(url_path='/home/contact-us/')
other_site = Site.objects.create(hostname='other.example.com', port=80, root_page=contact_page)
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
models.Redirect.objects.create(old_path='/xmas', redirect_page=christmas_page, site=other_site)
# redirect should only respond when site is other_site
response = self.client.get('/xmas/', HTTP_HOST='other.example.com')
self.assertRedirects(response, 'http://localhost/events/christmas/', status_code=301, fetch_redirect_response=False)
response = self.client.get('/xmas/', HTTP_HOST='localhost')
self.assertEqual(response.status_code, 404)
def test_duplicate_redirects_when_match_is_for_generic(self):
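        # With both a generic and a site-specific redirect defined, a request on an unrelated host matches the generic one.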
contact_page = Page.objects.get(url_path='/home/contact-us/')
site = Site.objects.create(hostname='other.example.com', port=80, root_page=contact_page)
# two redirects, one for any site, one for specific
models.Redirect.objects.create(old_path='/xmas', redirect_link='/generic')
models.Redirect.objects.create(site=site, old_path='/xmas', redirect_link='/site-specific')
response = self.client.get('/xmas/')
# the redirect which matched was /generic
self.assertRedirects(response, '/generic', status_code=301, fetch_redirect_response=False)
def test_duplicate_redirects_with_query_string_when_match_is_for_generic(self):
contact_page = Page.objects.get(url_path='/home/contact-us/')
site = Site.objects.create(hostname='other.example.com', port=80, root_page=contact_page)
# two redirects, one for any site, one for specific, both with query string
models.Redirect.objects.create(old_path='/xmas?foo=Bar', redirect_link='/generic-with-query-string')
models.Redirect.objects.create(site=site, old_path='/xmas?foo=Bar', redirect_link='/site-specific-with-query-string')
# and two redirects, one for any site, one for specific, without query strings
models.Redirect.objects.create(old_path='/xmas', redirect_link='/generic')
models.Redirect.objects.create(site=site, old_path='/xmas', redirect_link='/site-specific')
response = self.client.get('/xmas/?foo=Bar')
# the redirect which matched was /generic-with-query-string
self.assertRedirects(response, '/generic-with-query-string', status_code=301, fetch_redirect_response=False)
# now use a non-matching query string
response = self.client.get('/xmas/?foo=Baz')
# the redirect which matched was /generic
self.assertRedirects(response, '/generic', status_code=301, fetch_redirect_response=False)
def test_duplicate_redirects_when_match_is_for_specific(self):
contact_page = Page.objects.get(url_path='/home/contact-us/')
site = Site.objects.create(hostname='other.example.com', port=80, root_page=contact_page)
# two redirects, one for any site, one for specific
models.Redirect.objects.create(old_path='/xmas', redirect_link='/generic')
models.Redirect.objects.create(site=site, old_path='/xmas', redirect_link='/site-specific')
response = self.client.get('/xmas/', HTTP_HOST='other.example.com')
# the redirect which matched was /site-specific
self.assertRedirects(response, 'http://other.example.com/site-specific', status_code=301, fetch_redirect_response=False)
def test_duplicate_redirects_with_query_string_when_match_is_for_specific_with_qs(self):
contact_page = Page.objects.get(url_path='/home/contact-us/')
site = Site.objects.create(hostname='other.example.com', port=80, root_page=contact_page)
# two redirects, one for any site, one for specific, both with query string
models.Redirect.objects.create(old_path='/xmas?foo=Bar', redirect_link='/generic-with-query-string')
models.Redirect.objects.create(site=site, old_path='/xmas?foo=Bar', redirect_link='/site-specific-with-query-string')
# and two redirects, one for any site, one for specific, without query strings
models.Redirect.objects.create(old_path='/xmas', redirect_link='/generic')
models.Redirect.objects.create(site=site, old_path='/xmas', redirect_link='/site-specific')
response = self.client.get('/xmas/?foo=Bar', HTTP_HOST='other.example.com')
# the redirect which matched was /site-specific-with-query-string
self.assertRedirects(response, 'http://other.example.com/site-specific-with-query-string', status_code=301, fetch_redirect_response=False)
# now use a non-matching query string
response = self.client.get('/xmas/?foo=Baz', HTTP_HOST='other.example.com')
# the redirect which matched was /site-specific
self.assertRedirects(response, 'http://other.example.com/site-specific', status_code=301, fetch_redirect_response=False)
def test_duplicate_page_redirects_when_match_is_for_specific(self):
contact_page = Page.objects.get(url_path='/home/contact-us/')
site = Site.objects.create(hostname='other.example.com', port=80, root_page=contact_page)
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
# two redirects, one for any site, one for specific
models.Redirect.objects.create(old_path='/xmas', redirect_page=contact_page)
models.Redirect.objects.create(site=site, old_path='/xmas', redirect_page=christmas_page)
# request for specific site gets the christmas_page redirect, not accessible from other.example.com
response = self.client.get('/xmas/', HTTP_HOST='other.example.com')
self.assertRedirects(response, 'http://localhost/events/christmas/', status_code=301, fetch_redirect_response=False)
def test_redirect_with_unicode_in_url(self):
redirect = models.Redirect(old_path='/tésting-ünicode', redirect_link='/redirectto')
redirect.save()
# Navigate to it
response = self.client.get('/tésting-ünicode/')
self.assertRedirects(response, '/redirectto', status_code=301, fetch_redirect_response=False)
def test_redirect_with_encoded_url(self):
redirect = models.Redirect(old_path='/t%C3%A9sting-%C3%BCnicode', redirect_link='/redirectto')
redirect.save()
# Navigate to it
response = self.client.get('/t%C3%A9sting-%C3%BCnicode/')
self.assertRedirects(response, '/redirectto', status_code=301, fetch_redirect_response=False)
class TestRedirectsIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailredirects:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailredirects/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
def test_listing_order(self):
for i in range(0, 10):
models.Redirect.objects.create(old_path="/redirect%d" % i, redirect_link="http://torchbox.com/")
models.Redirect.objects.create(old_path="/aaargh", redirect_link="http://torchbox.com/")
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['redirects'][0].old_path, "/aaargh")
class TestRedirectsAddView(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailredirects:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailredirects:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailredirects/add.html')
def test_add(self):
response = self.post({
'old_path': '/test',
'site': '',
'is_permanent': 'on',
'redirect_link': 'http://www.test.com/',
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailredirects:index'))
# Check that the redirect was created
redirects = models.Redirect.objects.filter(old_path='/test')
self.assertEqual(redirects.count(), 1)
self.assertEqual(redirects.first().redirect_link, 'http://www.test.com/')
self.assertEqual(redirects.first().site, None)
def test_add_with_site(self):
localhost = Site.objects.get(hostname='localhost')
response = self.post({
'old_path': '/test',
'site': localhost.id,
'is_permanent': 'on',
'redirect_link': 'http://www.test.com/',
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailredirects:index'))
# Check that the redirect was created
redirects = models.Redirect.objects.filter(old_path='/test')
self.assertEqual(redirects.count(), 1)
self.assertEqual(redirects.first().redirect_link, 'http://www.test.com/')
self.assertEqual(redirects.first().site, localhost)
def test_add_validation_error(self):
response = self.post({
'old_path': '',
'site': '',
'is_permanent': 'on',
'redirect_link': 'http://www.test.com/',
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_cannot_add_duplicate_with_no_site(self):
models.Redirect.objects.create(old_path='/test', site=None, redirect_link='http://elsewhere.com/')
response = self.post({
'old_path': '/test',
'site': '',
'is_permanent': 'on',
'redirect_link': 'http://www.test.com/',
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_cannot_add_duplicate_on_same_site(self):
localhost = Site.objects.get(hostname='localhost')
models.Redirect.objects.create(old_path='/test', site=localhost, redirect_link='http://elsewhere.com/')
response = self.post({
'old_path': '/test',
'site': localhost.pk,
'is_permanent': 'on',
'redirect_link': 'http://www.test.com/',
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_can_reuse_path_on_other_site(self):
localhost = Site.objects.get(hostname='localhost')
contact_page = Page.objects.get(url_path='/home/contact-us/')
other_site = Site.objects.create(hostname='other.example.com', port=80, root_page=contact_page)
models.Redirect.objects.create(old_path='/test', site=localhost, redirect_link='http://elsewhere.com/')
response = self.post({
'old_path': '/test',
'site': other_site.pk,
'is_permanent': 'on',
'redirect_link': 'http://www.test.com/',
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailredirects:index'))
# Check that the redirect was created
redirects = models.Redirect.objects.filter(redirect_link='http://www.test.com/')
self.assertEqual(redirects.count(), 1)
class TestRedirectsEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a redirect to edit
self.redirect = models.Redirect(old_path='/test', redirect_link='http://www.test.com/')
self.redirect.save()
# Login
self.login()
def get(self, params={}, redirect_id=None):
return self.client.get(reverse('wagtailredirects:edit', args=(redirect_id or self.redirect.id, )), params)
def post(self, post_data={}, redirect_id=None):
return self.client.post(reverse('wagtailredirects:edit', args=(redirect_id or self.redirect.id, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailredirects/edit.html')
    def test_nonexistent_redirect(self):
self.assertEqual(self.get(redirect_id=100000).status_code, 404)
def test_edit(self):
response = self.post({
'old_path': '/test',
'is_permanent': 'on',
'site': '',
'redirect_link': 'http://www.test.com/ive-been-edited',
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailredirects:index'))
# Check that the redirect was edited
redirects = models.Redirect.objects.filter(old_path='/test')
self.assertEqual(redirects.count(), 1)
self.assertEqual(redirects.first().redirect_link, 'http://www.test.com/ive-been-edited')
self.assertEqual(redirects.first().site, None)
def test_edit_with_site(self):
localhost = Site.objects.get(hostname='localhost')
response = self.post({
'old_path': '/test',
'is_permanent': 'on',
'site': localhost.id,
'redirect_link': 'http://www.test.com/ive-been-edited',
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailredirects:index'))
# Check that the redirect was edited
redirects = models.Redirect.objects.filter(old_path='/test')
self.assertEqual(redirects.count(), 1)
self.assertEqual(redirects.first().redirect_link, 'http://www.test.com/ive-been-edited')
self.assertEqual(redirects.first().site, localhost)
def test_edit_validation_error(self):
response = self.post({
'old_path': '',
'is_permanent': 'on',
'site': '',
'redirect_link': 'http://www.test.com/ive-been-edited',
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_edit_duplicate(self):
models.Redirect.objects.create(old_path='/othertest', site=None, redirect_link='http://elsewhere.com/')
response = self.post({
'old_path': '/othertest',
'is_permanent': 'on',
'site': '',
'redirect_link': 'http://www.test.com/ive-been-edited',
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
class TestRedirectsDeleteView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a redirect to edit
self.redirect = models.Redirect(old_path='/test', redirect_link='http://www.test.com/')
self.redirect.save()
# Login
self.login()
def get(self, params={}, redirect_id=None):
return self.client.get(reverse('wagtailredirects:delete', args=(redirect_id or self.redirect.id, )), params)
def post(self, redirect_id=None):
return self.client.post(reverse(
'wagtailredirects:delete', args=(redirect_id or self.redirect.id, )
))
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailredirects/confirm_delete.html')
    def test_nonexistent_redirect(self):
self.assertEqual(self.get(redirect_id=100000).status_code, 404)
def test_delete(self):
response = self.post()
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailredirects:index'))
# Check that the redirect was deleted
redirects = models.Redirect.objects.filter(old_path='/test')
self.assertEqual(redirects.count(), 0)
| iansprice/wagtail | wagtail/wagtailredirects/tests.py | Python | bsd-3-clause | 23,927 | 0.003679 |
# rkivas file backupper
# Copyright (C) 2016 Daniel Getz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import logging.config
import sys
from configparser import RawConfigParser
from io import StringIO
DEFAULT_CONFIG_FILE = '/etc/rkivas.conf'
DEFAULTS = """
[sources]
[backup]
filename-format = {source}/{timestamp:%Y-%m}/{source}-{timestamp:%Y%m%d_%H%M%S}-{hash}
hash-algorithm = md5
hash-length = 8
[backup-no-timestamp]
filename-format = {source}/unknown/{source}-{hash}
hash-algorithm = md5
hash-length = 16
[extension-map]
jpeg = jpg
tiff = tif
[extension-handlers]
jpg = exif
tif = exif
"""
class ConfigParser(RawConfigParser):
def optionxform(self, optionstr):
return optionstr
def load_config_files(opts):
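    # Built-in defaults are read first; the user's config file then overrides them.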
cfg = ConfigParser()
cfg.read_file(StringIO(DEFAULTS))
cfg.read(opts.config_file)
return cfg
def add_default_opts(parser):
parser.add_argument(
'--config-file', default=DEFAULT_CONFIG_FILE,
help='load a particular configuration file',
metavar='FILE')
parser.add_argument(
'-L', '--logging',
choices=['DEBUG', 'WARN', 'WARNING', 'INFO', 'ERROR',
'CRITICAL', 'FATAL'],
help='log to stderr with the given LEVEL', metavar='LEVEL')
parser.add_argument(
'--debug-config', action='store_true',
help='instead of running, output the combined configuration')
parser.add_argument(
'--dry-run', action='store_true',
help="don't affect filesystem, just log what would have been done")
def config_logging(opts, cfg):
if opts.logging:
level = getattr(logging, opts.logging)
logging.basicConfig(
level=level,
format='%(asctime)s %(levelname)s %(name)s - %(message)s',
)
elif (cfg.has_section('formatters') or
cfg.has_section('handlers') or
cfg.has_section('loggers') or
cfg.has_section('logger_root')):
tmp = StringIO()
cfg.write(tmp)
tmp.seek(0)
logging.config.fileConfig(tmp, disable_existing_loggers=False)
else:
logging.basicConfig(
level=logging.WARNING,
format='%(levelname)s %(name)s - %(message)s',
)
if hasattr(logging, 'captureWarnings'):
logging.captureWarnings(True)
def load_opts_into_cfg(opts, cfg, which):
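    # Copy command-line option values into the config, skipping options that were not given.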
for section, options in which.items():
for cfg_key, opt_key in options.items():
value = getattr(opts, opt_key, None)
if value is not None:
cfg.set(section, cfg_key, str(value))
def load_common_config(opts):
cfg = load_config_files(opts)
if not opts.debug_config:
config_logging(opts, cfg)
load_opts_into_cfg(opts, cfg, {
'backup': {
'dry-run': 'dry_run',
}
})
return cfg
def load_config(opts, opts_spec=None):
cfg = load_common_config(opts)
if opts_spec:
load_opts_into_cfg(opts, cfg, opts_spec)
if opts.debug_config:
cfg.write(sys.stdout)
sys.exit(0)
return cfg
| talflon/rkivas-python | rkivas/config.py | Python | gpl-3.0 | 3,679 | 0.000272 |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutron_lib import constants
from oslo_utils import uuidutils
from neutron.agent.linux import tc_lib
from neutron.common import utils
from neutron.services.qos import qos_consts
from neutron.tests.common.agents import l2_extensions
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
from neutron.tests.fullstack import utils as fullstack_utils
from neutron.tests.unit import testlib_api
from neutron.conf.plugins.ml2.drivers import linuxbridge as \
linuxbridge_agent_config
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
linuxbridge_neutron_agent as linuxbridge_agent
from neutron.plugins.ml2.drivers.openvswitch.mech_driver import \
mech_openvswitch as mech_ovs
load_tests = testlib_api.module_load_tests
BANDWIDTH_BURST = 100
BANDWIDTH_LIMIT = 500
DSCP_MARK = 16
class BaseQoSRuleTestCase(object):
of_interface = None
ovsdb_interface = None
def setUp(self):
host_desc = [environment.HostDescription(
l3_agent=False,
of_interface=self.of_interface,
ovsdb_interface=self.ovsdb_interface,
l2_agent_type=self.l2_agent_type)]
env_desc = environment.EnvironmentDescription(qos=True)
env = environment.Environment(env_desc, host_desc)
super(BaseQoSRuleTestCase, self).setUp(env)
self.tenant_id = uuidutils.generate_uuid()
self.network = self.safe_client.create_network(self.tenant_id,
'network-test')
self.subnet = self.safe_client.create_subnet(
self.tenant_id, self.network['id'],
cidr='10.0.0.0/24',
gateway_ip='10.0.0.1',
name='subnet-test',
enable_dhcp=False)
def _create_qos_policy(self):
return self.safe_client.create_qos_policy(
self.tenant_id, 'fs_policy', 'Fullstack testing policy',
shared='False')
def _prepare_vm_with_qos_policy(self, rule_add_functions):
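        # Create a policy, attach it to a new port, apply the requested rules, and boot a fake VM on that port.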
qos_policy = self._create_qos_policy()
qos_policy_id = qos_policy['id']
port = self.safe_client.create_port(
self.tenant_id, self.network['id'],
self.environment.hosts[0].hostname,
qos_policy_id)
for rule_add in rule_add_functions:
rule_add(qos_policy)
vm = self.useFixture(
machine.FakeFullstackMachine(
self.environment.hosts[0],
self.network['id'],
self.tenant_id,
self.safe_client,
neutron_port=port))
return vm, qos_policy
class _TestBwLimitQoS(BaseQoSRuleTestCase):
def _wait_for_bw_rule_removed(self, vm):
# No values are provided when port doesn't have qos policy
self._wait_for_bw_rule_applied(vm, None, None)
def _add_bw_limit_rule(self, limit, burst, qos_policy):
qos_policy_id = qos_policy['id']
rule = self.safe_client.create_bandwidth_limit_rule(
self.tenant_id, qos_policy_id, limit, burst)
# Make it consistent with GET reply
rule['type'] = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT
rule['qos_policy_id'] = qos_policy_id
qos_policy['rules'].append(rule)
def test_bw_limit_qos_policy_rule_lifecycle(self):
new_limit = BANDWIDTH_LIMIT + 100
# Create port with qos policy attached
vm, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(self._add_bw_limit_rule,
BANDWIDTH_LIMIT, BANDWIDTH_BURST)])
bw_rule = qos_policy['rules'][0]
self._wait_for_bw_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
qos_policy_id = qos_policy['id']
self.client.delete_bandwidth_limit_rule(bw_rule['id'], qos_policy_id)
self._wait_for_bw_rule_removed(vm)
# Create new rule with no given burst value, in such case ovs and lb
# agent should apply burst value as
# bandwidth_limit * qos_consts.DEFAULT_BURST_RATE
new_expected_burst = int(
new_limit * qos_consts.DEFAULT_BURST_RATE
)
new_rule = self.safe_client.create_bandwidth_limit_rule(
self.tenant_id, qos_policy_id, new_limit)
self._wait_for_bw_rule_applied(vm, new_limit, new_expected_burst)
        # Update the new rule back to the original limit and burst values
self.client.update_bandwidth_limit_rule(
new_rule['id'], qos_policy_id,
body={'bandwidth_limit_rule': {'max_kbps': BANDWIDTH_LIMIT,
'max_burst_kbps': BANDWIDTH_BURST}})
self._wait_for_bw_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
# Remove qos policy from port
self.client.update_port(
vm.neutron_port['id'],
body={'port': {'qos_policy_id': None}})
self._wait_for_bw_rule_removed(vm)
class TestBwLimitQoSOvs(_TestBwLimitQoS, base.BaseFullStackTestCase):
l2_agent_type = constants.AGENT_TYPE_OVS
scenarios = fullstack_utils.get_ovs_interface_scenarios()
def _wait_for_bw_rule_applied(self, vm, limit, burst):
utils.wait_until_true(
lambda: vm.bridge.get_egress_bw_limit_for_port(
vm.port.name) == (limit, burst))
class TestBwLimitQoSLinuxbridge(_TestBwLimitQoS, base.BaseFullStackTestCase):
l2_agent_type = constants.AGENT_TYPE_LINUXBRIDGE
def _wait_for_bw_rule_applied(self, vm, limit, burst):
port_name = linuxbridge_agent.LinuxBridgeManager.get_tap_device_name(
vm.neutron_port['id'])
tc = tc_lib.TcCommand(
port_name,
linuxbridge_agent_config.DEFAULT_KERNEL_HZ_VALUE,
namespace=vm.host.host_namespace
)
utils.wait_until_true(
lambda: tc.get_filters_bw_limits() == (limit, burst))
class TestDscpMarkingQoSOvs(BaseQoSRuleTestCase, base.BaseFullStackTestCase):
scenarios = fullstack_utils.get_ovs_interface_scenarios()
l2_agent_type = constants.AGENT_TYPE_OVS
def setUp(self):
host_desc = [
environment.HostDescription(
l3_agent=False,
of_interface=self.of_interface,
ovsdb_interface=self.ovsdb_interface,
l2_agent_type=self.l2_agent_type
) for _ in range(2)]
env_desc = environment.EnvironmentDescription(
qos=True)
env = environment.Environment(env_desc, host_desc)
super(BaseQoSRuleTestCase, self).setUp(env)
self.tenant_id = uuidutils.generate_uuid()
self.network = self.safe_client.create_network(self.tenant_id,
'network-test')
self.subnet = self.safe_client.create_subnet(
self.tenant_id, self.network['id'],
cidr='10.0.0.0/24',
gateway_ip='10.0.0.1',
name='subnet-test',
enable_dhcp=False)
def _wait_for_dscp_marking_rule_applied(self, vm, dscp_mark):
l2_extensions.wait_until_dscp_marking_rule_applied(
vm.bridge, vm.port.name, dscp_mark)
def _wait_for_dscp_marking_rule_removed(self, vm):
self._wait_for_dscp_marking_rule_applied(vm, None)
def _add_dscp_rule(self, dscp_mark, qos_policy):
qos_policy_id = qos_policy['id']
rule = self.safe_client.create_dscp_marking_rule(
self.tenant_id, qos_policy_id, dscp_mark)
# Make it consistent with GET reply
rule['type'] = qos_consts.RULE_TYPE_DSCP_MARKING
rule['qos_policy_id'] = qos_policy_id
qos_policy['rules'].append(rule)
def test_dscp_qos_policy_rule_lifecycle(self):
new_dscp_mark = DSCP_MARK + 8
# Create port with qos policy attached
vm, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(self._add_dscp_rule, DSCP_MARK)])
dscp_rule = qos_policy['rules'][0]
self._wait_for_dscp_marking_rule_applied(vm, DSCP_MARK)
qos_policy_id = qos_policy['id']
self.client.delete_dscp_marking_rule(dscp_rule['id'], qos_policy_id)
self._wait_for_dscp_marking_rule_removed(vm)
# Create new rule
new_rule = self.safe_client.create_dscp_marking_rule(
self.tenant_id, qos_policy_id, new_dscp_mark)
self._wait_for_dscp_marking_rule_applied(vm, new_dscp_mark)
        # Update the new qos policy rule
self.client.update_dscp_marking_rule(
new_rule['id'], qos_policy_id,
body={'dscp_marking_rule': {'dscp_mark': DSCP_MARK}})
self._wait_for_dscp_marking_rule_applied(vm, DSCP_MARK)
# Remove qos policy from port
self.client.update_port(
vm.neutron_port['id'],
body={'port': {'qos_policy_id': None}})
self._wait_for_dscp_marking_rule_removed(vm)
def test_dscp_marking_packets(self):
        # Create a port (vm) which will be used to receive and test packets
receiver_port = self.safe_client.create_port(
self.tenant_id, self.network['id'],
self.environment.hosts[1].hostname)
receiver = self.useFixture(
machine.FakeFullstackMachine(
self.environment.hosts[1],
self.network['id'],
self.tenant_id,
self.safe_client,
neutron_port=receiver_port))
# Create port with qos policy attached
sender, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(self._add_dscp_rule, DSCP_MARK)])
sender.block_until_boot()
receiver.block_until_boot()
self._wait_for_dscp_marking_rule_applied(sender, DSCP_MARK)
l2_extensions.wait_for_dscp_marked_packet(
sender, receiver, DSCP_MARK)
class TestQoSWithL2Population(base.BaseFullStackTestCase):
def setUp(self):
# We limit this test to using the openvswitch mech driver, because DSCP
# is presently not implemented for Linux Bridge. The 'rule_types' API
# call only returns rule types that are supported by all configured
# mech drivers. So in a fullstack scenario, where both the OVS and the
# Linux Bridge mech drivers are configured, the DSCP rule type will be
# unavailable since it is not implemented in Linux Bridge.
mech_driver = 'openvswitch'
host_desc = [] # No need to register agents for this test case
env_desc = environment.EnvironmentDescription(qos=True, l2_pop=True,
mech_drivers=mech_driver)
env = environment.Environment(env_desc, host_desc)
super(TestQoSWithL2Population, self).setUp(env)
def test_supported_qos_rule_types(self):
res = self.client.list_qos_rule_types()
rule_types = {t['type'] for t in res['rule_types']}
expected_rules = (
set(mech_ovs.OpenvswitchMechanismDriver.supported_qos_rule_types))
self.assertEqual(expected_rules, rule_types)
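# Illustrative sketch only: the 'rule_types' API result is effectively the
# intersection of the rule types each configured mech driver supports. With
# hypothetical per-driver sets such as
#     ovs_rules = {'bandwidth_limit', 'dscp_marking'}
#     lb_rules = {'bandwidth_limit'}
# ovs_rules & lb_rules would drop 'dscp_marking', which is why this test case
# configures only the openvswitch mech driver.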
| cloudbase/neutron | neutron/tests/fullstack/test_qos.py | Python | apache-2.0 | 11,734 | 0 |
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc5649
try:
import unittest2 as unittest
except ImportError:
import unittest
class AESKeyWrapTestCase(unittest.TestCase):
kw_alg_id_pem_text = "MAsGCWCGSAFlAwQBLQ=="
def setUp(self):
self.asn1Spec = rfc5649.AlgorithmIdentifier()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.kw_alg_id_pem_text)
asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
assert not rest
assert asn1Object.prettyPrint()
assert asn1Object[0] == rfc5649.id_aes256_wrap
assert der_encoder.encode(asn1Object) == substrate
class AESKeyWrapWithPadTestCase(unittest.TestCase):
kw_pad_alg_id_pem_text = "MAsGCWCGSAFlAwQBMA=="
def setUp(self):
self.asn1Spec = rfc5649.AlgorithmIdentifier()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.kw_pad_alg_id_pem_text)
asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
assert not rest
assert asn1Object.prettyPrint()
assert asn1Object[0] == rfc5649.id_aes256_wrap_pad
assert der_encoder.encode(asn1Object) == substrate
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
import sys
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
| kawamon/hue | desktop/core/ext-py/pyasn1-modules-0.2.6/tests/test_rfc5649.py | Python | apache-2.0 | 1,730 | 0.001156 |
import os
from notifications_delivery.app import create_app
from credstash import getAllSecrets
# On AWS, fetch secrets from credstash and export them into the environment
secrets = getAllSecrets(region="eu-west-1")
for key, val in secrets.items():
os.environ[key] = val
application = create_app()
if __name__ == "__main__":
application.run()
| alphagov/notifications-delivery | wsgi.py | Python | mit | 322 | 0.003106 |
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:12644")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
| Tutakamimearitomomei/Kongcoin | contrib/wallettools/walletunlock.py | Python | mit | 159 | 0 |
#
# django-atompub by James Tauber <http://jtauber.com/>
# http://code.google.com/p/django-atompub/
# An implementation of the Atom format and protocol for Django
#
# For instructions on how to use this module to generate Atom feeds,
# see http://code.google.com/p/django-atompub/wiki/UserGuide
#
#
# Copyright (c) 2007, James Tauber
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import urlparse  # used by get_tag_uri()
from xml.sax.saxutils import XMLGenerator
from datetime import datetime
GENERATOR_TEXT = 'django-atompub'
GENERATOR_ATTR = {
'uri': 'http://code.google.com/p/django-atompub/',
'version': 'r33'
}
## based on django.utils.xmlutils.SimplerXMLGenerator
class SimplerXMLGenerator(XMLGenerator):
def addQuickElement(self, name, contents=None, attrs=None):
"Convenience method for adding an element with no children"
if attrs is None: attrs = {}
self.startElement(name, attrs)
if contents is not None:
self.characters(contents)
self.endElement(name)
## based on django.utils.feedgenerator.rfc3339_date
def rfc3339_date(date):
return date.strftime('%Y-%m-%dT%H:%M:%SZ')
## based on django.utils.feedgenerator.get_tag_uri
def get_tag_uri(url, date):
"Creates a TagURI. See http://diveintomark.org/archives/2004/05/28/howto-atom-id"
parts = urlparse.urlparse(url)
date_part = ""
if date is not None:
date_part = ",%s:" % date.strftime("%Y-%m-%d")
return "tag:%s%s%s/%s" % (
parts.hostname,
date_part,
parts.path,
parts.fragment,
)
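# For example (illustrative values only):
#     get_tag_uri('http://example.org/archive/entry#frag', datetime(2007, 5, 28))
# returns 'tag:example.org,2007-05-28:/archive/entry/frag'.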
## based on django.contrib.syndication.feeds.Feed
class Feed(object):
VALIDATE = True
def __init__(self, slug, feed_url):
# @@@ slug and feed_url are not used yet
pass
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
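    # Illustrative only: in a Feed subclass each feed_*/item_* hook may be either
    # a plain attribute or a callable, and __get_dynamic_attr resolves both, e.g.
    #
    #     class MyFeed(Feed):
    #         feed_id = 'tag:example.org,2007:/feed'   # plain attribute
    #         def feed_title(self):                    # callable, no extra arg
    #             return 'Example Feed'
    #         def item_title(self, item):              # callable taking the item
    #             return item['title']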
def get_feed(self, extra_params=None):
if extra_params:
try:
obj = self.get_object(extra_params.split('/'))
except (AttributeError, LookupError):
raise LookupError('Feed does not exist')
else:
obj = None
feed = AtomFeed(
atom_id = self.__get_dynamic_attr('feed_id', obj),
title = self.__get_dynamic_attr('feed_title', obj),
updated = self.__get_dynamic_attr('feed_updated', obj),
icon = self.__get_dynamic_attr('feed_icon', obj),
logo = self.__get_dynamic_attr('feed_logo', obj),
rights = self.__get_dynamic_attr('feed_rights', obj),
subtitle = self.__get_dynamic_attr('feed_subtitle', obj),
authors = self.__get_dynamic_attr('feed_authors', obj, default=[]),
categories = self.__get_dynamic_attr('feed_categories', obj, default=[]),
contributors = self.__get_dynamic_attr('feed_contributors', obj, default=[]),
links = self.__get_dynamic_attr('feed_links', obj, default=[]),
extra_attrs = self.__get_dynamic_attr('feed_extra_attrs', obj),
hide_generator = self.__get_dynamic_attr('hide_generator', obj, default=False)
)
items = self.__get_dynamic_attr('items', obj)
if items is None:
raise LookupError('Feed has no items field')
for item in items:
feed.add_item(
atom_id = self.__get_dynamic_attr('item_id', item),
title = self.__get_dynamic_attr('item_title', item),
updated = self.__get_dynamic_attr('item_updated', item),
content = self.__get_dynamic_attr('item_content', item),
published = self.__get_dynamic_attr('item_published', item),
rights = self.__get_dynamic_attr('item_rights', item),
source = self.__get_dynamic_attr('item_source', item),
summary = self.__get_dynamic_attr('item_summary', item),
authors = self.__get_dynamic_attr('item_authors', item, default=[]),
categories = self.__get_dynamic_attr('item_categories', item, default=[]),
contributors = self.__get_dynamic_attr('item_contributors', item, default=[]),
links = self.__get_dynamic_attr('item_links', item, default=[]),
extra_attrs = self.__get_dynamic_attr('item_extra_attrs', None, default={}),
)
if self.VALIDATE:
feed.validate()
return feed
class ValidationError(Exception):
pass
## based on django.utils.feedgenerator.SyndicationFeed and django.utils.feedgenerator.Atom1Feed
class AtomFeed(object):
mime_type = 'application/atom+xml'
ns = u'http://www.w3.org/2005/Atom'
def __init__(self, atom_id, title, updated=None, icon=None, logo=None, rights=None, subtitle=None,
authors=[], categories=[], contributors=[], links=[], extra_attrs={}, hide_generator=False):
if atom_id is None:
raise LookupError('Feed has no feed_id field')
if title is None:
raise LookupError('Feed has no feed_title field')
# if updated == None, we'll calculate it
self.feed = {
'id': atom_id,
'title': title,
'updated': updated,
'icon': icon,
'logo': logo,
'rights': rights,
'subtitle': subtitle,
'authors': authors,
'categories': categories,
'contributors': contributors,
'links': links,
'extra_attrs': extra_attrs,
'hide_generator': hide_generator,
}
self.items = []
def add_item(self, atom_id, title, updated, content=None, published=None, rights=None, source=None, summary=None,
authors=[], categories=[], contributors=[], links=[], extra_attrs={}):
if atom_id is None:
raise LookupError('Feed has no item_id method')
if title is None:
raise LookupError('Feed has no item_title method')
if updated is None:
raise LookupError('Feed has no item_updated method')
self.items.append({
'id': atom_id,
'title': title,
'updated': updated,
'content': content,
'published': published,
'rights': rights,
'source': source,
'summary': summary,
'authors': authors,
'categories': categories,
'contributors': contributors,
'links': links,
'extra_attrs': extra_attrs,
})
def latest_updated(self):
"""
Returns the latest item's updated or the current time if there are no items.
"""
updates = [item['updated'] for item in self.items]
if len(updates) > 0:
updates.sort()
return updates[-1]
else:
return datetime.now() # @@@ really we should allow a feed to define its "start" for this case
def write_text_construct(self, handler, element_name, data):
if isinstance(data, tuple):
text_type, text = data
if text_type == 'xhtml':
handler.startElement(element_name, {'type': text_type})
handler._write(text) # write unescaped -- it had better be well-formed XML
handler.endElement(element_name)
else:
handler.addQuickElement(element_name, text, {'type': text_type})
else:
handler.addQuickElement(element_name, data)
def write_person_construct(self, handler, element_name, person):
handler.startElement(element_name, {})
handler.addQuickElement(u'name', person['name'])
if 'uri' in person:
handler.addQuickElement(u'uri', person['uri'])
if 'email' in person:
handler.addQuickElement(u'email', person['email'])
handler.endElement(element_name)
def write_link_construct(self, handler, link):
if 'length' in link:
link['length'] = str(link['length'])
handler.addQuickElement(u'link', None, link)
def write_category_construct(self, handler, category):
handler.addQuickElement(u'category', None, category)
def write_source(self, handler, data):
handler.startElement(u'source', {})
if data.get('id'):
handler.addQuickElement(u'id', data['id'])
if data.get('title'):
self.write_text_construct(handler, u'title', data['title'])
if data.get('subtitle'):
self.write_text_construct(handler, u'subtitle', data['subtitle'])
if data.get('icon'):
handler.addQuickElement(u'icon', data['icon'])
if data.get('logo'):
handler.addQuickElement(u'logo', data['logo'])
if data.get('updated'):
handler.addQuickElement(u'updated', rfc3339_date(data['updated']))
for category in data.get('categories', []):
self.write_category_construct(handler, category)
for link in data.get('links', []):
self.write_link_construct(handler, link)
for author in data.get('authors', []):
self.write_person_construct(handler, u'author', author)
for contributor in data.get('contributors', []):
self.write_person_construct(handler, u'contributor', contributor)
if data.get('rights'):
self.write_text_construct(handler, u'rights', data['rights'])
handler.endElement(u'source')
def write_content(self, handler, data):
if isinstance(data, tuple):
content_dict, text = data
if content_dict.get('type') == 'xhtml':
handler.startElement(u'content', content_dict)
handler._write(text) # write unescaped -- it had better be well-formed XML
handler.endElement(u'content')
else:
handler.addQuickElement(u'content', text, content_dict)
else:
handler.addQuickElement(u'content', data)
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
feed_attrs = {u'xmlns': self.ns}
if self.feed.get('extra_attrs'):
feed_attrs.update(self.feed['extra_attrs'])
handler.startElement(u'feed', feed_attrs)
handler.addQuickElement(u'id', self.feed['id'])
self.write_text_construct(handler, u'title', self.feed['title'])
if self.feed.get('subtitle'):
self.write_text_construct(handler, u'subtitle', self.feed['subtitle'])
if self.feed.get('icon'):
handler.addQuickElement(u'icon', self.feed['icon'])
if self.feed.get('logo'):
handler.addQuickElement(u'logo', self.feed['logo'])
if self.feed['updated']:
handler.addQuickElement(u'updated', rfc3339_date(self.feed['updated']))
else:
handler.addQuickElement(u'updated', rfc3339_date(self.latest_updated()))
for category in self.feed['categories']:
self.write_category_construct(handler, category)
for link in self.feed['links']:
self.write_link_construct(handler, link)
for author in self.feed['authors']:
self.write_person_construct(handler, u'author', author)
for contributor in self.feed['contributors']:
self.write_person_construct(handler, u'contributor', contributor)
if self.feed.get('rights'):
self.write_text_construct(handler, u'rights', self.feed['rights'])
if not self.feed.get('hide_generator'):
handler.addQuickElement(u'generator', GENERATOR_TEXT, GENERATOR_ATTR)
self.write_items(handler)
handler.endElement(u'feed')
def write_items(self, handler):
for item in self.items:
entry_attrs = item.get('extra_attrs', {})
handler.startElement(u'entry', entry_attrs)
handler.addQuickElement(u'id', item['id'])
self.write_text_construct(handler, u'title', item['title'])
handler.addQuickElement(u'updated', rfc3339_date(item['updated']))
if item.get('published'):
handler.addQuickElement(u'published', rfc3339_date(item['published']))
if item.get('rights'):
self.write_text_construct(handler, u'rights', item['rights'])
if item.get('source'):
self.write_source(handler, item['source'])
for author in item['authors']:
self.write_person_construct(handler, u'author', author)
for contributor in item['contributors']:
self.write_person_construct(handler, u'contributor', contributor)
for category in item['categories']:
self.write_category_construct(handler, category)
for link in item['links']:
self.write_link_construct(handler, link)
if item.get('summary'):
self.write_text_construct(handler, u'summary', item['summary'])
if item.get('content'):
self.write_content(handler, item['content'])
handler.endElement(u'entry')
def validate(self):
def validate_text_construct(obj):
if isinstance(obj, tuple):
if obj[0] not in ['text', 'html', 'xhtml']:
return False
# @@@ no validation is done that 'html' text constructs are valid HTML
# @@@ no validation is done that 'xhtml' text constructs are well-formed XML or valid XHTML
return True
if not validate_text_construct(self.feed['title']):
raise ValidationError('feed title has invalid type')
if self.feed.get('subtitle'):
if not validate_text_construct(self.feed['subtitle']):
raise ValidationError('feed subtitle has invalid type')
if self.feed.get('rights'):
if not validate_text_construct(self.feed['rights']):
raise ValidationError('feed rights has invalid type')
alternate_links = {}
for link in self.feed.get('links'):
if link.get('rel') == 'alternate' or link.get('rel') == None:
key = (link.get('type'), link.get('hreflang'))
if key in alternate_links:
raise ValidationError('alternate links must have unique type/hreflang')
alternate_links[key] = link
if self.feed.get('authors'):
feed_author = True
else:
feed_author = False
for item in self.items:
if not feed_author and not item.get('authors'):
if item.get('source') and item['source'].get('authors'):
pass
else:
raise ValidationError('if no feed author, all entries must have author (possibly in source)')
if not validate_text_construct(item['title']):
raise ValidationError('entry title has invalid type')
if item.get('rights'):
if not validate_text_construct(item['rights']):
raise ValidationError('entry rights has invalid type')
if item.get('summary'):
if not validate_text_construct(item['summary']):
raise ValidationError('entry summary has invalid type')
source = item.get('source')
if source:
if source.get('title'):
if not validate_text_construct(source['title']):
raise ValidationError('source title has invalid type')
if source.get('subtitle'):
if not validate_text_construct(source['subtitle']):
raise ValidationError('source subtitle has invalid type')
if source.get('rights'):
if not validate_text_construct(source['rights']):
raise ValidationError('source rights has invalid type')
alternate_links = {}
for link in item.get('links'):
if link.get('rel') == 'alternate' or link.get('rel') == None:
key = (link.get('type'), link.get('hreflang'))
if key in alternate_links:
raise ValidationError('alternate links must have unique type/hreflang')
alternate_links[key] = link
if not item.get('content'):
if not alternate_links:
raise ValidationError('if no content, entry must have alternate link')
if item.get('content') and isinstance(item.get('content'), tuple):
content_type = item.get('content')[0].get('type')
if item.get('content')[0].get('src'):
if item.get('content')[1]:
raise ValidationError('content with src should be empty')
if not item.get('summary'):
raise ValidationError('content with src requires a summary too')
if content_type in ['text', 'html', 'xhtml']:
raise ValidationError('content with src cannot have type of text, html or xhtml')
if content_type:
if '/' in content_type and \
not content_type.startswith('text/') and \
not content_type.endswith('/xml') and not content_type.endswith('+xml') and \
not content_type in ['application/xml-external-parsed-entity', 'application/xml-dtd']:
# @@@ check content is Base64
if not item.get('summary'):
raise ValidationError('content in Base64 requires a summary too')
if content_type not in ['text', 'html', 'xhtml'] and '/' not in content_type:
raise ValidationError('content type does not appear to be valid')
# @@@ no validation is done that 'html' text constructs are valid HTML
# @@@ no validation is done that 'xhtml' text constructs are well-formed XML or valid XHTML
return
return
class LegacySyndicationFeed(AtomFeed):
"""
    Provides a SyndicationFeed-compatible interface in its __init__ and
add_item but is really a new AtomFeed object.
"""
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=[],
feed_url=None, feed_copyright=None):
atom_id = link
title = title
updated = None # will be calculated
rights = feed_copyright
subtitle = subtitle
author_dict = {'name': author_name}
if author_link:
            author_dict['uri'] = author_link
if author_email:
author_dict['email'] = author_email
authors = [author_dict]
if categories:
categories = [{'term': term} for term in categories]
links = [{'rel': 'alternate', 'href': link}]
if feed_url:
links.append({'rel': 'self', 'href': feed_url})
if language:
extra_attrs = {'xml:lang': language}
else:
extra_attrs = {}
# description ignored (as with Atom1Feed)
AtomFeed.__init__(self, atom_id, title, updated, rights=rights, subtitle=subtitle,
authors=authors, categories=categories, links=links, extra_attrs=extra_attrs)
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, enclosure=None, categories=[], item_copyright=None):
if unique_id:
atom_id = unique_id
else:
atom_id = get_tag_uri(link, pubdate)
title = title
updated = pubdate
if item_copyright:
rights = item_copyright
else:
rights = None
if description:
summary = 'html', description
else:
summary = None
author_dict = {'name': author_name}
if author_link:
            author_dict['uri'] = author_link
if author_email:
author_dict['email'] = author_email
authors = [author_dict]
categories = [{'term': term} for term in categories]
links = [{'rel': 'alternate', 'href': link}]
if enclosure:
links.append({'rel': 'enclosure', 'href': enclosure.url, 'length': enclosure.length, 'type': enclosure.mime_type})
AtomFeed.add_item(self, atom_id, title, updated, rights=rights, summary=summary,
authors=authors, categories=categories, links=links)
| hzlf/openbroadcast | website/_notification/atomformat.py | Python | gpl-3.0 | 22,948 | 0.009936 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import six
from heat.common import exception
from heat.common import grouputils
from heat.common import template_format
from heat.engine.resources.openstack.heat import resource_group
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as stackm
from heat.tests import common
from heat.tests import utils
template = {
"heat_template_version": "2013-05-23",
"resources": {
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
}
}
}
}
}
template2 = {
"heat_template_version": "2013-05-23",
"resources": {
"dummy": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "baz"
}
},
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": {"get_attr": ["dummy", "Foo"]}
}
}
}
}
}
}
template_repl = {
"heat_template_version": "2013-05-23",
"resources": {
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_%index%",
"listprop": [
"%index%_0",
"%index%_1",
"%index%_2"
]
}
}
}
}
}
}
template_attr = {
"heat_template_version": "2014-10-16",
"resources": {
"group1": {
"type": "OS::Heat::ResourceGroup",
"properties": {
"count": 2,
"resource_def": {
"type": "ResourceWithComplexAttributesType",
"properties": {
}
}
}
}
},
"outputs": {
"nested_strings": {
"value": {"get_attr": ["group1", "nested_dict", "string"]}
}
}
}
class ResourceGroupTest(common.HeatTestCase):
def setUp(self):
common.HeatTestCase.setUp(self)
self.m.StubOutWithMock(stackm.Stack, 'validate')
def test_assemble_nested(self):
"""Tests nested stack creation based on props.
Tests that the nested stack that implements the group is created
appropriately based on properties.
"""
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
templ = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
},
"2": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"Foo": "Bar"
}
}
}
}
self.assertEqual(templ, resg._assemble_nested(['0', '1', '2']).t)
def test_assemble_nested_include(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
res_def['properties']['Foo'] = None
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {}
}
}
}
self.assertEqual(expect, resg._assemble_nested(['0']).t)
expect['resources']["0"]['properties'] = {"Foo": None}
self.assertEqual(
expect, resg._assemble_nested(['0'], include_all=True).t)
def test_assemble_nested_include_zero(self):
templ = copy.deepcopy(template)
templ['resources']['group1']['properties']['count'] = 0
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
}
self.assertEqual(expect, resg._assemble_nested([]).t)
def test_assemble_nested_with_metadata(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
res_def['properties']['Foo'] = None
res_def['metadata'] = {
'priority': 'low',
'role': 'webserver'
}
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {},
"metadata": {
'priority': 'low',
'role': 'webserver'
}
}
}
}
self.assertEqual(expect, resg._assemble_nested(['0']).t)
def test_assemble_nested_rolling_update(self):
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "bar"
}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "baz"
}
}
}
}
resource_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "baz"})
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
resg._nested = get_fake_nested_stack(['0', '1'])
resg.build_resource_definition = mock.Mock(return_value=resource_def)
self.assertEqual(expect, resg._assemble_for_rolling_update(2, 1).t)
def test_assemble_nested_rolling_update_none(self):
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "bar"
}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "bar"
}
}
}
}
resource_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "baz"})
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
resg._nested = get_fake_nested_stack(['0', '1'])
resg.build_resource_definition = mock.Mock(return_value=resource_def)
self.assertEqual(expect, resg._assemble_for_rolling_update(2, 0).t)
def test_assemble_nested_rolling_update_failed_resource(self):
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "baz"
}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {
"foo": "bar"
}
}
}
}
resource_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "baz"})
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
resg._nested = get_fake_nested_stack(['0', '1'])
res0 = resg._nested['0']
res0.status = res0.FAILED
resg.build_resource_definition = mock.Mock(return_value=resource_def)
self.assertEqual(expect, resg._assemble_for_rolling_update(2, 1).t)
def test_assemble_nested_missing_param(self):
# Setup
# Change the standard testing template to use a get_param lookup
# within the resource definition
templ = copy.deepcopy(template)
res_def = templ['resources']['group1']['properties']['resource_def']
res_def['properties']['Foo'] = {'get_param': 'bar'}
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
# Test - This should not raise a ValueError about "bar" not being
# provided
nested_tmpl = resg._assemble_nested(['0', '1'])
# Verify
expected = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "OverwrittenFnGetRefIdType",
"properties": {}
},
"1": {
"type": "OverwrittenFnGetRefIdType",
"properties": {}
}
}
}
self.assertEqual(expected, nested_tmpl.t)
def test_index_var(self):
stack = utils.parse_stack(template_repl)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_0",
"listprop": [
"0_0", "0_1", "0_2"
]
}
},
"1": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_1",
"listprop": [
"1_0", "1_1", "1_2"
]
}
},
"2": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_2",
"listprop": [
"2_0", "2_1", "2_2"
]
}
}
}
}
nested = resg._assemble_nested(['0', '1', '2']).t
for res in nested['resources']:
res_prop = nested['resources'][res]['properties']
res_prop['listprop'] = list(res_prop['listprop'])
self.assertEqual(expect, nested)
def test_custom_index_var(self):
templ = copy.deepcopy(template_repl)
templ['resources']['group1']['properties']['index_var'] = "__foo__"
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "ResourceWithListProp%index%",
"properties": {
"Foo": "Bar_%index%",
"listprop": [
"%index%_0", "%index%_1", "%index%_2"
]
}
}
}
}
nested = resg._assemble_nested(['0']).t
res_prop = nested['resources']['0']['properties']
res_prop['listprop'] = list(res_prop['listprop'])
self.assertEqual(expect, nested)
props = copy.deepcopy(templ['resources']['group1']['properties'])
res_def = props['resource_def']
res_def['properties']['Foo'] = "Bar___foo__"
res_def['properties']['listprop'] = ["__foo___0",
"__foo___1",
"__foo___2"]
res_def['type'] = "ResourceWithListProp__foo__"
snip = snip.freeze(properties=props)
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "ResourceWithListProp__foo__",
"properties": {
"Foo": "Bar_0",
"listprop": [
"0_0", "0_1", "0_2"
]
}
}
}
}
nested = resg._assemble_nested(['0']).t
res_prop = nested['resources']['0']['properties']
res_prop['listprop'] = list(res_prop['listprop'])
self.assertEqual(expect, nested)
def test_assemble_no_properties(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
del res_def['properties']
stack = utils.parse_stack(templ)
resg = stack.resources['group1']
self.assertIsNone(resg.validate())
def test_invalid_res_type(self):
"""Test that error raised for unknown resource type."""
tmp = copy.deepcopy(template)
grp_props = tmp['resources']['group1']['properties']
grp_props['resource_def']['type'] = "idontexist"
stack = utils.parse_stack(tmp)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
exp_msg = 'The Resource Type (idontexist) could not be found.'
self.assertIn(exp_msg, six.text_type(exc))
def test_reference_attr(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
self.assertIsNone(resgrp.validate())
def test_invalid_removal_policies_nolist(self):
"""Test that error raised for malformed removal_policies."""
tmp = copy.deepcopy(template)
grp_props = tmp['resources']['group1']['properties']
grp_props['removal_policies'] = 'notallowed'
stack = utils.parse_stack(tmp)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
errstr = "removal_policies: \"'notallowed'\" is not a list"
self.assertIn(errstr, six.text_type(exc))
def test_invalid_removal_policies_nomap(self):
"""Test that error raised for malformed removal_policies."""
tmp = copy.deepcopy(template)
grp_props = tmp['resources']['group1']['properties']
grp_props['removal_policies'] = ['notallowed']
stack = utils.parse_stack(tmp)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
errstr = '"notallowed" is not a map'
self.assertIn(errstr, six.text_type(exc))
def test_child_template(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
def check_res_names(names):
self.assertEqual(list(names), ['0', '1'])
return 'tmpl'
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp._assemble_nested = mock.Mock()
resgrp._assemble_nested.side_effect = check_res_names
resgrp.properties.data[resgrp.COUNT] = 2
self.assertEqual('tmpl', resgrp.child_template())
self.assertEqual(1, resgrp._assemble_nested.call_count)
def test_child_params(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
self.assertEqual({}, resgrp.child_params())
def test_handle_create(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp.create_with_template = mock.Mock(return_value=None)
self.assertIsNone(resgrp.handle_create())
self.assertEqual(1, resgrp.create_with_template.call_count)
def test_handle_create_with_batching(self):
stack = utils.parse_stack(tmpl_with_default_updt_policy())
defn = stack.t.resource_definitions(stack)['group1']
props = stack.t.t['resources']['group1']['properties'].copy()
props['count'] = 10
update_policy = {'batch_create': {'max_batch_size': 3}}
snip = defn.freeze(properties=props, update_policy=update_policy)
resgrp = resource_group.ResourceGroup('test', snip, stack)
self.patchobject(scheduler.TaskRunner, 'start')
checkers = resgrp.handle_create()
self.assertEqual(4, len(checkers))
def test_run_to_completion(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp._check_status_complete = mock.Mock(side_effect=[False, True])
resgrp.update_with_template = mock.Mock(return_value=None)
next(resgrp._run_to_completion(snip, 200))
self.assertEqual(1, resgrp.update_with_template.call_count)
def test_update_in_failed(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp.state_set('CREATE', 'FAILED')
resgrp._assemble_nested = mock.Mock(return_value='tmpl')
resgrp.properties.data[resgrp.COUNT] = 2
self.patchobject(scheduler.TaskRunner, 'start')
resgrp.handle_update(snip, None, None)
self.assertTrue(resgrp._assemble_nested.called)
def test_handle_delete(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp.delete_nested = mock.Mock(return_value=None)
resgrp.handle_delete()
resgrp.delete_nested.assert_called_once_with()
def test_handle_update_size(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp._assemble_nested = mock.Mock(return_value=None)
resgrp.properties.data[resgrp.COUNT] = 5
self.patchobject(scheduler.TaskRunner, 'start')
resgrp.handle_update(snip, None, None)
self.assertTrue(resgrp._assemble_nested.called)
class ResourceGroupBlackList(common.HeatTestCase):
"""This class tests ResourceGroup._name_blacklist()."""
# 1) no resource_list, empty blacklist
# 2) no resource_list, existing blacklist
# 3) resource_list not in nested()
# 4) resource_list (refid) not in nested()
# 5) resource_list in nested() -> saved
# 6) resource_list (refid) in nested() -> saved
scenarios = [
('1', dict(data_in=None, rm_list=[],
nested_rsrcs=[], expected=[],
saved=False)),
('2', dict(data_in='0,1,2', rm_list=[],
nested_rsrcs=[], expected=['0', '1', '2'],
saved=False)),
('3', dict(data_in='1,3', rm_list=['6'],
nested_rsrcs=['0', '1', '3'],
expected=['1', '3'],
saved=False)),
('4', dict(data_in='0,1', rm_list=['id-7'],
nested_rsrcs=['0', '1', '3'],
expected=['0', '1'],
saved=False)),
('5', dict(data_in='0,1', rm_list=['3'],
nested_rsrcs=['0', '1', '3'],
expected=['0', '1', '3'],
saved=True)),
('6', dict(data_in='0,1', rm_list=['id-3'],
nested_rsrcs=['0', '1', '3'],
expected=['0', '1', '3'],
saved=True)),
]
def test_blacklist(self):
stack = utils.parse_stack(template)
resg = stack['group1']
# mock properties
resg.properties = mock.MagicMock()
resg.properties.__getitem__.return_value = [
{'resource_list': self.rm_list}]
# mock data get/set
resg.data = mock.Mock()
resg.data.return_value.get.return_value = self.data_in
resg.data_set = mock.Mock()
# mock nested access
def stack_contains(name):
return name in self.nested_rsrcs
def by_refid(name):
rid = name.replace('id-', '')
if rid not in self.nested_rsrcs:
return None
res = mock.Mock()
res.name = rid
return res
nested = mock.MagicMock()
nested.__contains__.side_effect = stack_contains
nested.__iter__.side_effect = iter(self.nested_rsrcs)
nested.resource_by_refid.side_effect = by_refid
resg.nested = mock.Mock(return_value=nested)
blacklist = resg._name_blacklist()
self.assertEqual(set(self.expected), blacklist)
if self.saved:
resg.data_set.assert_called_once_with('name_blacklist',
','.join(blacklist))
class ResourceGroupEmptyParams(common.HeatTestCase):
"""This class tests ResourceGroup.build_resource_definition()."""
scenarios = [
('non_empty', dict(value='Bar', expected={'Foo': 'Bar'},
expected_include={'Foo': 'Bar'})),
('empty_None', dict(value=None, expected={},
expected_include={'Foo': None})),
('empty_boolean', dict(value=False, expected={'Foo': False},
expected_include={'Foo': False})),
('empty_string', dict(value='', expected={'Foo': ''},
expected_include={'Foo': ''})),
('empty_number', dict(value=0, expected={'Foo': 0},
expected_include={'Foo': 0})),
('empty_json', dict(value={}, expected={'Foo': {}},
expected_include={'Foo': {}})),
('empty_list', dict(value=[], expected={'Foo': []},
expected_include={'Foo': []}))
]
def test_definition(self):
templ = copy.deepcopy(template)
res_def = templ["resources"]["group1"]["properties"]['resource_def']
res_def['properties']['Foo'] = self.value
stack = utils.parse_stack(templ)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
exp1 = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
self.expected)
exp2 = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
self.expected_include)
rdef = resg.get_resource_def()
self.assertEqual(exp1, resg.build_resource_definition('0', rdef))
rdef = resg.get_resource_def(include_all=True)
self.assertEqual(
exp2, resg.build_resource_definition('0', rdef))
class ResourceGroupNameListTest(common.HeatTestCase):
"""This class tests ResourceGroup._resource_names()."""
# 1) no blacklist, 0 count
# 2) no blacklist, x count
    # 3) blacklist (not affecting)
# 4) blacklist with pruning
scenarios = [
('1', dict(blacklist=[], count=0,
expected=[])),
('2', dict(blacklist=[], count=4,
expected=['0', '1', '2', '3'])),
('3', dict(blacklist=['5', '6'], count=3,
expected=['0', '1', '2'])),
('4', dict(blacklist=['2', '4'], count=4,
expected=['0', '1', '3', '5'])),
]
def test_names(self):
stack = utils.parse_stack(template)
resg = stack['group1']
resg.properties = mock.MagicMock()
resg.properties.get.return_value = self.count
resg._name_blacklist = mock.MagicMock(return_value=self.blacklist)
self.assertEqual(self.expected, list(resg._resource_names()))
class ResourceGroupAttrTest(common.HeatTestCase):
def test_aggregate_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack()
expected = ['0', '1']
self.assertEqual(expected, resg.FnGetAtt('foo'))
self.assertEqual(expected, resg.FnGetAtt('Foo'))
def test_index_dotted_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack()
self.assertEqual('0', resg.FnGetAtt('resource.0.Foo'))
self.assertEqual('1', resg.FnGetAtt('resource.1.Foo'))
def test_index_path_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack()
self.assertEqual('0', resg.FnGetAtt('resource.0', 'Foo'))
self.assertEqual('1', resg.FnGetAtt('resource.1', 'Foo'))
def test_index_deep_path_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack(template_attr,
expect_attrs={'0': 2, '1': 2})
self.assertEqual(2, resg.FnGetAtt('resource.0',
'nested_dict', 'dict', 'b'))
self.assertEqual(2, resg.FnGetAtt('resource.1',
'nested_dict', 'dict', 'b'))
def test_aggregate_deep_path_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
resg = self._create_dummy_stack(template_attr,
expect_attrs={'0': 3, '1': 3})
expected = [3, 3]
self.assertEqual(expected, resg.FnGetAtt('nested_dict', 'list', 2))
def test_aggregate_refs(self):
"""Test resource id aggregation."""
resg = self._create_dummy_stack()
expected = ['ID-0', 'ID-1']
self.assertEqual(expected, resg.FnGetAtt("refs"))
def test_aggregate_refs_with_index(self):
"""Test resource id aggregation with index."""
resg = self._create_dummy_stack()
expected = ['ID-0', 'ID-1']
self.assertEqual(expected[0], resg.FnGetAtt("refs", 0))
self.assertEqual(expected[1], resg.FnGetAtt("refs", 1))
self.assertIsNone(resg.FnGetAtt("refs", 2))
def test_aggregate_refs_map(self):
resg = self._create_dummy_stack()
found = resg.FnGetAtt("refs_map")
expected = {'0': 'ID-0', '1': 'ID-1'}
self.assertEqual(expected, found)
def test_aggregate_outputs(self):
"""Test outputs aggregation."""
expected = {'0': ['foo', 'bar'], '1': ['foo', 'bar']}
resg = self._create_dummy_stack(template_attr, expect_attrs=expected)
self.assertEqual(expected, resg.FnGetAtt('attributes', 'list'))
def test_aggregate_outputs_no_path(self):
"""Test outputs aggregation with missing path."""
resg = self._create_dummy_stack(template_attr)
self.assertRaises(exception.InvalidTemplateAttribute,
resg.FnGetAtt, 'attributes')
def test_index_refs(self):
"""Tests getting ids of individual resources."""
resg = self._create_dummy_stack()
self.assertEqual("ID-0", resg.FnGetAtt('resource.0'))
self.assertEqual("ID-1", resg.FnGetAtt('resource.1'))
self.assertRaises(exception.InvalidTemplateAttribute, resg.FnGetAtt,
'resource.2')
@mock.patch.object(grouputils, 'get_rsrc_id')
def test_get_attribute(self, mock_get_rsrc_id):
stack = utils.parse_stack(template)
mock_get_rsrc_id.side_effect = ['0', '1']
rsrc = stack['group1']
self.assertEqual(['0', '1'], rsrc.FnGetAtt(rsrc.REFS))
def test_get_attribute_convg(self):
cache_data = {'group1': {
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'attrs': {'refs': ['rsrc1', 'rsrc2']}
}}
stack = utils.parse_stack(template, cache_data=cache_data)
rsrc = stack['group1']
self.assertEqual(['rsrc1', 'rsrc2'], rsrc.FnGetAtt(rsrc.REFS))
def _create_dummy_stack(self, template_data=template, expect_count=2,
expect_attrs=None):
stack = utils.parse_stack(template_data)
resg = stack['group1']
fake_res = {}
if expect_attrs is None:
expect_attrs = {}
for resc in range(expect_count):
res = str(resc)
fake_res[res] = mock.Mock()
fake_res[res].stack = stack
fake_res[res].FnGetRefId.return_value = 'ID-%s' % res
if res in expect_attrs:
fake_res[res].FnGetAtt.return_value = expect_attrs[res]
else:
fake_res[res].FnGetAtt.return_value = res
resg.nested = mock.Mock(return_value=fake_res)
names = [str(name) for name in range(expect_count)]
resg._resource_names = mock.Mock(return_value=names)
return resg
class ReplaceTest(common.HeatTestCase):
# 1. no min_in_service
# 2. min_in_service > count and existing with no blacklist
# 3. min_in_service > count and existing with blacklist
# 4. existing > count and min_in_service with blacklist
# 5. existing > count and min_in_service with no blacklist
# 6. all existing blacklisted
# 7. count > existing and min_in_service with no blacklist
# 8. count > existing and min_in_service with blacklist
# 9. count < existing - blacklisted
# 10. pause_sec > 0
scenarios = [
('1', dict(min_in_service=0, count=2,
existing=['0', '1'], black_listed=['0'],
batch_size=1, pause_sec=0, tasks=2)),
('2', dict(min_in_service=3, count=2,
existing=['0', '1'], black_listed=[],
batch_size=2, pause_sec=0, tasks=3)),
('3', dict(min_in_service=3, count=2,
existing=['0', '1'], black_listed=['0'],
batch_size=2, pause_sec=0, tasks=3)),
('4', dict(min_in_service=3, count=2,
existing=['0', '1', '2', '3'], black_listed=['2', '3'],
batch_size=1, pause_sec=0, tasks=4)),
('5', dict(min_in_service=2, count=2,
existing=['0', '1', '2', '3'], black_listed=[],
batch_size=2, pause_sec=0, tasks=2)),
('6', dict(min_in_service=2, count=3,
existing=['0', '1'], black_listed=['0', '1'],
batch_size=2, pause_sec=0, tasks=2)),
('7', dict(min_in_service=0, count=5,
existing=['0', '1'], black_listed=[],
batch_size=1, pause_sec=0, tasks=5)),
('8', dict(min_in_service=0, count=5,
existing=['0', '1'], black_listed=['0'],
batch_size=1, pause_sec=0, tasks=5)),
('9', dict(min_in_service=0, count=3,
existing=['0', '1', '2', '3', '4', '5'],
black_listed=['0'],
batch_size=2, pause_sec=0, tasks=2)),
('10', dict(min_in_service=0, count=3,
existing=['0', '1', '2', '3', '4', '5'],
black_listed=['0'],
batch_size=2, pause_sec=10, tasks=3))]
def setUp(self):
super(ReplaceTest, self).setUp()
templ = copy.deepcopy(template)
self.stack = utils.parse_stack(templ)
snip = self.stack.t.resource_definitions(self.stack)['group1']
self.group = resource_group.ResourceGroup('test', snip, self.stack)
self.group.update_with_template = mock.Mock()
self.group.check_update_complete = mock.Mock()
def test_rolling_updates(self):
self.group._nested = get_fake_nested_stack(self.existing)
self.group.get_size = mock.Mock(return_value=self.count)
self.group._name_blacklist = mock.Mock(
return_value=set(self.black_listed))
tasks = self.group._replace(self.min_in_service, self.batch_size,
self.pause_sec)
self.assertEqual(self.tasks,
len(tasks))
def tmpl_with_bad_updt_policy():
t = copy.deepcopy(template)
rg = t['resources']['group1']
rg["update_policy"] = {"foo": {}}
return t
def tmpl_with_default_updt_policy():
t = copy.deepcopy(template)
rg = t['resources']['group1']
rg["update_policy"] = {"rolling_update": {}}
return t
def tmpl_with_updt_policy():
t = copy.deepcopy(template)
rg = t['resources']['group1']
rg["update_policy"] = {"rolling_update": {
"min_in_service": "1",
"max_batch_size": "2",
"pause_time": "1"
}}
return t
def get_fake_nested_stack(names):
nested_t = '''
heat_template_version: 2015-04-30
description: Resource Group
resources:
'''
resource_snip = '''
'%s':
type: OverwrittenFnGetRefIdType
properties:
foo: bar
'''
resources = [nested_t]
for res_name in names:
resources.extend([resource_snip % res_name])
nested_t = ''.join(resources)
return utils.parse_stack(template_format.parse(nested_t))
class RollingUpdatePolicyTest(common.HeatTestCase):
def setUp(self):
super(RollingUpdatePolicyTest, self).setUp()
def test_parse_without_update_policy(self):
stack = utils.parse_stack(template)
stack.validate()
grp = stack['group1']
self.assertFalse(grp.update_policy['rolling_update'])
def test_parse_with_update_policy(self):
tmpl = tmpl_with_updt_policy()
stack = utils.parse_stack(tmpl)
stack.validate()
tmpl_grp = tmpl['resources']['group1']
tmpl_policy = tmpl_grp['update_policy']['rolling_update']
tmpl_batch_sz = int(tmpl_policy['max_batch_size'])
grp = stack['group1']
self.assertTrue(grp.update_policy)
self.assertEqual(2, len(grp.update_policy))
self.assertIn('rolling_update', grp.update_policy)
policy = grp.update_policy['rolling_update']
self.assertTrue(policy and len(policy) > 0)
self.assertEqual(1, int(policy['min_in_service']))
self.assertEqual(tmpl_batch_sz, int(policy['max_batch_size']))
self.assertEqual(1, policy['pause_time'])
def test_parse_with_default_update_policy(self):
tmpl = tmpl_with_default_updt_policy()
stack = utils.parse_stack(tmpl)
stack.validate()
grp = stack['group1']
self.assertTrue(grp.update_policy)
self.assertEqual(2, len(grp.update_policy))
self.assertIn('rolling_update', grp.update_policy)
policy = grp.update_policy['rolling_update']
self.assertTrue(policy and len(policy) > 0)
self.assertEqual(0, int(policy['min_in_service']))
self.assertEqual(1, int(policy['max_batch_size']))
self.assertEqual(0, policy['pause_time'])
def test_parse_with_bad_update_policy(self):
tmpl = tmpl_with_bad_updt_policy()
stack = utils.parse_stack(tmpl)
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
self.assertIn("foo", six.text_type(error))
class RollingUpdatePolicyDiffTest(common.HeatTestCase):
def setUp(self):
super(RollingUpdatePolicyDiffTest, self).setUp()
def validate_update_policy_diff(self, current, updated):
# load current stack
current_stack = utils.parse_stack(current)
current_grp = current_stack['group1']
current_grp_json = current_grp.frozen_definition()
updated_stack = utils.parse_stack(updated)
updated_grp = updated_stack['group1']
updated_grp_json = updated_grp.t.freeze()
# identify the template difference
tmpl_diff = updated_grp.update_template_diff(
updated_grp_json, current_grp_json)
self.assertTrue(tmpl_diff.update_policy_changed())
# test application of the new update policy in handle_update
current_grp._try_rolling_update = mock.Mock()
current_grp._assemble_nested_for_size = mock.Mock()
self.patchobject(scheduler.TaskRunner, 'start')
current_grp.handle_update(updated_grp_json, tmpl_diff, None)
self.assertEqual(updated_grp_json._update_policy or {},
current_grp.update_policy.data)
def test_update_policy_added(self):
self.validate_update_policy_diff(template,
tmpl_with_updt_policy())
def test_update_policy_updated(self):
updt_template = tmpl_with_updt_policy()
grp = updt_template['resources']['group1']
policy = grp['update_policy']['rolling_update']
policy['min_in_service'] = '2'
policy['max_batch_size'] = '4'
policy['pause_time'] = '90'
self.validate_update_policy_diff(tmpl_with_updt_policy(),
updt_template)
def test_update_policy_removed(self):
self.validate_update_policy_diff(tmpl_with_updt_policy(),
template)
class RollingUpdateTest(common.HeatTestCase):
def setUp(self):
super(RollingUpdateTest, self).setUp()
def check_with_update(self, with_policy=False, with_diff=False):
current = copy.deepcopy(template)
self.current_stack = utils.parse_stack(current)
self.current_grp = self.current_stack['group1']
current_grp_json = self.current_grp.frozen_definition()
prop_diff, tmpl_diff = None, None
updated = tmpl_with_updt_policy() if (
with_policy) else copy.deepcopy(template)
if with_diff:
res_def = updated['resources']['group1'][
'properties']['resource_def']
res_def['properties']['Foo'] = 'baz'
prop_diff = dict(
{'count': 2,
'resource_def': {'properties': {'Foo': 'baz'},
'type': 'OverwrittenFnGetRefIdType'}})
updated_stack = utils.parse_stack(updated)
updated_grp = updated_stack['group1']
updated_grp_json = updated_grp.t.freeze()
tmpl_diff = updated_grp.update_template_diff(
updated_grp_json, current_grp_json)
self.current_grp._replace = mock.Mock(return_value=[])
self.current_grp._assemble_nested = mock.Mock()
self.patchobject(scheduler.TaskRunner, 'start')
self.current_grp.handle_update(updated_grp_json, tmpl_diff, prop_diff)
def test_update_without_policy_prop_diff(self):
self.check_with_update(with_diff=True)
self.assertTrue(self.current_grp._assemble_nested.called)
def test_update_with_policy_prop_diff(self):
self.check_with_update(with_policy=True, with_diff=True)
self.current_grp._replace.assert_called_once_with(1, 2, 1)
self.assertTrue(self.current_grp._assemble_nested.called)
def test_update_time_not_sufficient(self):
current = copy.deepcopy(template)
self.stack = utils.parse_stack(current)
self.current_grp = self.stack['group1']
self.stack.timeout_secs = mock.Mock(return_value=200)
err = self.assertRaises(ValueError, self.current_grp._update_timeout,
3, 100)
self.assertIn('The current update policy will result in stack update '
'timeout.', six.text_type(err))
def test_update_time_sufficient(self):
current = copy.deepcopy(template)
self.stack = utils.parse_stack(current)
self.current_grp = self.stack['group1']
self.stack.timeout_secs = mock.Mock(return_value=400)
self.assertEqual(200, self.current_grp._update_timeout(3, 100))
class TestUtils(common.HeatTestCase):
# 1. No existing no blacklist
# 2. Existing with no blacklist
# 3. Existing with blacklist
scenarios = [
('1', dict(existing=[], black_listed=[], count=0)),
('2', dict(existing=['0', '1'], black_listed=[], count=0)),
('3', dict(existing=['0', '1'], black_listed=['0'], count=1)),
('4', dict(existing=['0', '1'], black_listed=['1', '2'], count=1))
]
def setUp(self):
super(TestUtils, self).setUp()
def test_count_black_listed(self):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
resgrp._nested = get_fake_nested_stack(self.existing)
resgrp._name_blacklist = mock.Mock(return_value=set(self.black_listed))
rcount = resgrp._count_black_listed()
self.assertEqual(self.count, rcount)
class TestGetBatches(common.HeatTestCase):
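    # Scenario names encode targ_cap_init_cap_bat_size_min_serv. Each entry in
    # 'batches' is a (size, max_updates, updated_names) tuple: the total group
    # capacity while the batch is applied, the number of members updated in
    # that batch, and the names of the updated members (consumed by
    # test_get_batches and test_assemble below).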
scenarios = [
('4_4_1_0', dict(targ_cap=4, init_cap=4, bat_size=1, min_serv=0,
batches=[
(4, 1, ['4']),
(4, 1, ['3']),
(4, 1, ['2']),
(4, 1, ['1']),
])),
('4_4_1_4', dict(targ_cap=4, init_cap=4, bat_size=1, min_serv=4,
batches=[
(5, 1, ['5']),
(5, 1, ['4']),
(5, 1, ['3']),
(5, 1, ['2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_4_1_5', dict(targ_cap=4, init_cap=4, bat_size=1, min_serv=5,
batches=[
(5, 1, ['5']),
(5, 1, ['4']),
(5, 1, ['3']),
(5, 1, ['2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_4_2_0', dict(targ_cap=4, init_cap=4, bat_size=2, min_serv=0,
batches=[
(4, 2, ['4', '3']),
(4, 2, ['2', '1']),
])),
('4_4_2_4', dict(targ_cap=4, init_cap=4, bat_size=2, min_serv=4,
batches=[
(6, 2, ['6', '5']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
(4, 0, []),
])),
('5_5_2_0', dict(targ_cap=5, init_cap=5, bat_size=2, min_serv=0,
batches=[
(5, 2, ['5', '4']),
(5, 2, ['3', '2']),
(5, 1, ['1']),
])),
('5_5_2_4', dict(targ_cap=5, init_cap=5, bat_size=2, min_serv=4,
batches=[
(6, 2, ['6', '5']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
(5, 0, []),
])),
('3_3_2_0', dict(targ_cap=3, init_cap=3, bat_size=2, min_serv=0,
batches=[
(3, 2, ['3', '2']),
(3, 1, ['1']),
])),
('3_3_2_4', dict(targ_cap=3, init_cap=3, bat_size=2, min_serv=4,
batches=[
(5, 2, ['5', '4']),
(5, 2, ['3', '2']),
(4, 1, ['1']),
(3, 0, []),
])),
('4_4_4_0', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=0,
batches=[
(4, 4, ['4', '3', '2', '1']),
])),
('4_4_5_0', dict(targ_cap=4, init_cap=4, bat_size=5, min_serv=0,
batches=[
(4, 4, ['4', '3', '2', '1']),
])),
('4_4_4_1', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=1,
batches=[
(5, 4, ['5', '4', '3', '2']),
(4, 1, ['1']),
])),
('4_4_6_1', dict(targ_cap=4, init_cap=4, bat_size=6, min_serv=1,
batches=[
(5, 4, ['5', '4', '3', '2']),
(4, 1, ['1']),
])),
('4_4_4_2', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=2,
batches=[
(6, 4, ['6', '5', '4', '3']),
(4, 2, ['2', '1']),
])),
('4_4_4_4', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=4,
batches=[
(8, 4, ['8', '7', '6', '5']),
(8, 4, ['4', '3', '2', '1']),
(4, 0, []),
])),
('4_4_5_6', dict(targ_cap=4, init_cap=4, bat_size=5, min_serv=6,
batches=[
(8, 4, ['8', '7', '6', '5']),
(8, 4, ['4', '3', '2', '1']),
(4, 0, []),
])),
('4_7_1_0', dict(targ_cap=4, init_cap=7, bat_size=1, min_serv=0,
batches=[
(4, 1, ['4']),
(4, 1, ['3']),
(4, 1, ['2']),
(4, 1, ['1']),
])),
('4_7_1_4', dict(targ_cap=4, init_cap=7, bat_size=1, min_serv=4,
batches=[
(5, 1, ['4']),
(5, 1, ['3']),
(5, 1, ['2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_7_1_5', dict(targ_cap=4, init_cap=7, bat_size=1, min_serv=5,
batches=[
(5, 1, ['4']),
(5, 1, ['3']),
(5, 1, ['2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_7_2_0', dict(targ_cap=4, init_cap=7, bat_size=2, min_serv=0,
batches=[
(4, 2, ['4', '3']),
(4, 2, ['2', '1']),
])),
('4_7_2_4', dict(targ_cap=4, init_cap=7, bat_size=2, min_serv=4,
batches=[
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
(4, 0, []),
])),
('5_7_2_0', dict(targ_cap=5, init_cap=7, bat_size=2, min_serv=0,
batches=[
(5, 2, ['5', '4']),
(5, 2, ['3', '2']),
(5, 1, ['1']),
])),
('5_7_2_4', dict(targ_cap=5, init_cap=7, bat_size=2, min_serv=4,
batches=[
(6, 2, ['5', '4']),
(6, 2, ['3', '2']),
(5, 1, ['1']),
])),
('4_7_4_4', dict(targ_cap=4, init_cap=7, bat_size=4, min_serv=4,
batches=[
(8, 4, ['8', '4', '3', '2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_7_5_6', dict(targ_cap=4, init_cap=7, bat_size=5, min_serv=6,
batches=[
(8, 4, ['8', '4', '3', '2']),
(5, 1, ['1']),
(4, 0, []),
])),
('6_4_1_0', dict(targ_cap=6, init_cap=4, bat_size=1, min_serv=0,
batches=[
(5, 1, ['5']),
(6, 1, ['6']),
(6, 1, ['4']),
(6, 1, ['3']),
(6, 1, ['2']),
(6, 1, ['1']),
])),
('6_4_1_4', dict(targ_cap=6, init_cap=4, bat_size=1, min_serv=4,
batches=[
(5, 1, ['5']),
(6, 1, ['6']),
(6, 1, ['4']),
(6, 1, ['3']),
(6, 1, ['2']),
(6, 1, ['1']),
])),
('6_4_1_5', dict(targ_cap=6, init_cap=4, bat_size=1, min_serv=5,
batches=[
(5, 1, ['5']),
(6, 1, ['6']),
(6, 1, ['4']),
(6, 1, ['3']),
(6, 1, ['2']),
(6, 1, ['1']),
])),
('6_4_2_0', dict(targ_cap=6, init_cap=4, bat_size=2, min_serv=0,
batches=[
(6, 2, ['5', '6']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
])),
('6_4_2_4', dict(targ_cap=6, init_cap=4, bat_size=2, min_serv=4,
batches=[
(6, 2, ['5', '6']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
])),
('6_5_2_0', dict(targ_cap=6, init_cap=5, bat_size=2, min_serv=0,
batches=[
(6, 2, ['6', '5']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
])),
('6_5_2_4', dict(targ_cap=6, init_cap=5, bat_size=2, min_serv=4,
batches=[
(6, 2, ['6', '5']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
])),
('6_3_2_0', dict(targ_cap=6, init_cap=3, bat_size=2, min_serv=0,
batches=[
(5, 2, ['4', '5']),
(6, 2, ['6', '3']),
(6, 2, ['2', '1']),
])),
('6_3_2_4', dict(targ_cap=6, init_cap=3, bat_size=2, min_serv=4,
batches=[
(5, 2, ['4', '5']),
(6, 2, ['6', '3']),
(6, 2, ['2', '1']),
])),
('6_4_4_0', dict(targ_cap=6, init_cap=4, bat_size=4, min_serv=0,
batches=[
(6, 4, ['5', '6', '4', '3']),
(6, 2, ['2', '1']),
])),
('6_4_5_0', dict(targ_cap=6, init_cap=4, bat_size=5, min_serv=0,
batches=[
(6, 5, ['5', '6', '4', '3', '2']),
(6, 1, ['1']),
])),
('6_4_4_1', dict(targ_cap=6, init_cap=4, bat_size=4, min_serv=1,
batches=[
(6, 4, ['5', '6', '4', '3']),
(6, 2, ['2', '1']),
])),
('6_4_6_1', dict(targ_cap=6, init_cap=4, bat_size=6, min_serv=1,
batches=[
(7, 6, ['5', '6', '7', '4', '3', '2']),
(6, 1, ['1']),
])),
('6_4_4_2', dict(targ_cap=6, init_cap=4, bat_size=4, min_serv=2,
batches=[
(6, 4, ['5', '6', '4', '3']),
(6, 2, ['2', '1']),
])),
('6_4_4_4', dict(targ_cap=6, init_cap=4, bat_size=4, min_serv=4,
batches=[
(8, 4, ['8', '7', '6', '5']),
(8, 4, ['4', '3', '2', '1']),
(6, 0, []),
])),
('6_4_5_6', dict(targ_cap=6, init_cap=4, bat_size=5, min_serv=6,
batches=[
(9, 5, ['9', '8', '7', '6', '5']),
(10, 4, ['10', '4', '3', '2']),
(7, 1, ['1']),
(6, 0, []),
])),
]
def setUp(self):
super(TestGetBatches, self).setUp()
self.stack = utils.parse_stack(template)
self.grp = self.stack['group1']
self.grp._name_blacklist = mock.Mock(return_value={'0'})
def test_get_batches(self):
batches = list(self.grp._get_batches(self.targ_cap,
self.init_cap,
self.bat_size,
self.min_serv))
self.assertEqual([(s, u) for s, u, n in self.batches], batches)
def test_assemble(self):
old_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "baz"})
new_def = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType",
{"foo": "bar"})
resources = [(str(i), old_def) for i in range(self.init_cap + 1)]
self.grp.get_size = mock.Mock(return_value=self.targ_cap)
self.patchobject(grouputils, 'get_member_definitions',
return_value=resources)
self.grp.build_resource_definition = mock.Mock(return_value=new_def)
all_updated_names = set()
for size, max_upd, names in self.batches:
template = self.grp._assemble_for_rolling_update(size,
max_upd,
names)
res_dict = template.resource_definitions(self.stack)
expected_names = set(map(str, range(1, size + 1)))
self.assertEqual(expected_names, set(res_dict))
all_updated_names &= expected_names
all_updated_names |= set(names)
updated = set(n for n, v in res_dict.items() if v != old_def)
self.assertEqual(all_updated_names, updated)
resources[:] = sorted(res_dict.items(), key=lambda i: int(i[0]))
| steveb/heat | heat/tests/openstack/heat/test_resource_group.py | Python | apache-2.0 | 58,016 | 0 |
#!/usr/bin/env python
# encoding: utf-8
'''
pvaurora configuration file
'''
LATITUDE = 42.6
LONGITUDE = 12.9
API_KEY = "api_key_value"
SYSTEM_ID = -1
| yuroller/pvaurora | src/config-dist.py | Python | gpl-3.0 | 152 | 0.006579 |
""" barbell_capilar script. """
from common import info
import dolfin as df
import mshr
import os
from generate_mesh import MESHES_DIR, store_mesh_HDF5
import matplotlib.pyplot as plt
def description(**kwargs):
info("Generates mesh for a barbell capillary.")
def method(res=50, diameter=1., length=5., show=False, **kwargs):
'''
    Function that generates a mesh for a barbell capillary.
    The meshing method is mshr.
    Note: The generated mesh is stored in "BERNAISE/meshes/".
'''
info("Generating mesh using the mshr tool.")
inletdiameter = diameter*5.
inletlength = diameter*4.
    # Define corners of "capilar"
a = df.Point(-diameter/2., -length/2-inletlength/2.)
b = df.Point(diameter/2., length/2+inletlength/2.)
capilar = mshr.Rectangle(a, b)
    # Define corners of "leftbell"
c = df.Point(-inletdiameter/2., -length/2-inletlength)
d = df.Point(inletdiameter/2., -length/2)
leftbell = mshr.Rectangle(c, d)
    # Define corners of "rightbell"
e = df.Point(-inletdiameter/2., length/2)
f = df.Point(inletdiameter/2., length/2+inletlength)
rightbell = mshr.Rectangle(e, f)
domain = capilar + leftbell + rightbell
mesh = mshr.generate_mesh(domain, res)
meshpath = os.path.join(MESHES_DIR,
"BarbellCapilarDolfin_d" + str(diameter) + "_l" +
str(length) + "_res" + str(res))
store_mesh_HDF5(mesh, meshpath)
if show:
df.plot(mesh)
plt.show()
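# A minimal usage sketch (illustrative values; assumes dolfin and mshr are
# installed and that MESHES_DIR resolves to a writable directory):
#
#     method(res=20, diameter=1., length=5., show=True)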
| gautelinga/BERNAISE | utilities/mesh_scripts/barbell_capillary.py | Python | mit | 1,501 | 0 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.db import transaction
from django.core.mail import send_mail
from celery.utils.log import get_task_logger
from geonode.celery_app import app
try:
import pylibmc
import sherlock
from sherlock import MCLock as Lock
sherlock.configure(
expire=settings.MEMCACHED_LOCK_EXPIRE,
timeout=settings.MEMCACHED_LOCK_TIMEOUT)
memcache_client = pylibmc.Client(
[settings.MEMCACHED_LOCATION],
binary=True)
lock_type = "MEMCACHED"
except Exception:
from django.core.cache import cache
from contextlib import contextmanager
lock_type = "MEMCACHED-LOCAL-CONTEXT"
memcache_client = None
"""
ref.
http://docs.celeryproject.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time
"""
class Lock(object):
def __init__(self, lock_id, *args, **kwargs):
self.lock_id = lock_id
self.client = kwargs.get('client', None)
@contextmanager
def acquire(self, blocking=True):
if not blocking:
logger.warning("Non-blocking lock not currently available!")
# cache.add fails if the key already exists
from geonode.celery_app import app
status = cache.add(self.lock_id, app.oid, None)
try:
yield status
finally:
# memcache delete is very slow, but we have to use it to take
# advantage of using add() for atomic locking
if status:
# don't release the lock if we didn't acquire it
cache.delete(self.lock_id)
logger = get_task_logger(__name__)
def memcache_lock(lock_id):
logger.info(f"Using '{lock_type}' lock type.")
lock = Lock(lock_id, client=memcache_client)
return lock
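# A sketch of how the fallback Lock defined above could be used to guard a
# task body, following the Celery cookbook pattern referenced earlier; the
# lock id and do_work() are hypothetical placeholders:
#
#     lock = memcache_lock("my-unique-lock-id")
#     with lock.acquire(blocking=False) as acquired:
#         if acquired:
#             do_work()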
@app.task(
bind=True,
name='geonode.tasks.email.send_mail',
queue='email',
countdown=60,
# expires=120,
acks_late=True,
retry=True,
retry_policy={
'max_retries': 3,
'interval_start': 0,
'interval_step': 0.2,
'interval_max': 0.2,
})
def send_email(self, *args, **kwargs):
"""
Sends an email using django's send_mail functionality.
"""
send_mail(*args, **kwargs)
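# Example call with hypothetical values; the positional arguments are passed
# straight through to django.core.mail.send_mail, i.e.
# (subject, message, from_email, recipient_list):
#
#     send_email.delay("Subject", "Body text", "noreply@example.org",
#                      ["user@example.org"])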
@app.task(
bind=True,
name='geonode.tasks.notifications.send_queued_notifications',
queue='email',
countdown=60,
# expires=120,
acks_late=True,
retry=True,
retry_policy={
'max_retries': 3,
'interval_start': 0,
'interval_step': 0.2,
'interval_max': 0.2,
})
def send_queued_notifications(self, *args):
"""Sends queued notifications.
settings.PINAX_NOTIFICATIONS_QUEUE_ALL needs to be true in order to take
advantage of this.
"""
from importlib import import_module
notifications = getattr(settings, 'NOTIFICATIONS_MODULE', None)
if notifications:
engine = import_module(f"{notifications}.engine")
send_all = getattr(engine, 'send_all')
# Make sure application can write to location where lock files are stored
if not args and getattr(settings, 'NOTIFICATION_LOCK_LOCATION', None):
send_all(settings.NOTIFICATION_LOCK_LOCATION)
else:
send_all(*args)
@app.task(
bind=True,
name='geonode.tasks.layers.set_permissions',
queue='update',
countdown=60,
# expires=120,
acks_late=True,
retry=True,
retry_policy={
'max_retries': 3,
'interval_start': 0,
'interval_step': 0.2,
'interval_max': 0.2,
})
def set_permissions(self, permissions_names, resources_names,
users_usernames, groups_names, delete_flag):
from geonode.layers.utils import set_layers_permissions
with transaction.atomic():
for permissions_name in permissions_names:
set_layers_permissions(
permissions_name,
resources_names,
users_usernames,
groups_names,
delete_flag,
verbose=True
)
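# Example invocation with hypothetical values, mirroring the signature above
# (permission names, resource names, usernames, group names, delete flag):
#
#     set_permissions.delay(['view_resourcebase'], ['my_layer'],
#                           ['alice'], [], False)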
| tomkralidis/geonode | geonode/tasks/tasks.py | Python | gpl-3.0 | 4,983 | 0.000201 |
from octopus.plugins.plugin import OctopusPlugin
class VSA(OctopusPlugin):
def __init__(self, executor):
super().__init__(executor)
self._pluginname = 'vsa.jar'
self._classname = 'bjoern.plugins.vsa.VSAPlugin'
def __setattr__(self, key, value):
if key == "project":
self._settings["database"] = value
else:
super().__setattr__(key, value)
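# Illustrative usage (the executor object and project name are hypothetical);
# assigning 'project' stores the value under settings["database"], as
# implemented in __setattr__ above:
#
#     plugin = VSA(executor)
#     plugin.project = "my-bjoern-project"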
| octopus-platform/bjoern | python/bjoern-tools/bjoern/plugins/vsa.py | Python | gpl-3.0 | 414 | 0 |
def agts(queue):
al = queue.add('surface.agts.py')
queue.add('work_function.py', ncpus=1, deps=[al])
if __name__ == '__main__':
execfile('Al100.py', {'k': 6, 'N': 5})
| qsnake/gpaw | doc/exercises/surface/surface.agts.py | Python | gpl-3.0 | 180 | 0.005556 |
# coding=utf-8
from flask import g
from mongoengine.connection import get_db
from .model_document import ModelDocument, ModelQuerySet
################################################################################
__all__ = ('MultipleDatabaseModelDocument',)
################################################################################
class MultipleDatabaseModelQuerySet(ModelQuerySet):
def __init__(self, document, collection):
        # make a local copy of the Document class for this QuerySet, bound to the
        # current database, so that new attributes can be set on it without
        # affecting the original class
new_document = self._copy_class(document)
# this copies what may be class-level attributes from 'document',
# to instance-level attributes on 'new_document', freezing them
current_db_alias = document._get_db_alias()
new_document._get_db_alias = staticmethod(lambda: current_db_alias)
current_collection = document._get_collection()
new_document._get_collection = staticmethod(lambda: current_collection)
super(MultipleDatabaseModelQuerySet, self).__init__(new_document, collection)
@staticmethod
def _copy_class(cls):
# TODO: move this to a common utils
new_cls_dict = dict(cls.__dict__)
new_cls_dict['meta'] = new_cls_dict.pop('_meta')
return type(cls.__name__, cls.__bases__, new_cls_dict)
class MultipleDatabaseModelDocument(ModelDocument):
"""
An abstract class for documents that may reside in one of multiple databases.
"""
# TODO: prevent this class from being instantiated directly
meta = {
'abstract': True,
'allow_inheritance': False,
'db_alias': None, # this shouldn't actually be used
'queryset_class': MultipleDatabaseModelQuerySet,
'auto_create_index': False, # don't change; see '_get_collection' for why this is set
}
@property
def database(self):
# the import is required here to prevent circular imports
# TODO: remove this import statement
from ..image_store import MultipleDatabaseImageStore
return MultipleDatabaseImageStore.objects.with_id(self._db_alias)
@classmethod
def _get_db_alias(cls):
"""
Helper method to provide the current database, as set by a
MultipleDatabaseImageStore context manager.
This would be better as a property, but Python has poor support for
classmethod descriptors, particularly with mutators.
"""
try:
return g.multiple_database_connection_aliases[-1]
except (AttributeError, IndexError):
            raise NotImplementedError('A "%s" must be used inside a "MultipleDatabaseImageStoreMixin" context (\'with\' statement).' % cls.__name__)
@classmethod
def _get_db(cls):
"""
        Overrides the 'Document._get_db' classmethod.
This will only be called on class instances, as instantiated objects
have this method patched by 'self.switch_db'.
"""
return get_db(cls._get_db_alias())
@classmethod
def _get_collection(cls):
"""
Overrides the 'Document._get_collection' classmethod.
This method attempts to provide some degree of caching, preventing a
new collection from having to be created on every access, while still
allowing the database to change.
Unlike for databases, MongoEngine doesn't store an internal cache for
multiple collections per class, so one is created here, and used
instead of the single '_collection' cache.
This will only be called on class instances, as instantiated objects
have this method patched by 'self.switch_db'.
"""
if issubclass(MultipleDatabaseModelDocument, cls):
# setting the '_collections' property on one of the common base
# classes would prevent the derived classes from having their own
            # separate instances of the property
raise NotImplementedError('"_get_collection" should only be called on concrete model classes.')
if not hasattr(cls, '_collections'):
cls._collections = dict()
db_alias = cls._get_db_alias()
try:
cls._collection = cls._collections[db_alias]
except KeyError:
cls._collection = None
# 'cls._collection' is set as a side effect of the superclass
# '_get_collection'
cls._collections[db_alias] = super(MultipleDatabaseModelDocument, cls)._get_collection()
# unless meta['auto_create_index'] is false, the superclass
# '_get_collection' will attempt to call 'ensure_indexes', which
# in turn calls '_get_collection', leading to infinite recursion
# so, wait until the necessary '_collection' / '_collections' values
# are set after the return, and only then call 'ensure_indexes'
cls.ensure_indexes()
return cls._collection
def __init__(self, *args, **kwargs):
super(MultipleDatabaseModelDocument, self).__init__(*args, **kwargs)
# make the new database persistent to this instance
# cls_db_alias = type(self)._get_db_alias()
cls_db_alias = self._get_db_alias()
self._db_alias = cls_db_alias # save the value for use in the 'database' property
self.switch_db(cls_db_alias) # this patches over 'self._get_db'
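# A minimal sketch of a concrete subclass (hypothetical field and collection
# names; a mongoengine field type is assumed). Such documents must be created
# and queried inside a MultipleDatabaseImageStore context so that
# _get_db_alias() can resolve the current database:
#
#     class Annotation(MultipleDatabaseModelDocument):
#         meta = {'collection': 'annotations'}
#         label = mongoengine.StringField()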
| SlideAtlas/SlideAtlas-Server | slideatlas/models/common/multiple_database_model_document.py | Python | apache-2.0 | 5,469 | 0.002377 |
"""Checks import order rule"""
# pylint: disable=unused-import,relative-import,ungrouped-imports,wrong-import-order
# pylint: disable=import-error, too-few-public-methods, missing-docstring,using-constant-test
import os.path
if True:
from astroid import are_exclusive
try:
import sys
except ImportError:
class Myclass(object):
"""docstring"""
if sys.version_info[0] == 3:
from collections import OrderedDict
else:
class OrderedDict(object):
"""Nothing to see here."""
def some_func(self):
pass
import six # [wrong-import-position]
CONSTANT = True
import datetime # [wrong-import-position]
VAR = 0
for i in range(10):
VAR += i
import scipy # [wrong-import-position]
import astroid # [wrong-import-position]
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/pylint/test/functional/wrong_import_position.py | Python | apache-2.0 | 777 | 0.006435 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2018 Jonathan Peirce
# Distributed under the terms of the GNU General Public License (GPL).
"""Utility functions to support Experiment classes
"""
import re
# this needs to be accessed from __str__ method of Param
scriptTarget = "PsychoPy"
# predefine some regexes; deepcopy complains if done in NameSpace.__init__()
unescapedDollarSign_re = re.compile(r"^\$|[^\\]\$") # detect "code wanted"
valid_var_re = re.compile(r"^[a-zA-Z_][\w]*$") # filter for legal var names
nonalphanumeric_re = re.compile(r'\W') # will match all bad var name chars
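# For example (illustrative): unescapedDollarSign_re.search('$thisTrial.rt')
# matches, so the value would be treated as code, while it finds no match in
# r'\$5.00' because the dollar sign is escaped; valid_var_re.match('trial_1')
# succeeds but valid_var_re.match('1st trial') does not.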
class CodeGenerationException(Exception):
"""
Exception thrown by a component when it is unable to generate its code.
"""
def __init__(self, source, message=""):
super(CodeGenerationException, self).__init__()
self.source = source
self.message = message
def __str__(self):
return "{}: ".format(self.source, self.message)
| hoechenberger/psychopy | psychopy/experiment/utils.py | Python | gpl-3.0 | 1,018 | 0 |