| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
from __future__ import unicode_literals
import unittest
from functools import partial
from beekeeper.variable_handlers import render
import beekeeper.variable_handlers
class VariableReceiver(object):
def execute(self, var_type, **kwargs):
render(self, var_type, **kwargs)
def receive(self, expected, *args, **kwargs):
if isinstance(expected, list):
if kwargs:
self.assertIn(kwargs, expected)
else:
self.assertIn(args[0], expected)
elif kwargs:
self.assertEqual(expected, kwargs)
else:
self.assertEqual(expected, args[0])
class fakeuuid:
def __init__(self):
self.hex = 'xxx'
class VariableHandlerTest(VariableReceiver, unittest.TestCase):
def test_data(self):
self.set_headers = partial(self.receive, {'Content-Type': 'text/plain'})
self.set_data = partial(self.receive, b'this is text')
self.execute('data', variable={'mimetype': 'text/plain', 'value': 'this is text'})
def test_http_auth(self):
self.set_headers = partial(self.receive, {'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ='})
username = dict(value='username')
password = dict(value='password')
self.execute('http_basic_auth', username=username, password=password)
def test_bearer_auth(self):
self.set_headers = partial(self.receive, {'Authorization': 'Bearer PUT_YOUR_TOKEN_HERE'})
var = dict(value='PUT_YOUR_TOKEN_HERE')
self.execute('bearer_token', var=var)
def test_multiple_bearer(self):
self.set_headers = partial(self.receive, {'Authorization': 'Nope'})
with self.assertRaises(Exception):
self.execute('bearer_token', var1='thing', var2='otherthing')
def test_http_form(self):
expected = [
b'y=thing&x=whatever',
b'x=whatever&y=thing'
]
self.set_headers = partial(self.receive, {'Content-Type': 'application/x-www-form-urlencoded'})
self.set_data = partial(self.receive, expected)
var = dict(x={'value':'whatever'}, y={'value':'thing'})
self.execute('http_form', **var)
def test_multipart(self):
self.old_uuid4 = beekeeper.variable_handlers.uuid4
beekeeper.variable_handlers.uuid4 = fakeuuid
should = '\n--xxx\nContent-Disposition: form-data; name="x"\n\nwhatever\n--xxx\nContent-Disposition: form-data; name="y"; filename="thing.name"\nContent-Type: text/plain\n\nplaintexthere\n--xxx--'.encode('utf-8')
othershould = '\n--xxx\nContent-Disposition: form-data; name="y"; filename="thing.name"\nContent-Type: text/plain\n\nplaintexthere\n--xxx\nContent-Disposition: form-data; name="x"\n\nwhatever\n--xxx--'.encode('utf-8')
options = [should, othershould]
self.set_headers = partial(self.receive, {'Content-Type': 'multipart/form-data; boundary=xxx'})
self.set_data = partial(self.receive, options)
var = {'x':{'value': 'whatever'}, 'y':{'value':'plaintexthere', 'mimetype':'text/plain', 'filename':'thing.name'}}
self.execute('multipart', **var)
def test_cookies(self):
expected = [{'Cookie': 'thing1; thing2'}, {'Cookie': 'thing2; thing1'}]
var = {'a': {'value': 'thing1'}, 'b': {'value': 'thing2'}}
self.set_headers = partial(self.receive, expected)
self.execute('cookie', **var)
| haikuginger/beekeeper | test/test_variable_handers.py | Python | mit | 3,412 | 0.005569 |
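The expected Authorization header in test_http_auth above is simply the RFC 7617 Basic scheme applied to the literal credentials 'username' and 'password'. A standalone sanity check (not beekeeper code; the helper name is illustrative):

```python
import base64

def basic_auth_header(username, password):
    # "Basic " + base64("username:password"), per RFC 7617
    token = base64.b64encode(('%s:%s' % (username, password)).encode('utf-8'))
    return {'Authorization': 'Basic ' + token.decode('ascii')}

assert basic_auth_header('username', 'password') == {
    'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ='}
```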
import pytest
import rules
from adhocracy4.projects.enums import Access
from adhocracy4.test.helpers import freeze_phase
from adhocracy4.test.helpers import freeze_post_phase
from adhocracy4.test.helpers import freeze_pre_phase
from adhocracy4.test.helpers import setup_phase
from adhocracy4.test.helpers import setup_users
from meinberlin.apps.ideas import phases
perm_name = 'meinberlin_ideas.view_idea'
def test_perm_exists():
assert rules.perm_exists(perm_name)
@pytest.mark.django_db
def test_pre_phase(phase_factory, idea_factory, user):
phase, _, project, item = setup_phase(phase_factory, idea_factory,
phases.CollectPhase)
anonymous, moderator, initiator = setup_users(project)
assert project.access == Access.PUBLIC
with freeze_pre_phase(phase):
assert rules.has_perm(perm_name, anonymous, item)
assert rules.has_perm(perm_name, user, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
@pytest.mark.django_db
def test_phase_active(phase_factory, idea_factory, user):
phase, _, project, item = setup_phase(phase_factory, idea_factory,
phases.CollectPhase)
anonymous, moderator, initiator = setup_users(project)
assert project.access == Access.PUBLIC
with freeze_phase(phase):
assert rules.has_perm(perm_name, anonymous, item)
assert rules.has_perm(perm_name, user, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
@pytest.mark.django_db
def test_phase_active_project_private(phase_factory, idea_factory,
user, user2):
phase, _, project, item = setup_phase(
phase_factory, idea_factory, phases.CollectPhase,
module__project__access=Access.PRIVATE)
anonymous, moderator, initiator = setup_users(project)
participant = user2
project.participants.add(participant)
assert project.access == Access.PRIVATE
with freeze_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert rules.has_perm(perm_name, participant, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
@pytest.mark.django_db
def test_phase_active_project_semipublic(phase_factory, idea_factory,
user, user2):
phase, _, project, item = setup_phase(
phase_factory, idea_factory, phases.CollectPhase,
module__project__access=Access.SEMIPUBLIC)
anonymous, moderator, initiator = setup_users(project)
participant = user2
project.participants.add(participant)
assert project.access == Access.SEMIPUBLIC
with freeze_phase(phase):
assert rules.has_perm(perm_name, anonymous, item)
assert rules.has_perm(perm_name, user, item)
assert rules.has_perm(perm_name, participant, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
@pytest.mark.django_db
def test_phase_active_project_draft(phase_factory, idea_factory, user):
phase, _, project, item = setup_phase(phase_factory, idea_factory,
phases.CollectPhase,
module__project__is_draft=True)
anonymous, moderator, initiator = setup_users(project)
assert project.is_draft
with freeze_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
@pytest.mark.django_db
def test_post_phase_project_archived(phase_factory, idea_factory, user):
phase, _, project, item = setup_phase(phase_factory, idea_factory,
phases.CollectPhase,
module__project__is_archived=True)
anonymous, moderator, initiator = setup_users(project)
assert project.is_archived
with freeze_post_phase(phase):
assert rules.has_perm(perm_name, anonymous, item)
assert rules.has_perm(perm_name, user, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
| liqd/a4-meinberlin | tests/ideas/rules/test_rules_view.py | Python | agpl-3.0 | 4,529 | 0 |
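The tests above only exercise the 'meinberlin_ideas.view_idea' permission; the rule itself is registered elsewhere in meinberlin. As a rough, hypothetical sketch (not the actual meinberlin rule), a django-rules permission of this shape is typically composed from predicates like so:

```python
import rules
from adhocracy4.projects.enums import Access

@rules.predicate
def project_is_public(user, item):
    # Assumes the item exposes its project, and the project exposes
    # `access` and `is_draft`, matching the fixtures in the tests above.
    project = item.project
    return not project.is_draft and project.access == Access.PUBLIC

# Illustrative permission name; the real one is 'meinberlin_ideas.view_idea'.
rules.add_perm('example_ideas.view_idea', project_is_public | rules.is_superuser)
```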
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import shutil
import time
import sickbeard
from sickbeard import common
from sickbeard import postProcessor
from sickbeard import db, helpers, exceptions
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from sickbeard import logger
def delete_folder(folder, check_empty=True):
# check if it's a folder
if not ek.ek(os.path.isdir, folder):
return False
# check if it isn't TV_DOWNLOAD_DIR
if sickbeard.TV_DOWNLOAD_DIR:
if helpers.real_path(folder) == helpers.real_path(sickbeard.TV_DOWNLOAD_DIR):
return False
    # if asked to, check whether the folder is empty
if check_empty:
check_files = ek.ek(os.listdir, folder)
if check_files:
return False
# try deleting folder
try:
logger.log(u"Deleting folder: " + folder)
shutil.rmtree(folder)
except (OSError, IOError), e:
logger.log(u"Warning: unable to delete folder: " + folder + ": " + ex(e), logger.WARNING)
return False
return True
def logHelper(logMessage, logLevel=logger.MESSAGE):
logger.log(logMessage, logLevel)
return logMessage + u"\n"
def processDir(dirName, nzbName=None, method=None, recurse=False, pp_options={}):
"""
Scans through the files in dirName and processes whatever media files it finds
dirName: The folder name to look in
nzbName: The NZB name which resulted in this folder being downloaded
method: The method of postprocessing: Automatic, Script, Manual
recurse: Boolean for whether we should descend into subfolders or not
"""
returnStr = u""
returnStr += logHelper(u"Processing folder: " + dirName, logger.DEBUG)
# if they passed us a real dir then assume it's the one we want
if ek.ek(os.path.isdir, dirName):
dirName = ek.ek(os.path.realpath, dirName)
# if they've got a download dir configured then use it
elif sickbeard.TV_DOWNLOAD_DIR and ek.ek(os.path.isdir, sickbeard.TV_DOWNLOAD_DIR) \
and ek.ek(os.path.normpath, dirName) != ek.ek(os.path.normpath, sickbeard.TV_DOWNLOAD_DIR):
dirName = ek.ek(os.path.join, sickbeard.TV_DOWNLOAD_DIR, ek.ek(os.path.abspath, dirName).split(os.path.sep)[-1])
returnStr += logHelper(u"Trying to use folder: " + dirName, logger.DEBUG)
# if we didn't find a real dir then quit
if not ek.ek(os.path.isdir, dirName):
returnStr += logHelper(u"Unable to figure out what folder to process. If your downloader and Sick Beard aren't on the same PC make sure you fill out your TV download dir in the config.", logger.DEBUG)
return returnStr
# TODO: check if it's failed and deal with it if it is
if ek.ek(os.path.basename, dirName).startswith('_FAILED_'):
returnStr += logHelper(u"The directory name indicates it failed to extract, cancelling", logger.DEBUG)
return returnStr
elif ek.ek(os.path.basename, dirName).startswith('_UNDERSIZED_'):
returnStr += logHelper(u"The directory name indicates that it was previously rejected for being undersized, cancelling", logger.DEBUG)
return returnStr
elif ek.ek(os.path.basename, dirName).upper().startswith('_UNPACK'):
returnStr += logHelper(u"The directory name indicates that this release is in the process of being unpacked, skipping", logger.DEBUG)
return returnStr
# make sure the dir isn't inside a show dir
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_shows")
for sqlShow in sqlResults:
if dirName.lower().startswith(ek.ek(os.path.realpath, sqlShow["location"]).lower() + os.sep) or dirName.lower() == ek.ek(os.path.realpath, sqlShow["location"]).lower():
returnStr += logHelper(u"You're trying to post process an existing show directory: " + dirName, logger.ERROR)
returnStr += u"\n"
return returnStr
fileList = ek.ek(os.listdir, dirName)
# split the list into video files and folders
folders = filter(lambda x: ek.ek(os.path.isdir, ek.ek(os.path.join, dirName, x)), fileList)
    # video files, sorted by size so the biggest file is processed first; a smaller file with the same name is left behind
mediaFiles = filter(lambda x: ek.ek(os.path.exists, ek.ek(os.path.join, dirName, x)), filter(helpers.isMediaFile, fileList))
videoFiles = sorted(mediaFiles, key=lambda x: ek.ek(os.path.getsize, ek.ek(os.path.join, dirName, x)), reverse=True)
remaining_video_files = list(videoFiles)
num_videoFiles = len(videoFiles)
# if there are no videofiles in parent and only one subfolder, pass the nzbName to child
if num_videoFiles == 0 and len(folders) == 1:
parent_nzbName = nzbName
else:
parent_nzbName = None
# recursively process all the folders
for cur_folder in folders:
returnStr += u"\n"
# use full path
cur_folder = ek.ek(os.path.join, dirName, cur_folder)
if helpers.is_hidden_folder(cur_folder):
returnStr += logHelper(u"Ignoring hidden folder: " + cur_folder, logger.DEBUG)
else:
returnStr += logHelper(u"Recursively processing a folder: " + cur_folder, logger.DEBUG)
returnStr += processDir(cur_folder, nzbName=parent_nzbName, recurse=True, method=method, pp_options=pp_options)
remainingFolders = filter(lambda x: ek.ek(os.path.isdir, ek.ek(os.path.join, dirName, x)), fileList)
if num_videoFiles == 0:
returnStr += u"\n"
returnStr += logHelper(u"There are no videofiles in folder: " + dirName, logger.DEBUG)
        # if there are no video files, try deleting the empty folder
if method != 'Manual':
if delete_folder(dirName, check_empty=True):
returnStr += logHelper(u"Deleted empty folder: " + dirName, logger.DEBUG)
# if there's more than one videofile in the folder, files can be lost (overwritten) when nzbName contains only one episode.
if num_videoFiles >= 2:
nzbName = None
# process any files in the dir
for cur_video_file in videoFiles:
cur_video_file_path = ek.ek(os.path.join, dirName, cur_video_file)
if method == 'Automatic':
# check if we processed this video file before
cur_video_file_path_size = ek.ek(os.path.getsize, cur_video_file_path)
myDB = db.DBConnection()
search_sql = "SELECT tv_episodes.tvdbid, history.resource FROM tv_episodes INNER JOIN history ON history.showid=tv_episodes.showid"
search_sql += " WHERE history.season=tv_episodes.season and history.episode=tv_episodes.episode"
search_sql += " and tv_episodes.status IN (" + ",".join([str(x) for x in common.Quality.DOWNLOADED]) + ")"
search_sql += " and history.resource LIKE ? and tv_episodes.file_size = ?"
sql_results = myDB.select(search_sql, [cur_video_file_path, cur_video_file_path_size])
if len(sql_results):
returnStr += logHelper(u"Ignoring file: " + cur_video_file_path + " looks like it's been processed already", logger.DEBUG)
continue
try:
returnStr += u"\n"
processor = postProcessor.PostProcessor(cur_video_file_path, nzb_name=nzbName, pp_options=pp_options)
process_result = processor.process()
process_fail_message = ""
except exceptions.PostProcessingFailed, e:
process_result = False
process_fail_message = ex(e)
except Exception, e:
process_result = False
process_fail_message = "Post Processor returned unhandled exception: " + ex(e)
returnStr += processor.log
# as long as the postprocessing was successful delete the old folder unless the config wants us not to
if process_result:
remaining_video_files.remove(cur_video_file)
if not sickbeard.KEEP_PROCESSED_DIR and len(remaining_video_files) == 0 and len(remainingFolders) == 0:
if delete_folder(dirName, check_empty=False):
returnStr += logHelper(u"Deleted folder: " + dirName, logger.DEBUG)
returnStr += logHelper(u"Processing succeeded for " + cur_video_file_path)
else:
returnStr += logHelper(u"Processing failed for " + cur_video_file_path + ": " + process_fail_message, logger.WARNING)
return returnStr
| VoiDeD/Sick-Beard | sickbeard/processTV.py | Python | gpl-3.0 | 9,409 | 0.004251 |
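processDir above sorts the candidate video files by size, descending, so the largest file of a release is post-processed first and a smaller file with the same name is left behind. The same ordering in isolation (a standalone sketch; the extension list is a placeholder, not Sick Beard's media-file check):

```python
import os

def media_files_largest_first(dir_name, extensions=('.mkv', '.avi', '.mp4')):
    # keep plain files with a media-looking extension ...
    names = [n for n in os.listdir(dir_name)
             if n.lower().endswith(extensions)
             and os.path.isfile(os.path.join(dir_name, n))]
    # ... and order them by on-disk size, biggest first
    return sorted(names,
                  key=lambda n: os.path.getsize(os.path.join(dir_name, n)),
                  reverse=True)
```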
from ckan.lib.authenticator import UsernamePasswordAuthenticator
from ckan.model import User, Session
from sqlalchemy import Column, types, MetaData, DDL
from sqlalchemy.ext.declarative import declarative_base
from zope.interface import implements
from repoze.who.interfaces import IAuthenticator
Base = declarative_base()
import logging
log = logging.getLogger(__name__)
def intercept_authenticator():
meta = MetaData(bind = Session.get_bind(), reflect = True)
if not 'login_attempts' in meta.tables['user'].columns:
log.warn("'login_attempts' field does not exist, adding...")
DDL("ALTER TABLE public.user ADD COLUMN login_attempts SMALLINT DEFAULT 0").execute(Session.get_bind())
UsernamePasswordAuthenticator.authenticate = QGOVAuthenticator().authenticate
class QGOVAuthenticator(UsernamePasswordAuthenticator):
implements(IAuthenticator)
def authenticate(self, environ, identity):
if not 'login' in identity or not 'password' in identity:
return None
user = User.by_name(identity.get('login'))
if user is None:
log.debug('Login failed - username %r not found', identity.get('login'))
return None
qgovUser = Session.query(QGOVUser).filter_by(name = identity.get('login')).first()
if qgovUser.login_attempts >= 10:
log.debug('Login as %r failed - account is locked', identity.get('login'))
elif user.validate_password(identity.get('password')):
# reset attempt count to 0
qgovUser.login_attempts = 0
Session.commit()
return user.name
else:
log.debug('Login as %r failed - password not valid', identity.get('login'))
qgovUser.login_attempts += 1
Session.commit()
return None
class QGOVUser(Base):
__tablename__ = 'user'
__mapper_args__ = {'include_properties' : ['id', 'name', 'login_attempts']}
id = Column(types.UnicodeText, primary_key=True)
name = Column(types.UnicodeText, nullable=False, unique=True)
login_attempts = Column(types.SmallInteger)
| ThrawnCA/ckanext-qgov | ckanext/qgov/common/authenticator.py | Python | agpl-3.0 | 2,108 | 0.009013 |
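The authenticate override above is a counter-based lockout: the attempt counter resets on success, increments on failure, and once it reaches 10 the account is refused even with a correct password. Stripped of CKAN, SQLAlchemy and repoze.who, the policy reduces to a sketch like this (hypothetical, framework-free):

```python
MAX_ATTEMPTS = 10

def check_login(account, password_is_valid):
    # `account` stands in for the QGOVUser row; it only needs a mutable
    # `login_attempts` attribute.
    if account.login_attempts >= MAX_ATTEMPTS:
        return None                      # locked, even if the password is right
    if password_is_valid:
        account.login_attempts = 0       # success resets the counter
        return 'authenticated'
    account.login_attempts += 1          # each failure moves closer to lockout
    return None
```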
import os
import json
from PySide2 import QtCore
from opencmiss.zinc.context import Context
from opencmiss.zinc.material import Material
from mapclientplugins.meshgeneratorstep.model.meshgeneratormodel import MeshGeneratorModel
from mapclientplugins.meshgeneratorstep.model.meshannotationmodel import MeshAnnotationModel
from mapclientplugins.meshgeneratorstep.model.segmentationdatamodel import SegmentationDataModel
from scaffoldmaker.scaffolds import Scaffolds_decodeJSON, Scaffolds_JSONEncoder
class MasterModel(object):
def __init__(self, location, identifier):
self._location = location
self._identifier = identifier
self._filenameStem = os.path.join(self._location, self._identifier)
self._context = Context("MeshGenerator")
self._timekeeper = self._context.getTimekeepermodule().getDefaultTimekeeper()
self._timer = QtCore.QTimer()
self._current_time = 0.0
self._timeValueUpdate = None
self._frameIndexUpdate = None
self._initialise()
self._region = self._context.createRegion()
self._generator_model = MeshGeneratorModel(self._context, self._region, self._materialmodule)
self._segmentation_data_model = SegmentationDataModel(self._region, self._materialmodule)
self._annotation_model = MeshAnnotationModel()
self._settings = {
'segmentation_data_settings' : self._segmentation_data_model.getSettings()
}
self._makeConnections()
# self._loadSettings()
def printLog(self):
logger = self._context.getLogger()
for index in range(logger.getNumberOfMessages()):
print(logger.getMessageTextAtIndex(index))
def _initialise(self):
self._filenameStem = os.path.join(self._location, self._identifier)
tess = self._context.getTessellationmodule().getDefaultTessellation()
tess.setRefinementFactors(12)
# set up standard materials and glyphs so we can use them elsewhere
self._materialmodule = self._context.getMaterialmodule()
self._materialmodule.defineStandardMaterials()
solid_blue = self._materialmodule.createMaterial()
solid_blue.setName('solid_blue')
solid_blue.setManaged(True)
solid_blue.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [ 0.0, 0.2, 0.6 ])
solid_blue.setAttributeReal3(Material.ATTRIBUTE_DIFFUSE, [ 0.0, 0.7, 1.0 ])
solid_blue.setAttributeReal3(Material.ATTRIBUTE_EMISSION, [ 0.0, 0.0, 0.0 ])
solid_blue.setAttributeReal3(Material.ATTRIBUTE_SPECULAR, [ 0.1, 0.1, 0.1 ])
solid_blue.setAttributeReal(Material.ATTRIBUTE_SHININESS , 0.2)
trans_blue = self._materialmodule.createMaterial()
trans_blue.setName('trans_blue')
trans_blue.setManaged(True)
trans_blue.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [ 0.0, 0.2, 0.6 ])
trans_blue.setAttributeReal3(Material.ATTRIBUTE_DIFFUSE, [ 0.0, 0.7, 1.0 ])
trans_blue.setAttributeReal3(Material.ATTRIBUTE_EMISSION, [ 0.0, 0.0, 0.0 ])
trans_blue.setAttributeReal3(Material.ATTRIBUTE_SPECULAR, [ 0.1, 0.1, 0.1 ])
trans_blue.setAttributeReal(Material.ATTRIBUTE_ALPHA , 0.3)
trans_blue.setAttributeReal(Material.ATTRIBUTE_SHININESS , 0.2)
glyphmodule = self._context.getGlyphmodule()
glyphmodule.defineStandardGlyphs()
def _makeConnections(self):
pass
def getIdentifier(self):
return self._identifier
def getOutputModelFilename(self):
return self._filenameStem + '.exf'
def getGeneratorModel(self):
return self._generator_model
def getMeshAnnotationModel(self):
return self._annotation_model
def getSegmentationDataModel(self):
return self._segmentation_data_model
def getScene(self):
return self._region.getScene()
def getContext(self):
return self._context
def registerSceneChangeCallback(self, sceneChangeCallback):
self._generator_model.registerSceneChangeCallback(sceneChangeCallback)
def done(self):
self._saveSettings()
self._generator_model.done()
self._generator_model.writeModel(self.getOutputModelFilename())
self._generator_model.writeAnnotations(self._filenameStem)
self._generator_model.exportToVtk(self._filenameStem)
def _getSettings(self):
'''
        Ensure the master model settings include the current settings of the sub models.
:return: Master setting dict.
'''
settings = self._settings
settings['generator_settings'] = self._generator_model.getSettings()
settings['segmentation_data_settings'] = self._segmentation_data_model.getSettings()
return settings
def loadSettings(self):
try:
settings = self._settings
with open(self._filenameStem + '-settings.json', 'r') as f:
savedSettings = json.loads(f.read(), object_hook=Scaffolds_decodeJSON)
settings.update(savedSettings)
if not 'generator_settings' in settings:
                    # migrate from the old settings format that predates the 'generator_settings' key
settings = {'generator_settings': settings}
except:
# no settings saved yet, following gets defaults
settings = self._getSettings()
self._generator_model.setSettings(settings['generator_settings'])
self._segmentation_data_model.setSettings(settings['segmentation_data_settings'])
self._annotation_model.setScaffoldTypeByName(self._generator_model.getEditScaffoldTypeName())
self._getSettings()
def _saveSettings(self):
self._generator_model.updateSettingsBeforeWrite()
settings = self._getSettings()
with open(self._filenameStem + '-settings.json', 'w') as f:
f.write(json.dumps(settings, cls=Scaffolds_JSONEncoder, sort_keys=True, indent=4))
def setSegmentationDataFile(self, data_filename):
self._segmentation_data_model.setDataFilename(data_filename)
| rchristie/mapclientplugins.meshgeneratorstep | mapclientplugins/meshgeneratorstep/model/mastermodel.py | Python | apache-2.0 | 6,058 | 0.007098 |
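loadSettings and _saveSettings above round-trip the settings dict through json.dumps/json.loads with a scaffold-aware encoder class and object_hook. The same pattern with a stand-in type (Point, PointEncoder and decode_point are placeholders, not scaffoldmaker APIs):

```python
import json

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

class PointEncoder(json.JSONEncoder):
    def default(self, obj):
        # serialise the custom type as a tagged dict, defer everything else
        if isinstance(obj, Point):
            return {'__point__': True, 'x': obj.x, 'y': obj.y}
        return json.JSONEncoder.default(self, obj)

def decode_point(d):
    # object_hook: rebuild the custom type from its tagged dict
    return Point(d['x'], d['y']) if d.get('__point__') else d

text = json.dumps({'origin': Point(1, 2)}, cls=PointEncoder, sort_keys=True, indent=4)
restored = json.loads(text, object_hook=decode_point)
assert isinstance(restored['origin'], Point)
```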
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
from models import Session, SessionForm, SessionForms, SessionTypes
from models import Speaker, SpeakerForm, SpeakerForms
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
MEMCACHE_FEATURED_SPEAKER_KEY = "FEATURED_SPEAKER"
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
'are nearly sold out: %s')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
CONFERENCE_DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
SESSION_DEFAULTS = {
'highlights': 'To be announced',
'duration': 60,
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
SESSION_POST_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeConferenceKey=messages.StringField(1),
)
SESSIONS_BY_SPEAKER = endpoints.ResourceContainer(
message_types.VoidMessage,
speakerKey=messages.StringField(1),
)
SESSIONS_BY_TYPE = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
type=messages.StringField(2),
)
SESSION_WISH_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeSessionKey=messages.StringField(1),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound Message)
for df in CONFERENCE_DEFAULTS:
if data[df] in (None, []):
data[df] = CONFERENCE_DEFAULTS[df]
setattr(request, df, CONFERENCE_DEFAULTS[df])
# convert dates from strings to Date objects; set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
        # generate the Profile key from the user ID, allocate a Conference ID
        # under that Profile key, and build the Conference key from the ID
p_key = ndb.Key(Profile, user_id)
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email'
)
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conf.key is a Conference key and it exists
self._checkKey(conf.key, request.websafeConferenceKey, 'Conference')
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='PUT', name='updateConference')
def updateConference(self, request):
"""Update conference w/provided fields & return w/updated info."""
return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conf.key is a Conference key and it exists
self._checkKey(conf.key, request.websafeConferenceKey, 'Conference')
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# create ancestor query for all key matches for this user
confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
prof = ndb.Key(Profile, user_id).get()
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
)
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in \
conferences]
)
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifiable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
#if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
#else:
# setattr(prof, field, val)
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = ANNOUNCEMENT_TPL % (
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
# check that conf.key is a Conference key and it exists
self._checkKey(conf.key, request.websafeConferenceKey, 'Conference')
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser() # get user Profile
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])\
for conf in conferences]
)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Unregister user for selected conference."""
return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
"""Filter Playground"""
q = Conference.query()
# field = "city"
# operator = "="
# value = "London"
# f = ndb.query.FilterNode(field, operator, value)
# q = q.filter(f)
q = q.filter(Conference.city=="London")
q = q.filter(Conference.topics=="Medical Innovations")
q = q.filter(Conference.month==6)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q]
)
# - - - Task 1: Add Sessions to a Conference - - - - - - - - - - - - - - - - - - - -
def _ndbKey(self, *args, **kwargs):
# this try except clause is needed for NDB issue 143
# https://code.google.com/p/appengine-ndb-experiment/issues/detail?id=143
try:
key = ndb.Key(**kwargs)
except Exception as e:
if e.__class__.__name__ == 'ProtocolBufferDecodeError':
key = 'Invalid Key'
return key
def _checkKey(self, key, websafeKey, kind):
'''Check that key exists and is the right Kind'''
if key == 'Invalid Key':
raise endpoints.NotFoundException(
'Invalid key: %s' % websafeKey)
if not key:
raise endpoints.NotFoundException(
'No %s found with key: %s' % (kind, websafeKey))
if key.kind() != kind:
raise endpoints.NotFoundException(
'Not a key of the %s Kind: %s' % (kind, websafeKey))
def _copySessionToForm(self, session, name=None):
"""Copy relevant fields from Session to SessionForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(session, field.name):
# convert typeOfSession to enum SessionTypes; just copy others
if field.name == 'typeOfSession':
setattr(sf, field.name, getattr(SessionTypes, str(getattr(session,field.name))))
else:
setattr(sf, field.name, getattr(session,field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, session.key.urlsafe())
elif field.name == "speakerDisplayName":
setattr(sf, field.name, name)
# convert startDateTime from session model to date and startTime for session Form
startDateTime = getattr(session, 'startDateTime')
if startDateTime:
if field.name == 'date':
setattr(sf, field.name, str(startDateTime.date()))
if hasattr(session, 'startDateTime') and field.name == 'startTime':
setattr(sf, field.name, str(startDateTime.time().strftime('%H:%M')))
sf.check_initialized()
return sf
def _createSessionObject(self, request):
"""Create session object."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Session 'name' field required")
# copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeConferenceKey']
del data['websafeKey']
# add default values for those missing (both data model & outbound Message)
for df in SESSION_DEFAULTS:
if data[df] in (None, []):
data[df] = SESSION_DEFAULTS[df]
setattr(request, df, SESSION_DEFAULTS[df])
if data['typeOfSession']==None:
del data['typeOfSession']
else:
data['typeOfSession'] = str(data['typeOfSession'])
# set start time and date to be next available if not specified
# convert dates from strings to Date objects;
if data['startTime'] and data['date']:
data['startDateTime'] = datetime.strptime(data['date'][:10] + ' ' + data['startTime'][:5], "%Y-%m-%d %H:%M")
del data['startTime']
del data['date']
# get the conference for where the session will be added
conf = self._ndbKey(urlsafe=request.websafeConferenceKey).get()
# check that conf.key is a Conference key and it exists
self._checkKey(conf.key, request.websafeConferenceKey, 'Conference')
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# generate Session key as child of Conference
s_id = Session.allocate_ids(size=1, parent=conf.key)[0]
s_key = ndb.Key(Session, s_id, parent=conf.key)
data['key'] = s_key
# get the speakerDisplayName from Speaker entity if a speakerKey was provided
if data['speakerKey']:
speaker = self._ndbKey(urlsafe=request.speakerKey).get()
# check that speaker.key is a speaker key and it exists
self._checkKey(speaker.key, request.speakerKey, 'Speaker')
data['speakerDisplayName'] = speaker.displayName
# - - - Task 4: Add a Task - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # The task will check whether this speaker has more than one session at this
            # conference and, if so, add a Memcache entry featuring the speaker and session names.
taskqueue.add(
params={
'sessionKey': s_key.urlsafe(),
'speakerKey': data['speakerKey'],
'speakerDisplayName': data['speakerDisplayName'],
'confKey' : conf.key.urlsafe()
},
url='/tasks/check_featuredSpeaker'
)
# - - - End Task 4 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# create Session
s = Session(**data)
s.put()
return self._copySessionToForm(s)
#createSession(SessionForm, websafeConferenceKey) -- open only to the organizer of the conference
@endpoints.method(SESSION_POST_REQUEST, SessionForm,
path='conference/{websafeConferenceKey}/sessions',
http_method='POST', name='createSession')
def createSession(self, request):
"""Create a new session for a conference. Open only to the organizer of the conference"""
return self._createSessionObject(request)
#getConferenceSessions(websafeConferenceKey) -- Given a conference, return all sessions
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Get list of all sessions for a conference."""
c_key = self._ndbKey(urlsafe=request.websafeConferenceKey)
# check that c_key is a Conference key and it exists
self._checkKey(c_key, request.websafeConferenceKey, 'Conference')
sessions = Session.query(ancestor=c_key)
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
#getConferenceSessionsByType(websafeConferenceKey, typeOfSession) Given a conference, return all sessions of a specified type (eg lecture, keynote, workshop)
@endpoints.method(SESSIONS_BY_TYPE, SessionForms,
path='conference/{websafeConferenceKey}/sessions/{type}',
http_method='GET', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Get list of all sessions for a conference by type."""
c_key = self._ndbKey(urlsafe=request.websafeConferenceKey)
# check that c_key is a Conference key and it exists
self._checkKey(c_key, request.websafeConferenceKey, 'Conference')
sessions = Session.query(ancestor=c_key).filter(Session.typeOfSession==str(getattr(SessionTypes, request.type)))
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
#getSessionsBySpeaker(speaker) -- Given a speaker, return all sessions given by this particular speaker, across all conferences
@endpoints.method(SESSIONS_BY_SPEAKER, SessionForms,
path='sessions/bySpeaker',
http_method='GET', name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Get list of all sessions for a speaker accross all conferences.
If no speakerKey is provided, all sessions are returned"""
sessions = Session.query()
if request.speakerKey:
sessions = sessions.filter(Session.speakerKey==request.speakerKey)
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
# - - - Task 1: Speaker entity creation - - - - - - - - - - - - - - - - - - - -
def _copySpeakerToForm(self, speaker):
"""Copy relevant fields from Speaker to SpeakerForm."""
sf = SpeakerForm()
for field in sf.all_fields():
if hasattr(speaker, field.name):
setattr(sf, field.name, getattr(speaker,field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, speaker.key.urlsafe())
sf.check_initialized()
return sf
def _createSpeakerObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.displayName:
raise endpoints.BadRequestException("Speaker 'diplayName' field required")
# copy SpeakerForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
# generate Speaker key
sp_id = Speaker.allocate_ids(size=1)[0]
sp_key = ndb.Key(Speaker, sp_id)
data['key'] = sp_key
# create Speaker
sp = Speaker(**data)
sp.put()
return self._copySpeakerToForm(sp)
@endpoints.method(SpeakerForm, SpeakerForm,
path='speaker',
http_method='POST', name='addSpeaker')
def addSpeaker(self, request):
"""Create a new speaker. Anyone can add a speaker, speaker does not need to be a user"""
return self._createSpeakerObject(request)
# - - - Task 2: Add Sessions to User Wishlist - - - - - - - - - - - - - - - - - - - -
#addSessionToWishlist(SessionKey) -- adds the session to the user's list of sessions they are interested in attending
@ndb.transactional(xg=True)
def _sessionAddIt(self, request):
"""Add a session to the user Profile session wish list."""
prof = self._getProfileFromUser() # get user Profile
# get session;
wssk = request.websafeSessionKey
s_key = ndb.Key(urlsafe=wssk)
# check that session is a Session key and it exists
self._checkKey(s_key, wssk, 'Session')
# check if user already added session otherwise add
if wssk in prof.sessionKeysWishList:
raise ConflictException(
"This session is already in your wishlist")
# add the session to the users session wish list
prof.sessionKeysWishList.append(wssk)
# write Profile back to the datastore & return
prof.put()
return BooleanMessage(data=True)
@endpoints.method(SESSION_WISH_REQUEST, BooleanMessage,
path='sessions/wishList/{websafeSessionKey}',
http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
"""Add a session to the user wishlist."""
return self._sessionAddIt(request)
#getSessionsInWishlist() -- query for all the sessions in a conference that the user is interested in
@endpoints.method(message_types.VoidMessage, SessionForms,
path='sessions/wishList',
http_method='GET', name='getSessionsInWishlist')
def getSessionsInWishlist(self, request):
"""Get list of sesions that user wishes to attend."""
prof = self._getProfileFromUser() # get user Profile
session_keys = [ndb.Key(urlsafe=wssk) for wssk in prof.sessionKeysWishList]
sessions = ndb.get_multi(session_keys)
# get speakers
speakerKeys = [ndb.Key(urlsafe=session.speakerKey) for session in sessions]
speakers = ndb.get_multi(speakerKeys)
# put display names in a dict for easier fetching
names = {}
for speaker in speakers:
names[speaker.key.id()] = speaker.displayName
# return set of SessionForm objects per Session
return SessionForms(items=[self._copySessionToForm(session, names[speaker.key.id()])\
for session in sessions]
)
# - - - Task 3: Come up with 2 additional queries - - - - - - - - - - - - - - - - - - - - -
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/incomplete',
http_method='GET', name='getIncompleteConferences')
def getIncompleteConferences(self, request):
"""Get list of all conferences that need additional information"""
q = Conference.query(ndb.OR(
Conference.description==None,
Conference.startDate==None,
Conference.endDate==None))
items = [self._copyConferenceToForm(conf, getattr(conf.key.parent().get(), 'displayName')) for conf in q]
return ConferenceForms(items=items)
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/incompleteSessions',
http_method='GET', name='getIncompleteConferenceSessions')
def getIncompleteConferenceSessions(self, request):
"""Get list of all sessions for a conference that have incomplete information."""
c_key = self._ndbKey(urlsafe=request.websafeConferenceKey)
# check that c_key is a Conference key and it exists
self._checkKey(c_key, request.websafeConferenceKey, 'Conference')
sessions = Session.query(ndb.OR(
Session.highlights=='To be announced',
Session.speakerKey==None,
Session.typeOfSession=='TBA'), ancestor=c_key)
return SessionForms(items=[self._copySessionToForm(session) for session in sessions])
@endpoints.method(message_types.VoidMessage, SpeakerForms,
path='speakers',
http_method='GET', name='getSpeakers')
def getSpeakers(self, request):
"""Get list of all speakers"""
speakers = Speaker.query()
return SpeakerForms(items=[self._copySpeakerToForm(speaker) for speaker in speakers])
# - - - Task 3: Work on indexes and queries - - - - - - - - - - - - - - - - - - - - -
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/NotWorkshopSessionsBefore7pm',
http_method='GET', name='getNotWorkshopSessionsBefore7pm')
def getNotWorkshopSessionsBefore7pm(self, request):
"""Returns all conference non-workshop sessions before 7pm."""
c_key = self._ndbKey(urlsafe=request.websafeConferenceKey)
# check that c_key is a Conference key and it exists
self._checkKey(c_key, request.websafeConferenceKey, 'Conference')
sessions = Session.query(ndb.AND(
Session.typeOfSession!='WORKSHOP',
Session.typeOfSession!='TBA'), ancestor=c_key)
#Fix for BadRequestError: Only one inequality filter per query is supported. Encountered both typeOfSession and startDateTime
items = []
for session in sessions:
if session.startDateTime and \
session.startDateTime.hour + session.startDateTime.minute/60.0 <= 19:
items += [self._copySessionToForm(session)]
return SessionForms(items=items)
# - - - Task 4: Featured Speaker get handler - - - - - - - - - - - - - - - - - - - -
@endpoints.method(message_types.VoidMessage, StringMessage,
path='featuredSpeaker',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Return Announcement from memcache."""
return StringMessage(data=memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY) or "")
api = endpoints.api_server([ConferenceApi]) # register API
| lbarahona/UdacityProject4 | conference.py | Python | apache-2.0 | 37,472 | 0.005444 |
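getNotWorkshopSessionsBefore7pm above works around the Datastore restriction that a single query may apply inequality filters to only one property: the session-type inequality runs in the ndb query, while the before-7pm comparison runs in Python. The same split, framework-free (session objects here are stand-ins with typeOfSession and startDateTime attributes):

```python
def non_workshop_sessions_before(sessions, hour=19):
    # first inequality: handled by the (simulated) datastore query
    candidates = [s for s in sessions
                  if s.typeOfSession not in ('WORKSHOP', 'TBA')]
    # second inequality: applied in application code instead of the query
    return [s for s in candidates
            if s.startDateTime is not None
            and s.startDateTime.hour + s.startDateTime.minute / 60.0 <= hour]
```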
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import mox
import testtools
from oslo.config import cfg
from heat.tests import fakes
from heat.tests import generic_resource
from heat.tests.common import HeatTestCase
from heat.tests import utils
from heat.common import exception
from heat.common import template_format
from heat.openstack.common.importutils import try_import
from heat.engine import clients
from heat.engine import parser
from heat.engine import resource
from heat.engine import scheduler
from heat.engine.properties import schemata
from heat.engine.resources.ceilometer import alarm
ceilometerclient = try_import('ceilometerclient.v2')
alarm_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Alarm Test",
"Parameters" : {},
"Resources" : {
"MEMAlarmHigh": {
"Type": "OS::Ceilometer::Alarm",
"Properties": {
"description": "Scale-up if MEM > 50% for 1 minute",
"meter_name": "MemoryUtilization",
"statistic": "avg",
"period": "60",
"evaluation_periods": "1",
"threshold": "50",
"alarm_actions": [],
"matching_metadata": {},
"comparison_operator": "gt"
}
},
"signal_handler" : {
"Type" : "SignalResourceType"
}
}
}
'''
not_string_alarm_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Alarm Test",
"Parameters" : {},
"Resources" : {
"MEMAlarmHigh": {
"Type": "OS::Ceilometer::Alarm",
"Properties": {
"description": "Scale-up if MEM > 50% for 1 minute",
"meter_name": "MemoryUtilization",
"statistic": "avg",
"period": 60,
"evaluation_periods": 1,
"threshold": 50,
"alarm_actions": [],
"matching_metadata": {},
"comparison_operator": "gt"
}
},
"signal_handler" : {
"Type" : "SignalResourceType"
}
}
}
'''
combination_alarm_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Combination Alarm Test",
"Resources" : {
"CombinAlarm": {
"Type": "OS::Ceilometer::CombinationAlarm",
"Properties": {
"description": "Do stuff in combination",
"alarm_ids": ["alarm1", "alarm2"],
"operator": "and",
"alarm_actions": [],
}
}
}
}
'''
class FakeCeilometerAlarm(object):
alarm_id = 'foo'
class FakeCeilometerAlarms(object):
def create(self, **kwargs):
pass
def update(self, **kwargs):
pass
def delete(self, alarm_id):
pass
class FakeCeilometerClient(object):
alarms = FakeCeilometerAlarms()
@testtools.skipIf(ceilometerclient is None, 'ceilometerclient unavailable')
class CeilometerAlarmTest(HeatTestCase):
def setUp(self):
super(CeilometerAlarmTest, self).setUp()
utils.setup_dummy_db()
resource._register_class('SignalResourceType',
generic_resource.SignalResource)
cfg.CONF.set_default('heat_waitcondition_server_url',
'http://server.test:8000/v1/waitcondition')
self.fc = fakes.FakeKeystoneClient()
self.fa = FakeCeilometerClient()
# Note tests creating a stack should be decorated with @stack_delete_after
# to ensure the stack is properly cleaned up
def create_stack(self, template=None):
if template is None:
template = alarm_template
temp = template_format.parse(template)
template = parser.Template(temp)
ctx = utils.dummy_context()
ctx.tenant_id = 'test_tenant'
stack = parser.Stack(ctx, utils.random_name(), template,
disable_rollback=True)
stack.store()
self.m.StubOutWithMock(resource.Resource, 'keystone')
resource.Resource.keystone().MultipleTimes().AndReturn(
self.fc)
self.m.StubOutWithMock(alarm.CeilometerAlarm, 'ceilometer')
alarm.CeilometerAlarm.ceilometer().MultipleTimes().AndReturn(
self.fa)
al = copy.deepcopy(temp['Resources']['MEMAlarmHigh']['Properties'])
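        # the engine transforms these properties before calling the client
        # (e.g. it generates the alarm name), so the mocked create() accepts
        # any value for them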
al['description'] = mox.IgnoreArg()
al['name'] = mox.IgnoreArg()
al['alarm_actions'] = mox.IgnoreArg()
self.m.StubOutWithMock(self.fa.alarms, 'create')
self.fa.alarms.create(**al).AndReturn(FakeCeilometerAlarm())
return stack
@utils.stack_delete_after
def test_mem_alarm_high_update_no_replace(self):
'''
Make sure that we can change the update-able properties
without replacing the Alarm rsrc.
'''
        # short-circuit the alarm's references
t = template_format.parse(alarm_template)
properties = t['Resources']['MEMAlarmHigh']['Properties']
properties['alarm_actions'] = ['signal_handler']
properties['matching_metadata'] = {'a': 'v'}
self.stack = self.create_stack(template=json.dumps(t))
self.m.StubOutWithMock(self.fa.alarms, 'update')
schema = schemata(alarm.CeilometerAlarm.properties_schema)
al2 = dict((k, mox.IgnoreArg())
for k, s in schema.items() if s.update_allowed)
al2['alarm_id'] = mox.IgnoreArg()
self.fa.alarms.update(**al2).AndReturn(None)
self.m.ReplayAll()
self.stack.create()
rsrc = self.stack['MEMAlarmHigh']
snippet = copy.deepcopy(rsrc.parsed_template())
snippet['Properties']['comparison_operator'] = 'lt'
snippet['Properties']['description'] = 'fruity'
snippet['Properties']['evaluation_periods'] = '2'
snippet['Properties']['period'] = '90'
snippet['Properties']['enabled'] = 'true'
snippet['Properties']['repeat_actions'] = True
snippet['Properties']['statistic'] = 'max'
snippet['Properties']['threshold'] = '39'
snippet['Properties']['insufficient_data_actions'] = []
snippet['Properties']['alarm_actions'] = []
snippet['Properties']['ok_actions'] = ['signal_handler']
scheduler.TaskRunner(rsrc.update, snippet)()
self.m.VerifyAll()
@utils.stack_delete_after
def test_mem_alarm_high_update_replace(self):
'''
Make sure that the Alarm resource IS replaced when non-update-able
properties are changed.
'''
t = template_format.parse(alarm_template)
properties = t['Resources']['MEMAlarmHigh']['Properties']
properties['alarm_actions'] = ['signal_handler']
properties['matching_metadata'] = {'a': 'v'}
self.stack = self.create_stack(template=json.dumps(t))
self.m.ReplayAll()
self.stack.create()
rsrc = self.stack['MEMAlarmHigh']
snippet = copy.deepcopy(rsrc.parsed_template())
snippet['Properties']['meter_name'] = 'temp'
updater = scheduler.TaskRunner(rsrc.update, snippet)
self.assertRaises(resource.UpdateReplace, updater)
self.m.VerifyAll()
@utils.stack_delete_after
def test_mem_alarm_suspend_resume(self):
"""
Make sure that the Alarm resource gets disabled on suspend
and reenabled on resume.
"""
self.stack = self.create_stack()
self.m.StubOutWithMock(self.fa.alarms, 'update')
al_suspend = {'alarm_id': mox.IgnoreArg(),
'enabled': False}
self.fa.alarms.update(**al_suspend).AndReturn(None)
al_resume = {'alarm_id': mox.IgnoreArg(),
'enabled': True}
self.fa.alarms.update(**al_resume).AndReturn(None)
self.m.ReplayAll()
self.stack.create()
rsrc = self.stack['MEMAlarmHigh']
scheduler.TaskRunner(rsrc.suspend)()
self.assertEqual((rsrc.SUSPEND, rsrc.COMPLETE), rsrc.state)
scheduler.TaskRunner(rsrc.resume)()
self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
@utils.stack_delete_after
def test_mem_alarm_high_correct_int_parameters(self):
self.stack = self.create_stack(not_string_alarm_template)
self.m.ReplayAll()
self.stack.create()
rsrc = self.stack['MEMAlarmHigh']
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertIsNone(rsrc.validate())
self.assertIsInstance(rsrc.properties['evaluation_periods'], int)
self.assertIsInstance(rsrc.properties['period'], int)
self.assertIsInstance(rsrc.properties['threshold'], int)
self.m.VerifyAll()
def test_mem_alarm_high_not_correct_string_parameters(self):
snippet = template_format.parse(not_string_alarm_template)
for p in ('period', 'evaluation_periods'):
snippet['Resources']['MEMAlarmHigh']['Properties'][p] = '60a'
stack = utils.parse_stack(snippet)
rsrc = alarm.CeilometerAlarm(
'MEMAlarmHigh', snippet['Resources']['MEMAlarmHigh'], stack)
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual(
"Property error : MEMAlarmHigh: %s Value '60a' is not an "
"integer" % p, str(error))
def test_mem_alarm_high_not_integer_parameters(self):
snippet = template_format.parse(not_string_alarm_template)
for p in ('period', 'evaluation_periods'):
snippet['Resources']['MEMAlarmHigh']['Properties'][p] = [60]
stack = utils.parse_stack(snippet)
rsrc = alarm.CeilometerAlarm(
'MEMAlarmHigh', snippet['Resources']['MEMAlarmHigh'], stack)
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual(
"Property error : MEMAlarmHigh: %s int() argument must be "
"a string or a number, not 'list'" % p, str(error))
def test_mem_alarm_high_check_not_required_parameters(self):
snippet = template_format.parse(not_string_alarm_template)
snippet['Resources']['MEMAlarmHigh']['Properties'].pop('meter_name')
stack = utils.parse_stack(snippet)
rsrc = alarm.CeilometerAlarm(
'MEMAlarmHigh', snippet['Resources']['MEMAlarmHigh'], stack)
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual(
"Property error : MEMAlarmHigh: Property meter_name not assigned",
str(error))
for p in ('period', 'evaluation_periods', 'statistic',
'comparison_operator'):
snippet = template_format.parse(not_string_alarm_template)
snippet['Resources']['MEMAlarmHigh']['Properties'].pop(p)
stack = utils.parse_stack(snippet)
rsrc = alarm.CeilometerAlarm(
'MEMAlarmHigh', snippet['Resources']['MEMAlarmHigh'], stack)
self.assertIsNone(rsrc.validate())
@testtools.skipIf(ceilometerclient is None, 'ceilometerclient unavailable')
class CombinationAlarmTest(HeatTestCase):
def setUp(self):
super(CombinationAlarmTest, self).setUp()
self.fc = FakeCeilometerClient()
self.m.StubOutWithMock(clients.OpenStackClients, 'ceilometer')
utils.setup_dummy_db()
def create_alarm(self):
clients.OpenStackClients.ceilometer().MultipleTimes().AndReturn(
self.fc)
self.m.StubOutWithMock(self.fc.alarms, 'create')
self.fc.alarms.create(
alarm_actions=[],
description=u'Do stuff in combination',
name=mox.IgnoreArg(), type='combination',
combination_rule={'alarm_ids': [u'alarm1', u'alarm2'],
'operator': u'and'}
).AndReturn(FakeCeilometerAlarm())
snippet = template_format.parse(combination_alarm_template)
stack = utils.parse_stack(snippet)
return alarm.CombinationAlarm(
'CombinAlarm', snippet['Resources']['CombinAlarm'], stack)
def test_create(self):
rsrc = self.create_alarm()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('foo', rsrc.resource_id)
self.m.VerifyAll()
def test_invalid_alarm_list(self):
snippet = template_format.parse(combination_alarm_template)
snippet['Resources']['CombinAlarm']['Properties']['alarm_ids'] = []
stack = utils.parse_stack(snippet)
rsrc = alarm.CombinationAlarm(
'CombinAlarm', snippet['Resources']['CombinAlarm'], stack)
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual(
"Property error : CombinAlarm: alarm_ids length (0) is out of "
"range (min: 1, max: None)", str(error))
def test_update(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarms, 'update')
self.fc.alarms.update(
alarm_id='foo',
combination_rule={'alarm_ids': [u'alarm1', u'alarm3']})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['alarm_ids'] = ['alarm1', 'alarm3']
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_suspend(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarms, 'update')
self.fc.alarms.update(alarm_id='foo', enabled=False)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.suspend)()
self.assertEqual((rsrc.SUSPEND, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_resume(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarms, 'update')
self.fc.alarms.update(alarm_id='foo', enabled=True)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
scheduler.TaskRunner(rsrc.resume)()
self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarms, 'delete')
self.fc.alarms.delete('foo')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
| ntt-sic/heat | heat/tests/test_ceilometer_alarm.py | Python | apache-2.0 | 15,354 | 0.000065 |
"""
Motion correction of image sequences by 'efficient subpixel image registration
by cross correlation'. A reference image is iteratively computed by aligning
and averaging a subset of images/frames.
2015 Lloyd Russell, Christoph Schmidt-Hieber
*******************************************************************************
Credit to Marius Pachitariu for concept of registering to aligned mean image.
Credit to Olivier Dupont-Therrien, Doric Lenses Inc., for concept of applying
Gaussian blur & Laplacian to eliminate static inhomogeneities.
Parts of the code are based on:
skimage.feature.register_translation, which is a port of MATLAB code by Manuel
Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup, "Efficient subpixel
image registration algorithms," Optics Letters 33, 156-158 (2008).
Relating to implementation of skimage.feature.register_translation:
Copyright (C) 2011, the scikit-image team
All rights reserved.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
*******************************************************************************
@author: llerussell
"""
from __future__ import absolute_import, division
from builtins import map, range
from functools import partial
import multiprocessing
import numpy as np
from scipy.ndimage.interpolation import shift
from scipy.ndimage import laplace
from scipy.ndimage import gaussian_filter
import time
from . import motion
try:
from pyfftw.interfaces.numpy_fft import fftn, ifftn
except ImportError:
from numpy.fft import fftn, ifftn
class DiscreteFourier2D(motion.MotionEstimationStrategy):
"""
Motion correction of image sequences by 'efficient subpixel image
registration by cross correlation'. A reference image is iteratively
computed by aligning and averaging a subset of images/frames.
Parameters
----------
upsample_factor : int, optional
upsample factor. final pixel alignment has resolution of
1/upsample_factor. if 1 only pixel level shifts are made - faster -
and no interpolation. Default: 1.
max_displacement : array of int, optional
The maximum allowed displacement magnitudes in [y,x]. Default: None.
num_images_for_mean : int, optional
number of images to use to make the aligned mean image. Default: 100.
randomise_frames : bool, optional
randomise the images selected to make the mean image? if false the
first 'num_frames_for_mean' frames will be used. Default: True.
err_thresh : float, optional
the threshold of mean pixel offset at which to stop aligning the mean
image. Default: 0.01.
max_iterations : int, optional
the maximum number of iterations to compute the aligned mean image.
Default: 5.
rotation_scaling : bool, optional
not yet implemented. Default: False.
save_name : string, optional
the file name for saving the final registered array of images to disk
from within method. If None or 'none', the array will not be saved.
Default: None.
save_fmt : string, optional
the tiff format to save as. options include 'mptiff', 'bigtiff',
'singles'. Default: 'mptiff'.
n_processes : int, optional
number of workers to use (multiprocessing). Default: 1.
verbose : bool, optional
enable verbose mode. Default: False.
return_registered : bool, optional
return registered frames? Default: False.
laplace : float, optional
Sigma of Gaussian. If positive, apply Gaussian blur & laplacian to all
images before computing the cross correlation. This step is useful to
eliminate static inhomogeneities (such as vignetting) from images.
Typical use case includes single-photon widefield microendoscope imaging
through a GRIN lens. Default: 0.0
References
----------
Parts of the code are based on:
skimage.feature.register_translation, which is a port of MATLAB code
by Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms," Optics Letters 33,
156-158 (2008).
"""
def __init__(self, upsample_factor=1, max_displacement=None,
num_images_for_mean=100,
randomise_frames=True, err_thresh=0.01, max_iterations=5,
rotation_scaling=False, save_fmt='mptiff', save_name=None,
n_processes=1, verbose=False, return_registered=False,
laplace=0.0):
self._params = dict(locals())
del self._params['self']
def _estimate(self, dataset):
"""
Parameters
----------
Returns
-------
displacements : array
(2, num_frames*num_cycles)-array of integers giving the
estimated displacement of each frame
"""
params = self._params
verbose = params['verbose']
n_processes = params['n_processes']
if verbose:
print('Using ' + str(n_processes) + ' worker(s)')
displacements = []
for sequence in dataset:
num_planes = sequence.shape[1]
num_channels = sequence.shape[4]
            if num_channels > 1:
                raise NotImplementedError(
                    "Error: only one colour channel can be used for DFT "
                    "motion correction.")
for plane_idx in range(num_planes):
# load into memory... need to pass numpy array to dftreg.
# could(should?) rework it to instead accept tiff array
if verbose:
print('Loading plane ' + str(plane_idx + 1) + ' of ' +
str(num_planes) + ' into numpy array')
t0 = time.time()
# reshape, one plane at a time
frames = np.array(sequence[:, plane_idx, :, :, 0])
frames = np.squeeze(frames)
e1 = time.time() - t0
if verbose:
print(' Loaded in: ' + str(e1) + ' s')
# do the registering
# registered_frames return is useless, sima later uses the
# displacements to shift the image (apply_displacements in
# sima/sequence.py: _align method of _MotionCorrectedSequence
# class) but this shifting is only pixel-level, much better
# results if sub-pixel were possible - replace sima's way of
# shifting? this may run into problems when sima then crops the
# final image so no empty rows/columns at edge of any frame in
# the video (trim_criterion)
if params['laplace'] > 0:
framesl = np.array([
np.abs(laplace(gaussian_filter(frame, params['laplace'])))
for frame in frames])
else:
framesl = frames
output = _register(
framesl,
upsample_factor=params['upsample_factor'],
max_displacement=params['max_displacement'],
num_images_for_mean=params['num_images_for_mean'],
randomise_frames=params['randomise_frames'],
err_thresh=params['err_thresh'],
max_iterations=params['max_iterations'],
n_processes=params['n_processes'],
save_fmt=params['save_fmt'],
save_name=params['save_name'],
verbose=params['verbose'],
return_registered=params['return_registered'])
# sort results
if params['return_registered']:
dy, dx, registered_frames = output
else:
dy, dx = output
# get results into a shape sima likes
frame_shifts = np.zeros([len(frames), num_planes, 2])
for idx, frame in enumerate(sequence):
frame_shifts[idx, plane_idx] = [dy[idx], dx[idx]]
displacements.append(frame_shifts)
total_time = time.time() - t0
if verbose:
print(' Total time for plane ' + str(plane_idx + 1) + ': ' +
str(total_time) + ' s')
return displacements
def _register(frames, upsample_factor=1, max_displacement=None,
num_images_for_mean=100, randomise_frames=True, err_thresh=0.01,
max_iterations=5, rotation_scaling=False, save_fmt='mptiff',
save_name=None, n_processes=1, verbose=False,
return_registered=False):
"""
Master function. Make aligned mean image. Register each frame in input
array to aligned mean image.
Parameters
----------
frames : np.ndarray
the frames to align (shape: frames, 1, rows, columns)
    upsample_factor : int, optional
upsample factor. final pixel alignment has resolution of
1/upsample_factor. if 1 only pixel level shifts are made - faster -
and no interpolation. Default: 1.
num_images_for_mean : int, optional
number of images to use to make the aligned mean image. Default: 100.
randomise_frames : bool, optional
randomise the images selected to make the mean image? if false the
first 'num_frames_for_mean' frames will be used. Default: True.
err_thresh : float, optional
the threshold of mean pixel offset at which to stop aligning the mean
image. Default: 0.01.
max_iterations : int, optional
the maximum number of iterations to compute the aligned mean image.
Default: 5.
rotation_scaling : bool, optional
not yet implemented. Default: False.
save_name : string, optional
the file name for saving the final registered array of images to disk
from within method. If None or 'none', the array will not be saved.
Default: None.
save_fmt : string, optional
the tiff format to save as. options include 'mptiff', 'bigtiff',
'singles'. Default: 'mptiff'
n_processes : int, optional
number of workers to use (multiprocessing). Default: 1.
verbose : bool, optional
enable verbose mode. Default: False.
return_registered : bool, optional
return registered frames? Default: False.
Returns
-------
    dy : float array
        vertical pixel offsets. shift the target image by this amount to align
        with reference
    dx : float array
        horizontal pixel offsets. shift the target image by this amount to
        align with reference
registered_frames : np.ndarray
the aligned frames
"""
# start timer
t0 = time.time()
# make a mean image
mean_img = _make_mean_img(frames,
num_images_for_mean=num_images_for_mean,
randomise_frames=randomise_frames,
err_thresh=err_thresh,
max_iterations=max_iterations,
upsample_factor=upsample_factor,
n_processes=n_processes,
max_displacement=max_displacement,
verbose=verbose)
e1 = time.time() - t0
if verbose:
print(' Time taken: ' + str(e1) + ' s')
# register all frames
output = _register_all_frames(frames, mean_img,
upsample_factor=upsample_factor,
n_processes=n_processes,
max_displacement=max_displacement,
verbose=verbose,
return_registered=return_registered)
# sort results
if return_registered:
dy, dx, registered_frames = output
else:
dy, dx = output
e2 = time.time() - t0 - e1
if verbose:
print(' Time taken: ' + str(e2) + ' s')
# save?
if return_registered:
if save_name is not None and save_name != 'none':
_save_registered_frames(registered_frames, save_name, save_fmt,
verbose=verbose)
e3 = time.time() - t0 - e1 - e2
if verbose:
print(' Time taken: ' + str(e3) + ' s')
total_time = time.time() - t0
if verbose:
print(' Completed in: ' + str(total_time) + ' s')
if return_registered:
return dy, dx, registered_frames
else:
return dy, dx
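# Minimal usage sketch for the module-level _register helper (illustrative
# only; the synthetic frames below are an assumption, not part of this module):
#
#     import numpy as np
#     frames = np.random.randint(0, 4096, size=(50, 128, 128)).astype(np.uint16)
#     dy, dx = _register(frames, upsample_factor=10, n_processes=1)
#     # dy[i], dx[i] give the shift that aligns frame i to the aligned mean image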
def _make_mean_img(frames, num_images_for_mean=100, randomise_frames=True,
err_thresh=0.01, max_iterations=5, upsample_factor=1,
n_processes=1, max_displacement=None, verbose=False):
"""
Make an aligned mean image to use as reference to which all frames are
later aligned.
Parameters
----------
frames : np.ndarray
the frames to align (shape: frames, 1, rows, columns)
num_images_for_mean : int, optional
how many images are used to make the mean reference image.
Default: 100.
randomise_frames : bool, optional
randomise the frames used to make the mean image? If False the first
        N images are used. Default: True.
err_thresh : float, optional
the threshold of mean pixel offset at which to stop aligning the mean
image. Default: 0.01.
max_iterations : int, optional
number of maximum iterations, if error threshold is never met
Default: 5.
n_processes : int, optional
number of processes to work on the registration in parallel
Default: 1
Returns
-------
mean_img : np.ndarray (size of input images)
the final aligned mean image
"""
input_shape = frames.shape
input_dtype = np.array(frames[0]).dtype
if num_images_for_mean > input_shape[0]:
num_images_for_mean = input_shape[0]
frames_for_mean = np.zeros([num_images_for_mean, input_shape[1],
input_shape[2]], dtype=input_dtype)
if randomise_frames:
if verbose:
print(' Making aligned mean image from ' +
str(num_images_for_mean) + ' random frames...')
for idx, frame_num in enumerate(np.random.choice(input_shape[0],
size=num_images_for_mean,
replace=False)):
frames_for_mean[idx] = frames[frame_num]
else:
if verbose:
print(' Making aligned mean image from first ' +
str(num_images_for_mean) + ' frames...')
frames_for_mean = frames[0:num_images_for_mean]
mean_img = np.mean(frames_for_mean, 0)
iteration = 1
mean_img_err = 9999
while mean_img_err > err_thresh and iteration < max_iterations:
map_function = partial(_register_frame, mean_img=mean_img,
upsample_factor=upsample_factor,
max_displacement=max_displacement,
return_registered=True)
if n_processes > 1:
# configure pool of workers (multiprocessing)
pool = multiprocessing.Pool(n_processes)
results = pool.map(map_function, frames_for_mean)
pool.close()
else:
results = map(map_function, frames_for_mean)
# preallocate the results array
mean_img_dx = np.zeros(num_images_for_mean, dtype=np.float)
mean_img_dy = np.zeros(num_images_for_mean, dtype=np.float)
# get results (0: dy, 1: dx, 2: registered image)
for idx, result in enumerate(results):
mean_img_dy[idx] = result[0]
mean_img_dx[idx] = result[1]
frames_for_mean[idx] = result[2]
# make the new (improved) mean image
mean_img = np.mean(frames_for_mean, 0)
mean_img_err = np.mean(
np.absolute(mean_img_dx)) + np.mean(np.absolute(mean_img_dy))
if verbose:
print(' Iteration ' + str(iteration) +
', average error: ' + str(mean_img_err) + ' pixels')
iteration += 1
return mean_img
def _register_all_frames(frames, mean_img, upsample_factor=1,
n_processes=1, max_displacement=None,
return_registered=False,
verbose=False):
"""
Register all input frames to the computed aligned mean image.
Returns
-------
dx : float array
array of x pixel offsets for each frame
dy : float array
array of y pixel offsets for each frame
registered_frames : np.ndarray (size of input images)
array containing each aligned frame
n_processes : int, optional
number of processes to work on the registration in parallel
"""
input_shape = frames.shape
input_dtype = np.array(frames[0]).dtype
if verbose:
print(' Registering all ' + str(frames.shape[0]) + ' frames...')
map_function = partial(_register_frame, mean_img=mean_img,
upsample_factor=upsample_factor,
max_displacement=max_displacement,
return_registered=return_registered)
if n_processes > 1:
# configure pool of workers (multiprocessing)
pool = multiprocessing.Pool(n_processes)
results = pool.map(map_function, frames)
pool.close()
else:
results = map(map_function, frames)
# preallocate arrays
dx = np.zeros(input_shape[0], dtype=np.float)
dy = np.zeros(input_shape[0], dtype=np.float)
if return_registered:
registered_frames = np.zeros([input_shape[0], input_shape[1],
input_shape[2]], dtype=input_dtype)
# get results (0: dy, 1: dx, 2: registered image)
for idx, result in enumerate(results):
dy[idx] = result[0]
dx[idx] = result[1]
registered_frames[idx] = result[2]
return dy, dx, registered_frames
else:
# get results (0: dy, 1: dx)
for idx, result in enumerate(results):
dy[idx] = result[0]
dx[idx] = result[1]
return dy, dx
def _register_frame(frame, mean_img, upsample_factor=1,
max_displacement=None,
return_registered=False):
"""
Called by _make_mean_img and _register_all_frames
"""
# compute the offsets
dy, dx = _register_translation(mean_img, frame,
upsample_factor=upsample_factor)
if max_displacement is not None:
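        # note: only displacements exceeding the positive bound are clamped;
        # negative shifts pass through unchanged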
if dy > max_displacement[0]:
dy = max_displacement[0]
# dy = 0
if dx > max_displacement[1]:
dx = max_displacement[1]
# dx = 0
if return_registered:
registered_frame = shift(frame,
[dy, dx],
order=3,
mode='constant',
cval=0,
output=frame.dtype)
return dy, dx, registered_frame
else:
return dy, dx
def _upsampled_dft(data, upsampled_region_size,
upsample_factor=1, axis_offsets=None):
"""
*****************************************
From skimage.feature.register_translation
*****************************************
Upsampled DFT by matrix multiplication.
This code is intended to provide the same result as if the following
operations were performed:
- Embed the array "data" in an array that is ``upsample_factor`` times
larger in each dimension. ifftshift to bring the center of the
image to (1,1).
- Take the FFT of the larger array.
- Extract an ``[upsampled_region_size]`` region of the result, starting
with the ``[axis_offsets+1]`` element.
It achieves this result by computing the DFT in the output array without
the need to zeropad. Much faster and memory efficient than the zero-padded
FFT approach if ``upsampled_region_size`` is much smaller than
``data.size * upsample_factor``.
Parameters
----------
data : 2D ndarray
The input data array (DFT of original data) to upsample.
upsampled_region_size : integer or tuple of integers
The size of the region to be sampled. If one integer is provided, it
is duplicated up to the dimensionality of ``data``.
upsample_factor : integer, optional
The upsampling factor. Default: 1.
axis_offsets : tuple of integers, optional
The offsets of the region to be sampled. Default: None (uses
image center)
Returns
-------
output : 2D ndarray
The upsampled DFT of the specified region.
"""
# if people pass in an integer, expand it to a list of equal-sized sections
if not hasattr(upsampled_region_size, "__iter__"):
upsampled_region_size = [upsampled_region_size, ] * data.ndim
else:
if len(upsampled_region_size) != data.ndim:
raise ValueError("shape of upsampled region sizes must be equal "
"to input data's number of dimensions.")
if axis_offsets is None:
axis_offsets = [0, ] * data.ndim
else:
if len(axis_offsets) != data.ndim:
raise ValueError("number of axis offsets must be equal to input "
"data's number of dimensions.")
col_kernel = np.exp(
(-1j * 2 * np.pi / (data.shape[1] * upsample_factor)) *
(np.fft.ifftshift(np.arange(data.shape[1]))[:, None] -
np.floor(data.shape[1] / 2)).dot(
np.arange(upsampled_region_size[1])[None, :] - axis_offsets[1])
)
row_kernel = np.exp(
(-1j * 2 * np.pi / (data.shape[0] * upsample_factor)) *
(np.arange(upsampled_region_size[0])[:, None] - axis_offsets[0]).dot(
np.fft.ifftshift(np.arange(data.shape[0]))[None, :] -
np.floor(data.shape[0] / 2))
)
row_kernel_dot = row_kernel.dot(data)
return row_kernel_dot.dot(col_kernel) # hangs here when multiprocessing
def _compute_phasediff(cross_correlation_max):
"""
*****************************************
From skimage.feature.register_translation
*****************************************
Compute global phase difference between the two images (should be
zero if images are non-negative).
Parameters
----------
cross_correlation_max : complex
The complex value of the cross correlation at its maximum point.
"""
return np.arctan2(cross_correlation_max.imag, cross_correlation_max.real)
def _compute_error(cross_correlation_max, src_amp, target_amp):
"""
*****************************************
From skimage.feature.register_translation
*****************************************
Compute RMS error metric between ``src_image`` and ``target_image``.
Parameters
----------
cross_correlation_max : complex
The complex value of the cross correlation at its maximum point.
src_amp : float
The normalized average image intensity of the source image
target_amp : float
The normalized average image intensity of the target image
"""
error = 1.0 - cross_correlation_max * cross_correlation_max.conj() /\
(src_amp * target_amp)
return np.sqrt(np.abs(error))
def _register_translation(src_image, target_image, upsample_factor=1,
space="real"):
"""
*****************************************
From skimage.feature.register_translation
*****************************************
Efficient subpixel image translation registration by cross-correlation.
This code gives the same precision as the FFT upsampled cross-correlation
in a fraction of the computation time and with reduced memory requirements.
It obtains an initial estimate of the cross-correlation peak by an FFT and
then refines the shift estimation by upsampling the DFT only in a small
neighborhood of that estimate by means of a matrix-multiply DFT.
Parameters
----------
src_image : ndarray
Reference image.
target_image : ndarray
Image to register. Must be same dimensionality as ``src_image``.
upsample_factor : int, optional
Upsampling factor. Images will be registered to within
``1 / upsample_factor`` of a pixel. For example
``upsample_factor == 20`` means the images will be registered
within 1/20th of a pixel. Default: 1 (no upsampling).
space : string, one of "real" or "fourier"
Defines how the algorithm interprets input data. "real" means data
will be FFT'd to compute the correlation, while "fourier" data will
bypass FFT of input data. Case insensitive. Default: "real".
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``target_image`` with
``src_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X)
error : float
Translation invariant normalized RMS error between ``src_image`` and
``target_image``.
phasediff : float
Global phase difference between the two images (should be
zero if images are non-negative).
References
----------
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms,"
Optics Letters 33, 156-158 (2008).
"""
# images must be the same shape
if src_image.shape != target_image.shape:
raise ValueError("Error: images must be same size for "
"register_translation")
# only 2D data makes sense right now
if src_image.ndim != 2 and upsample_factor > 1:
raise NotImplementedError("Error: register_translation only supports "
"subpixel registration for 2D images")
# assume complex data is already in Fourier space
if space.lower() == 'fourier':
src_freq = src_image
target_freq = target_image
# real data needs to be fft'd.
elif space.lower() == 'real':
src_image = np.array(src_image, dtype=np.complex128, copy=False)
target_image = np.array(target_image, dtype=np.complex128, copy=False)
src_freq = fftn(src_image)
target_freq = fftn(target_image)
else:
raise ValueError("Error: register_translation only knows the \"real\" "
"and \"fourier\" values for the ``space`` argument.")
# Whole-pixel shift - Compute cross-correlation by an IFFT
shape = src_freq.shape
image_product = src_freq * target_freq.conj()
cross_correlation = ifftn(image_product)
# Locate maximum
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.array(maxima, dtype=np.float64)
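    # correlation peaks past the midpoint wrap around and correspond to
    # negative displacements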
shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]
if upsample_factor == 1:
src_amp = np.sum(np.abs(src_freq) ** 2) / src_freq.size
target_amp = np.sum(np.abs(target_freq) ** 2) / target_freq.size
# CCmax = cross_correlation.max()
# If upsampling > 1, then refine estimate with matrix multiply DFT
else:
# Initial shift estimate in upsampled grid
shifts = np.round(shifts * upsample_factor) / upsample_factor
upsampled_region_size = np.ceil(upsample_factor * 1.5)
# Center of output array at dftshift + 1
dftshift = np.fix(upsampled_region_size / 2.0)
upsample_factor = np.array(upsample_factor, dtype=np.float64)
normalization = (src_freq.size * upsample_factor ** 2)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts * upsample_factor
cross_correlation = _upsampled_dft(image_product.conj(),
upsampled_region_size,
upsample_factor,
sample_region_offset).conj()
cross_correlation /= normalization
# Locate maximum and map back to original pixel grid
maxima = np.array(np.unravel_index(
np.argmax(np.abs(cross_correlation)),
cross_correlation.shape),
dtype=np.float64)
maxima -= dftshift
shifts = shifts + maxima / upsample_factor
# CCmax = cross_correlation.max()
src_amp = _upsampled_dft(src_freq * src_freq.conj(),
1, upsample_factor)[0, 0]
src_amp /= normalization
target_amp = _upsampled_dft(target_freq * target_freq.conj(),
1, upsample_factor)[0, 0]
target_amp /= normalization
# If its only one row or column the shift along that dimension has no
# effect. We set to zero.
for dim in range(src_freq.ndim):
if shape[dim] == 1:
shifts[dim] = 0
return shifts
# _compute_error(CCmax, src_amp, target_amp)
# _compute_phasediff(CCmax)
def _save_registered_frames(frames, save_name, save_fmt, verbose=False):
"""
Save. Only use for debugging.
Parameters
----------
Returns
-------
"""
if verbose:
print(' Saving...')
try: # this is ugly
import tifffile
except ImportError:
try:
from sima.misc import tifffile
except ImportError:
if verbose:
print(' Cannot find tifffile')
if save_fmt == 'singles':
for idx in range(frames.shape[0]):
tifffile.imsave(
save_name + '_' + '{number:05d}'.format(number=idx) +
'_DFTreg.tif', frames[idx].astype(np.uint16))
if save_fmt == 'mptiff':
tifffile.imsave(save_name + '_DFTreg.tif',
frames.astype(np.uint16))
elif save_fmt == 'bigtiff':
tifffile.imsave(save_name + '_DFTreg.tif',
frames.astype(np.uint16), bigtiff=True)
| jzaremba/sima | sima/motion/dftreg.py | Python | gpl-2.0 | 31,079 | 0.000064 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
from desktop.lib.conf import Config, coerce_bool
SOLR_URL = Config(
key="solr_url",
help=_("URL of the Solr Server."),
default="http://localhost:8983/solr/")
EMPTY_QUERY = Config(
key="empty_query",
help=_("Query sent when no term is entered."),
default="*:*")
SECURITY_ENABLED = Config(
key="security_enabled",
help=_("Whether Solr requires client to perform Kerberos authentication."),
default=False,
type=coerce_bool)
# Unused: deprecated by dashboard
LATEST = Config(
key="latest",
help=_("Use latest Solr 5.2+ features."),
default=False,
type=coerce_bool)
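# Example hue.ini snippet exercising these settings (illustrative only; the
# section name is assumed to follow the app name):
#
#   [search]
#     solr_url=http://localhost:8983/solr/
#     empty_query=*:*
#     security_enabled=false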
| jayceyxc/hue | apps/search/src/search/conf.py | Python | apache-2.0 | 1,443 | 0 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ...testing import assert_equal
from ..meshfix import MeshFix
def test_MeshFix_inputs():
input_map = dict(args=dict(argstr='%s',
),
cut_inner=dict(argstr='--cut-inner %d',
),
cut_outer=dict(argstr='--cut-outer %d',
),
decouple_inin=dict(argstr='--decouple-inin %d',
),
decouple_outin=dict(argstr='--decouple-outin %d',
),
decouple_outout=dict(argstr='--decouple-outout %d',
),
dilation=dict(argstr='--dilate %d',
),
dont_clean=dict(argstr='--no-clean',
),
environ=dict(nohash=True,
usedefault=True,
),
epsilon_angle=dict(argstr='-a %f',
),
finetuning_distance=dict(argstr='%f',
requires=['finetuning_substeps'],
),
finetuning_inwards=dict(argstr='--fineTuneIn ',
requires=['finetuning_distance', 'finetuning_substeps'],
),
finetuning_outwards=dict(argstr='--fineTuneIn ',
requires=['finetuning_distance', 'finetuning_substeps'],
xor=['finetuning_inwards'],
),
finetuning_substeps=dict(argstr='%d',
requires=['finetuning_distance'],
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file1=dict(argstr='%s',
mandatory=True,
position=1,
),
in_file2=dict(argstr='%s',
position=2,
),
join_closest_components=dict(argstr='-jc',
xor=['join_closest_components'],
),
join_overlapping_largest_components=dict(argstr='-j',
xor=['join_closest_components'],
),
laplacian_smoothing_steps=dict(argstr='--smooth %d',
),
number_of_biggest_shells=dict(argstr='--shells %d',
),
out_filename=dict(argstr='-o %s',
genfile=True,
),
output_type=dict(usedefault=True,
),
quiet_mode=dict(argstr='-q',
),
remove_handles=dict(argstr='--remove-handles',
),
save_as_freesurfer_mesh=dict(argstr='--fsmesh',
xor=['save_as_vrml', 'save_as_stl'],
),
save_as_stl=dict(argstr='--stl',
xor=['save_as_vmrl', 'save_as_freesurfer_mesh'],
),
save_as_vmrl=dict(argstr='--wrl',
xor=['save_as_stl', 'save_as_freesurfer_mesh'],
),
set_intersections_to_one=dict(argstr='--intersect',
),
terminal_output=dict(nohash=True,
),
uniform_remeshing_steps=dict(argstr='-u %d',
requires=['uniform_remeshing_vertices'],
),
uniform_remeshing_vertices=dict(argstr='--vertices %d',
requires=['uniform_remeshing_steps'],
),
x_shift=dict(argstr='--smooth %d',
),
)
inputs = MeshFix.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_MeshFix_outputs():
output_map = dict(mesh_file=dict(),
)
outputs = MeshFix.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| sgiavasis/nipype | nipype/interfaces/tests/test_auto_MeshFix.py | Python | bsd-3-clause | 3,030 | 0.024092 |
import numpy as np
import copy as copy
import tensorflow as tf
from pybullet_envs.deep_mimic.learning.pg_agent import PGAgent
from pybullet_envs.deep_mimic.learning.solvers.mpi_solver import MPISolver
import pybullet_envs.deep_mimic.learning.tf_util as TFUtil
import pybullet_envs.deep_mimic.learning.rl_util as RLUtil
from pybullet_utils.logger import Logger
import pybullet_utils.mpi_util as MPIUtil
import pybullet_utils.math_util as MathUtil
from pybullet_envs.deep_mimic.env.env import Env
'''
Proximal Policy Optimization Agent
'''
class PPOAgent(PGAgent):
NAME = "PPO"
EPOCHS_KEY = "Epochs"
BATCH_SIZE_KEY = "BatchSize"
RATIO_CLIP_KEY = "RatioClip"
NORM_ADV_CLIP_KEY = "NormAdvClip"
TD_LAMBDA_KEY = "TDLambda"
TAR_CLIP_FRAC = "TarClipFrac"
ACTOR_STEPSIZE_DECAY = "ActorStepsizeDecay"
def __init__(self, world, id, json_data):
super().__init__(world, id, json_data)
return
def _load_params(self, json_data):
super()._load_params(json_data)
self.epochs = 1 if (self.EPOCHS_KEY not in json_data) else json_data[self.EPOCHS_KEY]
self.batch_size = 1024 if (
self.BATCH_SIZE_KEY not in json_data) else json_data[self.BATCH_SIZE_KEY]
self.ratio_clip = 0.2 if (
self.RATIO_CLIP_KEY not in json_data) else json_data[self.RATIO_CLIP_KEY]
self.norm_adv_clip = 5 if (
self.NORM_ADV_CLIP_KEY not in json_data) else json_data[self.NORM_ADV_CLIP_KEY]
self.td_lambda = 0.95 if (
self.TD_LAMBDA_KEY not in json_data) else json_data[self.TD_LAMBDA_KEY]
self.tar_clip_frac = -1 if (
self.TAR_CLIP_FRAC not in json_data) else json_data[self.TAR_CLIP_FRAC]
self.actor_stepsize_decay = 0.5 if (
self.ACTOR_STEPSIZE_DECAY not in json_data) else json_data[self.ACTOR_STEPSIZE_DECAY]
num_procs = MPIUtil.get_num_procs()
local_batch_size = int(self.batch_size / num_procs)
min_replay_size = 2 * local_batch_size # needed to prevent buffer overflow
assert (self.replay_buffer_size > min_replay_size)
self.replay_buffer_size = np.maximum(min_replay_size, self.replay_buffer_size)
return
def _build_nets(self, json_data):
assert self.ACTOR_NET_KEY in json_data
assert self.CRITIC_NET_KEY in json_data
actor_net_name = json_data[self.ACTOR_NET_KEY]
critic_net_name = json_data[self.CRITIC_NET_KEY]
actor_init_output_scale = 1 if (self.ACTOR_INIT_OUTPUT_SCALE_KEY not in json_data
) else json_data[self.ACTOR_INIT_OUTPUT_SCALE_KEY]
s_size = self.get_state_size()
g_size = self.get_goal_size()
a_size = self.get_action_size()
# setup input tensors
self.s_tf = tf.placeholder(tf.float32, shape=[None, s_size], name="s")
self.a_tf = tf.placeholder(tf.float32, shape=[None, a_size], name="a")
self.tar_val_tf = tf.placeholder(tf.float32, shape=[None], name="tar_val")
self.adv_tf = tf.placeholder(tf.float32, shape=[None], name="adv")
self.g_tf = tf.placeholder(tf.float32,
shape=([None, g_size] if self.has_goal() else None),
name="g")
self.old_logp_tf = tf.placeholder(tf.float32, shape=[None], name="old_logp")
self.exp_mask_tf = tf.placeholder(tf.float32, shape=[None], name="exp_mask")
with tf.variable_scope('main'):
with tf.variable_scope('actor'):
self.a_mean_tf = self._build_net_actor(actor_net_name, actor_init_output_scale)
with tf.variable_scope('critic'):
self.critic_tf = self._build_net_critic(critic_net_name)
if (self.a_mean_tf != None):
Logger.print2('Built actor net: ' + actor_net_name)
if (self.critic_tf != None):
Logger.print2('Built critic net: ' + critic_net_name)
self.norm_a_std_tf = self.exp_params_curr.noise * tf.ones(a_size)
norm_a_noise_tf = self.norm_a_std_tf * tf.random_normal(shape=tf.shape(self.a_mean_tf))
norm_a_noise_tf *= tf.expand_dims(self.exp_mask_tf, axis=-1)
self.sample_a_tf = self.a_mean_tf + norm_a_noise_tf * self.a_norm.std_tf
self.sample_a_logp_tf = TFUtil.calc_logp_gaussian(x_tf=norm_a_noise_tf,
mean_tf=None,
std_tf=self.norm_a_std_tf)
return
def _build_losses(self, json_data):
actor_weight_decay = 0 if (
self.ACTOR_WEIGHT_DECAY_KEY not in json_data) else json_data[self.ACTOR_WEIGHT_DECAY_KEY]
critic_weight_decay = 0 if (
self.CRITIC_WEIGHT_DECAY_KEY not in json_data) else json_data[self.CRITIC_WEIGHT_DECAY_KEY]
norm_val_diff = self.val_norm.normalize_tf(self.tar_val_tf) - self.val_norm.normalize_tf(
self.critic_tf)
self.critic_loss_tf = 0.5 * tf.reduce_mean(tf.square(norm_val_diff))
if (critic_weight_decay != 0):
self.critic_loss_tf += critic_weight_decay * self._weight_decay_loss('main/critic')
norm_tar_a_tf = self.a_norm.normalize_tf(self.a_tf)
self._norm_a_mean_tf = self.a_norm.normalize_tf(self.a_mean_tf)
self.logp_tf = TFUtil.calc_logp_gaussian(norm_tar_a_tf, self._norm_a_mean_tf,
self.norm_a_std_tf)
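    # PPO clipped surrogate objective: maximize
    #   E[min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t)]
    # where r_t = pi(a_t|s_t) / pi_old(a_t|s_t); the ratio is computed in log
    # space below and the sign is flipped because the solvers minimize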
ratio_tf = tf.exp(self.logp_tf - self.old_logp_tf)
actor_loss0 = self.adv_tf * ratio_tf
actor_loss1 = self.adv_tf * tf.clip_by_value(ratio_tf, 1.0 - self.ratio_clip,
1 + self.ratio_clip)
self.actor_loss_tf = -tf.reduce_mean(tf.minimum(actor_loss0, actor_loss1))
norm_a_bound_min = self.a_norm.normalize(self.a_bound_min)
norm_a_bound_max = self.a_norm.normalize(self.a_bound_max)
a_bound_loss = TFUtil.calc_bound_loss(self._norm_a_mean_tf, norm_a_bound_min, norm_a_bound_max)
self.actor_loss_tf += a_bound_loss
if (actor_weight_decay != 0):
self.actor_loss_tf += actor_weight_decay * self._weight_decay_loss('main/actor')
# for debugging
self.clip_frac_tf = tf.reduce_mean(
tf.to_float(tf.greater(tf.abs(ratio_tf - 1.0), self.ratio_clip)))
return
def _build_solvers(self, json_data):
actor_stepsize = 0.001 if (
self.ACTOR_STEPSIZE_KEY not in json_data) else json_data[self.ACTOR_STEPSIZE_KEY]
actor_momentum = 0.9 if (
self.ACTOR_MOMENTUM_KEY not in json_data) else json_data[self.ACTOR_MOMENTUM_KEY]
critic_stepsize = 0.01 if (
self.CRITIC_STEPSIZE_KEY not in json_data) else json_data[self.CRITIC_STEPSIZE_KEY]
critic_momentum = 0.9 if (
self.CRITIC_MOMENTUM_KEY not in json_data) else json_data[self.CRITIC_MOMENTUM_KEY]
critic_vars = self._tf_vars('main/critic')
critic_opt = tf.train.MomentumOptimizer(learning_rate=critic_stepsize,
momentum=critic_momentum)
self.critic_grad_tf = tf.gradients(self.critic_loss_tf, critic_vars)
self.critic_solver = MPISolver(self.sess, critic_opt, critic_vars)
self._actor_stepsize_tf = tf.get_variable(dtype=tf.float32,
name='actor_stepsize',
initializer=actor_stepsize,
trainable=False)
self._actor_stepsize_ph = tf.get_variable(dtype=tf.float32, name='actor_stepsize_ph', shape=[])
self._actor_stepsize_update_op = self._actor_stepsize_tf.assign(self._actor_stepsize_ph)
actor_vars = self._tf_vars('main/actor')
actor_opt = tf.train.MomentumOptimizer(learning_rate=self._actor_stepsize_tf,
momentum=actor_momentum)
self.actor_grad_tf = tf.gradients(self.actor_loss_tf, actor_vars)
self.actor_solver = MPISolver(self.sess, actor_opt, actor_vars)
return
def _decide_action(self, s, g):
with self.sess.as_default(), self.graph.as_default():
self._exp_action = self._enable_stoch_policy() and MathUtil.flip_coin(
self.exp_params_curr.rate)
#print("_decide_action._exp_action=",self._exp_action)
a, logp = self._eval_actor(s, g, self._exp_action)
return a[0], logp[0]
def _eval_actor(self, s, g, enable_exp):
s = np.reshape(s, [-1, self.get_state_size()])
g = np.reshape(g, [-1, self.get_goal_size()]) if self.has_goal() else None
feed = {self.s_tf: s, self.g_tf: g, self.exp_mask_tf: np.array([1 if enable_exp else 0])}
a, logp = self.sess.run([self.sample_a_tf, self.sample_a_logp_tf], feed_dict=feed)
return a, logp
def _train_step(self):
adv_eps = 1e-5
start_idx = self.replay_buffer.buffer_tail
end_idx = self.replay_buffer.buffer_head
assert (start_idx == 0)
assert (self.replay_buffer.get_current_size() <= self.replay_buffer.buffer_size
) # must avoid overflow
assert (start_idx < end_idx)
idx = np.array(list(range(start_idx, end_idx)))
end_mask = self.replay_buffer.is_path_end(idx)
end_mask = np.logical_not(end_mask)
vals = self._compute_batch_vals(start_idx, end_idx)
new_vals = self._compute_batch_new_vals(start_idx, end_idx, vals)
valid_idx = idx[end_mask]
exp_idx = self.replay_buffer.get_idx_filtered(self.EXP_ACTION_FLAG).copy()
num_valid_idx = valid_idx.shape[0]
num_exp_idx = exp_idx.shape[0]
exp_idx = np.column_stack([exp_idx, np.array(list(range(0, num_exp_idx)), dtype=np.int32)])
local_sample_count = valid_idx.size
global_sample_count = int(MPIUtil.reduce_sum(local_sample_count))
mini_batches = int(np.ceil(global_sample_count / self.mini_batch_size))
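    # advantages: TD(lambda) returns minus the critic's value estimates,
    # computed only on exploratory (noisy) actions, then standardized and
    # clipped before the policy update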
adv = new_vals[exp_idx[:, 0]] - vals[exp_idx[:, 0]]
new_vals = np.clip(new_vals, self.val_min, self.val_max)
adv_mean = np.mean(adv)
adv_std = np.std(adv)
adv = (adv - adv_mean) / (adv_std + adv_eps)
adv = np.clip(adv, -self.norm_adv_clip, self.norm_adv_clip)
critic_loss = 0
actor_loss = 0
actor_clip_frac = 0
for e in range(self.epochs):
np.random.shuffle(valid_idx)
np.random.shuffle(exp_idx)
for b in range(mini_batches):
batch_idx_beg = b * self._local_mini_batch_size
batch_idx_end = batch_idx_beg + self._local_mini_batch_size
critic_batch = np.array(range(batch_idx_beg, batch_idx_end), dtype=np.int32)
actor_batch = critic_batch.copy()
critic_batch = np.mod(critic_batch, num_valid_idx)
actor_batch = np.mod(actor_batch, num_exp_idx)
shuffle_actor = (actor_batch[-1] < actor_batch[0]) or (actor_batch[-1] == num_exp_idx - 1)
critic_batch = valid_idx[critic_batch]
actor_batch = exp_idx[actor_batch]
critic_batch_vals = new_vals[critic_batch]
actor_batch_adv = adv[actor_batch[:, 1]]
critic_s = self.replay_buffer.get('states', critic_batch)
critic_g = self.replay_buffer.get('goals', critic_batch) if self.has_goal() else None
curr_critic_loss = self._update_critic(critic_s, critic_g, critic_batch_vals)
actor_s = self.replay_buffer.get("states", actor_batch[:, 0])
actor_g = self.replay_buffer.get("goals", actor_batch[:, 0]) if self.has_goal() else None
actor_a = self.replay_buffer.get("actions", actor_batch[:, 0])
actor_logp = self.replay_buffer.get("logps", actor_batch[:, 0])
curr_actor_loss, curr_actor_clip_frac = self._update_actor(actor_s, actor_g, actor_a,
actor_logp, actor_batch_adv)
critic_loss += curr_critic_loss
actor_loss += np.abs(curr_actor_loss)
actor_clip_frac += curr_actor_clip_frac
if (shuffle_actor):
np.random.shuffle(exp_idx)
total_batches = mini_batches * self.epochs
critic_loss /= total_batches
actor_loss /= total_batches
actor_clip_frac /= total_batches
critic_loss = MPIUtil.reduce_avg(critic_loss)
actor_loss = MPIUtil.reduce_avg(actor_loss)
actor_clip_frac = MPIUtil.reduce_avg(actor_clip_frac)
critic_stepsize = self.critic_solver.get_stepsize()
actor_stepsize = self.update_actor_stepsize(actor_clip_frac)
self.logger.log_tabular('Critic_Loss', critic_loss)
self.logger.log_tabular('Critic_Stepsize', critic_stepsize)
self.logger.log_tabular('Actor_Loss', actor_loss)
self.logger.log_tabular('Actor_Stepsize', actor_stepsize)
self.logger.log_tabular('Clip_Frac', actor_clip_frac)
self.logger.log_tabular('Adv_Mean', adv_mean)
self.logger.log_tabular('Adv_Std', adv_std)
self.replay_buffer.clear()
return
def _get_iters_per_update(self):
return 1
def _valid_train_step(self):
samples = self.replay_buffer.get_current_size()
exp_samples = self.replay_buffer.count_filtered(self.EXP_ACTION_FLAG)
global_sample_count = int(MPIUtil.reduce_sum(samples))
global_exp_min = int(MPIUtil.reduce_min(exp_samples))
return (global_sample_count > self.batch_size) and (global_exp_min > 0)
def _compute_batch_vals(self, start_idx, end_idx):
states = self.replay_buffer.get_all("states")[start_idx:end_idx]
goals = self.replay_buffer.get_all("goals")[start_idx:end_idx] if self.has_goal() else None
idx = np.array(list(range(start_idx, end_idx)))
is_end = self.replay_buffer.is_path_end(idx)
is_fail = self.replay_buffer.check_terminal_flag(idx, Env.Terminate.Fail)
is_succ = self.replay_buffer.check_terminal_flag(idx, Env.Terminate.Succ)
is_fail = np.logical_and(is_end, is_fail)
is_succ = np.logical_and(is_end, is_succ)
vals = self._eval_critic(states, goals)
vals[is_fail] = self.val_fail
vals[is_succ] = self.val_succ
return vals
def _compute_batch_new_vals(self, start_idx, end_idx, val_buffer):
rewards = self.replay_buffer.get_all("rewards")[start_idx:end_idx]
if self.discount == 0:
new_vals = rewards.copy()
else:
new_vals = np.zeros_like(val_buffer)
curr_idx = start_idx
while curr_idx < end_idx:
idx0 = curr_idx - start_idx
idx1 = self.replay_buffer.get_path_end(curr_idx) - start_idx
r = rewards[idx0:idx1]
v = val_buffer[idx0:(idx1 + 1)]
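        # RLUtil.compute_return is assumed to blend bootstrapped values with
        # Monte-Carlo returns, i.e. roughly
        #   G_t = r_t + discount * ((1 - lambda) * V(s_{t+1}) + lambda * G_{t+1})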
new_vals[idx0:idx1] = RLUtil.compute_return(r, self.discount, self.td_lambda, v)
curr_idx = idx1 + start_idx + 1
return new_vals
def _update_critic(self, s, g, tar_vals):
feed = {self.s_tf: s, self.g_tf: g, self.tar_val_tf: tar_vals}
loss, grads = self.sess.run([self.critic_loss_tf, self.critic_grad_tf], feed)
self.critic_solver.update(grads)
return loss
def _update_actor(self, s, g, a, logp, adv):
feed = {self.s_tf: s, self.g_tf: g, self.a_tf: a, self.adv_tf: adv, self.old_logp_tf: logp}
loss, grads, clip_frac = self.sess.run(
[self.actor_loss_tf, self.actor_grad_tf, self.clip_frac_tf], feed)
self.actor_solver.update(grads)
return loss, clip_frac
def update_actor_stepsize(self, clip_frac):
clip_tol = 1.5
step_scale = 2
max_stepsize = 1e-2
min_stepsize = 1e-8
warmup_iters = 5
actor_stepsize = self.actor_solver.get_stepsize()
if (self.tar_clip_frac >= 0 and self.iter > warmup_iters):
min_clip = self.tar_clip_frac / clip_tol
max_clip = self.tar_clip_frac * clip_tol
under_tol = clip_frac < min_clip
over_tol = clip_frac > max_clip
if (over_tol or under_tol):
if (over_tol):
actor_stepsize *= self.actor_stepsize_decay
else:
actor_stepsize /= self.actor_stepsize_decay
actor_stepsize = np.clip(actor_stepsize, min_stepsize, max_stepsize)
self.set_actor_stepsize(actor_stepsize)
return actor_stepsize
def set_actor_stepsize(self, stepsize):
feed = {
self._actor_stepsize_ph: stepsize,
}
self.sess.run(self._actor_stepsize_update_op, feed)
return
| MadManRises/Madgine | shared/bullet3-2.89/examples/pybullet/gym/pybullet_envs/deep_mimic/learning/ppo_agent.py | Python | mit | 15,708 | 0.006175 |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import webob
from nova.api.openstack.compute.schemas.v3 import flavors_extraspecs
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
from nova import objects
from nova import utils
ALIAS = 'os-flavor-extra-specs'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
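# Illustrative request body for creating extra specs on a flavor's
# os-extra_specs resource (the keys shown are examples only, not required by
# this controller):
#     {"extra_specs": {"hw:cpu_policy": "dedicated", "hw:numa_nodes": "1"}}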
class FlavorExtraSpecsController(wsgi.Controller):
"""The flavor extra specs API controller for the OpenStack API."""
def __init__(self, *args, **kwargs):
super(FlavorExtraSpecsController, self).__init__(*args, **kwargs)
def _get_extra_specs(self, context, flavor_id):
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
return dict(extra_specs=flavor.extra_specs)
# NOTE(gmann): Max length for numeric value is being checked
# explicitly as json schema cannot have max length check for numeric value
def _check_extra_specs_value(self, specs):
for key, value in specs.iteritems():
try:
if isinstance(value, (six.integer_types, float)):
value = six.text_type(value)
utils.check_string_length(value, 'extra_specs value',
max_length=255)
except exception.InvalidInput as error:
raise webob.exc.HTTPBadRequest(
explanation=error.format_message())
@extensions.expected_errors(())
def index(self, req, flavor_id):
"""Returns the list of extra specs for a given flavor."""
context = req.environ['nova.context']
authorize(context, action='index')
return self._get_extra_specs(context, flavor_id)
# NOTE(gmann): Here should be 201 instead of 200 by v2.1
# +microversions because the flavor extra specs has been created
# completely when returning a response.
@extensions.expected_errors((400, 404, 409))
@validation.schema(flavors_extraspecs.create)
def create(self, req, flavor_id, body):
context = req.environ['nova.context']
authorize(context, action='create')
specs = body['extra_specs']
self._check_extra_specs_value(specs)
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
flavor.extra_specs = dict(flavor.extra_specs, **specs)
flavor.save()
except exception.FlavorExtraSpecUpdateCreateFailed as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return body
@extensions.expected_errors((400, 404, 409))
@validation.schema(flavors_extraspecs.update)
def update(self, req, flavor_id, id, body):
context = req.environ['nova.context']
authorize(context, action='update')
self._check_extra_specs_value(body)
if id not in body:
expl = _('Request body and URI mismatch')
raise webob.exc.HTTPBadRequest(explanation=expl)
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
flavor.extra_specs = dict(flavor.extra_specs, **body)
flavor.save()
except exception.FlavorExtraSpecUpdateCreateFailed as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return body
@extensions.expected_errors(404)
def show(self, req, flavor_id, id):
"""Return a single extra spec item."""
context = req.environ['nova.context']
authorize(context, action='show')
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
return {id: flavor.extra_specs[id]}
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except KeyError:
msg = _("Flavor %(flavor_id)s has no extra specs with "
"key %(key)s.") % dict(flavor_id=flavor_id,
key=id)
raise webob.exc.HTTPNotFound(explanation=msg)
# NOTE(gmann): Here should be 204(No Content) instead of 200 by v2.1
# +microversions because the flavor extra specs has been deleted
# completely when returning a response.
@extensions.expected_errors(404)
def delete(self, req, flavor_id, id):
"""Deletes an existing extra spec."""
context = req.environ['nova.context']
authorize(context, action='delete')
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
del flavor.extra_specs[id]
flavor.save()
except (exception.FlavorExtraSpecsNotFound,
exception.FlavorNotFound) as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except KeyError:
msg = _("Flavor %(flavor_id)s has no extra specs with "
"key %(key)s.") % dict(flavor_id=flavor_id,
key=id)
raise webob.exc.HTTPNotFound(explanation=msg)
class FlavorsExtraSpecs(extensions.V3APIExtensionBase):
"""Flavors extra specs support."""
name = 'FlavorExtraSpecs'
alias = ALIAS
version = 1
def get_resources(self):
extra_specs = extensions.ResourceExtension(
'os-extra_specs',
FlavorExtraSpecsController(),
parent=dict(member_name='flavor', collection_name='flavors'))
return [extra_specs]
def get_controller_extensions(self):
return []
| cloudbase/nova-virtualbox | nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py | Python | apache-2.0 | 6,450 | 0 |
#!/usr/bin/env python
import sys
sys.dont_write_bytecode = True
import glob
import re
import yaml
import json
import os
import time
import logging
import requests
import platform
import imp
from argparse import ArgumentParser
from slackclient import SlackClient
def dbg(debug_string):
if debug:
logging.info(debug_string)
class RtmBot(object):
def __init__(self, token):
self.last_ping = 0
self.token = token
self.bot_plugins = []
self.slack_client = None
def connect(self):
"""Convenience method that creates Server instance"""
self.slack_client = SlackClient(self.token)
self.slack_client.rtm_connect()
def start(self):
self.connect()
self.load_plugins()
repeat_reply = None
while True:
for reply in self.slack_client.rtm_read():
self.input(reply)
if 'text' in reply:
words = reply['text'].split()
first_word = words[0].lower()
#Make a repeater
if first_word in ['monitor', 'monitor_id', 'monitor_text'] and len(words) > 1:
try:
webpage_response = requests.get(re.sub('<|>', '', words[1]).split('|')[0]).status_code
if webpage_response == 200:
repeat_reply = reply.copy()
start_time = time.time()
except:
pass
#stop the repeating if the user calls it quits
elif first_word == 'quit_monitor':
if repeat_reply is not None:
repeat_reply = None
self.crons()
self.output()
self.autoping()
time.sleep(.1)
#See if it's time to check the website again
if repeat_reply is not None:
time_diff = time.time() - start_time
if time_diff > 30:
self.input(repeat_reply)
start_time = time.time()
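    # Illustrative note (not part of the original code): with the loop above,
    # posting "monitor https://example.com" in a channel makes the bot re-feed
    # that message to its plugins roughly every 30 seconds (the URL is accepted
    # only if an initial GET returns HTTP 200), until someone posts "quit_monitor".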
def autoping(self):
#hardcode the interval to 3 seconds
now = int(time.time())
if now > self.last_ping + 3:
self.slack_client.server.ping()
self.last_ping = now
def input(self, data):
if "type" in data:
function_name = "process_" + data["type"]
dbg("got {}".format(function_name))
for plugin in self.bot_plugins:
plugin.register_jobs()
plugin.do(function_name, data)
def output(self):
for plugin in self.bot_plugins:
limiter = False
for output in plugin.do_output():
channel = self.slack_client.server.channels.find(output[0])
                if channel is not None and output[1] is not None:
                    if limiter:
                        time.sleep(.1)
                        limiter = False
                    message = output[1].encode('ascii', 'ignore')
                    channel.send_message("{}".format(message))
                    limiter = True
def crons(self):
for plugin in self.bot_plugins:
plugin.do_jobs()
def load_plugins(self):
for plugin in glob.glob(directory+'/plugins/*'):
sys.path.insert(0, plugin)
sys.path.insert(0, directory+'/plugins/')
for plugin in glob.glob(directory+'/plugins/*.py') + glob.glob(directory+'/plugins/*/*.py'):
logging.info(plugin)
name = plugin.split('/')[-1][:-3]
# try:
self.bot_plugins.append(Plugin(name))
# except:
# print "error loading plugin %s" % name
class Plugin(object):
def __init__(self, name, plugin_config={}):
self.name = name
self.jobs = []
if platform.system() == 'Windows':
self.module = imp.load_source(name, name + '.py')
else:
self.module = __import__(name)
self.register_jobs()
self.outputs = []
if name in config:
logging.info("config found for: " + name)
self.module.config = config[name]
if 'setup' in dir(self.module):
self.module.setup()
def register_jobs(self):
if 'crontable' in dir(self.module):
for interval, function in self.module.crontable:
self.jobs.append(Job(interval, eval("self.module."+function)))
logging.info(self.module.crontable)
self.module.crontable = []
else:
self.module.crontable = []
def do(self, function_name, data):
if function_name in dir(self.module):
#this makes the plugin fail with stack trace in debug mode
if not debug:
try:
eval("self.module."+function_name)(data)
except:
dbg("problem in module {} {}".format(function_name, data))
else:
eval("self.module."+function_name)(data)
if "catch_all" in dir(self.module):
try:
self.module.catch_all(data)
except:
dbg("problem in catch all")
def do_jobs(self):
for job in self.jobs:
job.check()
def do_output(self):
output = []
while True:
if 'outputs' in dir(self.module):
if len(self.module.outputs) > 0:
logging.info("output from {}".format(self.module))
output.append(self.module.outputs.pop(0))
else:
break
else:
self.module.outputs = []
return output
class Job(object):
def __init__(self, interval, function):
self.function = function
self.interval = interval
self.lastrun = 0
def __str__(self):
return "{} {} {}".format(self.function, self.interval, self.lastrun)
def __repr__(self):
return self.__str__()
def check(self):
if self.lastrun + self.interval < time.time():
if not debug:
try:
self.function()
except:
dbg("problem")
else:
self.function()
self.lastrun = time.time()
class UnknownChannel(Exception):
pass
def main_loop():
if "LOGFILE" in config:
logging.basicConfig(filename=config["LOGFILE"], level=logging.INFO, format='%(asctime)s %(message)s')
logging.info(directory)
try:
bot.start()
except KeyboardInterrupt:
sys.exit(0)
except:
logging.exception('OOPS')
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'-c',
'--config',
help='Full path to config file.',
metavar='path'
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
directory = os.path.dirname(sys.argv[0])
if not directory.startswith('/'):
directory = os.path.abspath("{}/{}".format(os.getcwd(),
directory
))
config = yaml.load(file(args.config or 'rtmbot.conf', 'r'))
debug = config["DEBUG"]
bot = RtmBot(config["SLACK_TOKEN"])
site_plugins = []
files_currently_downloading = []
job_hash = {}
if config.has_key("DAEMON"):
if config["DAEMON"]:
import daemon
with daemon.DaemonContext():
main_loop()
main_loop()
| zbeaver4/python-webpage-monitor-slackbot | rtmbot.py | Python | mit | 7,767 | 0.007596 |
""" Formula for building parallel """
from pakit import Archive, Recipe
class Parallel(Recipe):
"""
GNU parallel executes shell jobs in parallel
"""
def __init__(self):
super(Parallel, self).__init__()
self.homepage = 'http://www.gnu.org/software/parallel'
self.repos = {
'unstable': Archive('https://ftp.gnu.org/gnu/parallel/'
'parallel-20181022.tar.bz2',
hash='2e84dee3556cbb8f6a3794f5b21549faffb132'
'db3fc68e2e95922963adcbdbec')
}
self.repos['stable'] = self.repos['unstable']
def build(self):
self.cmd('./configure --prefix={prefix}')
self.cmd('make install')
def verify(self):
lines = self.cmd('parallel --version').output()
assert lines[0].find('GNU parallel') != -1
| pakit/recipes | parallel.py | Python | bsd-3-clause | 883 | 0 |
#!/usr/bin/env python2
from hgraph import Hgraph
import amr_graph_description_parser
#import tree
import re
import sys
import string
from collections import defaultdict as ddict
import nltk.tree  # used by format_constituents below
def format_tagged(s):
#return [tuple(p.split('/')) for p in s.split()]
return [p.rsplit('-',1)[0] for p in s.split()]
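# Illustrative example (input format assumed from the rsplit above):
# format_tagged('boy-NN ran-VBD') -> ['boy', 'ran']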
def format_amr(l):
amr_s = ' '.join(l)
amr_g = Hgraph.from_string(amr_s)
return amr_g
def read_to_empty(f):
lines = []
while True:
l = f.readline().strip()
if not l: return lines
lines.append(l)
def format_constituents(l):
return nltk.tree.ParentedTree("\n".join(l))
def format_alignments(l, amr):
"""
Parse alignment descriptions from file
"""
r = []
for a in l:
m = re.match(r'(\S+)\s+:(\S+)\s+(\S+)\s+(.+)\-(\d+)', a)
if m:
var = m.group(1)
role = m.group(2)
filler = m.group(3).replace('"','')
token = m.group(4)
token_id = int(m.group(5)) - 1
else:
m = re.match(r'ROOT\s+([^\-]+)\-(\d+)', a)
if m:
var = None
role = "ROOT"
filler = amr.roots[0].replace('"','')
token = m.group(1)
token_id = int(m.group(2)) - 1
else:
sys.exit(1)
amr_triple = (var, role, (filler,))
r.append((amr_triple, token_id))
return r
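# Illustrative example (line format inferred from the regexes above): an alignment
# line such as 'x0 :ARG0 b boy-3' yields (('x0', 'ARG0', ('b',)), 2), i.e. the AMR
# triple plus a 0-based token index, while 'ROOT boy-1' maps the graph root to token 0.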
textlinematcher = re.compile("^(\d+)\.(.*?)\((.*)\)?$")
def format_text(l):
match = textlinematcher.match(l.strip())
if not match:
raise ValueError, "Not a valid text line in Ulf corpus: \n %s \n"%l
s_no = int(match.group(1))
text = match.group(2).strip().split(" ")
s_id = match.group(3).strip()
return s_id, s_no, text
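# Illustrative example (format inferred from textlinematcher above):
# format_text('3. The boy ran (wsj_0001.1)') -> ('wsj_0001.1)', 3, ['The', 'boy', 'ran'])
# (the trailing ')' is kept because the greedy '(.*)' consumes it and the final
# '\)?' is optional).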
def plain_corpus(f):
while True:
x = read_to_empty(f)
if not x:
raise StopIteration
amr = format_amr(x)
yield amr
def aligned_corpus(f):
"""
Read the next parsed sentence from an input file using the aligned AMR/tagged string format.
"""
while True:
l = f.readline()
if not l:
raise StopIteration
while l.strip().startswith("#") or l.strip().startswith("==") or not l.strip():
l = f.readline()
if not l:
raise IOError, "AMR data file ended unexpectedly."
sent_id = int(l)
l = f.readline()
amr = format_amr(read_to_empty(f))
tagged = format_tagged(f.readline())
l = f.readline()
alignments = format_alignments(read_to_empty(f), amr)
p = SentenceWithHgraph(sent_id, sent_id, amr, tagged, None, alignments)
yield p
def ulf_corpus(f):
"""
Read the next parsed sentence from an input file using Ulf's format.
"""
while True:
l = f.readline()
if not l:
raise StopIteration
while l.strip().startswith("#") or not l.strip():
l = f.readline()
if not l:
raise IOError, "AMR data file ended unexpectedly- sentence without AMR."
sent_id, sent_no, tagged = format_text(l.strip())
l = f.readline()
amr = format_amr(read_to_empty(f))
p = SentenceWithHgraph(sent_id, sent_no, amr, tagged, None, None)
yield p
def metadata_amr_corpus(f):
"""
Read the next parsed sentence from an input file using the AMR meta data format.
"""
metadata = []
sentence = ""
sent_id = ""
buff = []
idmatcher = re.compile("# ::id ([^ ]+) ")
sentmatcher = re.compile("# ::snt (.*)")
count = 1
parser = amr_graph_description_parser.GraphDescriptionParser()
while True:
l = f.readline()
if not l:
raise StopIteration
l = l.strip()
if not l:
if buff:
amr = parser.parse_string(" ".join(buff))
yield SentenceWithHgraph(sent_id, count, amr, sentence, metadata = metadata)
count += 1
buff = []
metadata = []
sentence = ""
sent_id = ""
elif l.startswith("#"):
metadata.append(l)
match = idmatcher.match(l)
if match:
sent_id = match.group(1)
match = sentmatcher.match(l)
if match:
sentence = match.group(1)
else:
buff.append(l)
class SentenceWithHgraph():
"""
A data structure to hold Hgraph <-> sentence pairs with
    PTB parses and token to Hgraph edge alignments.
"""
def __init__(self, sent_id, sent_no, amr, tagged, ptb = None, edge_alignments = None, metadata = None):
self.sent_no = sent_no
self.sent_id = sent_id
self.amr = amr
self.tagged = tagged
self.ptb = ptb
self.alignments = edge_alignments
self.metadata = metadata
#in_f = open(sys.argv[1],'r')
#corpus = metadata_amr_corpus(in_f)
| isi-nlp/bolinas | common/hgraph/amr_corpus_reader.py | Python | mit | 4,973 | 0.019706 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2012 Owais Lone <hello@owaislone.org>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from os import path as op
import re
import requests
import urlparse
import tempfile
import threading
from BeautifulSoup import BeautifulSoup, SoupStrainer
import gettext
from gettext import gettext as _
gettext.textdomain('fogger')
from gi.repository import GLib, Gtk, Gdk, GdkPixbuf, GObject, Gio # pylint: disable=E0611
import logging
logger = logging.getLogger('fogger')
from fogger_lib import Window, IconChooserDialog, ConfirmDialog
from fogger_lib import FogAppManager
from fogger_lib.exceptions import BaseFogAppException
from fogger_lib.helpers import get_network_proxies
from fogger_lib.consts import DEFAULT_APP_ICON
from fogger_lib.BackgroundLoader import get_chameleonic_pixbuf_from_svg
from fogger.AboutFoggerDialog import AboutFoggerDialog
ICON_SIZE = Gtk.icon_size_register('FoggerIconSize', 80, 80)
GLib.threads_init()
# See fogger_lib.Window.py for more details about how this class works
class FoggerWindow(Window):
__gtype_name__ = "FoggerWindow"
def finish_initializing(self, builder): # pylint: disable=E1002
"""Set up the main window"""
super(FoggerWindow, self).finish_initializing(builder)
self.AboutDialog = AboutFoggerDialog
self.url = self.builder.get_object('url_entry')
self.name = self.builder.get_object('name_entry')
self.image = self.builder.get_object('image')
self.image_eb = self.builder.get_object('image_eb')
self.create_button = self.builder.get_object('create_button')
self.spinner = self.builder.get_object('spinner')
self.error_message = self.builder.get_object('error')
self.background_image = self.builder.get_object('bgimage')
self.icon = DEFAULT_APP_ICON
self.themed_icon = None
self.icon_selected = False
self.icon_theme = Gtk.IconTheme.get_default()
self.setup_drop_targets()
self.background_image.set_from_pixbuf(get_chameleonic_pixbuf_from_svg(
'background-app.svg'))
def validate_form(self, widget, data=None):
url = self.url.get_text()
name = self.name.get_text()
sensitive = url and name
self.create_button.set_sensitive(sensitive)
def setup_drop_targets(self):
self.drag_dest_set(Gtk.DestDefaults.ALL, [], Gdk.DragAction.MOVE)
self.connect("drag-data-received", self.on_drag_data_received)
self.drag_dest_add_uri_targets()
def on_drag_data_received(self, widget, context, x, y, data, info, time):
try:
path = data.get_uris()[0]
except IndexError:
return
else:
path = path.replace('file://', '')
self.setup_icon(path)
def on_cancel(self, widget, data=None):
self.destroy()
def on_url_changed(self, widget, data=None):
pass
def on_icon_clicked(self, widget, data=None):
icon_chooser = IconChooserDialog(self)
response = icon_chooser.run()
if response == Gtk.ResponseType.OK:
path = icon_chooser.get_filename()
self.setup_icon(path)
icon_chooser.destroy()
def on_name_changed(self, widget, data=None):
if self.icon_selected:
return
name = self.name.get_text().lower().strip().replace(' ', '-')
words = name.split('-')
subnames = []
for i, word in enumerate(words):
x = '-'.join(words[:(i + 1) * -1])
if x:
subnames.append(x)
search_strings = [name] + subnames
icon = self.icon_theme.choose_icon(search_strings, 0, Gtk.IconLookupFlags.GENERIC_FALLBACK)
if icon:
filename = icon.get_filename()
path, ext = op.splitext(filename)
_, themed_icon = op.split(path)
self.setup_icon(filename, themed_icon, False)
else:
self.setup_icon(DEFAULT_APP_ICON, None, False)
def setup_icon(self, path, name=None, selected=True):
pixbuf = GdkPixbuf.Pixbuf.new_from_file(path)
self.image.props.pixbuf = pixbuf.scale_simple(80, 80, GdkPixbuf.InterpType.BILINEAR)
self.icon = path
self.themed_icon = name
self.icon_selected = selected
def on_create(self, widget, data=None):
name = self.name.get_text()
manager = FogAppManager()
existing = manager.get_by_name(name)
if existing:
confirm = ConfirmDialog('Fogger', _('There\'s an app for that!'),
_('A fog app already exists by that name. '\
'Would you like to replace it with a new one?'),
existing.icon, self, _('Replace'))
response = confirm.run()
confirm.destroy()
if response != Gtk.ResponseType.YES:
self.name.grab_focus()
return
self.set_loading_url(True)
self.error_message.hide()
thread = threading.Thread(target=self.verify_url)
thread.daemon = True
thread.start()
def create_app(self, url, name):
manager = FogAppManager()
try:
app = manager.create(name, url, self.icon, self.themed_icon)
except BaseFogAppException:
logger.error("Error creating App %s" % url)
else:
app = Gio.DesktopAppInfo.new_from_filename(app.desktop_file)
app.launch([], Gio.AppLaunchContext())
self.destroy()
def set_loading_url(self, loading):
if loading:
self.spinner.show()
self.create_button.hide()
self.url.set_sensitive(False)
self.name.set_sensitive(False)
else:
self.spinner.hide()
self.create_button.show()
self.url.set_sensitive(True)
self.name.set_sensitive(True)
def set_error_message(self, message):
self.error_message.set_markup('<tt><small>%s</small></tt>' % message)
self.error_message.show()
def verify_url(self):
logger.debug('Fetching url')
url = self.url.get_text()
name = self.name.get_text()
verified = False
proxies = get_network_proxies()
try:
if url.startswith('file://'):
GObject.idle_add(self.set_loading_url, False)
GObject.idle_add(self.create_app, url, name)
return
elif not url.startswith(('http://', 'https://',)):
url = 'http://%s' % url
try:
logger.debug('starting')
response = requests.get(url, proxies=proxies)
verified = True
logger.debug('finishing')
except requests.RequestException:
logger.debug('Error downloading url %s' % url)
GObject.idle_add(self.set_loading_url, False)
GObject.idle_add(self.set_error_message,
_('The URL %s could not be reached.\nPlease double check'\
' the URL you provided and try again.' % url))
return
SkipIcon = type('SkipIcon', (Exception,), {})
if self.icon != DEFAULT_APP_ICON:
raise SkipIcon()
# Try to find the apple-touch-icon
logger.debug('parsing')
soup = BeautifulSoup(response.content, parseOnlyThese=SoupStrainer('link'))
icons = soup.findAll('link', rel=re.compile('^apple-touch-icon'))
logger.debug('finished parsing')
soup = BeautifulSoup(response.content)
if not icons:
logger.debug('No apple touch icon found')
raise SkipIcon()
icon = icons[0]
href = icon.attrMap.get('href', None)
if not href:
logger.debug('Bad apple touch icon')
raise SkipIcon()
icon_url = None
if href.startswith('/'):
parsed = urlparse.urlparse(url)
icon_url = urlparse.urljoin(
'%s://%s' % (parsed.scheme, parsed.netloc,), href)
else:
parsed = urlparse.urlparse(href)
if parsed.scheme:
icon_url = href
else:
icon_url = urlparse.urljoin(url, href)
ext = op.splitext(icon_url)[-1]
tmpf = tempfile.mktemp(ext)
logger.debug('temp file: %s' % tmpf)
headers = {'User-Agent': 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like'\
' Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko)'\
' Version/4.0.4 Mobile/7B334b Safari/531.21.10'}
try:
icon_bytes = requests.get(icon_url, headers=headers,
proxies=proxies).content
except requests.RequestException:
                logger.debug('Error downloading apple touch icon')
else:
handle = open(tmpf, 'w')
handle.write(icon_bytes)
handle.close()
self.setup_icon(tmpf)
except Exception, e:
logger.debug("Error", e)
finally:
GObject.idle_add(self.set_loading_url, False)
if verified:
GObject.idle_add(self.create_app, url, name)
| andrenam/Fogger | fogger/FoggerWindow.py | Python | gpl-3.0 | 10,132 | 0.002764 |
from collections import namedtuple
import io
import re
from six.moves.urllib.parse import urlparse
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.deployment_manager import dm_base
from ruamel.yaml import YAML
DM_OUTPUT_QUERY_REGEX = re.compile(
r'!DMOutput\s+(?P<url>\bdm://[-/a-zA-Z0-9]+\b)|'
r'\$\(out\.(?P<token>[-.a-zA-Z0-9]+)\)'
)
DMOutputQueryAttributes = namedtuple(
'DMOutputQueryAttributes',
['project',
'deployment',
'resource',
'name']
)
@dm_base.UseDmApi(dm_base.DmApiVersion.V2)
class DM_API(dm_base.DmCommand):
""" Class representing the DM API
    This is a proxy class only, so other modules in this project
only import this local class instead of gcloud's. Here's the source:
https://github.com/google-cloud-sdk/google-cloud-sdk/blob/master/lib/googlecloudsdk/api_lib/deployment_manager/dm_base.py
"""
API = DM_API()
def get_deployment(project, deployment):
try:
return API.client.deployments.Get(
API.messages.DeploymentmanagerDeploymentsGetRequest(
project=project,
deployment=deployment
)
)
except apitools_exceptions.HttpNotFoundError as _:
return None
def get_manifest(project, deployment):
deployment_rsp = get_deployment(project, deployment)
return API.client.manifests.Get(
API.messages.DeploymentmanagerManifestsGetRequest(
project=project,
deployment=deployment,
manifest=deployment_rsp.manifest.split('/')[-1]
)
)
def parse_dm_output_url(url, project=''):
error_msg = (
'The url must look like '
'"dm://${project}/${deployment}/${resource}/${name}" or'
'"dm://${deployment}/${resource}/${name}"'
)
parsed_url = urlparse(url)
if parsed_url.scheme != 'dm':
raise ValueError(error_msg)
path = parsed_url.path.split('/')[1:]
    # len(path) == 2 if the project isn't specified in the URL
    # len(path) == 3 if the project is specified in the URL
if len(path) == 2:
args = [project] + [parsed_url.netloc] + path
elif len(path) == 3:
args = [parsed_url.netloc] + path
else:
raise ValueError(error_msg)
return DMOutputQueryAttributes(*args)
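# Illustrative example (all names made up):
#   parse_dm_output_url('dm://my-deployment/my-vm/ip-address', project='my-project')
# returns DMOutputQueryAttributes(project='my-project', deployment='my-deployment',
# resource='my-vm', name='ip-address').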
def parse_dm_output_token(token, project=''):
error_msg = (
'The url must look like '
'$(out.${project}.${deployment}.${resource}.${name}" or '
'$(out.${deployment}.${resource}.${name}"'
)
parts = token.split('.')
    # len(parts) == 3 if the project isn't specified in the token
    # len(parts) == 4 if the project is specified in the token
if len(parts) == 3:
return DMOutputQueryAttributes(project, *parts)
elif len(parts) == 4:
return DMOutputQueryAttributes(*parts)
else:
raise ValueError(error_msg)
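# Illustrative example (all names made up):
#   parse_dm_output_token('my-deployment.my-vm.ip-address', project='my-project')
# returns the same DMOutputQueryAttributes as the dm:// URL example above.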
def get_deployment_output(project, deployment, resource, name):
manifest = get_manifest(project, deployment)
layout = YAML().load(manifest.layout)
for r in layout.get('resources', []):
if r['name'] != resource:
continue
for output in r.get('outputs', []):
if output['name'] == name:
return output['finalValue']
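# Illustrative example (all names made up):
#   get_deployment_output('my-project', 'my-deployment', 'my-vm', 'ip-address')
# reads the deployment's manifest layout and returns the 'finalValue' of the output
# named 'ip-address' on resource 'my-vm', or None if no such output exists.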
| aljim/deploymentmanager-samples | community/cloud-foundation/src/cloud_foundation_toolkit/dm_utils.py | Python | apache-2.0 | 3,240 | 0 |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='davis-weatherlink-scraper',
version='0.1.0',
description='Scraper and parser for Davis Weatherlink data',
long_description=long_description,
url='https://github.com/ojarva/davis-weatherlink-scraper',
author='Olli Jarva',
author_email='olli@jarva.fi',
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
],
keywords='davis weatherlink weather',
packages=["davis_weatherlink_scraper"],
install_requires=['beautifulsoup4==4.4.1', 'requests==2.20.0', 'docopt==0.6.2', 'redis==2.10.5'],
scripts=["davis_weatherlink_scraper/weatherlink_redis_publisher", "davis_weatherlink_scraper/weatherlink"],
test_suite="tests",
extras_require={
'dev': ['twine', 'wheel'],
},
)
| ojarva/davis-weatherlink-scraper | setup.py | Python | bsd-3-clause | 1,407 | 0.001421 |
replacing='qwertyuiopasdfghjklzxcvbnm )([]\/{}!@#$%^&*'
a='\/abcdefghijklmnopqrstuvwxyz() }{][*%$&^#@!'
replacing=list(replacing)
a=list(a)
d={}
e={}
if len(replacing)==len(a):
for x in range(len(a)):
d[replacing[x]]=a[x]
e[a[x]]=replacing[x]
def encypt(dict,string):
    'Encode a string by mapping each character through the substitution dict.'
code=[]
for x in string:
code.append(dict[x])
return ''.join(code)
def decypt(dict,string):
    'Decode a string by mapping each character back through the substitution dict.'
decode=[]
for x in string:
decode.append(dict[x])
return ''.join(decode)
if __name__=='__main__':
c=input('code:')
code=encypt(e,c)
decode=decypt(d,c)
    print('encrypts to', code)
    print('decrypts to', decode)
input()
| javaarchive/PIDLE | ccode.py | Python | mit | 846 | 0.049645 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Array Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/quantization/python/array_ops.py | Python | bsd-2-clause | 1,156 | 0 |
from test import test_support
from test.test_support import bigmemtest, _1G, _2G, _4G, precisionbigmemtest
import unittest
import operator
import string
import sys
# Bigmem testing houserules:
#
# - Try not to allocate too many large objects. It's okay to rely on
# refcounting semantics, but don't forget that 's = create_largestring()'
# doesn't release the old 's' (if it exists) until well after its new
# value has been created. Use 'del s' before the create_largestring call.
#
# - Do *not* compare large objects using assertEquals or similar. It's a
#   lengthy operation and the error message will be utterly useless due to
#   its size. To check whether a result has the right contents, it is better
#   to use the strip or count methods, or to compare meaningful slices.
#
# - Don't forget to test for large indices, offsets and results and such,
# in addition to large sizes.
#
# - When repeating an object (say, a substring, or a small list) to create
# a large object, make the subobject of a length that is not a power of
# 2. That way, int-wrapping problems are more easily detected.
#
# - While the bigmemtest decorator speaks of 'minsize', all tests will
# actually be called with a much smaller number too, in the normal
#   test run (currently 5Kb). This is so the tests themselves get frequent
# testing. Consequently, always make all large allocations based on the
# passed-in 'size', and don't rely on the size being very large. Also,
# memuse-per-size should remain sane (less than a few thousand); if your
# test uses more, adjust 'size' upward, instead.
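#
# For example (illustrative only), a test decorated with
#   @bigmemtest(minsize=_2G, memuse=2)
# receives a 'size' argument and is expected to need roughly size * memuse bytes at
# peak; in a normal run 'size' is only a few kilobytes, and the multi-gigabyte value
# is used only when a memory limit has been set via test_support.set_memlimit().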
class StrTest(unittest.TestCase):
@bigmemtest(minsize=_2G, memuse=2)
def test_capitalize(self, size):
SUBSTR = ' abc def ghi'
s = '-' * size + SUBSTR
caps = s.capitalize()
self.assertEquals(caps[-len(SUBSTR):],
SUBSTR.capitalize())
self.assertEquals(caps.lstrip('-'), SUBSTR)
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_center(self, size):
SUBSTR = ' abc def ghi'
s = SUBSTR.center(size)
self.assertEquals(len(s), size)
lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
if len(s) % 2:
lpadsize += 1
self.assertEquals(s[lpadsize:-rpadsize], SUBSTR)
self.assertEquals(s.strip(), SUBSTR.strip())
@precisionbigmemtest(size=_2G - 1, memuse=1)
def test_center_unicode(self, size):
SUBSTR = u' abc def ghi'
try:
s = SUBSTR.center(size)
except OverflowError:
pass # acceptable on 32-bit
else:
self.assertEquals(len(s), size)
lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
if len(s) % 2:
lpadsize += 1
self.assertEquals(s[lpadsize:-rpadsize], SUBSTR)
self.assertEquals(s.strip(), SUBSTR.strip())
del s
@bigmemtest(minsize=_2G, memuse=2)
def test_count(self, size):
SUBSTR = ' abc def ghi'
s = '.' * size + SUBSTR
self.assertEquals(s.count('.'), size)
s += '.'
self.assertEquals(s.count('.'), size + 1)
self.assertEquals(s.count(' '), 3)
self.assertEquals(s.count('i'), 1)
self.assertEquals(s.count('j'), 0)
@bigmemtest(minsize=_2G + 2, memuse=3)
def test_decode(self, size):
s = '.' * size
self.assertEquals(len(s.decode('utf-8')), size)
def basic_encode_test(self, size, enc, c=u'.', expectedsize=None):
if expectedsize is None:
expectedsize = size
s = c * size
self.assertEquals(len(s.encode(enc)), expectedsize)
@bigmemtest(minsize=_2G + 2, memuse=3)
def test_encode(self, size):
return self.basic_encode_test(size, 'utf-8')
@precisionbigmemtest(size=_4G // 6 + 2, memuse=2)
def test_encode_raw_unicode_escape(self, size):
try:
return self.basic_encode_test(size, 'raw_unicode_escape')
except MemoryError:
pass # acceptable on 32-bit
@precisionbigmemtest(size=_4G // 5 + 70, memuse=3)
def test_encode_utf7(self, size):
try:
return self.basic_encode_test(size, 'utf7')
except MemoryError:
pass # acceptable on 32-bit
@precisionbigmemtest(size=_4G // 4 + 5, memuse=6)
def test_encode_utf32(self, size):
try:
return self.basic_encode_test(size, 'utf32', expectedsize=4*size+4)
except MemoryError:
pass # acceptable on 32-bit
@precisionbigmemtest(size=_2G-1, memuse=2)
def test_decodeascii(self, size):
return self.basic_encode_test(size, 'ascii', c='A')
@precisionbigmemtest(size=_4G // 5, memuse=6+2)
def test_unicode_repr_oflw(self, size):
try:
s = u"\uAAAA"*size
r = repr(s)
except MemoryError:
pass # acceptable on 32-bit
else:
self.failUnless(s == eval(r))
@bigmemtest(minsize=_2G, memuse=2)
def test_endswith(self, size):
SUBSTR = ' abc def ghi'
s = '-' * size + SUBSTR
self.failUnless(s.endswith(SUBSTR))
self.failUnless(s.endswith(s))
s2 = '...' + s
self.failUnless(s2.endswith(s))
self.failIf(s.endswith('a' + SUBSTR))
self.failIf(SUBSTR.endswith(s))
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_expandtabs(self, size):
s = '-' * size
tabsize = 8
self.assertEquals(s.expandtabs(), s)
del s
slen, remainder = divmod(size, tabsize)
s = ' \t' * slen
s = s.expandtabs(tabsize)
self.assertEquals(len(s), size - remainder)
self.assertEquals(len(s.strip(' ')), 0)
@bigmemtest(minsize=_2G, memuse=2)
def test_find(self, size):
SUBSTR = ' abc def ghi'
sublen = len(SUBSTR)
s = ''.join([SUBSTR, '-' * size, SUBSTR])
self.assertEquals(s.find(' '), 0)
self.assertEquals(s.find(SUBSTR), 0)
self.assertEquals(s.find(' ', sublen), sublen + size)
self.assertEquals(s.find(SUBSTR, len(SUBSTR)), sublen + size)
self.assertEquals(s.find('i'), SUBSTR.find('i'))
self.assertEquals(s.find('i', sublen),
sublen + size + SUBSTR.find('i'))
self.assertEquals(s.find('i', size),
sublen + size + SUBSTR.find('i'))
self.assertEquals(s.find('j'), -1)
@bigmemtest(minsize=_2G, memuse=2)
def test_index(self, size):
SUBSTR = ' abc def ghi'
sublen = len(SUBSTR)
s = ''.join([SUBSTR, '-' * size, SUBSTR])
self.assertEquals(s.index(' '), 0)
self.assertEquals(s.index(SUBSTR), 0)
self.assertEquals(s.index(' ', sublen), sublen + size)
self.assertEquals(s.index(SUBSTR, sublen), sublen + size)
self.assertEquals(s.index('i'), SUBSTR.index('i'))
self.assertEquals(s.index('i', sublen),
sublen + size + SUBSTR.index('i'))
self.assertEquals(s.index('i', size),
sublen + size + SUBSTR.index('i'))
self.assertRaises(ValueError, s.index, 'j')
@bigmemtest(minsize=_2G, memuse=2)
def test_isalnum(self, size):
SUBSTR = '123456'
s = 'a' * size + SUBSTR
self.failUnless(s.isalnum())
s += '.'
self.failIf(s.isalnum())
@bigmemtest(minsize=_2G, memuse=2)
def test_isalpha(self, size):
SUBSTR = 'zzzzzzz'
s = 'a' * size + SUBSTR
self.failUnless(s.isalpha())
s += '.'
self.failIf(s.isalpha())
@bigmemtest(minsize=_2G, memuse=2)
def test_isdigit(self, size):
SUBSTR = '123456'
s = '9' * size + SUBSTR
self.failUnless(s.isdigit())
s += 'z'
self.failIf(s.isdigit())
@bigmemtest(minsize=_2G, memuse=2)
def test_islower(self, size):
chars = ''.join([ chr(c) for c in range(255) if not chr(c).isupper() ])
repeats = size // len(chars) + 2
s = chars * repeats
self.failUnless(s.islower())
s += 'A'
self.failIf(s.islower())
@bigmemtest(minsize=_2G, memuse=2)
def test_isspace(self, size):
whitespace = ' \f\n\r\t\v'
repeats = size // len(whitespace) + 2
s = whitespace * repeats
self.failUnless(s.isspace())
s += 'j'
self.failIf(s.isspace())
@bigmemtest(minsize=_2G, memuse=2)
def test_istitle(self, size):
SUBSTR = '123456'
s = ''.join(['A', 'a' * size, SUBSTR])
self.failUnless(s.istitle())
s += 'A'
self.failUnless(s.istitle())
s += 'aA'
self.failIf(s.istitle())
@bigmemtest(minsize=_2G, memuse=2)
def test_isupper(self, size):
chars = ''.join([ chr(c) for c in range(255) if not chr(c).islower() ])
repeats = size // len(chars) + 2
s = chars * repeats
self.failUnless(s.isupper())
s += 'a'
self.failIf(s.isupper())
@bigmemtest(minsize=_2G, memuse=2)
def test_join(self, size):
s = 'A' * size
x = s.join(['aaaaa', 'bbbbb'])
self.assertEquals(x.count('a'), 5)
self.assertEquals(x.count('b'), 5)
self.failUnless(x.startswith('aaaaaA'))
self.failUnless(x.endswith('Abbbbb'))
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_ljust(self, size):
SUBSTR = ' abc def ghi'
s = SUBSTR.ljust(size)
self.failUnless(s.startswith(SUBSTR + ' '))
self.assertEquals(len(s), size)
self.assertEquals(s.strip(), SUBSTR.strip())
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_lower(self, size):
s = 'A' * size
s = s.lower()
self.assertEquals(len(s), size)
self.assertEquals(s.count('a'), size)
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_lstrip(self, size):
SUBSTR = 'abc def ghi'
s = SUBSTR.rjust(size)
self.assertEquals(len(s), size)
self.assertEquals(s.lstrip(), SUBSTR.lstrip())
del s
s = SUBSTR.ljust(size)
self.assertEquals(len(s), size)
stripped = s.lstrip()
self.failUnless(stripped is s)
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_replace(self, size):
replacement = 'a'
s = ' ' * size
s = s.replace(' ', replacement)
self.assertEquals(len(s), size)
self.assertEquals(s.count(replacement), size)
s = s.replace(replacement, ' ', size - 4)
self.assertEquals(len(s), size)
self.assertEquals(s.count(replacement), 4)
self.assertEquals(s[-10:], ' aaaa')
@bigmemtest(minsize=_2G, memuse=2)
def test_rfind(self, size):
SUBSTR = ' abc def ghi'
sublen = len(SUBSTR)
s = ''.join([SUBSTR, '-' * size, SUBSTR])
self.assertEquals(s.rfind(' '), sublen + size + SUBSTR.rfind(' '))
self.assertEquals(s.rfind(SUBSTR), sublen + size)
self.assertEquals(s.rfind(' ', 0, size), SUBSTR.rfind(' '))
self.assertEquals(s.rfind(SUBSTR, 0, sublen + size), 0)
self.assertEquals(s.rfind('i'), sublen + size + SUBSTR.rfind('i'))
self.assertEquals(s.rfind('i', 0, sublen), SUBSTR.rfind('i'))
self.assertEquals(s.rfind('i', 0, sublen + size),
SUBSTR.rfind('i'))
self.assertEquals(s.rfind('j'), -1)
@bigmemtest(minsize=_2G, memuse=2)
def test_rindex(self, size):
SUBSTR = ' abc def ghi'
sublen = len(SUBSTR)
s = ''.join([SUBSTR, '-' * size, SUBSTR])
self.assertEquals(s.rindex(' '),
sublen + size + SUBSTR.rindex(' '))
self.assertEquals(s.rindex(SUBSTR), sublen + size)
self.assertEquals(s.rindex(' ', 0, sublen + size - 1),
SUBSTR.rindex(' '))
self.assertEquals(s.rindex(SUBSTR, 0, sublen + size), 0)
self.assertEquals(s.rindex('i'),
sublen + size + SUBSTR.rindex('i'))
self.assertEquals(s.rindex('i', 0, sublen), SUBSTR.rindex('i'))
self.assertEquals(s.rindex('i', 0, sublen + size),
SUBSTR.rindex('i'))
self.assertRaises(ValueError, s.rindex, 'j')
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_rjust(self, size):
SUBSTR = ' abc def ghi'
s = SUBSTR.ljust(size)
self.failUnless(s.startswith(SUBSTR + ' '))
self.assertEquals(len(s), size)
self.assertEquals(s.strip(), SUBSTR.strip())
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_rstrip(self, size):
SUBSTR = ' abc def ghi'
s = SUBSTR.ljust(size)
self.assertEquals(len(s), size)
self.assertEquals(s.rstrip(), SUBSTR.rstrip())
del s
s = SUBSTR.rjust(size)
self.assertEquals(len(s), size)
stripped = s.rstrip()
self.failUnless(stripped is s)
# The test takes about size bytes to build a string, and then about
# sqrt(size) substrings of sqrt(size) in size and a list to
# hold sqrt(size) items. It's close but just over 2x size.
@bigmemtest(minsize=_2G, memuse=2.1)
def test_split_small(self, size):
# Crudely calculate an estimate so that the result of s.split won't
# take up an inordinate amount of memory
chunksize = int(size ** 0.5 + 2)
SUBSTR = 'a' + ' ' * chunksize
s = SUBSTR * chunksize
l = s.split()
self.assertEquals(len(l), chunksize)
self.assertEquals(set(l), set(['a']))
del l
l = s.split('a')
self.assertEquals(len(l), chunksize + 1)
self.assertEquals(set(l), set(['', ' ' * chunksize]))
# Allocates a string of twice size (and briefly two) and a list of
# size. Because of internal affairs, the s.split() call produces a
# list of size times the same one-character string, so we only
# suffer for the list size. (Otherwise, it'd cost another 48 times
# size in bytes!) Nevertheless, a list of size takes
# 8*size bytes.
@bigmemtest(minsize=_2G + 5, memuse=10)
def test_split_large(self, size):
s = ' a' * size + ' '
l = s.split()
self.assertEquals(len(l), size)
self.assertEquals(set(l), set(['a']))
del l
l = s.split('a')
self.assertEquals(len(l), size + 1)
self.assertEquals(set(l), set([' ']))
@bigmemtest(minsize=_2G, memuse=2.1)
def test_splitlines(self, size):
# Crudely calculate an estimate so that the result of s.split won't
# take up an inordinate amount of memory
chunksize = int(size ** 0.5 + 2) // 2
SUBSTR = ' ' * chunksize + '\n' + ' ' * chunksize + '\r\n'
s = SUBSTR * chunksize
l = s.splitlines()
self.assertEquals(len(l), chunksize * 2)
self.assertEquals(set(l), set([' ' * chunksize]))
@bigmemtest(minsize=_2G, memuse=2)
def test_startswith(self, size):
SUBSTR = ' abc def ghi'
s = '-' * size + SUBSTR
self.failUnless(s.startswith(s))
self.failUnless(s.startswith('-' * size))
self.failIf(s.startswith(SUBSTR))
@bigmemtest(minsize=_2G, memuse=1)
def test_strip(self, size):
SUBSTR = ' abc def ghi '
s = SUBSTR.rjust(size)
self.assertEquals(len(s), size)
self.assertEquals(s.strip(), SUBSTR.strip())
del s
s = SUBSTR.ljust(size)
self.assertEquals(len(s), size)
self.assertEquals(s.strip(), SUBSTR.strip())
@bigmemtest(minsize=_2G, memuse=2)
def test_swapcase(self, size):
SUBSTR = "aBcDeFG12.'\xa9\x00"
sublen = len(SUBSTR)
repeats = size // sublen + 2
s = SUBSTR * repeats
s = s.swapcase()
self.assertEquals(len(s), sublen * repeats)
self.assertEquals(s[:sublen * 3], SUBSTR.swapcase() * 3)
self.assertEquals(s[-sublen * 3:], SUBSTR.swapcase() * 3)
@bigmemtest(minsize=_2G, memuse=2)
def test_title(self, size):
SUBSTR = 'SpaaHAaaAaham'
s = SUBSTR * (size // len(SUBSTR) + 2)
s = s.title()
self.failUnless(s.startswith((SUBSTR * 3).title()))
self.failUnless(s.endswith(SUBSTR.lower() * 3))
@bigmemtest(minsize=_2G, memuse=2)
def test_translate(self, size):
trans = string.maketrans('.aZ', '-!$')
SUBSTR = 'aZz.z.Aaz.'
sublen = len(SUBSTR)
repeats = size // sublen + 2
s = SUBSTR * repeats
s = s.translate(trans)
self.assertEquals(len(s), repeats * sublen)
self.assertEquals(s[:sublen], SUBSTR.translate(trans))
self.assertEquals(s[-sublen:], SUBSTR.translate(trans))
self.assertEquals(s.count('.'), 0)
self.assertEquals(s.count('!'), repeats * 2)
self.assertEquals(s.count('z'), repeats * 3)
@bigmemtest(minsize=_2G + 5, memuse=2)
def test_upper(self, size):
s = 'a' * size
s = s.upper()
self.assertEquals(len(s), size)
self.assertEquals(s.count('A'), size)
@bigmemtest(minsize=_2G + 20, memuse=1)
def test_zfill(self, size):
SUBSTR = '-568324723598234'
s = SUBSTR.zfill(size)
self.failUnless(s.endswith('0' + SUBSTR[1:]))
self.failUnless(s.startswith('-0'))
self.assertEquals(len(s), size)
self.assertEquals(s.count('0'), size - len(SUBSTR))
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_format(self, size):
s = '-' * size
sf = '%s' % (s,)
self.failUnless(s == sf)
del sf
sf = '..%s..' % (s,)
self.assertEquals(len(sf), len(s) + 4)
self.failUnless(sf.startswith('..-'))
self.failUnless(sf.endswith('-..'))
del s, sf
size //= 2
edge = '-' * size
s = ''.join([edge, '%s', edge])
del edge
s = s % '...'
self.assertEquals(len(s), size * 2 + 3)
self.assertEquals(s.count('.'), 3)
self.assertEquals(s.count('-'), size * 2)
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_repr_small(self, size):
s = '-' * size
s = repr(s)
self.assertEquals(len(s), size + 2)
self.assertEquals(s[0], "'")
self.assertEquals(s[-1], "'")
self.assertEquals(s.count('-'), size)
del s
# repr() will create a string four times as large as this 'binary
# string', but we don't want to allocate much more than twice
# size in total. (We do extra testing in test_repr_large())
size = size // 5 * 2
s = '\x00' * size
s = repr(s)
self.assertEquals(len(s), size * 4 + 2)
self.assertEquals(s[0], "'")
self.assertEquals(s[-1], "'")
self.assertEquals(s.count('\\'), size)
self.assertEquals(s.count('0'), size * 2)
@bigmemtest(minsize=_2G + 10, memuse=5)
def test_repr_large(self, size):
s = '\x00' * size
s = repr(s)
self.assertEquals(len(s), size * 4 + 2)
self.assertEquals(s[0], "'")
self.assertEquals(s[-1], "'")
self.assertEquals(s.count('\\'), size)
self.assertEquals(s.count('0'), size * 2)
@bigmemtest(minsize=2**32 // 5, memuse=6+2)
def test_unicode_repr(self, size):
s = u"\uAAAA" * size
self.failUnless(len(repr(s)) > size)
# This test is meaningful even with size < 2G, as long as the
# doubled string is > 2G (but it tests more if both are > 2G :)
@bigmemtest(minsize=_1G + 2, memuse=3)
def test_concat(self, size):
s = '.' * size
self.assertEquals(len(s), size)
s = s + s
self.assertEquals(len(s), size * 2)
self.assertEquals(s.count('.'), size * 2)
# This test is meaningful even with size < 2G, as long as the
# repeated string is > 2G (but it tests more if both are > 2G :)
@bigmemtest(minsize=_1G + 2, memuse=3)
def test_repeat(self, size):
s = '.' * size
self.assertEquals(len(s), size)
s = s * 2
self.assertEquals(len(s), size * 2)
self.assertEquals(s.count('.'), size * 2)
@bigmemtest(minsize=_2G + 20, memuse=1)
def test_slice_and_getitem(self, size):
SUBSTR = '0123456789'
sublen = len(SUBSTR)
s = SUBSTR * (size // sublen)
stepsize = len(s) // 100
stepsize = stepsize - (stepsize % sublen)
for i in range(0, len(s) - stepsize, stepsize):
self.assertEquals(s[i], SUBSTR[0])
self.assertEquals(s[i:i + sublen], SUBSTR)
self.assertEquals(s[i:i + sublen:2], SUBSTR[::2])
if i > 0:
self.assertEquals(s[i + sublen - 1:i - 1:-3],
SUBSTR[sublen::-3])
# Make sure we do some slicing and indexing near the end of the
# string, too.
self.assertEquals(s[len(s) - 1], SUBSTR[-1])
self.assertEquals(s[-1], SUBSTR[-1])
self.assertEquals(s[len(s) - 10], SUBSTR[0])
self.assertEquals(s[-sublen], SUBSTR[0])
self.assertEquals(s[len(s):], '')
self.assertEquals(s[len(s) - 1:], SUBSTR[-1])
self.assertEquals(s[-1:], SUBSTR[-1])
self.assertEquals(s[len(s) - sublen:], SUBSTR)
self.assertEquals(s[-sublen:], SUBSTR)
self.assertEquals(len(s[:]), len(s))
self.assertEquals(len(s[:len(s) - 5]), len(s) - 5)
self.assertEquals(len(s[5:-5]), len(s) - 10)
self.assertRaises(IndexError, operator.getitem, s, len(s))
self.assertRaises(IndexError, operator.getitem, s, len(s) + 1)
self.assertRaises(IndexError, operator.getitem, s, len(s) + 1<<31)
@bigmemtest(minsize=_2G, memuse=2)
def test_contains(self, size):
SUBSTR = '0123456789'
edge = '-' * (size // 2)
s = ''.join([edge, SUBSTR, edge])
del edge
self.failUnless(SUBSTR in s)
self.failIf(SUBSTR * 2 in s)
self.failUnless('-' in s)
self.failIf('a' in s)
s += 'a'
self.failUnless('a' in s)
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_compare(self, size):
s1 = '-' * size
s2 = '-' * size
self.failUnless(s1 == s2)
del s2
s2 = s1 + 'a'
self.failIf(s1 == s2)
del s2
s2 = '.' * size
self.failIf(s1 == s2)
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_hash(self, size):
# Not sure if we can do any meaningful tests here... Even if we
# start relying on the exact algorithm used, the result will be
# different depending on the size of the C 'long int'. Even this
# test is dodgy (there's no *guarantee* that the two things should
# have a different hash, even if they, in the current
# implementation, almost always do.)
s = '\x00' * size
h1 = hash(s)
del s
s = '\x00' * (size + 1)
self.failIf(h1 == hash(s))
class TupleTest(unittest.TestCase):
# Tuples have a small, fixed-sized head and an array of pointers to
# data. Since we're testing 64-bit addressing, we can assume that the
# pointers are 8 bytes, and that thus that the tuples take up 8 bytes
# per size.
# As a side-effect of testing long tuples, these tests happen to test
# having more than 2<<31 references to any given object. Hence the
# use of different types of objects as contents in different tests.
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_compare(self, size):
t1 = (u'',) * size
t2 = (u'',) * size
self.failUnless(t1 == t2)
del t2
t2 = (u'',) * (size + 1)
self.failIf(t1 == t2)
del t2
t2 = (1,) * size
self.failIf(t1 == t2)
# Test concatenating into a single tuple of more than 2G in length,
# and concatenating a tuple of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_concat_test(self, size):
t = ((),) * size
self.assertEquals(len(t), size)
t = t + t
self.assertEquals(len(t), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_concat_test(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_concat_test(size)
@bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
t = (1, 2, 3, 4, 5) * size
self.assertEquals(len(t), size * 5)
self.failUnless(5 in t)
self.failIf((1, 2, 3, 4, 5) in t)
self.failIf(0 in t)
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_hash(self, size):
t1 = (0,) * size
h1 = hash(t1)
del t1
t2 = (0,) * (size + 1)
self.failIf(h1 == hash(t2))
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
t = (None,) * size
self.assertEquals(len(t), size)
self.assertEquals(t[-1], None)
self.assertEquals(t[5], None)
self.assertEquals(t[size - 1], None)
self.assertRaises(IndexError, operator.getitem, t, size)
self.assertEquals(t[:5], (None,) * 5)
self.assertEquals(t[-5:], (None,) * 5)
self.assertEquals(t[20:25], (None,) * 5)
self.assertEquals(t[-25:-20], (None,) * 5)
self.assertEquals(t[size - 5:], (None,) * 5)
self.assertEquals(t[size - 5:size], (None,) * 5)
self.assertEquals(t[size - 6:size - 2], (None,) * 4)
self.assertEquals(t[size:size], ())
self.assertEquals(t[size:size+5], ())
# Like test_concat, split in two.
def basic_test_repeat(self, size):
t = ('',) * size
self.assertEquals(len(t), size)
t = t * 2
self.assertEquals(len(t), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
@bigmemtest(minsize=_1G - 1, memuse=12)
def test_repeat_large_2(self, size):
return self.basic_test_repeat(size)
@precisionbigmemtest(size=_1G - 1, memuse=9)
def test_from_2G_generator(self, size):
try:
t = tuple(xrange(size))
except MemoryError:
pass # acceptable on 32-bit
else:
count = 0
for item in t:
self.assertEquals(item, count)
count += 1
self.assertEquals(count, size)
@precisionbigmemtest(size=_1G - 25, memuse=9)
def test_from_almost_2G_generator(self, size):
try:
t = tuple(xrange(size))
count = 0
for item in t:
self.assertEquals(item, count)
count += 1
self.assertEquals(count, size)
except MemoryError:
pass # acceptable, expected on 32-bit
# Like test_concat, split in two.
def basic_test_repr(self, size):
t = (0,) * size
s = repr(t)
# The repr of a tuple of 0's is exactly three times the tuple length.
self.assertEquals(len(s), size * 3)
self.assertEquals(s[:5], '(0, 0')
self.assertEquals(s[-5:], '0, 0)')
self.assertEquals(s.count('0'), size)
@bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(minsize=_2G + 2, memuse=8 + 3)
def test_repr_large(self, size):
return self.basic_test_repr(size)
class ListTest(unittest.TestCase):
# Like tuples, lists have a small, fixed-sized head and an array of
# pointers to data, so 8 bytes per size. Also like tuples, we make the
# lists hold references to various objects to test their refcount
# limits.
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_compare(self, size):
l1 = [u''] * size
l2 = [u''] * size
self.failUnless(l1 == l2)
del l2
l2 = [u''] * (size + 1)
self.failIf(l1 == l2)
del l2
l2 = [2] * size
self.failIf(l1 == l2)
# Test concatenating into a single list of more than 2G in length,
# and concatenating a list of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_test_concat(self, size):
l = [[]] * size
self.assertEquals(len(l), size)
l = l + l
self.assertEquals(len(l), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_test_concat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_test_concat(size)
def basic_test_inplace_concat(self, size):
l = [sys.stdout] * size
l += l
self.assertEquals(len(l), size * 2)
self.failUnless(l[0] is l[-1])
self.failUnless(l[size - 1] is l[size + 1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_inplace_concat_small(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_inplace_concat_large(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEquals(len(l), size * 5)
self.failUnless(5 in l)
self.failIf([1, 2, 3, 4, 5] in l)
self.failIf(0 in l)
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_hash(self, size):
l = [0] * size
self.failUnlessRaises(TypeError, hash, l)
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
l = [None] * size
self.assertEquals(len(l), size)
self.assertEquals(l[-1], None)
self.assertEquals(l[5], None)
self.assertEquals(l[size - 1], None)
self.assertRaises(IndexError, operator.getitem, l, size)
self.assertEquals(l[:5], [None] * 5)
self.assertEquals(l[-5:], [None] * 5)
self.assertEquals(l[20:25], [None] * 5)
self.assertEquals(l[-25:-20], [None] * 5)
self.assertEquals(l[size - 5:], [None] * 5)
self.assertEquals(l[size - 5:size], [None] * 5)
self.assertEquals(l[size - 6:size - 2], [None] * 4)
self.assertEquals(l[size:size], [])
self.assertEquals(l[size:size+5], [])
l[size - 2] = 5
self.assertEquals(len(l), size)
self.assertEquals(l[-3:], [None, 5, None])
self.assertEquals(l.count(5), 1)
self.assertRaises(IndexError, operator.setitem, l, size, 6)
self.assertEquals(len(l), size)
l[size - 7:] = [1, 2, 3, 4, 5]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[-7:], [None, None, 1, 2, 3, 4, 5])
l[:7] = [1, 2, 3, 4, 5]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[:7], [1, 2, 3, 4, 5, None, None])
del l[size - 1]
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(l[-1], 4)
del l[-2:]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[-1], 2)
del l[0]
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(l[0], 2)
del l[:2]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[0], 4)
# Like test_concat, split in two.
def basic_test_repeat(self, size):
l = [] * size
self.failIf(l)
l = [''] * size
self.assertEquals(len(l), size)
l = l * 2
self.assertEquals(len(l), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
def basic_test_inplace_repeat(self, size):
l = ['']
l *= size
self.assertEquals(len(l), size)
self.failUnless(l[0] is l[-1])
del l
l = [''] * size
l *= 2
self.assertEquals(len(l), size * 2)
self.failUnless(l[size - 1] is l[-1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=16)
def test_inplace_repeat_small(self, size):
return self.basic_test_inplace_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_inplace_repeat_large(self, size):
return self.basic_test_inplace_repeat(size)
def basic_test_repr(self, size):
l = [0] * size
s = repr(l)
# The repr of a list of 0's is exactly three times the list length.
self.assertEquals(len(s), size * 3)
self.assertEquals(s[:5], '[0, 0')
self.assertEquals(s[-5:], '0, 0]')
self.assertEquals(s.count('0'), size)
@bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(minsize=_2G + 2, memuse=8 + 3)
def test_repr_large(self, size):
return self.basic_test_repr(size)
# list overallocates ~1/8th of the total size (on first expansion) so
# the single list.append call puts memuse at 9 bytes per size.
@bigmemtest(minsize=_2G, memuse=9)
def test_append(self, size):
l = [object()] * size
l.append(object())
self.assertEquals(len(l), size+1)
self.failUnless(l[-3] is l[-2])
self.failIf(l[-2] is l[-1])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_count(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEquals(l.count(1), size)
self.assertEquals(l.count("1"), 0)
def basic_test_extend(self, size):
l = [file] * size
l.extend(l)
self.assertEquals(len(l), size * 2)
self.failUnless(l[0] is l[-1])
self.failUnless(l[size - 1] is l[size + 1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=16)
def test_extend_small(self, size):
return self.basic_test_extend(size)
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_extend_large(self, size):
return self.basic_test_extend(size)
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_index(self, size):
l = [1L, 2L, 3L, 4L, 5L] * size
size *= 5
self.assertEquals(l.index(1), 0)
self.assertEquals(l.index(5, size - 5), size - 1)
self.assertEquals(l.index(5, size - 5, size), size - 1)
self.assertRaises(ValueError, l.index, 1, size - 4, size)
self.assertRaises(ValueError, l.index, 6L)
# This tests suffers from overallocation, just like test_append.
@bigmemtest(minsize=_2G + 10, memuse=9)
def test_insert(self, size):
l = [1.0] * size
l.insert(size - 1, "A")
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[-3:], [1.0, "A", 1.0])
l.insert(size + 1, "B")
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[-3:], ["A", 1.0, "B"])
l.insert(1, "C")
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[:3], [1.0, "C", 1.0])
self.assertEquals(l[size - 3:], ["A", 1.0, "B"])
@bigmemtest(minsize=_2G // 5 + 4, memuse=8 * 5)
def test_pop(self, size):
l = [u"a", u"b", u"c", u"d", u"e"] * size
size *= 5
self.assertEquals(len(l), size)
item = l.pop()
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(item, u"e")
self.assertEquals(l[-2:], [u"c", u"d"])
item = l.pop(0)
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(item, u"a")
self.assertEquals(l[:2], [u"b", u"c"])
item = l.pop(size - 2)
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(item, u"c")
self.assertEquals(l[-2:], [u"b", u"d"])
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_remove(self, size):
l = [10] * size
self.assertEquals(len(l), size)
l.remove(10)
size -= 1
self.assertEquals(len(l), size)
# Because of the earlier l.remove(), this append doesn't trigger
# a resize.
l.append(5)
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[-2:], [10, 5])
l.remove(5)
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(l[-2:], [10, 10])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_reverse(self, size):
l = [1, 2, 3, 4, 5] * size
l.reverse()
self.assertEquals(len(l), size * 5)
self.assertEquals(l[-5:], [5, 4, 3, 2, 1])
self.assertEquals(l[:5], [5, 4, 3, 2, 1])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_sort(self, size):
l = [1, 2, 3, 4, 5] * size
l.sort()
self.assertEquals(len(l), size * 5)
self.assertEquals(l.count(1), size)
self.assertEquals(l[:10], [1] * 10)
self.assertEquals(l[-10:], [5] * 10)
class BufferTest(unittest.TestCase):
@precisionbigmemtest(size=_1G, memuse=4)
def test_repeat(self, size):
try:
with test_support._check_py3k_warnings():
b = buffer("AAAA")*size
except MemoryError:
pass # acceptable on 32-bit
else:
count = 0
for c in b:
self.assertEquals(c, 'A')
count += 1
self.assertEquals(count, size*4)
def test_main():
test_support.run_unittest(StrTest, TupleTest, ListTest, BufferTest)
if __name__ == '__main__':
if len(sys.argv) > 1:
test_support.set_memlimit(sys.argv[1])
test_main()
| babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_bigmem.py | Python | mit | 39,354 | 0.001347 |
import svgutils.transform as sg
from common import load_svg, label_plot
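# Compose two SVG panels side by side into a single 4.1in x 1.8in figure:
# panel (a) is taken from snakemake.input[1], panel (b) from snakemake.input[0]
# (shifted 190 units to the right), and both get a small letter label.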
fig = sg.SVGFigure("4.1in", "1.8in")
a = load_svg(snakemake.input[1])
b = load_svg(snakemake.input[0])
b.moveto(190, 0)
la = label_plot(5, 10, "a")
lb = label_plot(185, 10, "b")
fig.append([a, b, la, lb])
fig.save(snakemake.output[0])
| merfishtools/merfishtools-evaluation | scripts/fig-dataset-correlation.py | Python | mit | 311 | 0 |
from .. import __description__
from ..defender import VkRaidDefender, data, update_data
####################################################################################################
LOGO = '''\
_ _ _ _ __ _
__ _| | __ _ __ __ _(_) __| | __| | ___ / _| ___ _ __ __| | ___ _ __
\ \ / / |/ / | '__/ _` | |/ _` | / _` |/ _ \ |_ / _ \ '_ \ / _` |/ _ \ '__|
\ V /| < | | | (_| | | (_| | | (_| | __/ _| __/ | | | (_| | __/ |
\_/ |_|\_\ |_| \__,_|_|\__,_| \__,_|\___|_| \___|_| |_|\__,_|\___|_|
by alfred richardsn'''
####################################################################################################
from ..logger import logger
from ..settings import CLIENT_ID
import re
import os
import sys
import webbrowser
from getpass import getpass
from argparse import ArgumentParser
from vk_api.exceptions import ApiError
from requests.exceptions import InvalidSchema, ProxyError
class CLIDefender(VkRaidDefender):
def run(self, chat_ids, objectives):
self._chat_ids = chat_ids
self._objectives = objectives
start_screen()
        logger.info('starting to receive messages')
try:
self.listen()
except KeyboardInterrupt:
raise
except Exception as e:
start_screen()
            logger.critical('a critical error occurred, restarting', exc_info=True)
self.listen()
def start_screen():
os.system('cls' if os.name == 'nt' else 'clear')
print(LOGO + '\n\n')
def ask_yes_or_no(question, true_answer='y', false_answer='n', default_answer='', default=True):
true_answer = true_answer.lower()
false_answer = false_answer.lower()
default_answer = default_answer.lower()
output = question.strip() + ' (' + (true_answer.upper() + '/' + false_answer if default else
true_answer + '/' + false_answer.upper()) + '): '
answer = None
while answer not in (true_answer, false_answer, default_answer):
answer = input(output).lower()
if answer == true_answer:
return True
elif answer == false_answer:
return False
else:
return default
def register():
    use_webbrowser = ask_yes_or_no('open the authorization link in the default web browser?')
print()
oauth_url = 'https://oauth.vk.com/authorize?client_id={}&display=page&redirect_uri=https://oauth.vk.com/blank.html&scope=69632&response_type=token'.format(CLIENT_ID)
if use_webbrowser:
webbrowser.open(oauth_url, new=2)
        print('an authorization link has just been opened in the web browser.')
    else:
        print(oauth_url + '\n')
        print('open the page at the link above in a web browser.')
token = None
while token is None:
        user_input = getpass('log in on the opened page if necessary and paste the address bar of the page you were redirected to: ')
token = re.search(r'(?:.*access_token=)?([a-f0-9]+).*', user_input)
return token.group(1)
def run(proxy=None, chat_ids=[], objectives=[], auto_login=False):
token = data.get('token')
proxies = data.get('proxies')
    if not token or (not auto_login and not ask_yes_or_no('use previously saved authorization data?')):
token = register()
proxies = None
IP_ADDRESS = re.compile(r'((socks5://)|(?:https?://))?(localhost|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})')
if proxy:
match = IP_ADDRESS.match(proxy)
if not proxy or (not match and not auto_login):
        proxy = input('enter a proxy server address if you need to use one: ')
while proxy:
match = IP_ADDRESS.match(proxy)
if match:
break
            proxy = input('invalid server address format, try again: ')
else:
match = None
if match:
protocol, use_socks, ip = match.groups()
if not protocol:
            use_socks = ask_yes_or_no('use the socks5 protocol instead of http?') if not auto_login else False
if use_socks:
proxies = {'http': 'socks5://' + ip, 'https': 'socks5://' + ip}
else:
proxies = {'http': 'http://' + ip, 'https': 'https://' + ip}
    if auto_login or ask_yes_or_no('save the entered data for future sessions?'):
data['token'] = token
data['proxies'] = proxies
update_data()
start_screen()
if not chat_ids:
chat_ids = data.get('chat_ids')
if not objectives:
objectives = data.get('objectives')
    if chat_ids is None or objectives is None or (not auto_login and not ask_yes_or_no('use previously saved working data?')):
        chat_ids = list(map(int, input('enter the ids of the chats in which raiders should be protected, separated by spaces: ').split()))
        objectives = list(map(int, input('enter the ids of the raiders to protect: ').split()))
        if auto_login or ask_yes_or_no('save the entered data for future sessions?'):
data['chat_ids'] = chat_ids
data['objectives'] = objectives
update_data()
try:
defender = CLIDefender(token, proxies=proxies)
except InvalidSchema:
        sys.exit('additional dependencies must be installed to support the socks5 protocol')
except ApiError:
del data['token']
update_data()
        sys.exit('the provided token is invalid')
except ProxyError:
del data['proxies']
update_data()
        sys.exit('could not connect to the proxy server')
defender.run(chat_ids, objectives)
def main():
    parser = ArgumentParser(prog='vk-raid-defender', description=__description__, usage='%(prog)s [options]', add_help=False)
    group = parser.add_argument_group('optional arguments')
    group.add_argument('-h', '--help', action='help', help='show this help message and exit')
    group.add_argument('-l', '--login', action='store_true', help='log in automatically')
    group.add_argument('-p', '--proxy', metavar='proxy_address', help='proxy server address')
    group.add_argument('-c', '--chats', type=int, nargs='+', metavar='chat', help='ids of the chats in which raiders should be protected')
    group.add_argument('-u', '--users', type=int, nargs='+', metavar='user', help='ids of the raiders to protect')
args = parser.parse_args()
try:
run(args.proxy, args.chats, args.users, args.login)
except KeyboardInterrupt:
print()
sys.exit()
if __name__ == "__main__":
main()
| r4rdsn/vk-raid-defender | vk_raid_defender/cli/cli.py | Python | mit | 7,730 | 0.007869 |
# Gradient Sliders
# Custom ControlP5 Compound Classes for percentage sliders
# For Processing in Python
import copy
add_library('controlP5')
class GradientController(object):
def __init__(self, colorList, cp5, sliderClass):
self.colorList = self.colorListFromColors(colorList)
self.cp5 = cp5
self.Slider = sliderClass
self.stopCounter = 0
self.controllerIdentity = 1
self.x = 0
self.y = 0
self.width = 100
self.height = 100
self.calculatedHeight = 0
self.sliderWidth = 50
self.sliderHeight = 10
self.backgroundColor = color(220)
self.margin = 2
self.allStops = []
self.callbackActive = True
self.testLerpSlider = None
self.needsDisplay = True
def setPosition(self, x, y):
self.x = x
self.y = y
def setSize(self, w, h):
self.width = w
self.height = h
def setSliderSize(self, w, h):
self.sliderWidth = w
self.sliderHeight = h
def addOuterColorStops(self):
beginStop = self.createColorstop(self.colorList, self.x, self.y, 0.0, False)
self.allStops.append(beginStop)
xAtEnd = (self.x + self.width) - self.sliderWidth
endStop = self.createColorstop(self.colorList, xAtEnd, self.y, 1.0)
self.allStops.append(endStop)
def insertColorStop(self, position):
for i, aStop in enumerate(self.allStops):
if position < aStop['position']:
insertX = self.positionOfSubStop(i, position, True)[0]
newStop = self.createColorstop(self.colorList, insertX, self.y, position, False)
self.allStops.insert(i, newStop)
break
self.recalcSubStopPositions()
def addStopPositionSliders(self):
# Calculate position
x = self.x + self.sliderWidth + self.margin
y = self.y + self.calculatedHeight + self.margin
w = self.width - (2 * (self.sliderWidth + self.margin))
# Callback
def positionSliderCallback(event):
if self.callbackActive:
if event.getAction() == 32:
self.positionChanges(event.getController())
if event.getAction() == 8 or event.getAction() == 16:
self.stopPositionDidChange(event.getController())
# Set the slider
sliderName = "stop_position_%d_%d" % (self.stopCounter, self.controllerIdentity)
pSlider = self.Slider(self.cp5, sliderName)
pSlider.setCaptionLabel("")
pSlider.setSliderMode(0)
pSlider.setColorForeground(color(150))
pSlider.setColorBackground(color(70))
pSlider.setColorActive(color(220))
pSlider.setSize(w, self.sliderHeight)
pSlider.setPosition(x, y)
# For testing: (was 50)
pSlider.setValue(40)
pSlider.addCallback(positionSliderCallback)
self.testLerpSlider = pSlider
def positionChanges(self, aSlider):
if self.callbackActive:
# print "change %f" % aSlider.getValue()
# Move stop
self.allStops[1]['position'] = aSlider.getValue() / 100.0
self.recalcSubStopPositions()
self.needsDisplay = True
def stopPositionDidChange(self, aSlider):
print "stopPositionDidChange"
def display(self):
# Sliders are drawn by cp5
# draw graph
if self.needsDisplay:
self.drawGraph(self.allStops)
self.needsDisplay = False
def getSliderValues(self):
stopsData = []
for cStop in self.allStops:
thisStop = {'position': cStop['position']}
sliders = {}
for i, slider in enumerate(cStop['sliders']):
sliders[self.colorList[i]['code']] = slider.getValue()
thisStop['values'] = sliders
stopsData.append(thisStop)
return {'stops': stopsData}
def setSliderValues(self, stopsData):
if len(stopsData) == len(self.allStops):
self.callbackActive = False
for i, stopValues in enumerate(stopsData):
theStop = self.allStops[i]
theStop['position'] = stopValues['position']
if stopValues.has_key('values'):
if len(theStop['sliders']) != len(stopValues['values'].keys()):
print "WARNING: Possible problem setting slider values - number of colors not matching"
for key, value in stopValues['values'].iteritems():
indexOfSlider = next(index for (index, c) in enumerate(self.colorList) if c['code'] == key)
slider = theStop['sliders'][indexOfSlider]
slider.setValue(value)
else:
print "ERROR: Setting Slider Values Failed - 'values' key missing"
self.callbackActive = True
else:
print "ERROR: Setting Slider Values Failed - number of stops not matching"
def valueForKeyAtPosition(self, key, inFloat):
# Find the index of the color with key
colorIndex = 0
for i, c in enumerate(self.colorList):
if key == c['code']:
colorIndex = i
break
# Create allStopPositions
stopPositions = []
values = []
for i, cStop in enumerate(self.allStops):
# collect stop positions
v = cStop['sliders'][colorIndex].getValue()
# set inbetween values
# if len(stopPositions) > 0:
# # TODO: fix for right position (refactor testLerpSlider)
# testLerpPosition = self.testLerpSlider.getValue() / 100.0
# prevStopPosition = stopPositions[-1]
# nextStopPosition = cStop['position']
# stopPositions.append(lerp(prevStopPosition, nextStopPosition, testLerpPosition))
# # add inbetween value
# values.append(lerp(values[-1], v, 0.5))
stopPositions.append(cStop['position'])
# add value of slider with colorIndex
values.append(v)
# Find the two stop positions which are right and left of the given position
prevStopPosition = 0.0
nextStopPosition = 0.0
prevValue = 0.0
nextValue = 0.0
relativePosition = 0.0
for i, p in enumerate(stopPositions):
if inFloat <= p:
prevP = stopPositions[i - 1]
relativePosition = (inFloat - prevP) / (p - prevP)
prevValue = values[i - 1]
nextValue = values[i]
break
else:
# inFloat is outside bounds of stopPosition range
# Return the maximum stop position value
return values[-1]
return lerp(prevValue, nextValue, relativePosition)
def recalcSubStopPositions(self):
if len(self.allStops) > 2:
for i, subStop in enumerate(self.allStops[1:-1]):
pos = self.positionOfSubStop(i + 1)
# Reposition sliders of substop
for slider in subStop['sliders']:
sliderPos = slider.getPosition()
slider.setPosition(pos[0], sliderPos.y)
def positionOfSubStop(self, indexOfStop, preInsertPosition = 0, preInsertionMode = False):
w = self.sliderWidth
numberOfStops = len(self.allStops)
thePosition = self.allStops[indexOfStop]['position']
if preInsertionMode:
numberOfStops += 1
thePosition = preInsertPosition
availableWidth = self.width - ((numberOfStops * w) + ((numberOfStops - 1) * 2 * self.margin))
leadingSpace = availableWidth * thePosition
precedingStopsWidth = indexOfStop * (w + (2 * self.margin))
x = self.x + leadingSpace + precedingStopsWidth
return (int(x), int(w))
def colorListFromColors(self, colors):
newList = copy.copy(colors)
for c in newList:
if c['code'] == "__":
c['color'] = None
else:
c['color'] = color(c['r'], c['g'], c['b'])
return newList
def emptyDataStopSet(self):
colorDataSet = []
for c in self.colorList:
colorD = {}
colorD['color'] = c['color']
colorD['name'] = c['name']
colorD['code'] = c['code']
colorD['values'] = []
colorD['hidden'] = True
colorDataSet.append(colorD)
return colorDataSet
def drawGraph(self, stopsArray):
# Collect the data
# (Assume that every stop has the same amount and order of colors)
# (The last stop has the color name)
colorData = self.emptyDataStopSet()
stopPositions = []
for cStop in stopsArray:
# collect stop positions
# # set inbetween values
# if len(stopPositions) > 0:
# # TODO: fix for right position (refactor testLerpSlider)
# testLerpPosition = self.testLerpSlider.getValue() / 100.0
# prevStopPosition = stopPositions[-1]
# nextStopPosition = cStop['position']
# stopPositions.append(lerp(prevStopPosition, nextStopPosition, testLerpPosition))
stopPositions.append(cStop['position'])
# collect values and calculate inbetween values
for i, slider in enumerate(cStop['sliders']):
v = slider.getValue()
# Make inbetween semi-stop
# if len(colorData[i]['values']) > 0:
# inbetween_v = lerp(colorData[i]['values'][-1], v, 0.5)
# colorData[i]['values'].append(inbetween_v)
colorData[i]['values'].append(v)
if v > 0:
colorData[i]['hidden'] = False
# Get height and y
y = self.y
h = self.calculatedHeight
# Draw the sub graphs
for i in range(len(stopsArray) - 1):
# calculate position
fromStop = stopsArray[i]
toStop = stopsArray[i + 1]
fromStopPos = self.positionOfSubStop(i)
toStopPos = self.positionOfSubStop(i + 1)
x = fromStopPos[0] + fromStopPos[1] + self.margin
w = (toStopPos[0] - self.margin) - x
# Normalize stop positions
normalizedStopPositions = []
for pos in stopPositions:
norm = map(pos, fromStop['position'], toStop['position'], 1, w - 2)
normalizedStopPositions.append(norm)
self.drawSubGraph(x, y, w, h, colorData, normalizedStopPositions, i, i + 1)
def drawSubGraph(self, x, y, w, h, colorData, stopXs, indexFrom, indexTo):
# Draw background
fill(self.backgroundColor)
noStroke()
rect(x, y, w, h)
# Draw lines
for c in colorData:
if not c['hidden']:
if c['color']:
stroke(c['color'])
else:
stroke(255)
for i, v in enumerate(c['values'][indexFrom:indexTo]):
index = i + indexFrom
x1 = x + stopXs[index]
x2 = x + stopXs[index + 1]
norm_v1 = (h - 2) * (v / 100)
y1 = y + ((h - 1) - norm_v1)
norm_v2 = (h - 2) * (c['values'][index + 1] / 100)
y2 = y + ((h - 1) - norm_v2)
line(x1, y1, x2, y2)
def createColorstop(self, colorsInStop, x, y, position = 0.0, showLabel = True):
# Create callback
def colorSliderCallback(event):
if self.callbackActive:
if event.getAction() == 32:
self.valueChanges(event.getController())
if event.getAction() == 8 or event.getAction() == 16:
self.sliderReleased(event.getController())
# else:
# print event.getAction()
# Create colorStop
colorStop = {}
colorStop['position'] = position
colorSliders = []
for c in colorsInStop:
sliderName = "%s_%d_%d" % (c['name'], self.stopCounter, self.controllerIdentity)
cSlider = self.Slider(self.cp5, sliderName)
if c['color']:
tileColor = c['color']
else:
tileColor = color(255)
if showLabel:
cSlider.setCaptionLabel(c['name'])
else:
cSlider.setCaptionLabel("")
cSlider.setColorForeground(tileColor)
cSlider.setColorCaptionLabel(tileColor)
if c['color']:
cSlider.setColorBackground(self.modColor(tileColor, False))
cSlider.setColorActive(self.modColor(tileColor, True))
else:
cSlider.setColorValueLabel(0)
cSlider.setColorForeground(color(200, 200, 200))
cSlider.setColorBackground(color(255, 255, 255))
cSlider.setColorActive(color(230, 230, 230))
cSlider.setSize(self.sliderWidth, self.sliderHeight)
cSlider.setPosition(x, y)
cSlider.setValue(50)
cSlider.addCallback(colorSliderCallback)
colorSliders.append(cSlider)
y += self.sliderHeight + self.margin
# Set the calculated height (if it hasn't been set)
if not self.calculatedHeight:
self.calculatedHeight = y - (self.margin + self.y)
# Load or recalculate values
self.recalculateStopValues(colorSliders, None)
self.stopCounter += 1
colorStop['sliders'] = colorSliders
return colorStop
def valueChanges(self, aSlider):
if self.callbackActive:
for colorStop in self.allStops:
if aSlider in colorStop['sliders']:
self.recalculateStopValues(colorStop['sliders'], aSlider)
self.needsDisplay = True
def sliderReleased(self, aSlider):
print "slider released -- update render"
# For testing
# v = self.valueForKeyAtPosition('G1', 0.7)
# print v
def recalculateStopValues(self, colorSliders, selectedSlider):
self.callbackActive = False
oldValues = [s.getValue() for s in colorSliders]
adjustment = 100.0 / sum(oldValues)
for i, aSlider in enumerate(colorSliders):
aSlider.setValue(oldValues[i] * adjustment)
self.callbackActive = True
def modColor(self, inColor, darker = True):
if darker:
outColor = lerpColor(inColor, color(0), 0.30)
else:
outColor = lerpColor(inColor, color(255), 0.40)
return outColor
| irlabs/BathroomTiles | GradientSliders.py | Python | mit | 12,149 | 0.033254 |
#!/usr/bin/env python
from math import (sqrt, ceil)
from insertion_sort import *
def bucket_sort(L=[]):
''' Unstable implementation of bucket sort.
:param L: list of sortable elements.
'''
if len(L) < 2: return L
# create buckets
num_bucket = sqrt(len(L))
interval = int(ceil(max(L) / num_bucket))
bucket = [ [] for x in range(int(num_bucket)+1) ]
# bucket = [[]] * int(ceil(num_bucket))
if not bucket: return L
    # place each item in its respective bucket
for i in range(len(L)): bucket[int(L[i]/interval)].append(L[i])
# concatenate buckets into single array
bucket = [ x for y in bucket for x in y ]
# return optimized insertion sort
return insertion_sort(bucket) | pymber/algorithms | algorithms/sorting/bucket_sort.py | Python | mit | 730 | 0.012329 |
from collections import Counter, defaultdict
import math
import pprint
import functools
# Advanced material here, feel free to ignore memoize if you like.
# Long story short, it remembers the inputs and outputs to functions,
# and if the same input is seen multiple times, then rather than
# running the function multiple times, memoization just returns the
# result of the function when it was first called with that input.
# Memory expensive because it keeps track of past results, but
# computationally nice because we don't recalculate the same thing
# over and over.
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
if args not in cache:
cache[args] = obj(*args, **kwargs)
return cache[args]
return memoizer
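# --- Illustrative sketch (not part of the original module) -----------------
# A tiny demonstration of how the memoize decorator above caches results:
# a repeated call with the same positional arguments is answered from the
# cache instead of re-running the function body. The function below is only
# defined for illustration and is never called by the module itself.
def _memoize_demo():
    calls = []

    @memoize
    def slow_square(x):
        calls.append(x)          # record every *actual* execution
        return x * x

    assert slow_square(4) == 16  # first call: executes and caches
    assert slow_square(4) == 16  # second call: served from the cache
    assert calls == [4]          # the body only ran once
    return calls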
def evaluate_classifier(classifier, class_of_interest,
evaluation_data, verbose=False, progress=True):
if verbose:
print("Evaluating performance for class {}".format(class_of_interest))
tp, fp, tn, fn = 0, 0, 0, 0 # true positive, false positive, true negative, false negative
count = 0
for dp in evaluation_data:
count += 1
if progress:
if count % 1000 == 0:
print("progress: {} / {}".format(count, len(evaluation_data)))
prediction = classifier.predict(dp)
actual = dp.klass
if actual == prediction: # we got it right!
if prediction == class_of_interest:
tp += 1
else:
tn += 1
else: # we got it wrong :(
if prediction == class_of_interest:
fp += 1
else:
fn += 1
precision = float(tp) / (tp + fp)
recall = float(tp) / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
if verbose:
print("precision:", precision)
print("recall:", recall)
print("f1:", f1)
return f1, precision, recall
class NaiveBayesClassifier(object):
def __init__(self, laplace_smoothing_constant=0.01):
self.total_counter = 0
self.class_counter = Counter()
self.feature_given_class_counter = defaultdict(Counter)
# Hyperparameter that can be tuned via Cross Validation to improve performance
self.laplace_smoothing_constant = laplace_smoothing_constant
def _update_with_one_data_point(self, data_point):
# Increment the total counter
self.total_counter += 1
# Increment class_counter
self.class_counter[data_point.klass] += 1
# Increment feature_given_class counter for each feature in featuredict
for feature_name, feature_value in data_point.featuredict.items():
assert type(feature_value) == int, "only int typed feature values currently supported"
# Bonus: can one extend Naive Bayes to real-valued features? (hint: yes)
self.feature_given_class_counter[data_point.klass][feature_name] += feature_value
def train(self, train_set, verbose=False):
for data_point in train_set:
self._update_with_one_data_point(data_point)
if verbose:
print("Training complete. Counters:")
pprint.pprint(self.total_counter)
pprint.pprint(self.class_counter)
pprint.pprint(self.feature_given_class_counter)
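    # Note (added for clarity, a sketch of what _prior and _likelihood below
    # compute with additive/Laplace smoothing, where a is
    # laplace_smoothing_constant and |V| is the cross-class vocabulary size):
    #   P(class)           ~ (count(class) + a) / (total + a * n_classes)
    #   P(feature | class) ~ (count(feature, class) + a) / (total feature count in class + a * |V|)
    # so features never seen with a class get a small non-zero probability
    # instead of zeroing out the whole product.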
@memoize # Advanced material, see note on memoize above
def _prior(self, klass):
# Laplace smoothing
numerator = self.laplace_smoothing_constant
denominator = len(self.class_counter) * self.laplace_smoothing_constant
# On top of the unsmoothed counts
numerator += self.class_counter[klass]
denominator += self.total_counter
# Gives us our smoothed prior
return float(numerator) / denominator
@memoize # Advanced material, see note on memoize above
def _vocabulary_size(self):
vocab = set()
for klass in self.class_counter: # for each class
# get all the features in class and add them to total cross-class vocabulary
vocab.update(set(self.feature_given_class_counter[klass]))
return len(vocab)
@memoize # Advanced material, see note on memoize above
def _likelihood(self, klass, feature_name):
# Laplace smoothing
numerator = self.laplace_smoothing_constant
denominator = self._vocabulary_size() * self.laplace_smoothing_constant
# On top of the unsmoothed counts
numerator += self.feature_given_class_counter[klass].get(feature_name, 0)
denominator += sum(self.feature_given_class_counter[klass].values())
# Gives us our smoothed likelihood
return float(numerator) / denominator
def predict(self, data_point, verbose=False):
# Where we'll store probabilities by class
pseudo_probability_by_class = {}
# Calculate the pseudo probability for each class
for klass in self.class_counter:
prior = self._prior(klass)
# Aggregate likelihood
likelihoods = []
for feature_name in data_point.featuredict: # for each feature
# for each time the feature appeared
for _ in range(data_point.featuredict[feature_name]):
likelihoods.append(self._likelihood(klass, feature_name))
# Add prior and likelihoods in logspace to avoid floating point underflow.
# The class with the highest log probability is still the most probable.
numerator_terms = [prior] + likelihoods
pseudo_probability_by_class[klass] = sum([math.log(t) for t in numerator_terms])
# Pick the class with the maximum probability and return it as our prediction
sorted_probability_by_class = sorted(pseudo_probability_by_class.items(),
# Sorts ascending by default, we want
# biggest probability first => descending
key=lambda x: x[1], reverse=True)
prediction = sorted_probability_by_class[0][0]
if verbose:
print("Predicting: {}".format(prediction))
return prediction
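# --- Illustrative usage sketch (not part of the original module) -----------
# `LabeledPoint` is a hypothetical stand-in for the workshop's data-point
# type: the classifier only relies on a `.klass` label and a `.featuredict`
# mapping of feature name -> integer count.
if __name__ == "__main__":
    from collections import namedtuple

    LabeledPoint = namedtuple("LabeledPoint", ["klass", "featuredict"])
    train_set = [
        LabeledPoint("spam", {"free": 2, "money": 1}),
        LabeledPoint("spam", {"free": 1, "viagra": 1}),
        LabeledPoint("ham", {"meeting": 1, "tomorrow": 1}),
        LabeledPoint("ham", {"free": 1, "meeting": 2}),
    ]
    nb = NaiveBayesClassifier(laplace_smoothing_constant=0.01)
    nb.train(train_set)
    # Expected to print "spam": both features are far more common in spam.
    print(nb.predict(LabeledPoint("unknown", {"free": 1, "money": 1})))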
| npoznans/python_etl | bayes_evening_workshop/Naive_Bayes_Evening_Workshop/classifiers.py | Python | mit | 6,290 | 0.002226 |
from euclid import Vector3
from ..util.vectors import tuple_of_ints
class Collision(object):
'''
detects if any objects in the world collide
'''
def __init__(self, world):
world.item_added += self.world_add_item
world.item_removed += self.world_remove_item
self.occupied = {}
def world_add_item(self, item):
if hasattr(item, 'bounds'):
position = (0, 0, 0)
if hasattr(item, 'position'):
position = item.position
self.add_item(position, item)
def world_remove_item(self, item):
if hasattr(item, 'bounds'):
position = (0, 0, 0)
if hasattr(item, 'position'):
position = item.position
self.remove_item(position, item)
def get_items(self, location):
if location is None:
return set()
if isinstance(location, Vector3):
location = tuple_of_ints(location)
return self.occupied.get(location, set())
def add_item(self, location, item):
if isinstance(location, Vector3):
location = tuple_of_ints(location)
existing = self.occupied.get(location, set())
existing.add(item)
self.occupied[location] = existing
def remove_item(self, location, item):
if isinstance(location, Vector3):
location = tuple_of_ints(location)
existing = self.occupied.get(location, set())
existing.remove(item)
self.occupied[location] = existing
def can_move_to(self, location):
return not [
item for item in self.get_items(location)
if hasattr(item, 'collide') and item.collide
]
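# --- Illustrative usage sketch (not part of the original module) -----------
# The real `world` object exposes `item_added` / `item_removed` events that
# support `+=`; `_Event`, `_World` and `_Block` below are hypothetical
# stand-ins used only to show how Collision tracks cell occupancy.
def _collision_demo():
    class _Event(object):
        def __init__(self):
            self.handlers = []
        def __iadd__(self, handler):
            self.handlers.append(handler)
            return self
        def __call__(self, *args):
            for handler in self.handlers:
                handler(*args)
    class _World(object):
        def __init__(self):
            self.item_added = _Event()
            self.item_removed = _Event()
    class _Block(object):
        bounds = (1, 1, 1)        # having `bounds` makes the item tracked
        position = (0, 0, 0)
        collide = True            # solid, so it blocks movement
    world = _World()
    collision = Collision(world)
    world.item_added(_Block())    # fires the handler registered in __init__
    assert not collision.can_move_to((0, 0, 0))  # cell occupied by a solid item
    assert collision.can_move_to((1, 0, 0))      # empty cell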
| tartley/pyweek11-cube | source/model/collision.py | Python | bsd-3-clause | 1,781 | 0.003369 |
from mosaic import app
if __name__ == '__main__':
app.run()
| BumagniyPacket/mosaic | run.py | Python | apache-2.0 | 65 | 0 |
import py.test
import unittest
from quick_sort import sort
import random
class testQuickSort(unittest.TestCase):
def test__init__(self):
pass
def test_sort_inorder(self):
arr = [i for i in xrange(0, 10000)]
sortarr = sort(arr)
for i in xrange(1, 10000):
self.assertGreaterEqual(sortarr[i], sortarr[i-1])
def test_sort_revorder(self):
arr = [i for i in xrange(10000, 0, -1)]
sortarr = sort(arr)
for i in xrange(1, 10000):
self.assertGreaterEqual(sortarr[i], sortarr[i-1])
def test_sortrandorder(self):
arr = [random.randint(0, 10000) for i in xrange(0, 10000)]
sortarr = sort(arr)
for i in xrange(1, 10000):
self.assertGreaterEqual(sortarr[i], sortarr[i-1])
| CharlesGust/data_structures | sort_quick/test_quick_sort.py | Python | mit | 794 | 0 |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of how to use recurrent networks (e.g.`LSTM`s) with `GraphNetwork`s.
Models can use the mechanism for specifying nested node, edge, or global
features to simultaneously keep inputs/embeddings together with a per-node,
per-edge or per-graph recurrent state.
In this example we show an `InteractionNetwork` that uses an LSTM to keep a
memory of the inputs to the edge model at each step of message passing, by using
separate "embedding" and "state" fields in the edge features.
Following a similar procedure, an LSTM could be added to the `node_update_fn`,
or even the `global_update_fn`, if using a full `GraphNetwork`.
Note it is recommended to use immutable container types to store nested edge,
node and global features to avoid unwanted side effects. In this example we
use `namedtuple`s.
"""
import collections
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import jax.tree_util as tree
import jraph
import numpy as np
NUM_NODES = 5
NUM_EDGES = 7
NUM_MESSAGE_PASSING_STEPS = 10
EMBEDDING_SIZE = 32
HIDDEN_SIZE = 128
# Immutable class for storing nested node/edge features containing an embedding
# and a recurrent state.
StatefulField = collections.namedtuple("StatefulField", ["embedding", "state"])
def get_random_graph() -> jraph.GraphsTuple:
return jraph.GraphsTuple(
n_node=np.asarray([NUM_NODES]),
n_edge=np.asarray([NUM_EDGES]),
nodes=np.random.normal(size=[NUM_NODES, EMBEDDING_SIZE]),
edges=np.random.normal(size=[NUM_EDGES, EMBEDDING_SIZE]),
globals=None,
senders=np.random.randint(0, NUM_NODES, [NUM_EDGES]),
receivers=np.random.randint(0, NUM_NODES, [NUM_EDGES]))
def network_definition(graph: jraph.GraphsTuple) -> jraph.ArrayTree:
"""`InteractionNetwork` with an LSTM in the edge update."""
# LSTM that will keep a memory of the inputs to the edge model.
edge_fn_lstm = hk.LSTM(hidden_size=HIDDEN_SIZE)
# MLPs used in the edge and the node model. Note that in this instance
# the output size matches the input size so the same model can be run
# iteratively multiple times. In a real model, this would usually be achieved
  # by first using an encoder to map the input data into a common `EMBEDDING_SIZE`.
edge_fn_mlp = hk.nets.MLP([HIDDEN_SIZE, EMBEDDING_SIZE])
node_fn_mlp = hk.nets.MLP([HIDDEN_SIZE, EMBEDDING_SIZE])
# Initialize the edge features to contain both the input edge embedding
# and initial LSTM state. Note for the nodes we only have an embedding since
# in this example nodes do not use a `node_fn_lstm`, but for analogy, we
# still put it in a `StatefulField`.
graph = graph._replace(
edges=StatefulField(
embedding=graph.edges,
state=edge_fn_lstm.initial_state(graph.edges.shape[0])),
nodes=StatefulField(embedding=graph.nodes, state=None),
)
def update_edge_fn(edges, sender_nodes, receiver_nodes):
# We will run an LSTM memory on the inputs first, and then
# process the output of the LSTM with an MLP.
edge_inputs = jnp.concatenate([edges.embedding,
sender_nodes.embedding,
receiver_nodes.embedding], axis=-1)
lstm_output, updated_state = edge_fn_lstm(edge_inputs, edges.state)
updated_edges = StatefulField(
embedding=edge_fn_mlp(lstm_output), state=updated_state,
)
return updated_edges
def update_node_fn(nodes, received_edges):
# Note `received_edges.state` will also contain the aggregated state for
# all received edges, which we may choose to use in the node update.
node_inputs = jnp.concatenate(
[nodes.embedding, received_edges.embedding], axis=-1)
updated_nodes = StatefulField(
embedding=node_fn_mlp(node_inputs),
state=None)
return updated_nodes
recurrent_graph_network = jraph.InteractionNetwork(
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn)
# Apply the model recurrently for 10 message passing steps.
# If instead we intended to use the LSTM to process a sequence of features
# for each node/edge, here we would select the corresponding inputs from the
# sequence along the sequence axis of the nodes/edges features to build the
# correct input graph for each step of the iteration.
  for _ in range(NUM_MESSAGE_PASSING_STEPS):
graph = recurrent_graph_network(graph)
return graph
def main(_):
network = hk.without_apply_rng(hk.transform(network_definition))
input_graph = get_random_graph()
params = network.init(jax.random.PRNGKey(42), input_graph)
output_graph = network.apply(params, input_graph)
print(tree.tree_map(lambda x: x.shape, output_graph))
if __name__ == "__main__":
app.run(main)
| deepmind/jraph | jraph/examples/lstm.py | Python | apache-2.0 | 5,350 | 0.006168 |
#!/usr/bin/env python3
# Copyright (c) 2018-2019, Ulf Magnusson
# SPDX-License-Identifier: ISC
"""
Lists all user-modifiable symbols that are not given a value in the
configuration file. Usually, these are new symbols that have been added to the
Kconfig files.
The default configuration filename is '.config'. A different filename can be
passed in the KCONFIG_CONFIG environment variable.
"""
from __future__ import print_function
import argparse
import sys
from kconfiglib import Kconfig, BOOL, TRISTATE, INT, HEX, STRING, TRI_TO_STR
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument(
"--show-help", "-l",
action="store_true",
help="Show any help texts as well")
parser.add_argument(
"kconfig",
metavar="KCONFIG",
nargs="?",
default="Kconfig",
help="Top-level Kconfig file (default: Kconfig)")
args = parser.parse_args()
kconf = Kconfig(args.kconfig, suppress_traceback=True)
# Make it possible to filter this message out
print(kconf.load_config(), file=sys.stderr)
for sym in kconf.unique_defined_syms:
# Only show symbols that can be toggled. Choice symbols are a special
# case in that sym.assignable will be (2,) (length 1) for visible
# symbols in choices in y mode, but they can still be toggled by
# selecting some other symbol.
if sym.user_value is None and \
(len(sym.assignable) > 1 or
(sym.visibility and (sym.orig_type in (INT, HEX, STRING) or
sym.choice))):
# Don't reuse the 'config_string' format for bool/tristate symbols,
# to show n-valued symbols as 'CONFIG_FOO=n' instead of
# '# CONFIG_FOO is not set'. This matches the C tools.
if sym.orig_type in (BOOL, TRISTATE):
s = "{}{}={}\n".format(kconf.config_prefix, sym.name,
TRI_TO_STR[sym.tri_value])
else:
s = sym.config_string
print(s, end="")
if args.show_help:
for node in sym.nodes:
if node.help is not None:
# Indent by two spaces. textwrap.indent() is not
# available in Python 2 (it's 3.3+).
print("\n".join(" " + line
for line in node.help.split("\n")))
break
if __name__ == "__main__":
main()
| ulfalizer/Kconfiglib | listnewconfig.py | Python | isc | 2,619 | 0 |
# -*- coding: utf8 -*-
"""
The ``queue`` utils
===================
Some operations will require a queue. This utils module provides the queue helpers for them.
"""
__author__ = 'Salas'
__copyright__ = 'Copyright 2014 LTL'
__credits__ = ['Salas']
__license__ = 'MIT'
__version__ = '0.2.0'
__maintainer__ = 'Salas'
__email__ = 'Salas.106.212@gmail.com'
__status__ = 'Pre-Alpha' | salas106/irc-ltl-framework | utils/queue.py | Python | mit | 352 | 0.005682 |
#!/usr/bin/env python3
class Solution():
    def numSubarrayProductLessThanK(self, nums, k):
        # Sliding window: keep the product of nums[lo..hi] strictly below k.
        lo, prod = 0, 1
        ret = 0
        for hi, n in enumerate(nums):
            prod *= n
            # Shrink the window from the left until the product is < k again.
            while lo <= hi and prod >= k:
                prod //= nums[lo]
                lo += 1
            # Every subarray ending at hi and starting in [lo, hi] qualifies.
            ret += (hi-lo+1)
        return ret
nums = [10,5,2,6]
k = 100
sol = Solution()
print(sol.numSubarrayProductLessThanK(nums, k))
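# Illustrative extra checks (not part of the original solution file): the
# example above counts 8 subarrays, and k <= 1 can never be satisfied because
# the running product of positive integers is always at least 1.
assert Solution().numSubarrayProductLessThanK([10, 5, 2, 6], 100) == 8
assert Solution().numSubarrayProductLessThanK([1, 2, 3], 1) == 0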
| eroicaleo/LearningPython | interview/leet/713_Subarray_Product_Less_Than_K.py | Python | mit | 434 | 0.009217 |
# encoding: utf-8
import mimetypes
import re
from django.core.urlresolvers import reverse
def order_name(name):
"""order_name -- Limit a text to 20 chars length, if necessary strips the
middle of the text and substitute it for an ellipsis.
name -- text to be limited.
"""
name = re.sub(r'^.*/', '', name)
if len(name) <= 37:
return name
return name[:37] + "..." + name[-7:]
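# Illustrative sketch (not part of the original module): how order_name
# shortens long upload names. Only the standard library is assumed, and the
# function below is never called by the module itself.
def _order_name_demo():
    long_name = "uploads/" + "a" * 60 + ".png"
    short = order_name(long_name)
    assert short.startswith("a" * 37)   # path prefix stripped, first 37 chars kept
    assert short.endswith("aaa.png")    # last 7 characters kept
    assert "..." in short               # middle replaced by an ellipsis
    return short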
def serialize(instance, file_attr='file'):
"""serialize -- Serialize a File instance into a dict.
instance -- File instance
file_attr -- attribute name that contains the FileField or ImageField
"""
obj = getattr(instance, file_attr)
return {
'url': obj.url,
'name': order_name(obj.name),
#'type': mimetypes.guess_type(obj.path)[0] or 'image/png',
'type': mimetypes.guess_type(obj.path)[0],
'thumbnailUrl': obj.url,
'size': obj.size,
'deleteUrl': reverse('upload-delete', args=[instance.pk]),
'deleteType': 'DELETE',
}
| extremoburo/django-jquery-file-upload | fileupload/serialize.py | Python | mit | 1,030 | 0.001942 |
'''
ps = psi4.Solver
with psi4.quite_run():
ps.prepare_chkpt(mo_coeff, fock_on_mo, nelec, e_scf, nuclear_repulsion)
ecc = ps.energy('CCSD', c.shape[1], hcore_on_mo, eri_on_mo)
rdm1, rdm2 = ps.density(mo_coeff.shape[1])
eccsdt = ps.energy('CCSD(T)', c.shape[1], hcore_on_mo, eri_on_mo)
rdm1, rdm2 = ps.density(mo_coeff.shape[1])
'''
from wrapper import *
__all__ = filter(lambda s: not s.startswith('_'), dir())
| sunqm/psi4-cc | psi4/__init__.py | Python | gpl-2.0 | 434 | 0 |
# Copyright 2017 NeuStar, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CheckJSON:
def __init__(self, key, obj, ip_info=False):
self.key_error = 'Field is not applicable to this license.'
if ip_info is True:
self.key_error = 'No IP info returned.'
self.key = key
self.obj = obj
def key_valid(self):
if self.key not in self.obj:
raise KeyError(self.key_error)
else:
return self.obj[self.key] | sbarbett/ip_intelligence | src/check_json.py | Python | apache-2.0 | 945 | 0.012698 |
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Products Manufacturers",
"version" : "1.0",
"author" : "OpenERP SA",
'category': 'Purchase Management',
'complexity': "easy",
"depends" : ["stock"],
"init_xml" : [],
"demo_xml" : [],
"description": """
A module that adds manufacturers and attributes on the product form.
====================================================================
You can now define the following for a product:
* Manufacturer
* Manufacturer Product Name
* Manufacturer Product Code
* Product Attributes
""",
"update_xml" : [
"security/ir.model.access.csv",
"product_manufacturer_view.xml"
],
"auto_install": False,
"installable": True,
"certificate" : "00720153953662760781",
'images': ['images/products_manufacturer.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| crmccreary/openerp_server | openerp/addons/product_manufacturer/__openerp__.py | Python | agpl-3.0 | 1,833 | 0.004364 |
# Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Radon transform (ray transform) in 2d using skimage.transform."""
from odl.discr import uniform_discr_frompartition, uniform_partition
import numpy as np
try:
from skimage.transform import radon, iradon
SCIKIT_IMAGE_AVAILABLE = True
except ImportError:
SCIKIT_IMAGE_AVAILABLE = False
__all__ = ('scikit_radon_forward', 'scikit_radon_back_projector',
'SCIKIT_IMAGE_AVAILABLE')
def scikit_theta(geometry):
"""Calculate angles in degrees with ODL scikit conventions."""
return np.asarray(geometry.motion_grid).squeeze() * 180.0 / np.pi
def scikit_sinogram_space(geometry, volume_space, sinogram_space):
"""Create a range adapted to the scikit radon geometry."""
padded_size = int(np.ceil(volume_space.shape[0] * np.sqrt(2)))
det_width = volume_space.domain.extent()[0] * np.sqrt(2)
scikit_detector_part = uniform_partition(-det_width / 2.0,
det_width / 2.0,
padded_size)
scikit_range_part = geometry.motion_partition.insert(1,
scikit_detector_part)
scikit_range = uniform_discr_frompartition(scikit_range_part,
interp=sinogram_space.interp,
dtype=sinogram_space.dtype)
return scikit_range
def clamped_interpolation(scikit_range, sinogram):
"""Interpolate in a possibly smaller space
Sets all points that would be outside ofthe domain to match the boundary
values.
"""
min_x = scikit_range.domain.min()[1]
max_x = scikit_range.domain.max()[1]
def interpolation_wrapper(x):
x = (x[0], np.maximum(min_x, np.minimum(max_x, x[1])))
return sinogram.interpolation(x)
return interpolation_wrapper
def scikit_radon_forward(volume, geometry, range, out=None):
"""Calculate forward projection using scikit
Parameters
----------
volume : `DiscreteLpElement`
The volume to project
geometry : `Geometry`
The projection geometry to use
range : `DiscreteLp`
range of this projection (sinogram space)
out : ``range`` element, optional
An element in range that the result should be written to
Returns
-------
sinogram : ``range`` element
Sinogram given by the projection.
"""
# Check basic requirements. Fully checking should be in wrapper
assert volume.shape[0] == volume.shape[1]
theta = scikit_theta(geometry)
scikit_range = scikit_sinogram_space(geometry, volume.space, range)
sinogram = scikit_range.element(radon(volume.asarray(), theta=theta).T)
if out is None:
out = range.element()
out.sampling(clamped_interpolation(scikit_range, sinogram))
scale = volume.space.cell_sides[0]
out *= scale
return out
def scikit_radon_back_projector(sinogram, geometry, range, out=None):
"""Calculate forward projection using scikit
Parameters
----------
sinogram : `DiscreteLpElement`
Sinogram (projections) to backproject.
geometry : `Geometry`
The projection geometry to use.
range : `DiscreteLp`
range of this projection (volume space).
out : ``range`` element, optional
An element in range that the result should be written to.
Returns
-------
    volume : ``range`` element
        Back-projection (volume) given by the sinogram.
"""
theta = scikit_theta(geometry)
scikit_range = scikit_sinogram_space(geometry, range, sinogram.space)
scikit_sinogram = scikit_range.element()
scikit_sinogram.sampling(clamped_interpolation(range, sinogram))
if out is None:
out = range.element()
else:
# Only do asserts here since these are backend functions
assert out in range
out[:] = iradon(scikit_sinogram.asarray().T, theta,
output_size=range.shape[0], filter=None)
# Empirically determined value, gives correct scaling
scale = 4.0 * float(geometry.motion_params.length) / (2 * np.pi)
out *= scale
return out
| bgris/ODL_bgris | lib/python3.5/site-packages/odl/tomo/backends/scikit_radon.py | Python | gpl-3.0 | 4,830 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('campaigns', '0002_auto_20141022_1840'),
]
operations = [
migrations.AlterField(
model_name='campaignform',
name='form',
field=models.ForeignKey(related_name='campaigns', to='customforms.Form'),
preserve_default=True,
),
migrations.AlterField(
model_name='campaignproject',
name='campaign',
field=models.ForeignKey(related_name='projects', to='campaigns.Campaign'),
preserve_default=True,
),
migrations.AlterField(
model_name='campaignproject',
name='project',
field=models.ForeignKey(related_name='campaigns', to='projects.Monitorable'),
preserve_default=True,
),
migrations.AlterField(
model_name='campaignreport',
name='campaign',
field=models.ForeignKey(related_name='reports', to='campaigns.Campaign'),
preserve_default=True,
),
migrations.AlterField(
model_name='campaignreport',
name='report',
field=models.ForeignKey(related_name='campaigns', to='reports.Report'),
preserve_default=True,
),
]
| Monithon/Monithon-2.0 | campaigns/migrations/0003_auto_20141130_0858.py | Python | gpl-2.0 | 1,412 | 0.003541 |
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import json
import os
from dateutil import zoneinfo
from mock import mock
from .common import BaseTest, instance
from c7n.filters import FilterValidationError
from c7n.filters.offhours import OffHour, OnHour, ScheduleParser, Time
# Per http://blog.xelnor.net/python-mocking-datetime/
# naive implementation has issues with pypy
real_datetime_class = datetime.datetime
def mock_datetime_now(tgt, dt):
class DatetimeSubclassMeta(type):
@classmethod
def __instancecheck__(mcs, obj):
return isinstance(obj, real_datetime_class)
class BaseMockedDatetime(real_datetime_class):
target = tgt
@classmethod
def now(cls, tz=None):
return cls.target.replace(tzinfo=tz)
@classmethod
def utcnow(cls):
return cls.target
# Python2 & Python3 compatible metaclass
MockedDatetime = DatetimeSubclassMeta(
b'datetime' if str is bytes else 'datetime', # hack Python2/3 port
(BaseMockedDatetime,), {})
return mock.patch.object(dt, 'datetime', MockedDatetime)
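# Illustrative sketch (not part of the original test module): inside the
# patch returned by mock_datetime_now, datetime.datetime reports the frozen
# target instant, while real datetime objects still pass isinstance() thanks
# to the __instancecheck__ hook. This helper is never called by the tests.
def _mock_datetime_now_demo():
    frozen = real_datetime_class(2016, 8, 14, 19, 0)
    with mock_datetime_now(frozen, datetime):
        assert datetime.datetime.utcnow() == frozen
        assert isinstance(frozen, datetime.datetime)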
class OffHoursFilterTest(BaseTest):
"""[off|on] hours testing"""
def test_offhours_records(self):
session_factory = self.replay_flight_data('test_offhours_records')
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2016, month=8, day=14, hour=19, minute=00)
with mock_datetime_now(t, datetime):
p = self.load_policy({
'name': 'offhours-records',
'resource': 'ec2',
'filters': [
{'State.Name': 'running'},
{'type': 'offhour',
'offhour': 19,
'tag': 'custodian_downtime',
'default_tz': 'est',
'weekends': False}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(resources, [])
with open(os.path.join(
p.options['output_dir'],
'offhours-records',
'parse_errors.json')) as fh:
data = json.load(fh)
self.assertEqual(len(data), 1)
self.assertEqual(data[0][0], 'i-0ee3a9bc2eeed269f')
self.assertEqual(data[0][1], 'off=[m-f,8];on=[n-f,5];pz=est')
with open(os.path.join(
p.options['output_dir'],
'offhours-records',
'opted_out.json')) as fh:
data = json.load(fh)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['InstanceId'], 'i-0a619b58a7e704a9f')
def test_validate(self):
self.assertRaises(
FilterValidationError, OffHour({'default_tz': 'zmta'}).validate)
self.assertRaises(
FilterValidationError, OffHour({'offhour': 25}).validate)
i = OffHour({})
self.assertEqual(i.validate(), i)
def test_process(self):
f = OffHour({'opt-out': True})
instances = [
instance(Tags=[]),
instance(
Tags=[{'Key': 'maid_offhours', 'Value': ''}]),
instance(
Tags=[{'Key': 'maid_offhours', 'Value': 'on'}]),
instance(
Tags=[{'Key': 'maid_offhours', 'Value': 'off'}]),
instance(
Tags=[
{'Key': 'maid_offhours',
'Value': "off=(m-f,5);zebrablue,on=(t-w,5)"}])]
t = datetime.datetime(
year=2015, month=12, day=1, hour=19, minute=5,
tzinfo=zoneinfo.gettz('America/New_York'))
with mock_datetime_now(t, datetime):
self.assertEqual(
f.process(instances), [instances[0], instances[1], instances[2]]
)
def test_opt_out_behavior(self):
# Some users want to match based on policy filters to
# a resource subset with default opt out behavior
t = datetime.datetime(
year=2015, month=12, day=1, hour=19, minute=5,
tzinfo=zoneinfo.gettz('America/New_York'))
f = OffHour({'opt-out': True})
with mock_datetime_now(t, datetime):
i = instance(Tags=[])
self.assertEqual(f(i), True)
i = instance(
Tags=[{'Key': 'maid_offhours', 'Value': ''}]
)
self.assertEqual(f(i), True)
i = instance(
Tags=[{'Key': 'maid_offhours', 'Value': 'on'}]
)
self.assertEqual(f(i), True)
i = instance(
Tags=[{'Key': 'maid_offhours', 'Value': 'off'}])
self.assertEqual(f(i), False)
self.assertEqual(f.opted_out, [i])
def test_opt_in_behavior(self):
# Given the addition of opt out behavior, verify if its
# not configured that we don't touch an instance that
# has no downtime tag
i = instance(Tags=[])
i2 = instance(Tags=[{'Key': 'maid_offhours', 'Value': ''}])
i3 = instance(Tags=[{'Key': 'maid_offhours', 'Value': 'on'}])
t = datetime.datetime(
year=2015, month=12, day=1, hour=19, minute=5,
tzinfo=zoneinfo.gettz('America/New_York'))
f = OffHour({})
with mock_datetime_now(t, datetime):
self.assertEqual(f(i), False)
self.assertEqual(f(i2), True)
self.assertEqual(f(i3), True)
t = datetime.datetime(
year=2015, month=12, day=1, hour=7, minute=5,
tzinfo=zoneinfo.gettz('America/New_York'))
f = OnHour({})
with mock_datetime_now(t, datetime):
self.assertEqual(f(i), False)
self.assertEqual(f(i2), True)
self.assertEqual(f(i3), True)
def xtest_time_match_stops_after_skew(self):
hour = 7
t = datetime.datetime(
year=2015, month=12, day=1, hour=hour, minute=5,
tzinfo=zoneinfo.gettz('America/New_York'))
i = instance(Tags=[
{'Key': 'maid_offhours', 'Value': 'tz=est'}])
f = OnHour({'skew': 1})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(0, 4):
dt.target = t.replace(hour=hour + n)
results.append(f(i))
self.assertEqual(results, [True, True, False, False])
def test_resource_schedule_error(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2015, month=12, day=1, hour=19, minute=5)
f = OffHour({})
f.process_resource_schedule = lambda: False
with mock_datetime_now(t, datetime):
i = instance(Tags=[
{'Key': 'maid_offhours', 'Value': 'tz=est'}])
self.assertEqual(f(i), False)
def test_time_filter_usage_errors(self):
self.assertRaises(NotImplementedError, Time, {})
def test_everyday_onhour(self):
# weekends on means we match times on the weekend
start_day = 14 # sunday
t = datetime.datetime(
year=2016, day=start_day, month=8, hour=7, minute=20)
i = instance(Tags=[{'Key': 'maid_offhours', 'Value': 'tz=est'}])
f = OnHour({'weekends': False})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(7):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(results, [True] * 7)
def test_everyday_offhour(self):
# weekends on means we match times on the weekend
start_day = 14 # sunday
t = datetime.datetime(
year=2016, day=start_day, month=8, hour=19, minute=20)
i = instance(Tags=[{'Key': 'maid_offhours', 'Value': 'tz=est'}])
f = OffHour({'weekends': False})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(7):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(results, [True] * 7)
def test_weekends_only_onhour_support(self):
# start day is a sunday, weekend only means we only start
# on monday morning.
start_day = 14
t = datetime.datetime(
year=2016, day=start_day, month=8, hour=7, minute=20)
i = instance(Tags=[{'Key': 'maid_offhours', 'Value': 'tz=est'}])
f = OnHour({'weekends-only': True})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(7):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(results, [
False, True, False, False, False, False, False])
def test_weekends_only_offhour_support(self):
# start day is a sunday, weekend only means we only stop
# on friday evening.
start_day = 14
t = datetime.datetime(
year=2016, day=start_day, month=8, hour=7, minute=20)
i = instance(Tags=[{'Key': 'maid_offhours', 'Value': 'tz=est'}])
f = OnHour({'weekends-only': True})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(7):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(results, [
False, True, False, False, False, False, False])
def test_onhour_weekend_support(self):
start_day = 14
t = datetime.datetime(
year=2016, day=start_day, month=2, hour=19, minute=20)
i = instance(Tags=[{'Key': 'maid_offhours', 'Value': 'tz=est'}])
f = OffHour({'weekends-only': True})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(7):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(
results,
[False, False, False, False, False, True, False])
def test_offhour_weekend_support(self):
start_day = 26
t = datetime.datetime(
year=2016, day=start_day, month=2, hour=19, minute=20)
i = instance(Tags=[{'Key': 'maid_offhours', 'Value': 'tz=est'}])
f = OffHour({})
results = []
with mock_datetime_now(t, datetime) as dt:
for n in range(0, 4):
dt.target = t.replace(day=start_day + n)
results.append(f(i))
self.assertEqual(results, [True, False, False, True])
def test_current_time_test(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2015, month=12, day=1, hour=19, minute=5)
with mock_datetime_now(t, datetime):
i = instance(Tags=[
{'Key': 'maid_offhours', 'Value': 'tz=est'}])
f = OffHour({})
p = f.get_tag_value(i)
self.assertEqual(p, 'tz=est')
tz = f.get_tz('est')
self.assertTrue(str(tz) in (
"tzfile('US/Eastern')",
"tzfile('America/New_York')"))
self.assertEqual(
datetime.datetime.now(tz), t)
self.assertEqual(t.hour, 19)
def test_offhours_real_world_values(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2015, month=12, day=1, hour=19, minute=5)
with mock_datetime_now(t, datetime):
results = [OffHour({})(i) for i in [
instance(Tags=[
{'Key': 'maid_offhours', 'Value': ''}]),
instance(Tags=[
{'Key': 'maid_offhours', 'Value': 'on'}]),
instance(Tags=[
{'Key': 'maid_offhours', 'Value': '"Offhours tz=ET"'}]),
instance(Tags=[
{'Key': 'maid_offhours', 'Value': 'Offhours tz=PT'}])]]
# unclear what this is really checking
self.assertEqual(results, [True, True, True, True])
def test_offhours_get_value(self):
off = OffHour({'default_tz': 'ct'})
i = instance(Tags=[
{'Key': 'maid_offhours', 'Value': 'Offhours tz=PT'}])
self.assertEqual(off.get_tag_value(i), "offhours tz=pt")
self.assertFalse(off.parser.has_resource_schedule(
off.get_tag_value(i), 'off'))
self.assertTrue(off.parser.keys_are_valid(
off.get_tag_value(i)))
self.assertEqual(off.parser.raw_data(
off.get_tag_value(i)), {'tz': 'pt'})
def test_offhours(self):
t = datetime.datetime(year=2015, month=12, day=1, hour=19, minute=5,
tzinfo=zoneinfo.gettz('America/New_York'))
with mock_datetime_now(t, datetime):
i = instance(Tags=[
{'Key': 'maid_offhours', 'Value': 'tz=est'}])
self.assertEqual(OffHour({})(i), True)
def test_onhour(self):
t = datetime.datetime(year=2015, month=12, day=1, hour=7, minute=5,
tzinfo=zoneinfo.gettz('America/New_York'))
with mock_datetime_now(t, datetime):
i = instance(Tags=[
{'Key': 'maid_offhours', 'Value': 'tz=est'}])
self.assertEqual(OnHour({})(i), True)
self.assertEqual(OnHour({'onhour': 8})(i), False)
def test_cant_parse_tz(self):
i = instance(Tags=[
{'Key': 'maid_offhours', 'Value': 'tz=evt'}])
self.assertEqual(OffHour({})(i), False)
def test_custom_offhours(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2016, month=5, day=26, hour=19, minute=00)
results = []
with mock_datetime_now(t, datetime):
for i in [instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=(m-f,19);on=(m-f,7);tz=et'}]),
instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=(m-f,20);on=(m-f,7);tz=et'}])]:
results.append(OffHour({})(i))
self.assertEqual(results, [True, False])
def test_custom_onhours(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
results = []
with mock_datetime_now(t, datetime):
for i in [instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=(m-f,19);on=(m-f,7);tz=et'}]),
instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=(m-f,20);on=(m-f,9);tz=et'}])]:
results.append(OnHour({})(i))
self.assertEqual(results, [True, False])
def test_arizona_tz(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=(m-f,19);on=(m-f,7);tz=at'}])
self.assertEqual(OnHour({})(i), True)
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=(m-f,20);on=(m-f,6);tz=ast'}])
self.assertEqual(OnHour({})(i), False)
def test_custom_bad_tz(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=(m-f,19);on=(m-f,7);tz=et'}])
self.assertEqual(OnHour({})(i), True)
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=(m-f,20);on=(m-f,7);tz=abc'}])
self.assertEqual(OnHour({})(i), False)
def test_custom_bad_hours(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2016, month=5, day=26, hour=19, minute=00)
# default error handling is to exclude the resource
with mock_datetime_now(t, datetime):
# This isn't considered a bad value, its basically omitted.
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=();tz=et'}])
self.assertEqual(OffHour({})(i), False)
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=(m-f,90);on=(m-f,7);tz=et'}])
# malformed value
self.assertEqual(OffHour({})(i), False)
t = t.replace(year=2016, month=5, day=26, hour=13, minute=00)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=();tz=et'}])
# will go to default values, but not work due to default time
self.assertEqual(OffHour({})(i), False)
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': 'off=(m-f,90);on=(m-f,7);tz=et'}])
self.assertEqual(OffHour({})(i), False)
def test_tz_only(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': 'tz=est'}])
self.assertEqual(OnHour({})(i), True)
def test_empty_tag(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': ''}])
self.assertEqual(OnHour({})(i), True)
def test_on_tag(self):
t = datetime.datetime.now(zoneinfo.gettz('America/New_York'))
t = t.replace(year=2016, month=5, day=26, hour=7, minute=00)
with mock_datetime_now(t, datetime):
i = instance(Tags=[{'Key': 'maid_offhours',
'Value': 'on'}])
self.assertEqual(OnHour({})(i), True)
class ScheduleParserTest(BaseTest):
# table style test
# list of (tag value, parse result)
table = [
################
# Standard cases
('off=(m-f,10);on=(m-f,7);tz=et',
{'off': [{'days': [0, 1, 2, 3, 4], 'hour': 10}],
'on': [{'days': [0, 1, 2, 3, 4], 'hour': 7}],
'tz': 'et'}),
("off=[(m-f,9)];on=(m-s,10);tz=pt",
{'off': [{'days': [0, 1, 2, 3, 4], 'hour': 9}],
'on': [{'days': [0, 1, 2, 3, 4, 5], 'hour': 10}],
'tz': 'pt'}),
("off=[(m-f,23)];on=(m-s,10);tz=pt",
{'off': [{'days': [0, 1, 2, 3, 4], 'hour': 23}],
'on': [{'days': [0, 1, 2, 3, 4, 5], 'hour': 10}],
'tz': 'pt'}),
('off=(m-f,19);on=(m-f,7);tz=pst',
{'off': [{'days': [0, 1, 2, 3, 4], 'hour': 19}],
'on': [{'days': [0, 1, 2, 3, 4], 'hour': 7}],
'tz': 'pst'}),
# wrap around days (saturday, sunday, monday)
('on=[(s-m,10)];off=(s-m,19)',
{'on': [{'days': [5, 6, 0], 'hour': 10}],
'off': [{'days': [5, 6, 0], 'hour': 19}],
'tz': 'et'}),
# multiple single days specified
('on=[(m,9),(t,10),(w,7)];off=(m-u,19)',
{'on': [{'days': [0], 'hour': 9},
{'days': [1], 'hour': 10},
{'days': [2], 'hour': 7}],
'off': [{'days': [0, 1, 2, 3, 4, 5, 6], 'hour': 19}],
'tz': 'et'}),
# using brackets also works, if only single time set
('off=[m-f,20];on=[m-f,5];tz=est',
{'on': [{'days': [0, 1, 2, 3, 4], 'hour': 5}],
'off': [{'days': [0, 1, 2, 3, 4], 'hour': 20}],
'tz': 'est'}),
# same string, exercise cache lookup.
('off=[m-f,20];on=[m-f,5];tz=est',
{'on': [{'days': [0, 1, 2, 3, 4], 'hour': 5}],
'off': [{'days': [0, 1, 2, 3, 4], 'hour': 20}],
'tz': 'est'}),
################
# Invalid Cases
('', None),
# invalid day
('off=(1-2,12);on=(m-f,10);tz=est', None),
# invalid hour
('off=(m-f,a);on=(m-f,10);tz=est', None),
('off=(m-f,99);on=(m-f,7);tz=pst', None),
# invalid day
('off=(x-f,10);on=(m-f,10);tz=est', None),
# no hour specified for on
('off=(m-f);on=(m-f,10);tz=est', None),
# invalid day spec
('off=(m-t-f,12);on=(m-f,10);tz=est', None),
# random extra
('off=(m-f,5);zebra=blue,on=(t-w,5)', None),
('off=(m-f,5);zebra=blue;on=(t-w,5)', None),
# random extra again
('off=(m-f,5);zebrablue,on=(t-w,5)', None),
('bar;off=(m-f,5);zebrablue,on=(t-w,5)', None),
]
def test_schedule_parser(self):
self.maxDiff = None
parser = ScheduleParser({'tz': 'et'})
for value, expected in self.table:
self.assertEqual(parser.parse(value), expected)
| jimmyraywv/cloud-custodian | tests/test_offhours.py | Python | apache-2.0 | 21,903 | 0.000274 |
# Copyright (c) 2014-2018 Matteo Degiacomi and Valentina Erastova
#
# Assemble is free software ;
# you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation ;
# either version 2 of the License, or (at your option) any later version.
# Assemble is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY ;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with Assemble ;
# if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
# Authors : Matteo Degiacomi, matteo.degiacomi@gmail.com, Valentina Erastova, valentina.erastova@gmail.com
import numpy as np
import logging
class ForceField(object):
def __init__(self):
self.bonded={}
self.nonbonded={}
self.combination=[]
self.fftype=[]
#default return values
self.default_bond=1.5
self.default_angle=114
self.default_dihedral=120
self.logger=logging.getLogger('assemble')
def load(self,fffile):
self.logger.info("\n> loading force field %s..."%fffile)
f = open(fffile, 'r+')
line = f.readline()
while line:
w=line.split()
if "bondedtypes" in line:
break
line=f.readline()
#extract equations type
while line:
w=line.split()
if not line.isspace() and not ";" in w[0] and len(w)==4:
self.fftype=np.array(w).astype(int)
break
line=f.readline()
line=f.readline()
#extract bonded potential constants
while line:
w=line.split()
if "atomtypes" in line:
break
if not line.isspace() and not ";" in w[0]:
self.bonded[w[0]]=np.array(w[1:]).astype(float)
line=f.readline()
line=f.readline()
#non bonded potential
while line:
w=line.split()
if "defaults" in line:
break
if not line.isspace() and not ";" in w[0]:
self.nonbonded[w[0]]=np.array(w[1:])
line=f.readline()
line=f.readline()
#get combination rules
while line:
w=line.split()
if not line.isspace() and not ";" in w[0]:
self.combination=np.array(w)
break
line=f.readline()
f.close()
if len(self.fftype)==0:
raise IOError("bond types not found in force field %s!"%fffile)
if len(self.bonded)==0:
raise IOError("bonded parameters not found in force field %s!"%fffile)
if len(self.nonbonded)==0:
raise IOError("non-bonded parameters not found in force field %s!"%fffile)
if len(self.combination)==0:
raise IOError("combination rules not found in force field %s!"%fffile)
def get_bond(self,name):
if self.fftype[0]>=1 and self.fftype[0]<=7:
return self.bonded[name][0]*10
#tabulated potential, no way to know where the minimum is. Return default value
else:
return self.default_bond
def get_angle(self,name):
if self.fftype[1]>=1 and self.fftype[1]<=2:
return self.bonded[name][0]
#no analytical minimum exists, return default
else:
return self.default_angle
def get_dihedral(self,name):
if self.fftype[2]>=1 and self.fftype[2]<=2:
return self.bonded[name][0]
#no analytical minimum exists, return default
else:
return self.default_dihedral
if __name__=="__main__":
FF=ForceField()
FF.load("./database/forcefield/trappe.ff.txt")
#FF.load("C:\Users\Matteo\workspace\polymer\database\forcefield\trappe.ff")
print(FF.bonded)
#print FF.nonbonded
#print FF.combination
print(FF.fftype)
| degiacom/assemble | ForceField.py | Python | gpl-3.0 | 4,345 | 0.023245 |
import imagepusher, random
if __name__ == '__main__':
host, port = '', 18002
pusher = imagepusher.ImagePusher( (host, port) )
width, height = 12, 10
munch = [ [ [0,0,0] for x in xrange(width) ] for y in xrange(height) ]
while True:
for i in xrange(16):
for j in xrange(i+1):
for y in xrange(height):
for x in xrange(width):
if y == (x ^ j):
munch[y][x][0] += 1
munch[y][x][0] %= 256
munch[y][x][1] += 5
munch[y][x][1] %= 256
munch[y][x][2] += 9
munch[y][x][2] %= 256
frame = [ [ [n/255., m/255., o/255.] for n,m,o in row ] for row in munch ]
pusher.push_frame( frame )
| techinc/imagepusher | munch.py | Python | mit | 651 | 0.058372 |
import sys, os, re, time, resource, gc
import ujson, boto
import boto.s3.connection
from collections import defaultdict
access_key = os.environ["AWS_ACCESS_KEY"]
secret_key = os.environ["AWS_SECRET_KEY"]
def key_iterator(key):
"""
Iterator for line by line, for going through the whole contents of a key
"""
unfinished_line = ""
for byte in key:
byte = unfinished_line + byte
lines = byte.split("\n")
unfinished_line = lines.pop()
for line in lines:
yield line
def main(args):
"""
Main method
Rolling like it's 2006
"""
conn = boto.connect_s3(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
bucket = conn.get_bucket("tweettrack")
count = 0
keys = bucket.list("Twitterrank_Full_Output/graph")
for f in keys:
print f
f_iter = key_iterator(f)
for line in f_iter:
count += 1
if line.find("score") == -1:
print "fuck this shit, we fucked up"
print line
sys.exit(0)
if count % 50000 == 0:
print "count is: %d" % (count,)
conn.close()
if __name__ == "__main__":
main(sys.argv)
| jvictor0/TweetTracker | src/TwitterRank/CheckGraph.py | Python | mit | 1,247 | 0.00401 |
#import libraries
import tensorflow as tf
import numpy as np
def get_data():
#data is from the computer hardware dataset found on the UCI ML repository
with open('data.txt', 'r') as fin:
text_in = fin.read()
split = text_in.splitlines()
data = []
for line in split:
data.append(line.split(','))
np_data = np.array(data)
x = np_data[:, 2:8].astype('f4')
y = np_data[:, 8].astype('f4')
#normalize features of x
x_mean = np.mean(x, 0)
x_std = np.std(x, 0)
x = (x - x_mean) / x_std
return x, y
def tf_summary():
if tf.gfile.Exists("summary"):
tf.gfile.DeleteRecursively("summary")
tf.summary.scalar('cost', cost)
tf.summary.histogram('weights', w)
tf.summary.histogram('bias', b)
summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("summary")
writer.add_graph(sess.graph)
return summary, writer
#get data
x_data, y_data = get_data()
n_examples = np.shape(x_data)[0]
n_features = np.shape(x_data)[1]
x_data = np.transpose(x_data)
y_data = np.reshape(y_data, [1, n_examples])
############################## YOUR CODE HERE #####################################
''' Replace all the quotes/variables in quotes with the correct code '''
#declare graph
#1: declare placeholders x and y (to hold data)
x = 'x'
y = 'y'
#2: declare variables w (weights) and b (bias)
w = 'w'
b = 'b'
#3: declare operations and output (multiplication)
h = 'h'
#declare cost function
cost = 'cost'
#declare optimizer and learning rate
learning_rate = 'learning rate'
optimizer = 'optimizer'
#run graph
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
#tensorboard stuff
summary, writer = tf_summary()
#train model
iterations = 'iterations'
for i in range(iterations):
#fill in var1, 2, 3 with the correct code
sess.run('var1', feed_dict={x: 'var2', y: 'var3'})
#this is for logging the results to tensorboard so you can visualize them (i % 10 == 0 says to log the result every 10 iterations)
if i % 10 == 0:
writer.add_summary(sess.run(summary, feed_dict={x: 'var2', y: 'var3'}))
| SimpleGeometry/bisv-ml | tensorflow-2/tf_linreg.py | Python | mit | 2,049 | 0.028306 |
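The exercise above leaves its graph definition as quoted placeholders. As a point of reference only, here is a minimal sketch of one way those placeholders could be filled in; it assumes the TensorFlow 1.x graph API the file already imports, and the learning rate and iteration count are illustrative values, not ones prescribed by the original exercise.

# Hedged completion sketch for the placeholders above (TensorFlow 1.x assumed).
# It reuses n_features, x_data and y_data exactly as prepared in the file.
x = tf.placeholder(tf.float32, shape=[n_features, None], name='x')
y = tf.placeholder(tf.float32, shape=[1, None], name='y')
w = tf.Variable(tf.zeros([1, n_features]), name='weights')
b = tf.Variable(tf.zeros([1]), name='bias')
h = tf.matmul(w, x) + b                     # predictions, shape [1, n_examples]
cost = tf.reduce_mean(tf.square(h - y))     # mean squared error
learning_rate = 0.01                        # illustrative value
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
iterations = 1000                           # illustrative value
# and inside the training loop the three quoted arguments become:
#     sess.run(optimizer, feed_dict={x: x_data, y: y_data})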
"""
Custom Authenticator to use MediaWiki OAuth with JupyterHub
Requires `mwoauth` package.
"""
import json
import os
from asyncio import wrap_future
from concurrent.futures import ThreadPoolExecutor
from jupyterhub.handlers import BaseHandler
from jupyterhub.utils import url_path_join
from mwoauth import ConsumerToken
from mwoauth import Handshaker
from mwoauth.tokens import RequestToken
from traitlets import Any
from traitlets import Integer
from traitlets import Unicode
from oauthenticator import OAuthCallbackHandler
from oauthenticator import OAuthenticator
# Name of cookie used to pass auth token between the oauth
# login and authentication phase
AUTH_REQUEST_COOKIE_NAME = 'mw_oauth_request_token_v2'
# Helpers to jsonify/de-jsonify request_token
# It is a named tuple with bytestrings, json.dumps balks
def jsonify(request_token):
return json.dumps(
[
request_token.key,
request_token.secret,
]
)
def dejsonify(js):
key, secret = json.loads(js)
return RequestToken(key, secret)
class MWLoginHandler(BaseHandler):
async def get(self):
consumer_token = ConsumerToken(
self.authenticator.client_id,
self.authenticator.client_secret,
)
handshaker = Handshaker(self.authenticator.mw_index_url, consumer_token)
redirect, request_token = await wrap_future(
self.authenticator.executor.submit(handshaker.initiate)
)
self.set_secure_cookie(
AUTH_REQUEST_COOKIE_NAME,
jsonify(request_token),
expires_days=1,
path=url_path_join(self.base_url, 'hub', 'oauth_callback'),
httponly=True,
)
self.log.info('oauth redirect: %r', redirect)
self.redirect(redirect)
class MWCallbackHandler(OAuthCallbackHandler):
"""
Override OAuthCallbackHandler to take out state parameter handling.
mwoauth doesn't seem to support it for now!
"""
def check_arguments(self):
pass
def get_state_url(self):
return None
class MWOAuthenticator(OAuthenticator):
login_service = 'MediaWiki'
login_handler = MWLoginHandler
callback_handler = MWCallbackHandler
mw_index_url = Unicode(
os.environ.get('MW_INDEX_URL', 'https://meta.wikimedia.org/w/index.php'),
config=True,
help='Full path to index.php of the MW instance to use to log in',
)
executor_threads = Integer(
12,
help="""Number of executor threads.
MediaWiki OAuth requests happen in this thread,
so it is mostly waiting for network replies.
""",
config=True,
)
executor = Any()
def normalize_username(self, username):
"""
Override normalize_username to avoid lowercasing usernames
"""
return username
def _executor_default(self):
return ThreadPoolExecutor(self.executor_threads)
async def authenticate(self, handler, data=None):
consumer_token = ConsumerToken(
self.client_id,
self.client_secret,
)
handshaker = Handshaker(self.mw_index_url, consumer_token)
request_token = dejsonify(handler.get_secure_cookie(AUTH_REQUEST_COOKIE_NAME))
handler.clear_cookie(AUTH_REQUEST_COOKIE_NAME)
access_token = await wrap_future(
self.executor.submit(
handshaker.complete, request_token, handler.request.query
)
)
identity = await wrap_future(
self.executor.submit(handshaker.identify, access_token)
)
if identity and 'username' in identity:
# this shouldn't be necessary anymore,
# but keep for backward-compatibility
return {
'name': identity['username'].replace(' ', '_'),
'auth_state': {
'ACCESS_TOKEN_KEY': access_token.key,
'ACCESS_TOKEN_SECRET': access_token.secret,
'MEDIAWIKI_USER_IDENTITY': identity,
},
}
else:
self.log.error("No username found in %s", identity)
| jupyterhub/oauthenticator | oauthenticator/mediawiki.py | Python | bsd-3-clause | 4,178 | 0.000957 |
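For orientation, a hypothetical jupyterhub_config.py fragment that wires up the authenticator above could look like the following; the consumer key and secret are placeholders, client_id/client_secret are traits inherited from OAuthenticator, and mw_index_url/executor_threads are the traits defined in this module.

# Hypothetical jupyterhub_config.py fragment; all credential values are placeholders.
c.JupyterHub.authenticator_class = 'oauthenticator.mediawiki.MWOAuthenticator'  # assumed import string
c.MWOAuthenticator.client_id = 'your-mediawiki-consumer-key'
c.MWOAuthenticator.client_secret = 'your-mediawiki-consumer-secret'
c.MWOAuthenticator.mw_index_url = 'https://meta.wikimedia.org/w/index.php'
c.MWOAuthenticator.executor_threads = 12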
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from __future__ import absolute_import
from __future__ import division
import toyplot.cairo.png
def render(canvas, fobj=None, width=None, height=None, scale=None):
"""Render the PNG bitmap representation of a canvas.
By default, canvas dimensions in CSS pixels are mapped directly to pixels in
the output PNG image. Use one of `width`, `height`, or `scale` to override
this behavior.
Parameters
----------
canvas: :class:`toyplot.canvas.Canvas`
Canvas to be rendered.
fobj: file-like object or string, optional
The file to write. Use a string filepath to write data directly to disk.
If `None` (the default), the PNG data will be returned to the caller
instead.
width: number, optional
Specify the width of the output image in pixels.
height: number, optional
Specify the height of the output image in pixels.
scale: number, optional
Ratio of output image pixels to `canvas` pixels.
Returns
-------
png: PNG image data, or `None`
PNG representation of `canvas`, or `None` if the caller specifies the
`fobj` parameter.
Notes
-----
The output PNG is currently rendered using
:func:`toyplot.cairo.png.render()`. This may change in the future.
"""
return toyplot.cairo.png.render(canvas, fobj, width, height, scale)
def render_frames(canvas, width=None, height=None, scale=None):
"""Render a canvas as a sequence of PNG images.
By default, canvas dimensions in CSS pixels are mapped directly to pixels in
the output PNG images. Use one of `width`, `height`, or `scale` to override
this behavior.
Parameters
----------
canvas: :class:`toyplot.canvas.Canvas`
Canvas to be rendered.
width: number, optional
Specify the width of the output image in pixels.
height: number, optional
Specify the height of the output image in pixels.
scale: number, optional
Ratio of output image pixels to `canvas` pixels.
Returns
-------
frames: Python generator expression that returns each PNG image in the sequence.
The caller must iterate over the returned frames and is responsible for all
subsequent processing, including disk I/O, video compression, etc.
Notes
-----
The output PNG images are currently rendered using
:func:`toyplot.cairo.png.render_frames()`. This may change in the future.
Examples
--------
    >>> for frame, png in enumerate(toyplot.png.render_frames(canvas)):
... open("frame-%s.png" % frame, "wb").write(png)
"""
return toyplot.cairo.png.render_frames(canvas, width, height, scale)
| cmorgan/toyplot | toyplot/png.py | Python | bsd-3-clause | 2,849 | 0.002457 |
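A brief, hypothetical usage sketch of the two entry points documented above; the Canvas and axes calls are assumptions about the wider toyplot API (the axes factory name varies between versions), while the render() arguments follow the docstrings.

# Hypothetical usage sketch; plotting calls are assumptions about the toyplot API.
import toyplot
import toyplot.png

canvas = toyplot.Canvas(width=400, height=300)
axes = canvas.cartesian()      # assumed axes factory; older releases use canvas.axes()
axes.plot([0, 1, 4, 9, 16])

toyplot.png.render(canvas, "figure.png", scale=2)   # write a 2x-resolution PNG to disk
png_bytes = toyplot.png.render(canvas)              # or keep the PNG bytes in memory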
"""
Test that the debugger handles loops in std::list (which can appear as a result of e.g. memory
corruption).
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class LibcxxListDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@add_test_categories(["libc++"])
@expectedFailureAndroid(bugnumber="llvm.org/pr32592")
def test_with_run_command(self):
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target and target.IsValid(), "Target is valid")
file_spec = lldb.SBFileSpec("main.cpp", False)
breakpoint1 = target.BreakpointCreateBySourceRegex(
'// Set break point at this line.', file_spec)
self.assertTrue(breakpoint1 and breakpoint1.IsValid())
breakpoint2 = target.BreakpointCreateBySourceRegex(
'// Set second break point at this line.', file_spec)
self.assertTrue(breakpoint2 and breakpoint2.IsValid())
# Run the program, it should stop at breakpoint 1.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process and process.IsValid(), PROCESS_IS_VALID)
self.assertEqual(
len(lldbutil.get_threads_stopped_at_breakpoint(process, breakpoint1)), 1)
# verify our list is displayed correctly
self.expect(
"frame variable *numbers_list",
substrs=[
'[0] = 1',
'[1] = 2',
'[2] = 3',
'[3] = 4',
'[5] = 6'])
# Continue to breakpoint 2.
process.Continue()
self.assertTrue(process and process.IsValid(), PROCESS_IS_VALID)
self.assertEqual(
len(lldbutil.get_threads_stopped_at_breakpoint(process, breakpoint2)), 1)
# The list is now inconsistent. However, we should be able to get the first three
# elements at least (and most importantly, not crash).
self.expect(
"frame variable *numbers_list",
substrs=[
'[0] = 1',
'[1] = 2',
'[2] = 3'])
# Run to completion.
process.Continue()
self.assertEqual(process.GetState(), lldb.eStateExited, PROCESS_EXITED)
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx/list/loop/TestDataFormatterLibcxxListLoop.py | Python | bsd-3-clause | 2,446 | 0.002044 |
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 Red Hat, Inc.
# This file is part of python-fedora
#
# python-fedora is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# python-fedora is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
'''
A Wiki Client
This interface is *deprecated*. Please use resources recommended by upstream
instead: https://www.mediawiki.org/wiki/API:Client_code#Python
.. moduleauthor:: Luke Macken <lmacken@redhat.com>
.. moduleauthor:: Toshio Kuratomi <tkuratom@redhat.com>
.. moduleauthor:: Ian Weller <ian@ianweller.org>
'''
from __future__ import print_function
from datetime import datetime, timedelta
import time
import warnings
from kitchen.text.converters import to_bytes
from fedora.client import BaseClient, AuthError
from fedora import _
MEDIAWIKI_DATEFORMAT = "%Y-%m-%dT%H:%M:%SZ"
class Wiki(BaseClient):
api_high_limits = False
def __init__(self, base_url='https://fedoraproject.org/w/',
*args, **kwargs):
super(Wiki, self).__init__(base_url, *args, **kwargs)
warnings.warn(
"The Wiki client is deprecated. Please use resources "
"recommended by upstream instead: https://www.mediawiki."
"org/wiki/API:Client_code#Python")
def get_recent_changes(self, now, then, limit=500):
""" Get recent wiki changes from `now` until `then` """
data = self.send_request(
'api.php', req_params={
'list': 'recentchanges',
'action': 'query',
'format': 'json',
'rcprop': 'user|title',
'rcend': then.isoformat().split('.')[0] + 'Z',
'rclimit': limit,
})
if 'error' in data:
raise Exception(data['error']['info'])
return data['query']['recentchanges']
def login(self, username, password):
data = self.send_request('api.php', req_params={
'action': 'login',
'format': 'json',
'lgname': username,
'lgpassword': password,
})
if 'lgtoken' not in data.get('login', {}):
raise AuthError(
'Login failed: %(data)s' % {
'data': to_bytes(data)
})
#self.session_id = data['login']['lgtoken']
#self.username = data['login']['lgusername']
self.check_api_limits()
return data
def check_api_limits(self):
""" Checks whether you have the 'apihighlimits' right or not. """
data = self.send_request('api.php', req_params={
'action': 'query',
'meta': 'userinfo',
'uiprop': 'rights',
'format': 'json',
})
self.api_high_limits = "apihighlimits" in \
data['query']['userinfo']['rights']
return self.api_high_limits
def print_recent_changes(self, days=7, show=10):
now = datetime.utcnow()
then = now - timedelta(days=days)
print(_(u"From %(then)s to %(now)s") % {'then': then, 'now': now})
changes = self.get_recent_changes(now=now, then=then)
num_changes = len(changes)
print(_(u"%d wiki changes in the past week") % num_changes)
if num_changes == 500:
print(_(
u"""Warning: Number of changes reaches the API return limit.
You will not get the complete list of changes unless
you run this script using a 'bot' account."""))
users = {}
pages = {}
for change in changes:
users.setdefault(change['user'], []).append(change['title'])
pages[change['title']] = pages.setdefault(change['title'], 0) + 1
print(_(u'\n== Most active wiki users =='))
for user, changes in sorted(users.items(),
cmp=lambda x, y: cmp(len(x[1]), len(y[1])),
reverse=True)[:show]:
print(u' %-50s %d' % (('%s' % user).ljust(50, '.'),
len(changes)))
print(_(u'\n== Most edited pages =='))
for page, num in sorted(pages.items(),
cmp=lambda x, y: cmp(x[1], y[1]),
reverse=True)[:show]:
print(u' %-50s %d' % (('%s' % page).ljust(50, '.'), num))
def fetch_all_revisions(self, start=1, flags=True, timestamp=True,
user=True, size=False, comment=True, content=False,
title=True, ignore_imported_revs=True,
ignore_wikibot=False, callback=None):
"""
Fetch data for all revisions. This could take a long time. You can
start at a specific revision by modifying the 'start' keyword argument.
To ignore revisions made by "ImportUser" and "Admin" set
ignore_imported_revs to True (this is the default). To ignore edits
made by Wikibot set ignore_wikibot to True (False is the default).
Modifying the remainder of the keyword arguments will return less/more
data.
"""
# first we need to get the latest revision id
change = self.send_request(
'api.php', req_params={
'list': 'recentchanges',
'action': 'query',
'format': 'json',
'rcprop': 'ids',
'rclimit': 1,
'rctype': 'edit|new',
}
)
latest_revid = change['query']['recentchanges'][0]['revid']
# now we loop through all the revisions we want
rvprop_list = {
'flags': flags,
'timestamp': timestamp,
'user': True,
'size': size,
'comment': comment,
'content': content,
'ids': True,
}
rvprop = '|'.join([key for key in rvprop_list if rvprop_list[key]])
revs_to_get = list(range(start, latest_revid))
all_revs = {}
if self.api_high_limits:
limit = 500
else:
limit = 50
for i in range(0, len(revs_to_get), limit):
revid_list = revs_to_get[i:i+limit]
revid_str = '|'.join([str(rev) for rev in revid_list])
data = self.send_request(
'api.php', req_params={
'action': 'query',
'prop': 'revisions',
'rvprop': rvprop,
'revids': revid_str,
'format': 'json',
}
)
if 'pages' not in data['query'].keys():
continue
if 'badrevids' in data['query'].keys():
[revs_to_get.remove(i['revid']) for i in
data['query']['badrevids'].values()]
for pageid in data['query']['pages']:
page = data['query']['pages'][pageid]
for revision in page['revisions']:
if ignore_imported_revs and \
revision['user'] in ['ImportUser', 'Admin'] or \
ignore_wikibot and revision['user'] == 'Wikibot':
revs_to_get.remove(revision['revid'])
continue
this_rev = {}
if flags:
this_rev['minor'] = 'minor' in revision.keys()
if timestamp:
this_rev['time'] = time.strptime(revision['timestamp'],
MEDIAWIKI_DATEFORMAT)
if user:
this_rev['user'] = revision['user']
if size:
this_rev['size'] = revision['size']
if comment:
if 'comment' in revision.keys():
this_rev['comment'] = revision['comment']
else:
this_rev['comment'] = None
if content:
this_rev['content'] = revision['content']
if title:
this_rev['title'] = page['title']
all_revs[revision['revid']] = this_rev
if callback:
callback(all_revs, revs_to_get)
return all_revs
if __name__ == '__main__':
#from getpass import getpass
#username = raw_input('Username: ')
#password = getpass()
wiki = Wiki()
#cookie = wiki.login(username=username, password=password)
#print "login response =", cookie
#wiki = Wiki(username=username, session_id=cookie['login']['lgtoken'],
# session_name='fpo-mediawiki_en_Token')
wiki.print_recent_changes()
| vivekanand1101/python-fedora | fedora/client/wiki.py | Python | gpl-2.0 | 9,280 | 0.000862 |
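A short, hypothetical usage sketch of the client above; the credentials are placeholders, and the callback signature mirrors the (all_revs, revs_to_get) arguments that fetch_all_revisions() passes.

# Hypothetical usage sketch; credentials are placeholders.
wiki = Wiki()
wiki.print_recent_changes(days=7, show=10)

# Logging in checks the 'apihighlimits' right, raising the per-request revision limit from 50 to 500.
wiki.login('ExampleUser', 'example-password')
revisions = wiki.fetch_all_revisions(
    start=1,
    content=False,
    callback=lambda fetched, remaining: None,   # progress hook: revisions so far, revids still queued
)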
"""distutils.msvccompiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: msvccompiler.py,v 1.64.2.4 2005/08/07 20:50:37 loewis Exp $"
import sys, os, string
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils import log
_can_read_reg = 0
try:
import _winreg
_can_read_reg = 1
hkey_mod = _winreg
RegOpenKeyEx = _winreg.OpenKeyEx
RegEnumKey = _winreg.EnumKey
RegEnumValue = _winreg.EnumValue
RegError = _winreg.error
except ImportError:
try:
import win32api
import win32con
_can_read_reg = 1
hkey_mod = win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegEnumKey = win32api.RegEnumKey
RegEnumValue = win32api.RegEnumValue
RegError = win32api.error
except ImportError:
log.info("Warning: Can't read registry to find the "
"necessary compiler setting\n"
"Make sure that Python modules _winreg, "
"win32api or win32con are installed.")
pass
if _can_read_reg:
HKEYS = (hkey_mod.HKEY_USERS,
hkey_mod.HKEY_CURRENT_USER,
hkey_mod.HKEY_LOCAL_MACHINE,
hkey_mod.HKEY_CLASSES_ROOT)
def read_keys(base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while 1:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i = i + 1
return L
def read_values(base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while 1:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[convert_mbcs(name)] = convert_mbcs(value)
i = i + 1
return d
def convert_mbcs(s):
enc = getattr(s, "encode", None)
if enc is not None:
try:
s = enc("mbcs")
except UnicodeError:
pass
return s
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.load_macros(version)
def set_macro(self, macro, path, key):
for base in HKEYS:
d = read_values(base, path)
if d:
self.macros["$(%s)" % macro] = d[key]
break
def load_macros(self, version):
vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
net = r"Software\Microsoft\.NETFramework"
self.set_macro("FrameworkDir", net, "installroot")
try:
if version > 7.0:
try:
self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
except KeyError:
# likely using free Command-line compiler with free SDK
freeSDK = r"SOFTWARE\Microsoft\MicrosoftSDK\InstalledSDKs\63DADB24-DC99-45EB-A748-EC93AB8A7497"
# following should raise key error if not available...
self.set_macro( "FrameworkSDKDir", freeSDK, 'install dir' )
else:
self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
except KeyError, exc: #
raise DistutilsPlatformError, \
("The .NET Framework SDK needs to be installed before "
"building extensions for Python.")
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = read_values(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = string.replace(s, k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = string.find(sys.version, prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
class MSVCCompiler (CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__ (self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = get_build_version()
if self.__version >= 7:
self.__root = r"Software\Microsoft\VisualStudio"
self.__macros = MacroExpander(self.__version)
else:
self.__root = r"Software\Microsoft\Devstudio"
self.initialized = False
def initialize(self):
self.__paths = self.get_msvc_paths("path")
if len (self.__paths) == 0:
raise DistutilsPlatformError, \
("Python was built with version %s of Visual Studio, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed." % self.__version)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
self.set_path_env_var('lib')
self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in string.split(os.environ['path'], ';'):
self.__paths.append(p)
except KeyError:
pass
os.environ['path'] = string.join(self.__paths, ';')
self.preprocess_options = None
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
]
else:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized: self.initialize()
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn ([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError, msg:
raise CompileError, msg
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname (src)
rc_dir = os.path.dirname (obj)
try:
# first compile .MC to .RC and .H file
self.spawn ([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn ([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError, msg:
raise CompileError, msg
continue
else:
# how to handle this file?
raise CompileError (
"Don't know how to compile %s to %s" % \
(src, obj))
output_opt = "/Fo" + obj
try:
self.spawn ([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
return objects
# compile ()
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized: self.initialize()
(objects, output_dir) = self._fix_object_args (objects, output_dir)
output_filename = \
self.library_filename (output_libname, output_dir=output_dir)
if self._need_link (objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn ([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# create_static_lib ()
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized: self.initialize()
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options (self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join (output_dir, output_filename)
if self._need_link (objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
os.path.dirname(objects[0]),
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath (os.path.dirname (output_filename))
try:
self.spawn ([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# link ()
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option (self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option (self, dir):
raise DistutilsPlatformError, \
"don't know how to set runtime library search path for MSVC++"
def library_option (self, lib):
return self.library_filename (lib)
def find_library_file (self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# find_library_file ()
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in string.split(os.environ['Path'],';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
def get_msvc_paths(self, path, platform='x86'):
"""Get a list of devstudio directories (include, lib or path).
Return a list of strings. The list will be empty if unable to
access the registry or appropriate registry keys not found.
"""
if not _can_read_reg:
return []
path = path + " dirs"
if self.__version >= 7:
key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
% (self.__root, self.__version))
else:
key = (r"%s\6.0\Build System\Components\Platforms"
r"\Win32 (%s)\Directories" % (self.__root, platform))
for base in HKEYS:
d = read_values(base, key)
if d:
if self.__version >= 7:
return string.split(self.__macros.sub(d[path]), ";")
else:
return string.split(d[path], ";")
# MSVC 6 seems to create the registry entries we need only when
# the GUI is run.
if self.__version == 6:
for base in HKEYS:
if read_values(base, r"%s\6.0" % self.__root) is not None:
self.warn("It seems you have Visual Studio 6 installed, "
"but the expected registry settings are not present.\n"
"You must at least run the Visual Studio GUI once "
"so that these entries are created.")
break
if self.__version >= 7:
# well, what if we've already set the environment variables?
map = {
'library dirs': 'lib',
'path dirs': 'path',
'include dirs': 'include',
}
path = map.get( path )
if os.environ.get( path ) is not None:
return string.split(
os.environ.get( path ),
os.pathsep,
)
return []
def set_path_env_var(self, name):
"""Set environment variable 'name' to an MSVC path type value.
This is equivalent to a SET command prior to execution of spawned
commands.
"""
if name == "lib":
p = self.get_msvc_paths("library")
else:
p = self.get_msvc_paths(name)
if p:
os.environ[name] = string.join(p, ';')
| Csega/pyTsai | windows_binary_2.4/windows_compiling/msvccompiler.py | Python | lgpl-2.1 | 22,127 | 0.004384 |
import sys
from remoteserver import DirectResultRemoteServer
class SpecialErrors(object):
def continuable(self, message, traceback):
return self._special_error(message, traceback, continuable=True)
def fatal(self, message, traceback):
return self._special_error(message, traceback,
fatal='this wins', continuable=42)
def _special_error(self, message, traceback, continuable=False, fatal=False):
return {'status': 'FAIL', 'error': message, 'traceback': traceback,
'continuable': continuable, 'fatal': fatal}
if __name__ == '__main__':
DirectResultRemoteServer(SpecialErrors(), *sys.argv[1:])
| yahman72/robotframework | atest/testdata/standard_libraries/remote/specialerrors.py | Python | apache-2.0 | 689 | 0.001451 |
#!/usr/bin/env python
import threading
import Queue
import ftplib
import getopt
import os
import sys
import time
DEF_THREAD_CNT = 5
DEF_NAP_TIME = 10
class FTPExcept(Exception):
def __init__(self, v):
self.value = v
def __str__(self):
return repr(self.value)
class FTPWT(threading.Thread):
def __init__(self, q, lock, s=DEF_NAP_TIME):
threading.Thread.__init__(self)
self.queue = q
self.sleep_time = s
self.lock = lock
    def run(self):
        while True:
            # fetch the next site from the work queue (blocks until one is available)
            ftpsite = self.queue.get()
            try:
                print "Connect to site: " + ftpsite + " ================================================================"
                f = ftplib.FTP(ftpsite)
                f.login()
                if self.lock:
                    self.lock.acquire()
                try:
                    # serialise directory listings when locking was requested
                    f.retrlines('LIST')
                finally:
                    if self.lock:
                        self.lock.release()
                print "Listing completed ================================================================"
                f.quit()
            except Exception as e:
                print >> sys.stderr, "FTPWT exception: ", e
            finally:
                time.sleep(self.sleep_time)
                # mark the queue item as done exactly once per get()
                self.queue.task_done()
def usage():
print >> sys.stderr, sys.argv[0] + ": list ftp site directory contents"
print >> sys.stderr, "usage is: " + sys.argv[0] + " -s site_list -t thread_count -n sleep_time -l [-h]"
print >> sys.stderr, "site_list can be a comma-delimited list of sites"
def process_args(argv):
sites = []
thread_count = -1
nap_time = -1
locking = False
try:
        opts, args = getopt.getopt(argv, 'hs:t:n:l')
except getopt.GetoptError:
usage()
sys.exit(1)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit(2)
elif opt == '-s':
for s in (arg.split(',')):
sites.append(s)
elif opt == '-t':
thread_count = int(arg)
elif opt == '-n':
nap_time = int(arg)
elif opt == '-l':
locking = True
return (sites, thread_count, nap_time, locking)
def queue_em(sites, locking, t=DEF_THREAD_CNT, s=DEF_NAP_TIME):
queue = Queue.Queue()
lock = None
if locking:
lock = threading.Lock()
for i in range(1, t+1):
worker = FTPWT(queue, lock, s=s)
worker.setDaemon(True)
worker.start()
print "FTPWT worker %d created" %i
for site in sites:
queue.put(site.strip())
queue.join()
if __name__ == "__main__":
(sites, thread_count, nap_time, locking) = process_args(sys.argv[1:])
try:
if len(sites) < 1:
raise FTPExcept("no sites specified")
if thread_count < 1:
thread_count = DEF_THREAD_CNT
print >> sys.stderr, "warning: no thread count (-t) specified - using default %d" %DEF_THREAD_CNT
if len(sites) < thread_count:
print >> sys.stderr, "thread count exceeds number of sites to check - using number of sites as thread count"
thread_count = len(sites)
if nap_time < 1:
print >> sys.stderr, "warning: no sleep time (-n) argument specified - using default %d" %DEF_NAP_TIME
nap_time = DEF_NAP_TIME
queue_em(sites, locking, t=thread_count, s=nap_time)
print "all threads completed"
except FTPExcept as e:
print "fatal error: ", e.value
sys.exit(3) | rereidy/SPSE | module 5 - Exploitation Techniques/5-1.py | Python | gpl-3.0 | 3,540 | 0.004237 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.sensors.gcs`."""
import warnings
from airflow.providers.google.cloud.sensors.gcs import (
GCSObjectExistenceSensor, GCSObjectsWtihPrefixExistenceSensor, GCSObjectUpdateSensor,
GCSUploadSessionCompleteSensor,
)
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.sensors.gcs`.",
DeprecationWarning, stacklevel=2
)
class GoogleCloudStorageObjectSensor(GCSObjectExistenceSensor):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectExistenceSensor`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectExistenceSensor`.""",
DeprecationWarning, stacklevel=2
)
super().__init__(*args, **kwargs)
class GoogleCloudStorageObjectUpdatedSensor(GCSObjectUpdateSensor):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectUpdateSensor`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectUpdateSensor`.""",
DeprecationWarning, stacklevel=2
)
super().__init__(*args, **kwargs)
class GoogleCloudStoragePrefixSensor(GCSObjectsWtihPrefixExistenceSensor):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectsWtihPrefixExistenceSensor`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSObjectsWtihPrefixExistenceSensor`.""",
DeprecationWarning, stacklevel=2
)
super().__init__(*args, **kwargs)
class GoogleCloudStorageUploadSessionCompleteSensor(GCSUploadSessionCompleteSensor):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSUploadSessionCompleteSensor`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.google.cloud.sensors.gcs.GCSUploadSessionCompleteSensor`.""",
DeprecationWarning, stacklevel=2
)
super().__init__(*args, **kwargs)
| spektom/incubator-airflow | airflow/contrib/sensors/gcs_sensor.py | Python | apache-2.0 | 3,286 | 0.003652 |
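The shims above amount to the following import migration; a minimal before/after sketch:

# Deprecated import path (still works, but triggers the DeprecationWarning raised above):
from airflow.contrib.sensors.gcs_sensor import GoogleCloudStorageObjectSensor

# Preferred import path, per the warning messages:
from airflow.providers.google.cloud.sensors.gcs import GCSObjectExistenceSensor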
from collections import namedtuple
# Structure returned by DatabaseIntrospection.get_table_list()
TableInfo = namedtuple('TableInfo', ['name', 'type'])
# Structure returned by the DB-API cursor.description interface (PEP 249)
FieldInfo = namedtuple('FieldInfo', 'name type_code display_size internal_size precision scale null_ok default')
class BaseDatabaseIntrospection:
"""Encapsulate backend-specific introspection utilities."""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""
Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example.
"""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""
Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def column_name_converter(self, name):
"""
Apply a conversion to the column name for the purposes of comparison.
Use table_name_converter() by default.
"""
return self.table_name_converter(name)
def table_names(self, cursor=None, include_views=False):
"""
Return a list of names of all tables that exist in the database.
Sort the returned table list by Python's default sorting. Do NOT use
the database's ORDER BY here to avoid subtle differences in sorting
order between databases.
"""
def get_names(cursor):
return sorted(ti.name for ti in self.get_table_list(cursor)
if include_views or ti.type == 't')
if cursor is None:
with self.connection.cursor() as cursor:
return get_names(cursor)
return get_names(cursor)
def get_table_list(self, cursor):
"""
Return an unsorted list of TableInfo named tuples of all tables and
views that exist in the database.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_table_list() method')
def django_table_names(self, only_existing=False, include_views=True):
"""
Return a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, include only the tables in the database.
"""
from django.apps import apps
from django.db import router
tables = set()
for app_config in apps.get_app_configs():
for model in router.get_migratable_models(app_config, self.connection.alias):
if not model._meta.managed:
continue
tables.add(model._meta.db_table)
tables.update(
f.m2m_db_table() for f in model._meta.local_many_to_many
if f.remote_field.through._meta.managed
)
tables = list(tables)
if only_existing:
existing_tables = self.table_names(include_views=include_views)
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"""
Return a set of all models represented by the provided list of table
names.
"""
from django.apps import apps
from django.db import router
all_models = []
for app_config in apps.get_app_configs():
all_models.extend(router.get_migratable_models(app_config, self.connection.alias))
tables = list(map(self.table_name_converter, tables))
return {
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
}
def sequence_list(self):
"""
Return a list of information about all DB sequences for all models in
all apps.
"""
from django.apps import apps
from django.db import router
sequence_list = []
with self.connection.cursor() as cursor:
for app_config in apps.get_app_configs():
for model in router.get_migratable_models(app_config, self.connection.alias):
if not model._meta.managed:
continue
if model._meta.swapped:
continue
sequence_list.extend(self.get_sequences(cursor, model._meta.db_table, model._meta.local_fields))
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.remote_field.through is None:
sequence = self.get_sequences(cursor, f.m2m_db_table())
sequence_list.extend(sequence or [{'table': f.m2m_db_table(), 'column': None}])
return sequence_list
def get_sequences(self, cursor, table_name, table_fields=()):
"""
Return a list of introspected sequences for table_name. Each sequence
is a dict: {'table': <table_name>, 'column': <column_name>}. An optional
'name' key can be added if the backend supports named sequences.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_sequences() method')
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of:
(column_name, referenced_table_name, referenced_column_name)
for all key columns in given table.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_key_columns() method')
def get_primary_key_column(self, cursor, table_name):
"""
Return the name of the primary key column for the given table.
"""
for constraint in self.get_constraints(cursor, table_name).values():
if constraint['primary_key']:
return constraint['columns'][0]
return None
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index)
across one or more columns.
Return a dict mapping constraint names to their attributes,
where attributes is a dict with keys:
* columns: List of columns this covers
* primary_key: True if primary key, False otherwise
* unique: True if this is a unique constraint, False otherwise
* foreign_key: (table, column) of target, or None
* check: True if check constraint, False otherwise
* index: True if index, False otherwise.
* orders: The order (ASC/DESC) defined for the columns of indexes
* type: The type of the index (btree, hash, etc.)
Some backends may return special constraint names that don't exist
if they don't name constraints of a certain type (e.g. SQLite)
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_constraints() method')
| uranusjr/django | django/db/backends/base/introspection.py | Python | bsd-3-clause | 7,497 | 0.001601 |
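A minimal sketch of how this introspection API is normally reached from application code; it assumes a configured Django project whose database backend implements the methods above.

# Minimal sketch, assuming a configured Django project.
from django.db import connection

introspection = connection.introspection
print(introspection.table_names(include_views=True))        # every table (and view) in the database
print(introspection.django_table_names(only_existing=True)) # only tables backing installed models

with connection.cursor() as cursor:
    constraints = introspection.get_constraints(cursor, 'django_migrations')
    pk_column = introspection.get_primary_key_column(cursor, 'django_migrations')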
# Copyright 2013 The Distro Tracker Developers
# See the COPYRIGHT file at the top-level directory of this distribution and
# at http://deb.li/DTAuthors
#
# This file is part of Distro Tracker. It is subject to the license terms
# in the LICENSE file found in the top-level directory of this
# distribution and at http://deb.li/DTLicense. No part of Distro Tracker,
# including this file, may be copied, modified, propagated, or distributed
# except according to the terms contained in the LICENSE file.
"""
A skeleton of all vendor-specific functions that can be implemented.
"""
from __future__ import unicode_literals
def get_keyword(local_part, msg):
"""
The function should return a keyword which matches the message or ``None``
if it does not match any keyword or the vendor does not provide any custom
keyword matching.
:param local_part: The local part of the email address to which the message
was sent
:type local_part: string
:param msg: The original received package message
:type msg: :py:class:`Message <email.message.Message>`
"""
pass
def add_new_headers(received_message, package_name, keyword):
"""
The function should return a list of two-tuples (header_name, header_value)
which are extra headers that should be added to package messages before
they are forwarded to subscribers.
If no extra headers are wanted return an empty list or ``None``
:param received_message: The original received package message
:type received_message: :py:class:`email.message.Message`
:param package_name: The name of the package for which the message was
intended
:type package_name: string
:param keyword: The keyword with which the message is tagged.
:type keyword: string
"""
pass
def approve_default_message(msg):
"""
The function should return a ``Boolean`` indicating whether this message
should be forwarded to subscribers which are subscribed to default
keyword messages.
:param msg: The original received package message
:type msg: :py:class:`email.message.Message`
"""
pass
def get_pseudo_package_list():
"""
The function should return a list of pseudo-packages (their names) which
are to be considered valid pseudo-packages.
Any existing pseudo-packages which are no longer found in this list will be
"demoted" to subscription-only packages, instead of being deleted.
If there should be no update to the list, the function should return
``None``.
"""
pass
def get_package_information_site_url(package_name,
source_package=False,
repository_name=None):
"""
The function should return a URL to a package information Web page for
the given package and repository. The repository parameter is optional.
If no URL exists for the given parameters, returns ``None``.
:param package_name: The name of the package for which the URL of the
package information Web page should be given.
:type package_name: string
:param source_package: If ``True`` the function should consider the given
package a source package, otherwise it should be considered a binary
package.
:type source_package: ``Boolean``
:param repository_name: The name of the repository for which the package
information should be provided.
"""
pass
def get_developer_information_url(developer_email):
"""
The function should return a URL which displays extra information about a
developer, given his email.
The function should return ``None`` if the vendor does not provide
additional developer information or if it does not have the information for
the particular developer email.
In this case, on the package page, a <mailto> link will be provided,
instead of the additional information.
.. note::
This function can be used by other modules apart from the general panel
:param developer_email: The email of the developer for which a URL to a
site with additional information should be given.
:type developer_email: string
"""
pass
def get_external_version_information_urls(package_name):
"""
The function should return a list of external Web resources which provide
additional information about the versions of a package.
Each element of the list should be a dictionary with the keys:
- url
- description
The function should return ``None`` if the vendor does not want to provide
extra version information URLs.
:param package_name: The name of the package for which external version
information URLs should be provided.
:type package_name: string
"""
pass
def get_maintainer_extra(developer_email, package_name=None):
"""
The function should return a list of additional items that are to be
included in the general panel next to the maintainer.
Each item needs to be a dictionary itself and can contain the following
keys:
- display
- description
- url
.. note::
Only the ``display`` key is mandatory.
The function should return ``None`` if the vendor does not wish to include
any extra items.
:param developer_email: The email of the maintainer for which extra
information is requested.
:param package_name: The name of the package where the contributor is the
maintainer and for which extra information should be provided.
This parameter is included in case vendors want to provide different
information based on the package page where the information will be
displayed.
"""
pass
def get_uploader_extra(developer_email, package_name=None):
"""
The function should return a list of additional items that are to be
included in the general panel next to an uploader.
Each item needs to be a dictionary itself and can contain the following
keys:
- display
- description
- url
.. note::
Only the ``display`` key is mandatory.
The function should return ``None`` if the vendor does not wish to include
any extra items.
:param developer_email: The email of the uploader for which extra
information is requested.
:param package_name: The name of the package where the contributor is an
uploader and for which extra information should be provided.
This parameter is included in case vendors want to provide different
information based on the package page where the information will be
displayed.
"""
pass
def allow_package(stanza):
"""
The function provides a way for vendors to exclude some packages from being
saved in the database.
:param stanza: The raw package entry from a ``Sources`` file.
:type stanza: case-insensitive dict
"""
pass
def get_bug_tracker_url(package_name, package_type, category_name):
"""
The function provides a way for vendors to give a URL to a bug tracker
based on a package name, its type and the bug category name.
This function is used by :class:`BugsPanel <pts.core.panels.BugsPanel>` to
include a link to the bug tracking site on top of the known bug statistics.
:param package_name: The name of the package for which the bug tracker URL
should be provided.
:param package_type: The type of the package for which the bug tracker URL
should be provided. It is one of: ``source``, ``pseudo`` or ``binary``.
:param category_name: The name of the bug tracker category for which the
URL should be provided.
:returns: The bug tracker URL for the package and given category.
:rtype: string or ``None`` if the vendor does not have a bug tracker URL
for the given parameters.
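    A minimal, purely illustrative sketch of a vendor implementation (the URL
    pattern below is an assumption, not any real tracker's scheme)::

        def get_bug_tracker_url(package_name, package_type, category_name):
            if package_type == 'source':
                return 'https://bugs.example.org/src/{0}?category={1}'.format(
                    package_name, category_name)
            return None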
"""
pass
def get_bug_panel_stats(package_name):
"""
The function provides a way for vendors to customize the bug categories
displayed in the :class:`BugsPanel <pts.core.panels.BugsPanel>`.
This is useful if the vendor does not want to have all categories which are
stored in the :class:`PackageBugStats <pts.core.models.PackageBugStats>`
displayed on the package page.
In this case the return value must be a list of dicts where each element
describes a single bug category for the given package.
Each dict has to provide at minimum the following keys:
- ``category_name`` - the name of the bug category
- ``bug_count`` - the number of known bugs for the given package and category
Optionally, the following keys can be provided:
- ``display_name`` - a name for the bug category which is displayed in the
list. If this is not provided, the ``category_name`` is used instead.
- ``description`` - text further explaining the category which shows up in a
tooltip when mousing over the display name.
Another use case is when the vendor provides a custom
:data:`PTS_BUGS_PANEL_TEMPLATE <pts.project.local_settings.PTS_BUGS_PANEL_TEMPLATE>`
in which case the return value is passed to the template in the
``panel.context`` context variable and does not need to follow any special
format.
"""
pass
def get_binary_package_bug_stats(binary_name):
"""
The function provides a way for vendors to provide customized bug stats
for binary packages.
This function is used by the
:class:`BinariesInformationPanel <pts.core.panels.BinariesInformationPanel>`
to display the bug information next to the binary name.
It should return a list of dicts where each element describes a single bug
category for the given package.
Each dict has to provide at minimum the following keys:
- ``category_name`` - the name of the bug category
- ``bug_count`` - the number of known bugs for the given package and category
Optionally, the following keys can be provided:
- ``display_name`` - a name for the bug category. It is used by the
:class:`BinariesInformationPanel <pts.core.panels.BinariesInformationPanel>`
to display a tooltip when mousing over the bug count number.
"""
pass
def create_news_from_email_message(message):
"""
The function provides a way for vendors to customize the news created from
received emails.
The function should create a :class:`pts.core.models.News` model instance
for any news items it wishes to generate out of the received email message.
The content type of the created :class:`News <pts.core.models.News>` does
not have to be ``message/rfc822`` if the created news is only based on
information found in the message. It should be set to ``message/rfc822`` if
the content of the news is set to the content of the email message to make
sure it is rendered appropriately.
The function :func:`pts.mail.mail_news.process.create_news` can be used to
create simple news from the message after determining that it should in
fact be created.
The function should return a list of created :class:`News <pts.core.models.News>`
instances or ``None`` if it did not create any.
"""
pass
def get_extra_versions(package):
"""
The function provides additional versions which should be displayed in the
versions panel.
Each version to be displayed should be a dict with the following keys:
- version
- repository_shorthand
- version_link - optional
- repository_link - optional
The return value should be a list of such versions or ``None`` if the vendor
does not wish to provide any additional versions.
:param package: The package for which additional versions should be
provided.
:type package: :class:`PackageName <pts.core.models.PackageName>`
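    A possible return value might look like this (illustrative only; the
    repository shorthand and links are invented for the example)::

        [{'version': '1.2-3~bpo1',
          'repository_shorthand': 'backports',
          'version_link': 'https://example.org/pkg/1.2-3~bpo1'}]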
"""
pass
| sa2ajj/DistroTracker | pts/vendor/skeleton/rules.py | Python | gpl-2.0 | 11,857 | 0.000675 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "QiuDaBao.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| sysuccc/QiuDaBao | manage.py | Python | gpl-2.0 | 251 | 0 |
class O(object): pass
class A(O): pass
class B(O): pass
class C(O): pass
class D(O): pass
class E(O): pass
class K1(A,B,C): pass
class K2(D,B,E): pass
class K3(D,A): pass
class Z(K1,K2,K3): pass
print K1.__mro__
print K2.__mro__
print K3.__mro__
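# With C3 linearization the expected result for Z (assuming standard new-style
# resolution) is: Z, K1, K2, K3, D, A, B, C, E, O, object.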
print Z.__mro__
| ArcherSys/ArcherSys | skulpt/test/run/t242.py | Python | mit | 262 | 0.072519 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""End-to-end test for the streaming wordcount example."""
from __future__ import absolute_import
import logging
import unittest
import uuid
from builtins import range
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.examples import streaming_wordcount
from apache_beam.io.gcp.tests.pubsub_matcher import PubSubMessageMatcher
from apache_beam.runners.runner import PipelineState
from apache_beam.testing import test_utils
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
INPUT_TOPIC = 'wc_topic_input'
OUTPUT_TOPIC = 'wc_topic_output'
INPUT_SUB = 'wc_subscription_input'
OUTPUT_SUB = 'wc_subscription_output'
DEFAULT_INPUT_NUMBERS = 500
WAIT_UNTIL_FINISH_DURATION = 6 * 60 * 1000 # in milliseconds
class StreamingWordCountIT(unittest.TestCase):
def setUp(self):
self.test_pipeline = TestPipeline(is_integration_test=True)
self.project = self.test_pipeline.get_option('project')
self.uuid = str(uuid.uuid4())
# Set up PubSub environment.
from google.cloud import pubsub
self.pub_client = pubsub.PublisherClient()
self.input_topic = self.pub_client.create_topic(
self.pub_client.topic_path(self.project, INPUT_TOPIC + self.uuid))
self.output_topic = self.pub_client.create_topic(
self.pub_client.topic_path(self.project, OUTPUT_TOPIC + self.uuid))
self.sub_client = pubsub.SubscriberClient()
self.input_sub = self.sub_client.create_subscription(
self.sub_client.subscription_path(self.project, INPUT_SUB + self.uuid),
self.input_topic.name)
self.output_sub = self.sub_client.create_subscription(
self.sub_client.subscription_path(self.project, OUTPUT_SUB + self.uuid),
self.output_topic.name,
ack_deadline_seconds=60)
def _inject_numbers(self, topic, num_messages):
"""Inject numbers as test data to PubSub."""
logging.debug('Injecting %d numbers to topic %s', num_messages, topic.name)
for n in range(num_messages):
self.pub_client.publish(self.input_topic.name, str(n).encode('utf-8'))
def tearDown(self):
test_utils.cleanup_subscriptions(self.sub_client,
[self.input_sub, self.output_sub])
test_utils.cleanup_topics(self.pub_client,
[self.input_topic, self.output_topic])
@attr('IT')
def test_streaming_wordcount_it(self):
# Build expected dataset.
expected_msg = [('%d: 1' % num).encode('utf-8')
for num in range(DEFAULT_INPUT_NUMBERS)]
# Set extra options to the pipeline for test purpose
state_verifier = PipelineStateMatcher(PipelineState.RUNNING)
pubsub_msg_verifier = PubSubMessageMatcher(self.project,
self.output_sub.name,
expected_msg,
timeout=400)
extra_opts = {'input_subscription': self.input_sub.name,
'output_topic': self.output_topic.name,
'wait_until_finish_duration': WAIT_UNTIL_FINISH_DURATION,
'on_success_matcher': all_of(state_verifier,
pubsub_msg_verifier)}
# Generate input data and inject to PubSub.
self._inject_numbers(self.input_topic, DEFAULT_INPUT_NUMBERS)
# Get pipeline options from command argument: --test-pipeline-options,
# and start pipeline job by calling pipeline main function.
streaming_wordcount.run(
self.test_pipeline.get_full_options_as_args(**extra_opts),
save_main_session=False)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| RyanSkraba/beam | sdks/python/apache_beam/examples/streaming_wordcount_it_test.py | Python | apache-2.0 | 4,580 | 0.001965 |
'''
This is not part of the specification.
Helper classes to make it work as a Python library.
'''
from .attribute import DbusAttr
from .interface import DbusInterface
from .method import DbusMethod
from .signal import DbusSignal
from .utils import get_mainloop, get_uri, implements, \
list_all_interface, list_interfaces, list_paths
| pat1/autoradio | autoradio/mpris2/decorator/__init__.py | Python | gpl-2.0 | 323 | 0.003096 |
"""
Activity logs.
"""
import asyncio
import datetime
import json
import websockets
from . import base
from shellish.layout import Table
class Log(base.PlexCommand):
""" Show activity log """
name = 'log'
type_map = {
'StatusNotification': 'Status',
'ProgressNotification': 'Progress'
}
@asyncio.coroutine
def notifications(self, table):
server = self.serverapi.uri.split('://', 1)[1]
notif_url = 'ws://%s/:/websockets/notifications' % server
feed = yield from websockets.connect(notif_url)
while True:
data = yield from feed.recv()
if data is None:
break
table.print(json.loads(data).get('_children'))
yield from feed.close()
def get_ts(self, obj):
return datetime.datetime.now().strftime('%I:%M:%S %p')
def get_type(self, obj):
return self.type_map[obj['_elementType']]
def get_msg(self, obj):
if 'message' in obj:
return obj['message']
return '%s: %s' % (obj['title'], obj['description'])
def run(self, args):
headers = ['Date', 'Type', 'Message']
accessors = [self.get_ts, self.get_type, self.get_msg]
table = Table(headers=headers, accessors=accessors)
        evloop = asyncio.get_event_loop()
        # run_until_complete returns the coroutine's result (None here), so it
        # cannot be used as a context manager; just run the feed to completion.
        evloop.run_until_complete(self.notifications(table))
activity = base.PlexCommand(name='activity', doc=__doc__)
activity.add_subcommand(Log, default=True)
__commands__ = [activity]
| mayfield/plexcli | plexcli/commands/activity.py | Python | mit | 1,539 | 0 |
from ImageScripter import *
from elan.functions import Get_Device_List_Simple,Diff
from elan import *
#Past_List = ['2GIG GC3', '2GIG GC3', '2GIG GC3 (3.2 firmware)', 'Ademco VISTA-128BP,250BP,FA1660C', 'Ademco VISTA-128BPT,250BPT', 'Ademco VISTA-128FBP,250FBP', 'Bosch/Radionics D7412G,D9412G', 'DSC MAXSYS', 'DSC Power Series / 5401', 'DSC Power Series / IT-100', 'ELK-M1', 'GE Concord', 'GE NetworX NX-4,6,8,8E', 'HAI Omni Series', 'Napco Gemini GEM-X255, P9600', 'Paradox Digiplex', 'Texecom Premier Elite', 'Virtual Security Controller']
#Past_List = ['2GIG GC3', '2GIG GC3 (3.2 firmware)', 'Ademco VISTA-128BP,250BP,FA1660C', 'Ademco VISTA-128BPT,250BPT', 'Ademco VISTA-128FBP,250FBP', 'Bosch/Radionics D7412G,D9412G', 'DSC MAXSYS', 'DSC Power Series / 5401', 'DSC Power Series / IT-100', 'ELK-M1', 'GE Concord', 'GE NetworX NX-4,6,8,8E', 'HAI Omni Series', 'Napco Gemini GEM-X255, P9600', 'Paradox Digiplex', 'Texecom Premier Elite', 'Vario (IP)', 'Vario (RS-232)', 'Virtual Security Controller']
#Past_List = ['2GIG GC3', '2GIG GC3', '2GIG GC3 (3.2 firmware)', 'Ademco VISTA-128BP,250BP,FA1660C', 'Ademco VISTA-128BPT,250BPT', 'Ademco VISTA-128FBP,250FBP', 'Bosch/Radionics D7412G,D9412G', 'DSC MAXSYS', 'DSC Power Series / 5401', 'DSC Power Series / IT-100', 'ELK-M1', 'GE Concord', 'GE NetworX NX-4,6,8,8E', 'HAI Omni Series', 'Napco Gemini GEM-X255, P9600', 'Paradox Digiplex', 'Texecom Premier Elite', 'Vario (IP)', 'Vario (RS-232)', 'Virtual Security Controller']
Past_List = ['2GIG GC3', '2GIG GC3 (3.2 firmware)', 'Ademco VISTA-128BP,250BP,FA1660C', 'Ademco VISTA-128BPT,250BPT', 'Ademco VISTA-128FBP,250FBP', 'Bosch/Radionics D7412G,D9412G', 'DSC MAXSYS', 'DSC Power Series / 5401', 'DSC Power Series / IT-100', 'ELK-M1', 'GE Concord', 'GE NetworX NX-4,6,8,8E', 'HAI Omni Series', 'Napco Gemini GEM-X255, P9600', 'Paradox Digiplex', 'Texecom Premier Elite', 'Vario (IP)', 'Vario (RS-232)', 'Virtual Security Controller']
One = Configurator.security
Two = Configurator.securitypanels
Current_List = Get_Device_List_Simple(One,Two)
Configurator.system.Click()
if Current_List != Past_List:
Difference = Diff(Past_List,Current_List)
error = 'List Changed\n' + str(Difference)
raise ValueError(error)
'''
from ImageScripter import *
from elan.functions import Get_Device_Count
from elan import *
One = Configurator.security
Two = Configurator.securitypanels
count = 18
newcount = Get_Device_Count_Simple(One,Two)
print("New Count is " + str(newcount))
if count != newcount:
Say('Count for ' + Two.DisplayName + " is off")
raise ValueError('Exception 11 -> Count for ' + Two.DisplayName + " is off, raising error")
else:
Say("The new count matches the old count. The test has passed")
Configurator.system.Click()
''' | kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/elan/Pools/AddRemove/1_Count_Security_Panels.py | Python | gpl-3.0 | 2,762 | 0.005069 |
#! /usr/bin/python
#Guruprasad ANanda
"""
Fetches substitutions from pairwise alignments.
"""
from galaxy import eggs
from galaxy.tools.util import maf_utilities
import bx.align.maf
import sys
import os, fileinput
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
if len(sys.argv) < 3:
stop_err("Incorrect number of arguments.")
inp_file = sys.argv[1]
out_file = sys.argv[2]
fout = open(out_file, 'w')
def fetchSubs(block):
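    # Walk the aligned columns of the first (reference) component against each
    # other component. Runs of mismatching, ungapped columns are reported as
    # substitution intervals; alignment-column indices are mapped back to
    # sequence coordinates by subtracting the number of gap characters ('-')
    # seen so far in each sequence.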
src1 = block.components[0].src
sequence1 = block.components[0].text
start1 = block.components[0].start
end1 = block.components[0].end
len1 = int(end1)-int(start1)
len1_withgap = len(sequence1)
for seq in range (1,len(block.components)):
src2 = block.components[seq].src
sequence2 = block.components[seq].text
start2 = block.components[seq].start
end2 = block.components[seq].end
len2 = int(end2)-int(start2)
sub_begin = None
sub_end = None
begin = False
for nt in range(len1_withgap):
if sequence1[nt] not in '-#$^*?' and sequence2[nt] not in '-#$^*?': #Not a gap or masked character
if sequence1[nt].upper() != sequence2[nt].upper():
if not(begin):
sub_begin = nt
begin = True
sub_end = nt
else:
if begin:
print >>fout, "%s\t%s\t%s" %(src1,start1+sub_begin-sequence1[0:sub_begin].count('-'),start1+sub_end-sequence1[0:sub_end].count('-'))
print >>fout, "%s\t%s\t%s" %(src2,start2+sub_begin-sequence2[0:sub_begin].count('-'),start2+sub_end-sequence2[0:sub_end].count('-'))
begin = False
else:
if begin:
print >>fout, "%s\t%s\t%s" %(src1,start1+sub_begin-sequence1[0:sub_begin].count('-'),end1+sub_end-sequence1[0:sub_end].count('-'))
print >>fout, "%s\t%s\t%s" %(src2,start2+sub_begin-sequence2[0:sub_begin].count('-'),end2+sub_end-sequence2[0:sub_end].count('-'))
begin = False
ended = False
def main():
skipped = 0
not_pairwise = 0
try:
maf_reader = bx.align.maf.Reader( open(inp_file, 'r') )
except:
stop_err("Your MAF file appears to be malformed.")
print >>fout, "#Chr\tStart\tEnd"
for block in maf_reader:
if len(block.components) != 2:
not_pairwise += 1
continue
try:
fetchSubs(block)
except:
skipped += 1
if not_pairwise:
print "Skipped %d non-pairwise blocks" %(not_pairwise)
if skipped:
print "Skipped %d blocks" %(skipped)
if __name__ == "__main__":
main()
| volpino/Yeps-EURAC | tools/regVariation/substitutions.py | Python | mit | 2,849 | 0.015444 |
__version__ = '0.1.2'
| dashee87/cluster-flag | clusterflag/__init__.py | Python | mit | 23 | 0 |
from django.shortcuts import render
from django.http import HttpResponse
import json
def services(request):
return render(request, 'services/services.html', {})
| Adventure-Inc/chachas-adventures | services/views.py | Python | apache-2.0 | 167 | 0 |
#!/usr/bin/env python
#
# Generate an input mesh
#
from boututils import DataFile # Wrapper around NetCDF4 libraries
nx = 5 # Minimum is 5: 2 boundary, one evolved
ny = 32 # Minimum 5. Should be divisible by the number of processors (powers of 2 are convenient)
dy = 1. # distance between points in y, in m/g22/lengthunit
ixseps1 = -1
ixseps2 = -1
f = DataFile()
f.open("test-staggered.nc", create=True)
f.write("nx", nx)
f.write("ny", ny)
f.write("dy", dy)
f.write("ixseps1", ixseps1)
f.write("ixseps2", ixseps2)
f.close()
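# A minimal read-back check could look like the sketch below (commented out;
# it assumes the boututils DataFile wrapper exposes a `read` method):
#
# f = DataFile("test-staggered.nc")
# assert f.read("nx") == nx and f.read("ny") == ny
# f.close()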
| bendudson/BOUT | examples/test-staggered/generate.py | Python | gpl-3.0 | 522 | 0.005747 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import math
import itertools
import mxnet as mx
from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, retry
import numpy as np
import random as rnd
from common import setup_module, with_seed, random_seed, teardown
import scipy.stats as ss
import unittest
from mxnet.test_utils import *
def same(a, b):
return np.sum(a != b) == 0
def check_with_device(device, dtype):
# The thresholds chosen for the tests are too loose. We will rely on the other tests to test the samples from the
# generators.
tol = 0.1
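    # Each entry below describes one sampler under test: its ndarray/symbol ops,
    # the scipy pdf/pmf ('pdffunc', where present) used to cross-check the
    # random_pdf_* operators, fixed scalar parameters ('params'), per-element
    # parameter arrays for multi-distribution sampling ('inputs'), and moment
    # checks on the drawn samples ('checks').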
symbols = [
{
'name': 'normal',
'symbol': mx.sym.random.normal,
'ndop': mx.nd.random.normal,
'pdfsymbol': mx.sym.random_pdf_normal,
'pdffunc': ss.norm.pdf,
'discrete': False,
'params': { 'loc': 10.0, 'scale': 0.5 },
'inputs': [ ('loc',[ [ 0.0, 2.5 ], [ -9.75, -7.0 ] ]) , ('scale',[ [ 1.0, 3.7 ], [ 4.2, 1.5 ] ]) ],
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64) - params['loc']), tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)
]
},
{
'name': 'normal_like',
'symbol': mx.sym.random.normal_like,
'ndop': mx.nd.random.normal_like,
'params': { 'loc': 10.0, 'scale': 0.5 },
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64) - params['loc']), tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)
]
},
{
'name': 'randn',
'symbol': mx.sym.random.randn,
'ndop': mx.nd.random.randn,
'params': { 'loc': 10.0, 'scale': 0.5 },
'inputs': [ ('loc',[ [ 0.0, 2.5 ], [ -9.75, -7.0 ] ]) , ('scale',[ [ 1.0, 3.7 ], [ 4.2, 1.5 ] ]) ],
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64) - params['loc']), tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)
]
},
{
'name': 'uniform',
'symbol': mx.sym.random.uniform,
'ndop': mx.nd.random.uniform,
'pdfsymbol': mx.sym.random_pdf_uniform,
'pdffunc': lambda x, low, high: ss.uniform.pdf(x, low, high-low),
'discrete': False,
'params': { 'low': -1.5, 'high': 3.0 },
'inputs': [ ('low', [ [ 0.0, 2.5 ], [ -9.75, -1.0 ] ]) , ('high', [ [ 1.0, 3.7 ], [ 4.2, 10.5 ] ]) ],
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - (params['low'] + params['high']) / 2.0, tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(1.0 / 12.0) * (params['high'] - params['low']), tol)
]
},
{
'name': 'uniform_like',
'symbol': mx.sym.random.uniform_like,
'ndop': mx.nd.random.uniform_like,
'params': { 'low': -1.5, 'high': 3.0 },
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - (params['low'] + params['high']) / 2.0, tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(1.0 / 12.0) * (params['high'] - params['low']), tol)
]
},
{
'name': 'gamma',
'symbol': mx.sym.random.gamma,
'ndop': mx.nd.random.gamma,
'pdfsymbol': mx.sym.random_pdf_gamma,
'pdffunc': lambda x, alpha, beta: ss.gamma.pdf(x, alpha, 0, 1/beta),
'discrete': False,
'params': { 'alpha': 9.0, 'beta': 0.5 },
'inputs': [ ('alpha', [ [ 0.1, 2.5 ], [ 9.75, 11.0 ] ]) , ('beta', [ [ 1.0, 0.7 ], [ 0.5, 0.3 ] ]) ],
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['alpha'] * params['beta'], tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['alpha'] * params['beta'] ** 2), tol)
]
},
{
'name': 'gamma_like',
'symbol': mx.sym.random.gamma_like,
'ndop': mx.nd.random.gamma_like,
'params': { 'alpha': 9.0, 'beta': 0.5 },
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['alpha'] * params['beta'], tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['alpha'] * params['beta'] ** 2), tol)
]
},
{
'name': 'exponential',
'symbol': mx.sym.random.exponential,
'ndop': mx.nd.random.exponential,
'pdfsymbol': mx.sym.random_pdf_exponential,
'pdffunc': lambda x, lam: ss.expon.pdf(x, 0, 1/lam),
'discrete': False,
'params': { 'scale': 1.0/4.0 },
'inputs': [ ('scale', [ [ 1.0/1.0, 1.0/8.5 ], [ 1.0/2.7 , 1.0/0.5 ] ]) ],
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['scale'], tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)
]
},
{
'name': 'exponential_like',
'symbol': mx.sym.random.exponential_like,
'ndop': mx.nd.random.exponential_like,
'params': { 'lam': 4.0 },
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - 1.0/params['lam'], tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - 1.0/params['lam'], tol)
]
},
{
'name': 'poisson',
'symbol': mx.sym.random.poisson,
'ndop': mx.nd.random.poisson,
'pdfsymbol': mx.sym.random_pdf_poisson,
'pdffunc': ss.poisson.pmf,
'discrete': True,
'params': { 'lam': 4.0 },
'inputs': [ ('lam', [ [ 25.0, 8.5 ], [ 2.7 , 0.5 ] ]) ],
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['lam'], tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['lam']), tol)
]
},
{
'name': 'poisson_like',
'symbol': mx.sym.random.poisson_like,
'ndop': mx.nd.random.poisson_like,
'params': { 'lam': 4.0 },
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['lam'], tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['lam']), tol)
]
},
{
'name': 'neg_binomial',
'symbol': mx.sym.random.negative_binomial,
'ndop': mx.nd.random.negative_binomial,
'pdfsymbol': mx.sym.random_pdf_negative_binomial,
'pdffunc': ss.nbinom.pmf,
'discrete': True,
'params': { 'k': 3, 'p': 0.4 },
'inputs': [ ('k', [ [ 3, 4 ], [ 5 , 6 ] ]) , ('p', [ [ 0.4 , 0.77 ], [ 0.5, 0.84 ] ]) ],
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['k'] * (1.0 - params['p']) / params['p'], tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['k'] * (1.0 - params['p']))/params['p'], tol)
]
},
{
'name': 'neg_binomial_like',
'symbol': mx.sym.random.negative_binomial_like,
'ndop': mx.nd.random.negative_binomial_like,
'params': { 'k': 3, 'p': 0.4 },
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['k'] * (1.0 - params['p']) / params['p'], tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['k'] * (1.0 - params['p']))/params['p'], tol)
]
},
{
'name': 'gen_neg_binomial',
'symbol': mx.sym.random.generalized_negative_binomial,
'ndop': mx.nd.random.generalized_negative_binomial,
'pdfsymbol': mx.sym.random_pdf_generalized_negative_binomial,
'pdffunc': lambda x, mu, alpha: ss.nbinom.pmf(x, 1.0/alpha, 1.0/(mu*alpha+1.0)),
'discrete': True,
'params': { 'mu': 2.0, 'alpha': 0.3 },
'inputs': [ ('mu', [ [ 2.0, 2.5 ], [ 1.3, 1.9 ] ]) , ('alpha', [ [ 1.0, 0.1 ], [ 0.2, 0.5 ] ]) ],
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['mu'], tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['mu'] + params['alpha'] * params['mu'] ** 2 ), tol)
]
},
{
'name': 'gen_neg_binomial_like',
'symbol': mx.sym.random.generalized_negative_binomial_like,
'ndop': mx.nd.random.generalized_negative_binomial_like,
'params': { 'mu': 2.0, 'alpha': 0.3 },
'checks': [
('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['mu'], tol),
('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['mu'] + params['alpha'] * params['mu'] ** 2 ), tol)
]
},
]
# Create enough samples such that we get a meaningful distribution.
shape = (500, 500)
# Test pdf on smaller shapes as backward checks will take too long otherwise.
# This must be a subshape of the former one.
pdfshape = (30, 30)
for symbdic in symbols:
name = symbdic['name']
ndop = symbdic['ndop']
# check directly
params = symbdic['params'].copy()
params.update(shape=shape, dtype=dtype, ctx=device)
args = ()
if name == 'randn':
params.pop('shape') # randn does not accept shape param
args = shape
if name.endswith('_like'):
params['data'] = mx.nd.ones(params.pop('shape'),
dtype=params.pop('dtype'),
ctx=params.pop('ctx'))
mx.random.seed(128)
ret1 = ndop(*args, **params).asnumpy()
mx.random.seed(128)
ret2 = ndop(*args, **params).asnumpy()
assert same(ret1, ret2), \
"ndarray test: `%s` should give the same result with the same seed" % name
for check_name, check_func, tol in symbdic['checks']:
assert np.abs(check_func(ret1, params)) < tol, "ndarray test: %s check for `%s` did not pass" % (check_name, name)
# check multi-distribution sampling
if 'inputs' not in symbdic: continue # randn does not support multi-distribution sampling
params = {'shape': shape, 'dtype': dtype, 'ctx': device}
params.update({k : mx.nd.array(v, ctx=device, dtype=dtype) for k, v in symbdic['inputs']})
if name == 'randn':
params.pop('shape') # randn does not accept shape param
args = shape
mx.random.seed(128)
ret1 = ndop(*args, **params).asnumpy()
mx.random.seed(128)
ret2 = ndop(*args, **params).asnumpy()
assert same(ret1, ret2), \
"ndarray test: `%s` should give the same result with the same seed" % name
for i in range(2):
for j in range(2):
stats = {k : v[i][j] for k, v in symbdic['inputs']}
for check_name, check_func, tol in symbdic['checks']:
err = np.abs(check_func(ret2[i,j], stats))
assert err < tol, "%f vs %f: symbolic test: %s check for `%s` did not pass" % (err, tol, check_name, name)
# check symbolic
symbol = symbdic['symbol']
X = mx.sym.Variable("X")
params = symbdic['params'].copy()
params.update(shape=shape, dtype=dtype)
if name.endswith('_like') or name == 'randn':
params['data'] = mx.sym.ones(params.pop('shape'))
Y = symbol(**params) + X
x = mx.nd.zeros(shape, dtype=dtype, ctx=device)
xgrad = mx.nd.zeros(shape, dtype=dtype, ctx=device)
yexec = Y.bind(device, {'X' : x}, {'X': xgrad})
mx.random.seed(128)
yexec.forward(is_train=True)
yexec.backward(yexec.outputs[0])
un1 = (yexec.outputs[0] - x).copyto(device)
assert same(xgrad.asnumpy(), un1.asnumpy())
mx.random.seed(128)
yexec.forward()
un2 = (yexec.outputs[0] - x).copyto(device)
assert same(un1.asnumpy(), un2.asnumpy()), \
"symbolic test: `%s` should give the same result with the same seed" % name
ret1 = un1.asnumpy()
for check_name, check_func, tol in symbdic['checks']:
assert np.abs(check_func(ret1, params)) < tol, "symbolic test: %s check for `%s` did not pass" % (check_name, name)
if name.endswith('_like'): continue
# check multi-distribution sampling
symbol = symbdic['symbol']
params = { 'shape' : shape, 'dtype' : dtype }
single_param = len(symbdic['inputs']) == 1
v1 = mx.sym.Variable('v1')
v2 = mx.sym.Variable('v2')
if name == 'randn':
params.pop('shape') # randn does not accept shape param
args=shape
Y = symbol(v1, **params) if single_param else symbol(*args, loc=v1, scale=v2,**params)
else:
Y = symbol(v1,**params) if single_param else symbol(v1,v2,**params)
bindings = { 'v1' : mx.nd.array(symbdic['inputs'][0][1]) }
if not single_param :
bindings.update({ 'v2' : mx.nd.array(symbdic['inputs'][1][1]) })
yexec = Y.bind(ctx=device, args=bindings)
yexec.forward()
un1 = yexec.outputs[0].copyto(device).asnumpy()
params = {}
for i, r in enumerate(symbdic['inputs'][0][1]):
for j, p1 in enumerate(r):
params.update({ symbdic['inputs'][0][0] : p1 })
if not single_param:
params.update({ symbdic['inputs'][1][0] : symbdic['inputs'][1][1][i][j] })
samples = un1[i,j]
for check_name, check_func, tol in symbdic['checks']:
assert np.abs(check_func(samples, params)) < tol, "symbolic test: %s check for `%s` did not pass" % (check_name, name)
if 'pdfsymbol' not in symbdic: continue # randn not tested for pdf
# check pdfs with only a subset of the generated samples
un1 = np.resize(un1, (un1.shape[0], un1.shape[1], pdfshape[0], pdfshape[1]))
symbol = symbdic['pdfsymbol']
pdffunc = symbdic['pdffunc']
v0 = mx.sym.Variable('v0')
v1 = mx.sym.Variable('v1')
v2 = mx.sym.Variable('v2')
p1 = np.array(symbdic['inputs'][0][1])
p2 = None if single_param else np.array(symbdic['inputs'][1][1])
# Move samples away from boundaries of support
if name == 'gamma' or name == 'exponential':
un1 = np.maximum(un1, 1e-1)
if name == 'uniform':
un1 = np.minimum(np.maximum(un1.reshape((un1.shape[0],un1.shape[1],-1)), p1.reshape((p1.shape[0],p1.shape[1],-1))+1e-4),
p2.reshape((p2.shape[0],p2.shape[1],-1))-1e-4).reshape(un1.shape)
for use_log in [False, True]:
test_pdf = symbol(v0, v1, is_log=use_log) if single_param else symbol(v0, v1, v2, is_log=use_log)
forw_atol = 1e-7 if dtype != np.float16 else 1e-3
forw_rtol = 1e-4 if dtype != np.float16 else 5e-2
backw_atol = 1e-3
backw_rtol = 5e-2
if single_param:
res = pdffunc(un1.reshape((un1.shape[0],un1.shape[1],-1)),
p1.reshape((p1.shape[0],p1.shape[1],-1))).reshape(un1.shape)
if use_log:
res = np.log(res)
check_symbolic_forward(test_pdf, [un1, p1], [res], atol=forw_atol, rtol=forw_rtol, dtype=dtype)
if dtype == np.float64:
grad_nodes = ['v1'] if symbdic['discrete'] else ['v0', 'v1']
check_numeric_gradient(test_pdf, [un1, p1], grad_nodes=grad_nodes, atol=backw_atol, rtol=backw_rtol, dtype=dtype)
else:
res = pdffunc(un1.reshape((un1.shape[0],un1.shape[1],-1)),
p1.reshape((p1.shape[0],p1.shape[1],-1)),
p2.reshape((p2.shape[0],p2.shape[1],-1))).reshape(un1.shape)
if use_log:
res = np.log(res)
check_symbolic_forward(test_pdf, [un1, p1, p2], [res], atol=forw_atol, rtol=forw_rtol, dtype=dtype)
if dtype == np.float64:
grad_nodes = ['v1', 'v2'] if symbdic['discrete'] else ['v0', 'v1', 'v2']
check_numeric_gradient(test_pdf, [un1, p1, p2], grad_nodes=grad_nodes, atol=backw_atol, rtol=backw_rtol, dtype=dtype)
@with_seed(1000)
def test_dirichlet():
num_classes = 2
num = 100
alpha = np.random.uniform(low=0.5, high=2, size=(4, num_classes))
samples = []
results = []
for a in alpha:
v = ss.dirichlet.rvs(a, size=num)
samples.append(v)
results.append(ss.dirichlet.logpdf(v.transpose(), a))
samples = np.concatenate(samples, axis=0).reshape((2, 2, num, num_classes))
results = np.concatenate(results, axis=0).reshape((2, 2, num))
alpha = alpha.reshape((2, 2, num_classes))
for dtype in [np.float32, np.float64]:
forw_atol = 1e-5
forw_rtol = 1e-4
for use_log in [False, True]:
v0 = mx.sym.Variable('v0')
v1 = mx.sym.Variable('v1')
test_pdf = mx.sym.random_pdf_dirichlet(v0, v1, is_log=use_log)
res = results if use_log else np.exp(results)
check_symbolic_forward(test_pdf, [samples, alpha], [res], atol=forw_atol, rtol=forw_rtol, dtype=dtype)
if dtype == np.float64:
backw_atol = 1e-2
backw_rtol = 1e-2
eps = 1e-5
check_numeric_gradient(test_pdf, [samples, alpha], numeric_eps=eps, atol=backw_atol, rtol=backw_rtol, dtype=dtype)
def test_random():
for dtype in [np.float16, np.float32, np.float64]:
check_with_device(mx.context.current_context(), dtype)
# Set seed variously based on `start_seed` and `num_init_seeds`, then set seed finally to `final_seed`
def set_seed_variously(init_seed, num_init_seeds, final_seed):
end_seed = init_seed + num_init_seeds
for seed in range(init_seed, end_seed):
mx.random.seed(seed)
mx.random.seed(final_seed)
return end_seed
# Tests that seed setting of std (non-parallel) rng is synchronous w.r.t. rng use before and after.
@with_seed()
def test_random_seed_setting():
ctx = mx.context.current_context()
seed_to_test = 1234
num_temp_seeds = 25
probs = [0.125, 0.25, 0.25, 0.0625, 0.125, 0.1875]
num_samples = 100000
for dtype in ['float16', 'float32', 'float64']:
seed = set_seed_variously(1, num_temp_seeds, seed_to_test)
samples1 = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx, dtype=dtype),
shape=num_samples)
seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
samples2 = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx, dtype=dtype),
shape=num_samples)
samples1np = samples1.asnumpy()
set_seed_variously(seed, num_temp_seeds, seed_to_test+1)
samples2np = samples2.asnumpy()
assert same(samples1np, samples2np), \
"seed-setting test: `multinomial` should give the same result with the same seed"
# Tests that seed setting of parallel rng is synchronous w.r.t. rng use before and after.
@with_seed()
def test_parallel_random_seed_setting():
ctx = mx.context.current_context()
seed_to_test = 1234
for dtype in ['float16', 'float32', 'float64']:
# Avoid excessive test cpu runtimes
num_temp_seeds = 25 if ctx.device_type == 'gpu' else 1
# To flush out a possible race condition, run multiple times
for _ in range(20):
# Create enough samples such that we get a meaningful distribution.
shape = (200, 200)
params = { 'low': -1.5, 'high': 3.0 }
params.update(shape=shape, dtype=dtype, ctx=ctx)
# check directly
seed = set_seed_variously(1, num_temp_seeds, seed_to_test)
ret1 = mx.nd.random.uniform(**params)
seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
ret2 = mx.nd.random.uniform(**params)
seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
assert same(ret1.asnumpy(), ret2.asnumpy()), \
"ndarray seed-setting test: `uniform` should give the same result with the same seed"
# check symbolic
X = mx.sym.Variable("X")
Y = mx.sym.random.uniform(**params) + X
x = mx.nd.zeros(shape, dtype=dtype, ctx=ctx)
xgrad = mx.nd.zeros(shape, dtype=dtype, ctx=ctx)
yexec = Y.bind(ctx, {'X' : x}, {'X': xgrad})
seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
yexec.forward(is_train=True)
yexec.backward(yexec.outputs[0])
un1 = (yexec.outputs[0] - x).copyto(ctx)
seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
yexec.forward()
set_seed_variously(seed, num_temp_seeds, seed_to_test)
un2 = (yexec.outputs[0] - x).copyto(ctx)
assert same(un1.asnumpy(), un2.asnumpy()), \
"symbolic seed-setting test: `uniform` should give the same result with the same seed"
# Set seed for the context variously based on `start_seed` and `num_init_seeds`, then set seed finally to `final_seed`
def set_seed_variously_for_context(ctx, init_seed, num_init_seeds, final_seed):
end_seed = init_seed + num_init_seeds
for seed in range(init_seed, end_seed):
mx.random.seed(seed, ctx=ctx)
mx.random.seed(final_seed, ctx=ctx)
return end_seed
# Tests that seed setting of std (non-parallel) rng for specific context is synchronous w.r.t. rng use before and after.
@with_seed()
def test_random_seed_setting_for_context():
seed_to_test = 1234
num_temp_seeds = 25
probs = [0.125, 0.25, 0.25, 0.0625, 0.125, 0.1875]
num_samples = 100000
dev_type = mx.context.current_context().device_type
for dtype in ['float16', 'float32', 'float64']:
samples_imp = []
samples_sym = []
# Collect random number samples from the generators of all devices, each seeded with the same number.
for dev_id in range(0, mx.context.num_gpus() if dev_type == 'gpu' else 1):
with mx.Context(dev_type, dev_id):
ctx = mx.context.current_context()
seed = set_seed_variously_for_context(ctx, 1, num_temp_seeds, seed_to_test)
# Check imperative. `multinomial` uses non-parallel rng.
rnds = mx.nd.random.multinomial(data=mx.nd.array(probs, dtype=dtype), shape=num_samples)
samples_imp.append(rnds.asnumpy())
# Check symbolic. `multinomial` uses non-parallel rng.
P = mx.sym.Variable("P")
X = mx.sym.random.multinomial(data=P, shape=num_samples, get_prob=False)
exe = X.bind(ctx, {"P": mx.nd.array(probs, dtype=dtype)})
set_seed_variously_for_context(ctx, seed, num_temp_seeds, seed_to_test)
exe.forward()
samples_sym.append(exe.outputs[0].asnumpy())
# The samples should be identical across different gpu devices.
for i in range(1, len(samples_imp)):
assert same(samples_imp[i - 1], samples_imp[i])
for i in range(1, len(samples_sym)):
assert same(samples_sym[i - 1], samples_sym[i])
# Tests that seed setting of parallel rng for specific context is synchronous w.r.t. rng use before and after.
@with_seed()
def test_parallel_random_seed_setting_for_context():
seed_to_test = 1234
dev_type = mx.context.current_context().device_type
for dtype in ['float16', 'float32', 'float64']:
samples_imp = []
samples_sym = []
# Collect random number samples from the generators of all devices, each seeded with the same number.
for dev_id in range(0, mx.context.num_gpus() if dev_type == 'gpu' else 1):
with mx.Context(dev_type, dev_id):
ctx = mx.context.current_context()
# Avoid excessive test cpu runtimes.
num_temp_seeds = 25 if dev_type == 'gpu' else 1
# To flush out a possible race condition, run multiple times.
for _ in range(20):
# Create enough samples such that we get a meaningful distribution.
shape = (200, 200)
params = { 'low': -1.5, 'high': 3.0 }
params.update(shape=shape, dtype=dtype)
# Check imperative. `uniform` uses parallel rng.
seed = set_seed_variously_for_context(ctx, 1, num_temp_seeds, seed_to_test)
rnds = mx.nd.random.uniform(**params)
samples_imp.append(rnds.asnumpy())
# Check symbolic. `uniform` uses parallel rng.
X = mx.sym.Variable("X")
Y = mx.sym.random.uniform(**params) + X
x = mx.nd.zeros(shape, dtype=dtype)
xgrad = mx.nd.zeros(shape, dtype=dtype)
yexec = Y.bind(ctx, {'X' : x}, {'X': xgrad})
set_seed_variously_for_context(ctx, seed, num_temp_seeds, seed_to_test)
yexec.forward(is_train=True)
yexec.backward(yexec.outputs[0])
samples_sym.append(yexec.outputs[0].asnumpy())
# The samples should be identical across different gpu devices.
for i in range(1, len(samples_imp)):
assert same(samples_imp[i - 1], samples_imp[i])
for i in range(1, len(samples_sym)):
assert same(samples_sym[i - 1], samples_sym[i])
@retry(5)
@with_seed()
def test_sample_multinomial():
for dtype in ['uint8', 'int32', 'float16', 'float32', 'float64']: # output array types
for x in [mx.nd.array([[0,1,2,3,4],[4,3,2,1,0]])/10.0, mx.nd.array([0,1,2,3,4])/10.0]:
dx = mx.nd.ones_like(x)
mx.contrib.autograd.mark_variables([x], [dx])
# Adding rtol and increasing samples needed to pass with seed 2951820647
samples = 10000
with mx.autograd.record():
y, prob = mx.nd.random.multinomial(x, shape=samples, get_prob=True, dtype=dtype)
r = prob * 5
r.backward()
assert(np.dtype(dtype) == y.dtype)
y = y.asnumpy()
x = x.asnumpy()
dx = dx.asnumpy()
            if len(x.shape) == 1:
x = x.reshape((1, x.shape[0]))
dx = dx.reshape(1, dx.shape[0])
y = y.reshape((1, y.shape[0]))
prob = prob.reshape((1, prob.shape[0]))
for i in range(x.shape[0]):
freq = np.bincount(y[i,:].astype('int32'), minlength=5)/np.float32(samples)*x[i,:].sum()
mx.test_utils.assert_almost_equal(freq, x[i], rtol=0.20, atol=1e-1)
rprob = x[i][y[i].astype('int32')]/x[i].sum()
mx.test_utils.assert_almost_equal(np.log(rprob), prob.asnumpy()[i], atol=1e-5)
real_dx = np.zeros((5,))
for j in range(samples):
real_dx[int(y[i][j])] += 5.0 / rprob[j]
mx.test_utils.assert_almost_equal(real_dx, dx[i, :], rtol=1e-4, atol=1e-5)
for dtype in ['uint8', 'float16', 'float32']:
# Bound check for the output data types. 'int32' and 'float64' require large memory so are skipped.
x = mx.nd.zeros(2 ** 25) # Larger than the max integer in float32 without precision loss.
bound_check = False
try:
y = mx.nd.random.multinomial(x, dtype=dtype)
except mx.MXNetError as e:
bound_check = True
assert bound_check
# Test the generators with the chi-square testing
@with_seed()
def test_normal_generator():
ctx = mx.context.current_context()
samples = 1000000
# Default success rate is 0.25, so 2 successes of 8 trials will pass.
trials = 8
num_buckets = 5
for dtype in ['float16', 'float32', 'float64']:
for mu, sigma in [(0.0, 1.0), (1.0, 5.0)]:
buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.norm.ppf(x, mu, sigma), num_buckets)
# Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly
buckets = np.array(buckets, dtype=dtype).tolist()
probs = [(ss.norm.cdf(buckets[i][1], mu, sigma) -
ss.norm.cdf(buckets[i][0], mu, sigma)) for i in range(num_buckets)]
generator_mx = lambda x: mx.nd.random.normal(mu, sigma, shape=x, ctx=ctx, dtype=dtype).asnumpy()
verify_generator(generator=generator_mx, buckets=buckets, probs=probs,
nsamples=samples, nrepeat=trials)
generator_mx_same_seed =\
lambda x: np.concatenate(
[mx.nd.random.normal(mu, sigma, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
for _ in range(10)])
verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs,
nsamples=samples, nrepeat=trials)
@with_seed()
def test_uniform_generator():
ctx = mx.context.current_context()
for dtype in ['float16', 'float32', 'float64']:
for low, high in [(-1.0, 1.0), (1.0, 3.0)]:
scale = high - low
buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), 5)
# Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly
buckets = np.array(buckets, dtype=dtype).tolist()
probs = [(buckets[i][1] - buckets[i][0])/scale for i in range(5)]
generator_mx = lambda x: mx.nd.random.uniform(low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()
verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
generator_mx_same_seed = \
lambda x: np.concatenate(
[mx.nd.random.uniform(low, high, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
for _ in range(10)])
verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
@with_seed()
def test_gamma_generator():
success_rate = 0.05
ctx = mx.context.current_context()
for dtype in ['float16', 'float32', 'float64']:
for kappa, theta in [(0.5, 1.0), (1.0, 5.0)]:
buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.gamma.ppf(x, a=kappa, loc=0, scale=theta), 5)
generator_mx = lambda x: mx.nd.random.gamma(kappa, theta, shape=x, ctx=ctx, dtype=dtype).asnumpy()
verify_generator(generator=generator_mx, buckets=buckets, probs=probs, success_rate=success_rate)
generator_mx_same_seed = \
lambda x: np.concatenate(
[mx.nd.random.gamma(kappa, theta, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
for _ in range(10)])
verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, success_rate=success_rate)
@with_seed()
def test_exponential_generator():
ctx = mx.context.current_context()
for dtype in ['float16', 'float32', 'float64']:
for scale in [0.1, 1.0]:
buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.expon.ppf(x, loc=0, scale=scale), 5)
generator_mx = lambda x: mx.nd.random.exponential(scale, shape=x, ctx=ctx, dtype=dtype).asnumpy()
verify_generator(generator=generator_mx, buckets=buckets, probs=probs, success_rate=0.20)
generator_mx_same_seed = \
lambda x: np.concatenate(
[mx.nd.random.exponential(scale, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
for _ in range(10)])
verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, success_rate=0.20)
@with_seed()
def test_poisson_generator():
ctx = mx.context.current_context()
for dtype in ['float16', 'float32', 'float64']:
for lam in [1, 10]:
buckets = [(-1.0, lam - 0.5), (lam - 0.5, 2 * lam + 0.5), (2 * lam + 0.5, np.inf)]
probs = [ss.poisson.cdf(bucket[1], lam) - ss.poisson.cdf(bucket[0], lam) for bucket in buckets]
generator_mx = lambda x: mx.nd.random.poisson(lam, shape=x, ctx=ctx, dtype=dtype).asnumpy()
verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
generator_mx_same_seed = \
lambda x: np.concatenate(
[mx.nd.random.poisson(lam, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
for _ in range(10)])
verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
@with_seed()
def test_negative_binomial_generator():
ctx = mx.context.current_context()
for dtype in ['float16', 'float32', 'float64']:
success_num = 2
success_prob = 0.2
buckets = [(-1.0, 2.5), (2.5, 5.5), (5.5, 8.5), (8.5, np.inf)]
probs = [ss.nbinom.cdf(bucket[1], success_num, success_prob) -
ss.nbinom.cdf(bucket[0], success_num, success_prob) for bucket in buckets]
generator_mx = lambda x: mx.nd.random.negative_binomial(success_num, success_prob,
shape=x, ctx=ctx, dtype=dtype).asnumpy()
verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
generator_mx_same_seed = \
lambda x: np.concatenate(
[mx.nd.random.negative_binomial(success_num, success_prob, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
for _ in range(10)])
verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
        # Also test the Gamma-Poisson mixture
alpha = 1.0 / success_num
mu = (1.0 - success_prob) / success_prob / alpha
generator_mx = lambda x: mx.nd.random.generalized_negative_binomial(mu, alpha,
shape=x, ctx=ctx, dtype=dtype).asnumpy()
verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
generator_mx_same_seed = \
lambda x: np.concatenate(
[mx.nd.random.generalized_negative_binomial(mu, alpha, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
for _ in range(10)])
verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
@with_seed()
def test_multinomial_generator():
# This test fails with dtype float16 if the probabilities themselves cannot be
# well-represented in float16. When the float16 random picks are assigned to buckets,
    # only certain bucket-probabilities are possible. Here we map the desired probabilities
# (e.g. 0.1) to nearby float16 probabilities (e.g. 0.10009766) that are achievable.
def quantize_probs(probs, dtype):
if dtype == 'float16':
# float16 has a 10-bit fraction plus an implicit leading 1, so all probabilities
# of the form N/2^11 (where N is an integer) are representable.
num_quanta = 2048.0
quantized_probs = np.rint(np.array(probs) * num_quanta) / num_quanta
# Ensure probabilities add to 1
quantized_probs[0] += 1.0 - quantized_probs.sum()
else:
# no need to quantize probs with this data precision
quantized_probs = np.array(probs)
return quantized_probs
ctx = mx.context.current_context()
probs = [0.1, 0.2, 0.3, 0.05, 0.15, 0.2]
samples = 1000000
trials = 5
buckets = list(range(6))
for dtype in ['float16', 'float32', 'float64']:
quantized_probs = quantize_probs(probs, dtype)
generator_mx = lambda x: mx.nd.random.multinomial(data=mx.nd.array(quantized_probs, ctx=ctx, dtype=dtype),
shape=x).asnumpy()
        # success_rate was set to 0.15 in PR #13498, after which this test became flaky;
        # both of the previous issues (#14457, #14158) failed with success_rate 0.25.
        # In verify_generator (test_utils.py) an error is raised when
        # success_num (1) < nrepeat (5) * success_rate (0.25),
        # so lowering 0.25 -> 0.2 handles these edge cases while keeping the check strict.
verify_generator(generator=generator_mx, buckets=buckets, probs=quantized_probs,
nsamples=samples, nrepeat=trials, success_rate=0.20)
generator_mx_same_seed = \
lambda x: np.concatenate(
[mx.nd.random.multinomial(data=mx.nd.array(quantized_probs, ctx=ctx, dtype=dtype),
shape=x // 10).asnumpy()
for _ in range(10)])
verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=quantized_probs,
nsamples=samples, nrepeat=trials, success_rate=0.20)
@with_seed()
def test_with_random_seed():
ctx = mx.context.current_context()
size = 100
shape = (size,)
def check_same(x, y, name):
assert same(x, y), \
"%s rng should give the same result with the same seed" % name
def check_diff(x, y, name):
assert not same(x, y), \
"%s rng should give different results with different seeds" % name
# generate python, numpy and mxnet datasets with the given seed
def gen_data(seed=None):
with random_seed(seed):
python_data = [rnd.random() for _ in range(size)]
np_data = np.random.rand(size)
mx_data = mx.random.uniform(shape=shape, ctx=ctx).asnumpy()
return (seed, python_data, np_data, mx_data)
# check data, expecting them to be the same or different based on the seeds
def check_data(a, b):
seed_a = a[0]
seed_b = b[0]
if seed_a == seed_b and seed_a is not None:
check_same(a[1], b[1], 'python')
check_same(a[2], b[2], 'numpy')
check_same(a[3], b[3], 'mxnet')
else:
check_diff(a[1], b[1], 'python')
check_diff(a[2], b[2], 'numpy')
check_diff(a[3], b[3], 'mxnet')
# 5 tests that include a duplicated seed 1 and randomizing seed None
seeds = [1, 2, 1, None, None]
data = [gen_data(seed) for seed in seeds]
# Add more complicated test case scenarios
with random_seed(1):
seeds.append(None)
data.append(gen_data(None))
with random_seed(2):
seeds.append(None)
data.append(gen_data(None))
with random_seed():
seeds.append(1)
data.append(gen_data(1))
with random_seed():
seeds.append(2)
data.append(gen_data(2))
with random_seed(1):
seeds.append(2)
data.append(gen_data(2))
num_seeds = len(seeds)
for i in range(0, num_seeds-1):
for j in range(i+1, num_seeds):
check_data(data[i],data[j])
@with_seed()
def test_random_seed():
shape = (5, 5)
seed = rnd.randint(-(1 << 31), (1 << 31))
def _assert_same_mx_arrays(a, b):
assert len(a) == len(b)
for a_i, b_i in zip(a, b):
assert (a_i.asnumpy() == b_i.asnumpy()).all()
N = 100
mx.random.seed(seed)
v1 = [mx.random.uniform(shape=shape) for _ in range(N)]
mx.random.seed(seed)
v2 = [mx.random.uniform(shape=shape) for _ in range(N)]
_assert_same_mx_arrays(v1, v2)
try:
long
mx.random.seed(long(seed))
v3 = [mx.random.uniform(shape=shape) for _ in range(N)]
_assert_same_mx_arrays(v1, v3)
except NameError:
pass
@with_seed()
def test_unique_zipfian_generator():
ctx = mx.context.current_context()
if ctx.device_type == 'cpu':
num_sampled = 8192
range_max = 793472
batch_size = 4
op = mx.nd._internal._sample_unique_zipfian
classes, num_trials = op(range_max, shape=(batch_size, num_sampled))
for i in range(batch_size):
num_trial = num_trials[i].asscalar()
# test uniqueness
assert np.unique(classes[i].asnumpy()).size == num_sampled
# test num trials. reference count obtained from pytorch implementation
assert num_trial > 14500
assert num_trial < 17000
@with_seed()
def test_zipfian_generator():
# dummy true classes
num_true = 5
num_sampled = 1000
range_max = 20
def compute_expected_prob():
# P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
classes = mx.nd.arange(0, range_max)
expected_counts = ((classes + 2).log() - (classes + 1).log()) / np.log(range_max + 1)
return expected_counts
exp_cnt = compute_expected_prob() * num_sampled
# test ndarray
true_classes = mx.nd.random.uniform(0, range_max, shape=(num_true,)).astype('int32')
sampled_classes, exp_cnt_true, exp_cnt_sampled = mx.nd.contrib.rand_zipfian(true_classes, num_sampled, range_max)
mx.test_utils.assert_almost_equal(exp_cnt_sampled.asnumpy(), exp_cnt[sampled_classes].asnumpy(), rtol=1e-1, atol=1e-2)
mx.test_utils.assert_almost_equal(exp_cnt_true.asnumpy(), exp_cnt[true_classes].asnumpy(), rtol=1e-1, atol=1e-2)
# test symbol
true_classes_var = mx.sym.var('true_classes')
outputs = mx.sym.contrib.rand_zipfian(true_classes_var, num_sampled, range_max)
outputs = mx.sym.Group(outputs)
executor = outputs.bind(mx.context.current_context(), {'true_classes' : true_classes})
executor.forward()
sampled_classes, exp_cnt_true, exp_cnt_sampled = executor.outputs
mx.test_utils.assert_almost_equal(exp_cnt_sampled.asnumpy(), exp_cnt[sampled_classes].asnumpy(), rtol=1e-1, atol=1e-2)
mx.test_utils.assert_almost_equal(exp_cnt_true.asnumpy(), exp_cnt[true_classes].asnumpy(), rtol=1e-1, atol=1e-2)
# Issue #10277 (https://github.com/apache/incubator-mxnet/issues/10277) discusses this test.
@with_seed()
def test_shuffle():
def check_first_axis_shuffle(arr):
stride = int(arr.size / arr.shape[0])
column0 = arr.reshape((arr.size,))[::stride]
seq = mx.nd.arange(0, arr.size - stride + 1, stride, ctx=arr.context)
assert (column0.sort() == seq).prod() == 1
# Check for ascending flattened-row sequences for 2D or greater inputs.
if stride > 1:
ascending_seq = mx.nd.arange(0, stride, ctx=arr.context)
equalized_columns = arr.reshape((arr.shape[0], stride)) - ascending_seq
column0_2d = column0.reshape((arr.shape[0],1))
assert (column0_2d == equalized_columns).prod() == 1
# This tests that the shuffling is along the first axis with `repeat1` number of shufflings
# and the outcomes are uniformly distributed with `repeat2` number of shufflings.
    # Note that the number of samples (`repeat2`) needed to verify the uniformity of the distribution
# of the outcomes grows factorially with the length of the first axis of the array `data`.
# So we have to settle down with small arrays in practice.
# `data` must be a consecutive sequence of integers starting from 0 if it is flattened.
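    # For instance, a length-3 first axis has 3! = 6 possible orderings; with
    # `repeat2` = 40000 draws each ordering should appear roughly 6667 times
    # (relative frequency 1/6 ~= 0.167), and the check below allows at most an
    # absolute deviation of 0.01 from that frequency.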
def testSmall(data, repeat1, repeat2):
# Check that the shuffling is along the first axis.
# The order of the elements in each subarray must not change.
# This takes long time so `repeat1` need to be small.
for i in range(repeat1):
ret = mx.nd.random.shuffle(data)
check_first_axis_shuffle(ret)
# Count the number of each different outcome.
# The sequence composed of the first elements of the subarrays is enough to discriminate
# the outcomes as long as the order of the elements in each subarray does not change.
count = {}
stride = int(data.size / data.shape[0])
for i in range(repeat2):
ret = mx.nd.random.shuffle(data)
h = str(ret.reshape((ret.size,))[::stride])
c = count.get(h, 0)
count[h] = c + 1
# Check the total number of possible outcomes.
# If `repeat2` is not large enough, this could fail with high probability.
assert len(count) == math.factorial(data.shape[0])
# The outcomes must be uniformly distributed.
# If `repeat2` is not large enough, this could fail with high probability.
for p in itertools.permutations(range(0, data.size - stride + 1, stride)):
err = abs(1. * count[str(mx.nd.array(p))] / repeat2 - 1. / math.factorial(data.shape[0]))
assert err < 0.01, "The absolute error {} is larger than the tolerance.".format(err)
# Check symbol interface
a = mx.sym.Variable('a')
b = mx.sym.random.shuffle(a)
c = mx.sym.random.shuffle(data=b, name='c')
d = mx.sym.sort(c, axis=0)
assert (d.eval(a=data, ctx=mx.current_context())[0] == data).prod() == 1
    # This test is weaker than `testSmall` and is intended for larger arrays.
    # `repeat` should be much smaller than the factorial of `data.shape[0]`.
# `data` must be a consecutive sequence of integers starting from 0 if it is flattened.
def testLarge(data, repeat):
# Check that the shuffling is along the first axis
# and count the number of different outcomes.
stride = int(data.size / data.shape[0])
count = {}
for i in range(repeat):
ret = mx.nd.random.shuffle(data)
check_first_axis_shuffle(ret)
h = str(ret.reshape((ret.size,))[::stride])
c = count.get(h, 0)
count[h] = c + 1
# The probability of duplicated outcomes is very low for large arrays.
assert len(count) == repeat
# Test small arrays with different shapes
testSmall(mx.nd.arange(0, 3), 100, 40000)
testSmall(mx.nd.arange(0, 9).reshape((3, 3)), 100, 40000)
testSmall(mx.nd.arange(0, 18).reshape((3, 2, 3)), 100, 40000)
# Test larger arrays
testLarge(mx.nd.arange(0, 100000).reshape((10, 10000)), 10)
testLarge(mx.nd.arange(0, 100000).reshape((10000, 10)), 10)
testLarge(mx.nd.arange(0, 100000), 10)
@with_seed()
def test_randint():
dtypes = ['int32', 'int64']
for dtype in dtypes:
params = {
'low': -1,
'high': 3,
'shape' : (500, 500),
'dtype' : dtype,
'ctx' : mx.context.current_context()
}
mx.random.seed(128)
ret1 = mx.nd.random.randint(**params).asnumpy()
mx.random.seed(128)
ret2 = mx.nd.random.randint(**params).asnumpy()
        assert same(ret1, ret2), \
                "ndarray test: `randint` should give the same result with the same seed"
@with_seed()
def test_randint_extremes():
a = mx.nd.random.randint(dtype='int64', low=50000000, high=50000010, ctx=mx.context.current_context())
assert a>=50000000 and a<=50000010
@with_seed()
def test_randint_generator():
ctx = mx.context.current_context()
for dtype in ['int32', 'int64']:
for low, high in [(50000000, 50001000),(-50000100,-50000000),(-500,199)]:
scale = high - low
buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), 5)
# Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly
buckets = np.array(buckets, dtype=dtype).tolist()
probs = [(buckets[i][1] - buckets[i][0]) / float(scale) for i in range(5)]
generator_mx = lambda x: mx.nd.random.randint(low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()
verify_generator(generator=generator_mx, buckets=buckets, probs=probs, nrepeat=100)
# Scipy uses alpha = 0.01 for testing discrete distribution generator but we are using default alpha=0.05 (higher threshold ensures robustness)
# Refer - https://github.com/scipy/scipy/blob/9f12af697763fb5f9767d5cb1280ce62456a3974/scipy/stats/tests/test_discrete_basic.py#L45
generator_mx_same_seed = \
lambda x: np.concatenate(
[mx.nd.random.randint(low, high, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
for _ in range(10)])
verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, nrepeat=100)
@with_seed()
def test_randint_without_dtype():
a = mx.nd.random.randint(low=50000000, high=50000010, ctx=mx.context.current_context())
assert a.dtype == np.int32
@with_seed()
def test_sample_multinomial_num_outputs():
ctx = mx.context.current_context()
probs = [[0.125, 0.25, 0.25], [0.0625, 0.125, 0.1875]]
out = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx), shape=10000, get_prob=False)
assert isinstance(out, mx.nd.NDArray)
out = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx), shape=10000, get_prob=True)
assert isinstance(out, list)
assert len(out) == 2
if __name__ == '__main__':
import nose
nose.runmodule()
| reminisce/mxnet | tests/python/unittest/test_random.py | Python | apache-2.0 | 50,683 | 0.009569 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.exceptions import StackStormBaseException
class StackStormDBObjectNotFoundError(StackStormBaseException):
pass
class StackStormDBObjectMalformedError(StackStormBaseException):
pass
class StackStormDBObjectConflictError(StackStormBaseException):
"""
Exception that captures a DB object conflict error.
"""
def __init__(self, message, conflict_id, model_object):
super(StackStormDBObjectConflictError, self).__init__(message)
self.conflict_id = conflict_id
self.model_object = model_object
| punalpatel/st2 | st2common/st2common/exceptions/db.py | Python | apache-2.0 | 1,337 | 0 |
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Intel
#
# Author: Shuangtai Tian <shuangtai.tian@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import oslo.messaging
from ceilometer import plugin
OPTS = [
cfg.StrOpt('nova_control_exchange',
default='nova',
help="Exchange name for Nova notifications."),
]
cfg.CONF.register_opts(OPTS)
class ComputeNotificationBase(plugin.NotificationBase):
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo.messaging.Target defining the exchange and
topics to be connected for this plugin.
"""
return [oslo.messaging.Target(topic=topic,
exchange=conf.nova_control_exchange)
for topic in conf.notification_topics]
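    # With the default options, get_targets() typically yields one
    # oslo.messaging.Target per entry in conf.notification_topics (commonly
    # ['notifications']) on the 'nova' exchange; the exact topics depend on the
    # deployment's configuration.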
| tanglei528/ceilometer | ceilometer/compute/notifications/__init__.py | Python | apache-2.0 | 1,338 | 0 |
# coding: utf-8
from datetime import timedelta as td
import json
from django.core import mail
from django.utils.timezone import now
from hc.api.models import Channel, Check, Notification, Ping
from hc.test import BaseTestCase
class NotifyEmailTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.check = Check(project=self.project)
self.check.name = "Daily Backup"
self.check.desc = "Line 1\nLine2"
self.check.tags = "foo bar"
self.check.status = "down"
self.check.last_ping = now() - td(minutes=61)
self.check.n_pings = 112233
self.check.save()
self.ping = Ping(owner=self.check)
self.ping.remote_addr = "1.2.3.4"
self.ping.body = "Body Line 1\nBody Line 2"
self.ping.save()
self.channel = Channel(project=self.project)
self.channel.kind = "email"
self.channel.value = "alice@example.org"
self.channel.email_verified = True
self.channel.save()
self.channel.checks.add(self.check)
def test_email(self):
self.channel.notify(self.check)
n = Notification.objects.get()
self.assertEqual(n.error, "")
# And email should have been sent
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to[0], "alice@example.org")
self.assertEqual(email.extra_headers["X-Status-Url"], n.status_url())
self.assertTrue("List-Unsubscribe" in email.extra_headers)
self.assertTrue("List-Unsubscribe-Post" in email.extra_headers)
html = email.alternatives[0][0]
self.assertIn("Daily Backup", html)
self.assertIn("Line 1<br>Line2", html)
self.assertIn("Alices Project", html)
self.assertIn("foo</code>", html)
self.assertIn("bar</code>", html)
self.assertIn("1 day", html)
self.assertIn("from 1.2.3.4", html)
self.assertIn("112233", html)
self.assertIn("Body Line 1<br>Body Line 2", html)
# Check's code must not be in the html
self.assertNotIn(str(self.check.code), html)
# Check's code must not be in the plain text body
self.assertNotIn(str(self.check.code), email.body)
def test_it_shows_cron_schedule(self):
self.check.kind = "cron"
self.check.schedule = "0 18-23,0-8 * * *"
self.check.save()
self.channel.notify(self.check)
email = mail.outbox[0]
html = email.alternatives[0][0]
self.assertIn("<code>0 18-23,0-8 * * *</code>", html)
def test_it_truncates_long_body(self):
self.ping.body = "X" * 10000 + ", and the rest gets cut off"
self.ping.save()
self.channel.notify(self.check)
email = mail.outbox[0]
html = email.alternatives[0][0]
self.assertIn("[truncated]", html)
self.assertNotIn("the rest gets cut off", html)
def test_it_handles_missing_ping_object(self):
self.ping.delete()
self.channel.notify(self.check)
email = mail.outbox[0]
html = email.alternatives[0][0]
self.assertIn("Daily Backup", html)
def test_it_handles_missing_profile(self):
self.channel.value = "alice+notifications@example.org"
self.channel.save()
self.channel.notify(self.check)
email = mail.outbox[0]
self.assertEqual(email.to[0], "alice+notifications@example.org")
html = email.alternatives[0][0]
self.assertIn("Daily Backup", html)
self.assertNotIn("Projects Overview", html)
def test_email_transport_handles_json_value(self):
payload = {"value": "alice@example.org", "up": True, "down": True}
self.channel.value = json.dumps(payload)
self.channel.save()
self.channel.notify(self.check)
# And email should have been sent
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to[0], "alice@example.org")
def test_it_reports_unverified_email(self):
self.channel.email_verified = False
self.channel.save()
self.channel.notify(self.check)
# If an email is not verified, it should say so in the notification:
n = Notification.objects.get()
self.assertEqual(n.error, "Email not verified")
def test_email_checks_up_down_flags(self):
payload = {"value": "alice@example.org", "up": True, "down": False}
self.channel.value = json.dumps(payload)
self.channel.save()
self.channel.notify(self.check)
# This channel should not notify on "down" events:
self.assertEqual(Notification.objects.count(), 0)
self.assertEqual(len(mail.outbox), 0)
def test_email_handles_amperstand(self):
self.check.name = "Foo & Bar"
self.check.save()
self.channel.notify(self.check)
email = mail.outbox[0]
self.assertEqual(email.subject, "DOWN | Foo & Bar")
| iphoting/healthchecks | hc/api/tests/test_notify_email.py | Python | bsd-3-clause | 5,005 | 0 |
from __future__ import unicode_literals
from django.apps import AppConfig
class ToolboxConfig(AppConfig):
name = 'toolbox'
| california-civic-data-coalition/django-calaccess-downloads-website | toolbox/apps.py | Python | mit | 130 | 0 |
#!/usr/bin/env python
from urlparse import urlparse
from socket import gethostbyname
from spam import DomainInexistentException
class SpamHausChecker(object):
"""spam checker using spamhaus"""
IS_SPAM = 1
IS_NOT_SPAM = 2
def _query_spamhaus(self, spamhaus_zone):
try:
return gethostbyname(spamhaus_zone)
except Exception:
return None
def _resolve(self, domain):
try:
return gethostbyname(domain)
except Exception:
return None
def _build_spamhaus_zone(self, ip):
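        # Builds the reversed-octet lookup name, e.g. "93.184.216.34"
        # becomes "34.216.184.93.zen.spamhaus.org".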
ip_segments = ip.split(".")
ip_segments.reverse()
return ".".join(ip_segments) + ".zen.spamhaus.org"
def _decode_spamhaus(self, spamhaus_result):
if spamhaus_result:
return self.IS_SPAM
else:
return self.IS_NOT_SPAM
def check_url(self, url):
"""check an url"""
domain = urlparse(url).netloc
return self.check_domain(domain)
def check_domain(self, domain):
"""check a domain"""
domain = domain[domain.find('@')+1:] # remove user info
if domain.count(":") > 0:
domain = domain[:domain.find(':')] # remove port info
ip = self._resolve(domain)
if not ip:
raise DomainInexistentException
spamhaus_zone = self._build_spamhaus_zone(ip)
spamhaus_result = self._query_spamhaus(spamhaus_zone)
return self._decode_spamhaus(spamhaus_result)
def is_spam(self, url):
"""shortcut for check_url == IS_SPAM"""
return self.check_url(url) == self.IS_SPAM
def is_not_spam(self, url):
"""shortcut for check_url == IS_NOT_SPAM"""
return self.check_url(url) == self.IS_NOT_SPAM
| fmarani/spam | spam/spamhaus.py | Python | lgpl-3.0 | 1,760 | 0.001705 |
import gtk
import treetohtml
from mensajes import info, yesno
from datetime import date, datetime
from articulos import DlgArticulo
from articulos_produccion import ArticulosEnProduccion
from modelo import Model
from comunes import punto_coma, coma_punto, caracter_a_logico, logico_a_caracter, calcular_iva_venta, calcular_precio_neto, calcular_precio_venta, calcular_utilidad
class ActualizarPrecios:
def main(self):
gtk.main()
return 0
def __init__(self, padre=None):
builder = gtk.Builder()
builder.add_from_file('dlgActualizacionPrecios.glade')
builder.connect_signals(self)
self.dialogo = builder.get_object('dialogo')
self.scroll = builder.get_object('scroll_window')
self.tree = builder.get_object('vista')
self.lista = builder.get_object('lista')
self.opcion_algunos = builder.get_object('algunos')
self.opcion_todos = builder.get_object('todos')
self.dialogo.show()
def on_todos_group_changed(self, *args):
pass
def on_algunos_group_changed(self, *args):
if self.opcion_algunos.get_active() == 1:
self.scroll.set_visible(True)
self.tree.set_visible(True)
def on_aceptar_clicked(self, *args):
pass
def on_salir_clicked(self, *args):
self.on_dialogo_destroy()
def on_dialogo_destroy(self, *args):
self.dialogo.destroy()
if __name__ == '__main__':
ActualizarPrecios().main()
| jehomez/pymeadmin | actualizacion_de_precios.py | Python | gpl-2.0 | 1,494 | 0.003347 |
import base64
import datetime
import json
from M2Crypto import EVP, X509, Rand
class TicketEncoder():
@staticmethod
def _formatDate(d):
return d.strftime("%Y%m%d%H%M%S")
def __init__(self, cert, key, lifetime=5):
self._lifetime = lifetime
self._x509 = X509.load_cert(cert)
self._pkey = EVP.load_key(key)
def encode(self, data):
d = {
'salt': base64.b64encode(Rand.rand_bytes(8)),
'digest': 'sha1',
'validFrom': self._formatDate(datetime.datetime.utcnow()),
'validTo': self._formatDate(
datetime.datetime.utcnow() + datetime.timedelta(
seconds=self._lifetime
)
),
'data': data
}
self._pkey.reset_context(md=d['digest'])
self._pkey.sign_init()
fields = []
for k, v in d.items():
fields.append(k)
self._pkey.sign_update(v)
d['signedFields'] = ','.join(fields)
d['signature'] = base64.b64encode(self._pkey.sign_final())
d['certificate'] = self._x509.as_pem()
return base64.b64encode(json.dumps(d))
class TicketDecoder():
_peer = None
_ca = None
@staticmethod
def _parseDate(d):
return datetime.datetime.strptime(d, '%Y%m%d%H%M%S')
@staticmethod
def _verifyCertificate(ca, x509):
if x509.verify(ca.get_pubkey()) == 0:
raise ValueError('Untrusted certificate')
if not (
x509.get_not_before().get_datetime().replace(tzinfo=None) <=
datetime.datetime.utcnow() <=
x509.get_not_after().get_datetime().replace(tzinfo=None)
):
raise ValueError('Certificate expired')
def __init__(self, ca, eku, peer=None):
self._eku = eku
if peer is not None:
self._peer = X509.load_cert_string(peer)
if ca is not None:
self._ca = X509.load_cert(ca)
def decode(self, ticket):
decoded = json.loads(base64.b64decode(ticket))
if self._peer is not None:
x509 = self._peer
else:
x509 = X509.load_cert_string(
decoded['certificate'].encode('utf8')
)
if self._ca is not None:
self._verifyCertificate(self._ca, x509)
if self._eku is not None:
if self._eku not in x509.get_ext(
'extendedKeyUsage'
).get_value().split(','):
raise ValueError('Certificate is not authorized for action')
signedFields = [s.strip() for s in decoded['signedFields'].split(',')]
if len(
set(['salt', 'data']) &
set(signedFields)
) == 0:
raise ValueError('Invalid ticket')
pkey = x509.get_pubkey()
pkey.reset_context(md=decoded['digest'])
pkey.verify_init()
for field in signedFields:
pkey.verify_update(decoded[field].encode('utf8'))
if pkey.verify_final(
base64.b64decode(decoded['signature'])
) != 1:
raise ValueError('Invalid ticket signature')
if not (
self._parseDate(decoded['validFrom']) <=
datetime.datetime.utcnow() <=
self._parseDate(decoded['validTo'])
):
raise ValueError('Ticket life time expired')
return decoded['data']
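# Minimal usage sketch (file paths and payload are illustrative only):
#   encoder = TicketEncoder('/etc/pki/cert.pem', '/etc/pki/key.pem', lifetime=30)
#   ticket = encoder.encode('payload-to-sign')
#   decoder = TicketDecoder('/etc/pki/ca.pem', eku=None)
#   data = decoder.decode(ticket)  # raises ValueError if invalid or expired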
# vim: expandtab tabstop=4 shiftwidth=4
| walteryang47/ovirt-engine | packaging/pythonlib/ovirt_engine/ticket.py | Python | apache-2.0 | 3,464 | 0 |
import re
from LUIObject import LUIObject
from LUISprite import LUISprite
from LUILabel import LUILabel
from LUIInitialState import LUIInitialState
from LUILayouts import LUIHorizontalStretchedLayout
__all__ = ["LUIInputField"]
class LUIInputField(LUIObject):
""" Simple input field, accepting text input. This input field supports
entering text and navigating. Selecting text is (currently) not supported.
The input field also supports various keyboard shortcuts:
[pos1] Move to the beginning of the text
[end] Move to the end of the text
[arrow_left] Move one character to the left
[arrow_right] Move one character to the right
[ctrl] + [arrow_left] Move to the left, skipping over words
[ctrl] + [arrow_right] Move to the right, skipping over words
[escape] Un-focus input element
"""
    re_skip = re.compile(r"\W*\w+\W")
def __init__(self, parent=None, width=200, placeholder=u"Enter some text ..", value=u"", **kwargs):
""" Constructs a new input field. An input field always needs a width specified """
LUIObject.__init__(self, x=0, y=0, solid=True)
self.set_width(width)
self._layout = LUIHorizontalStretchedLayout(parent=self, prefix="InputField", width="100%")
# Container for the text
self._text_content = LUIObject(self)
self._text_content.margin = (5, 7, 5, 7)
self._text_content.clip_bounds = (0,0,0,0)
self._text_content.set_size("100%", "100%")
# Scroller for the text, so we can move right and left
self._text_scroller = LUIObject(parent=self._text_content)
self._text_scroller.center_vertical = True
self._text = LUILabel(parent=self._text_scroller, text="")
# Cursor for the current position
self._cursor = LUISprite(self._text_scroller, "blank", "skin", x=0, y=0, w=2, h=15)
self._cursor.color = (0.5, 0.5, 0.5)
self._cursor.margin.top = 2
self._cursor.z_offset = 20
self._cursor_index = 0
self._cursor.hide()
self._value = value
# Placeholder text, shown when out of focus and no value exists
self._placeholder = LUILabel(parent=self._text_content, text=placeholder, shadow=False,
center_vertical=True, alpha=0.2)
# Various states
self._tickrate = 1.0
self._tickstart = 0.0
self._render_text()
if parent is not None:
self.parent = parent
LUIInitialState.init(self, kwargs)
@property
def value(self):
""" Returns the value of the input field """
return self._value
@value.setter
def value(self, new_value):
""" Sets the value of the input field """
self._value = new_value
self._render_text()
self.trigger_event("changed", self._value)
def clear(self):
""" Clears the input value """
self.value = u""
@property
def cursor_pos(self):
""" Set the cursor position """
return self._cursor_index
@cursor_pos.setter
def cursor_pos(self, pos):
""" Set the cursor position """
if pos >= 0:
self._cursor_index = max(0, min(len(self._value), pos))
else:
self._cursor_index = max(len(self._value) + pos + 1, 0)
self._reset_cursor_tick()
self._render_text()
def on_tick(self, event):
""" Tick handler, gets executed every frame """
frame_time = globalClock.get_frame_time() - self._tickstart
show_cursor = frame_time % self._tickrate < 0.5 * self._tickrate
if show_cursor:
self._cursor.color = (0.5, 0.5, 0.5, 1)
else:
self._cursor.color = (1, 1, 1, 0)
def on_click(self, event):
""" Internal on click handler """
self.request_focus()
def on_mousedown(self, event):
""" Internal mousedown handler """
local_x_offset = self._text.text_handle.get_relative_pos(event.coordinates).x
self.cursor_pos = self._text.text_handle.get_char_index(local_x_offset)
def _reset_cursor_tick(self):
""" Internal method to reset the cursor tick """
self._tickstart = globalClock.get_frame_time()
def on_focus(self, event):
""" Internal focus handler """
self._cursor.show()
self._placeholder.hide()
self._reset_cursor_tick()
self._layout.color = (0.9, 0.9, 0.9, 1)
def on_keydown(self, event):
""" Internal keydown handler. Processes the special keys, and if none are
present, redirects the event """
key_name = event.message
if key_name == "backspace":
self._value = self._value[:max(0, self._cursor_index - 1)] + self._value[self._cursor_index:]
self.cursor_pos -= 1
self.trigger_event("changed", self._value)
elif key_name == "delete":
post_value = self._value[min(len(self._value), self._cursor_index + 1):]
self._value = self._value[:self._cursor_index] + post_value
self.cursor_pos = self._cursor_index
self.trigger_event("changed", self._value)
elif key_name == "arrow_left":
if event.get_modifier_state("alt") or event.get_modifier_state("ctrl"):
self.cursor_skip_left()
else:
self.cursor_pos -= 1
elif key_name == "arrow_right":
if event.get_modifier_state("alt") or event.get_modifier_state("ctrl"):
self.cursor_skip_right()
else:
self.cursor_pos += 1
elif key_name == "escape":
self.blur()
elif key_name == "home":
self.cursor_pos = 0
elif key_name == "end":
self.cursor_pos = len(self.value)
self.trigger_event(key_name, self._value)
def on_keyrepeat(self, event):
""" Internal keyrepeat handler """
self.on_keydown(event)
def on_textinput(self, event):
""" Internal textinput handler """
self._value = self._value[:self._cursor_index] + event.message + \
self._value[self._cursor_index:]
self.cursor_pos = self._cursor_index + len(event.message)
self.trigger_event("changed", self._value)
def on_blur(self, event):
""" Internal blur handler """
self._cursor.hide()
if len(self._value) < 1:
self._placeholder.show()
self._layout.color = (1, 1, 1, 1)
def _render_text(self):
""" Internal method to render the text """
self._text.set_text(self._value)
self._cursor.left = self._text.left + \
self._text.text_handle.get_char_pos(self._cursor_index) + 1
max_left = self.width - 15
if self._value:
self._placeholder.hide()
else:
if not self.focused:
self._placeholder.show()
# Scroll if the cursor is outside of the clip bounds
rel_pos = self.get_relative_pos(self._cursor.get_abs_pos()).x
if rel_pos >= max_left:
self._text_scroller.left = min(0, max_left - self._cursor.left)
if rel_pos <= 0:
self._text_scroller.left = min(0, - self._cursor.left - rel_pos)
def cursor_skip_left(self):
""" Moves the cursor to the left, skipping the previous word """
left_hand_str = ''.join(reversed(self.value[0:self.cursor_pos]))
match = self.re_skip.match(left_hand_str)
if match is not None:
self.cursor_pos -= match.end() - 1
else:
self.cursor_pos = 0
def cursor_skip_right(self):
""" Moves the cursor to the right, skipping the next word """
right_hand_str = self.value[self.cursor_pos:]
match = self.re_skip.match(right_hand_str)
if match is not None:
self.cursor_pos += match.end() - 1
else:
self.cursor_pos = len(self.value)
| tobspr/LUI | Builtin/LUIInputField.py | Python | mit | 8,056 | 0.002234 |
#! /usr/bin/python
# _*_ coding: utf-8 _*_
#
# Dell EMC OpenManage Ansible Modules
#
# Copyright © 2017 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its
# subsidiaries. Other trademarks may be trademarks of their respective owners.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dellemc_idrac_nic
short_description: Configure iDRAC Network settings
version_added: "2.3"
description:
- Configure iDRAC Network settings
options:
idrac_ip:
required: True
description:
- iDRAC IP Address
type: 'str'
idrac_user:
required: True
description:
- iDRAC user name
type: 'str'
idrac_pwd:
required: False
description:
- iDRAC user password
type: 'str'
idrac_port:
required: False
description:
- iDRAC port
default: 443
type: 'int'
share_name:
required: True
description:
- CIFS or NFS Network share
share_user:
required: True
description:
- Network share user in the format user@domain if user is part of a domain else 'user'
type: 'str'
share_pwd:
required: True
description:
- Network share user password
type: 'str'
share_mnt:
required: True
description:
- Local mount path of the network file share with read-write permission for ansible user
type: 'path'
nic_selection:
required: False
description:
- NIC Selection mode
choices: ['Dedicated','LOM1','LOM2','LOM3','LOM4']
default: "Dedicated"
nic_failover:
required: False
description:
- Failover network if NIC selection fails
choices: ["None", "LOM1", "LOM2", "LOM3", "LOM4", "All"]
default: "None"
nic_autoneg:
required: False
description:
- if C(True), will enable auto negotiation
- if C(False), will disable auto negotiation
default: False
nic_speed:
required: False
description:
- Network Speed
choices: ["10", "100", "1000"]
default: "1000"
nic_duplex:
required: False
description:
- if C(Full), will enable the Full-Duplex mode
- if C(Half), will enable the Half-Duplex mode
choices: ["Full", "Half"]
default: "Full"
nic_autodedicated:
required: False
description:
- if C(True), will enable the auto-dedicated NIC option
- if C(False), will disable the auto-dedicated NIC option
default: False
requirements: ['omsdk']
author: "anupam.aloke@dell.com"
'''
EXAMPLES = '''
# Configure NIC Selection using a CIFS Network share
- name: Configure NIC Selection
dellemc_idrac_nic:
idrac_ip: "192.168.1.1"
idrac_user: "root"
idrac_pwd: "calvin"
share_name: "\\\\192.168.10.10\\share"
share_user: "user1"
share_pwd: "password"
share_mnt: "/mnt/share"
nic_selection: "Dedicated"
state: "enable"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.dellemc_idrac import iDRACConnection
from ansible.module_utils.basic import AnsibleModule
try:
from omsdk.sdkcenum import TypeHelper
from omdrivers.enums.iDRAC.iDRAC import (
AutoConfig_NICTypes, Autoneg_NICTypes, DHCPEnable_IPv4Types,
DNSDomainFromDHCP_NICStaticTypes, DNSFromDHCP_IPv4StaticTypes,
DNSRegister_NICTypes, Duplex_NICTypes, Enable_IPv4Types,
Enable_NICTypes, Failover_NICTypes, Selection_NICTypes, Speed_NICTypes,
VLanEnable_NICTypes
)
HAS_OMSDK = True
except ImportError:
HAS_OMSDK = False
def _setup_nic(idrac, module):
"""
Setup iDRAC NIC attributes
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
# Get the current NIC settings
curr_nic_selection = idrac.config_mgr._sysconfig.iDRAC.NIC.Selection_NIC
curr_nic_failover = idrac.config_mgr._sysconfig.iDRAC.NIC.Failover_NIC
curr_nic_autoneg = idrac.config_mgr._sysconfig.iDRAC.NIC.Autoneg_NIC
idrac.config_mgr._sysconfig.iDRAC.NIC.Enable_NIC = \
TypeHelper.convert_to_enum(module.params['nic_enable'],
Enable_NICTypes)
idrac.config_mgr._sysconfig.iDRAC.NIC.Selection_NIC = \
TypeHelper.convert_to_enum(module.params['nic_selection'],
Selection_NICTypes)
# NIC Selection mode and failover mode should not be same
if module.params['nic_selection'] == module.params['nic_failover']:
module.fail_json(msg="NIC Selection mode and Failover mode cannot be same")
elif curr_nic_selection != Selection_NICTypes.Dedicated and \
module.params['nic_selection'] != 'Dedicated':
idrac.config_mgr._sysconfig.iDRAC.NIC.Failover_NIC = \
TypeHelper.convert_to_enum(module.params['nic_failover'],
Failover_NICTypes)
# if NIC Selection is not 'Dedicated', then Auto-Negotiation is always ON
if curr_nic_selection != Selection_NICTypes.Dedicated and \
module.params['nic_selection'] != 'Dedicated':
idrac.config_mgr._sysconfig.iDRAC.NIC.Autoneg_NIC = Autoneg_NICTypes.Enabled
else:
idrac.config_mgr._sysconfig.iDRAC.NIC.Autoneg_NIC = \
TypeHelper.convert_to_enum(module.params['nic_autoneg'],
Autoneg_NICTypes)
# NIC Speed and Duplex mode can only be set when Auto-Negotiation is not ON
if curr_nic_autoneg != Autoneg_NICTypes.Enabled and \
module.params['nic_autoneg'] != 'Enabled':
        if curr_nic_selection != Selection_NICTypes.Dedicated and \
module.params['nic_selection'] != 'Dedicated':
idrac.config_mgr._sysconfig.iDRAC.NIC.Speed_NIC = Speed_NICTypes.T_100
else:
idrac.config_mgr._sysconfig.iDRAC.NIC.Speed_NIC = \
TypeHelper.convert_to_enum(module.params['nic_speed'],
Speed_NICTypes)
idrac.config_mgr._sysconfig.iDRAC.NIC.Duplex_NIC = \
TypeHelper.convert_to_enum(module.params['nic_duplex'],
Duplex_NICTypes)
idrac.config_mgr._sysconfig.iDRAC.NIC.MTU_NIC = module.params['nic_mtu']
# DNS Registration
idrac.config_mgr._sysconfig.iDRAC.NIC.DNSRegister_NIC = \
TypeHelper.convert_to_enum(module.params['dns_register'],
DNSRegister_NICTypes)
if module.params['dns_idrac_name']:
idrac.config_mgr._sysconfig.iDRAC.NIC.DNSRacName = module.params['dns_idrac_name']
# Enable Auto-Config
if module.params['nic_auto_config'] != 'Disabled':
if module.params['ipv4_enable'] != 'Enabled' or \
module.params['ipv4_dhcp_enable'] != 'Enabled':
module.fail_json(msg="IPv4 and DHCPv4 must be enabled for Auto-Config")
idrac.config_mgr._sysconfig.iDRAC.NIC.AutoConfig_NIC = \
TypeHelper.convert_to_enum(module.params['nic_auto_config'],
AutoConfig_NICTypes)
# VLAN
idrac.config_mgr._sysconfig.iDRAC.NIC.VLanEnable_NIC = \
TypeHelper.convert_to_enum(module.params['vlan_enable'],
VLanEnable_NICTypes)
idrac.config_mgr._sysconfig.iDRAC.NIC.VLanID_NIC = module.params['vlan_id']
idrac.config_mgr._sysconfig.iDRAC.NIC.VLanPriority_NIC = module.params['vlan_priority']
def _setup_nic_static(idrac, module):
"""
Setup iDRAC NIC Static attributes
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
idrac.config_mgr._sysconfig.iDRAC.NICStatic.DNSDomainFromDHCP_NICStatic = \
TypeHelper.convert_to_enum(module.params['dns_domain_from_dhcp'],
DNSDomainFromDHCP_NICStaticTypes)
if module.params['dns_domain_name']:
idrac.config_mgr._sysconfig.iDRAC.NICStatic.DNSDomainName_NICStatic = \
module.params['dns_domain_name']
def _setup_ipv4(idrac, module):
"""
Setup IPv4 parameters
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
idrac.config_mgr._sysconfig.iDRAC.IPv4.Enable_IPv4 = \
TypeHelper.convert_to_enum(module.params['ipv4_enable'],
Enable_IPv4Types)
idrac.config_mgr._sysconfig.iDRAC.IPv4.DHCPEnable_IPv4 = \
TypeHelper.convert_to_enum(module.params['ipv4_dhcp_enable'],
DHCPEnable_IPv4Types)
def _setup_ipv4_static(idrac, module):
"""
Setup IPv4 Static parameters
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
if module.params['ipv4_dhcp_enable'] == 'Disabled':
if module.params['ipv4_static']:
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.Address_IPv4Static = \
module.params['ipv4_static']
if module.params['ipv4_static_gw']:
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.Gateway_IPv4Static = \
module.params['ipv4_static_gw']
if module.params['ipv4_static_mask']:
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.Netmask_IPv4Static = \
module.params['ipv4_static_mask']
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.DNSFromDHCP_IPv4Static = \
TypeHelper.convert_to_enum(module.params['ipv4_dns_from_dhcp'],
DNSFromDHCP_IPv4StaticTypes)
if module.params['ipv4_dns_from_dhcp'] != 'Enabled':
if module.params['ipv4_preferred_dns']:
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.DNS1_IPv4Static = \
                module.params['ipv4_preferred_dns']
if module.params['ipv4_alternate_dns']:
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.DNS2_IPv4Static = \
module.params['ipv4_alternate_dns']
def setup_idrac_nic (idrac, module):
"""
Setup iDRAC NIC configuration settings
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
msg = {}
msg['changed'] = False
msg['failed'] = False
msg['msg'] = {}
err = False
try:
_setup_nic(idrac, module)
_setup_nic_static(idrac, module)
_setup_ipv4(idrac, module)
_setup_ipv4_static(idrac, module)
msg['changed'] = idrac.config_mgr._sysconfig.is_changed()
if module.check_mode:
# since it is running in check mode, reject the changes
idrac.config_mgr._sysconfig.reject()
else:
msg['msg'] = idrac.config_mgr.apply_changes()
if 'Status' in msg['msg'] and msg['msg']["Status"] != "Success":
msg['failed'] = True
msg['changed'] = False
except Exception as e:
err = True
msg['msg'] = "Error: %s" % str(e)
msg['exception'] = traceback.format_exc()
msg['failed'] = True
return msg, err
# Main
def main():
module = AnsibleModule(
argument_spec=dict(
# iDRAC handle
idrac=dict(required=False, type='dict'),
# iDRAC Credentials
idrac_ip=dict(required=True, type='str'),
idrac_user=dict(required=True, type='str'),
idrac_pwd=dict(required=True, type='str', no_log=True),
idrac_port=dict(required=False, default=443, type='int'),
# Network File Share
share_name=dict(required=True, type='str'),
share_user=dict(required=True, type='str'),
share_pwd=dict(required=True, type='str', no_log=True),
share_mnt=dict(required=True, type='path'),
# iDRAC Network Settings
nic_enable=dict(required=False, choices=['Enabled', 'Disabled'],
default='Enabled', type='str'),
nic_selection=dict(required=False,
choices=['Dedicated', 'LOM1', 'LOM2', 'LOM3', 'LOM4'],
default='Dedicated', type='str'),
nic_failover=dict(required=False,
choices=['ALL', 'LOM1', 'LOM2', 'LOM3', 'LOM4', 'None'],
default='None'),
nic_autoneg=dict(required=False, choices=['Enabled', 'Disabled'],
default='Enabled', type='str'),
nic_speed=dict(required=False, choices=['10', '100', '1000'],
default='1000', type='str'),
nic_duplex=dict(required=False, choices=['Full', 'Half'],
default='Full', type='str'),
nic_mtu=dict(required=False, default=1500, type='int'),
# Network Common Settings
dns_register=dict(required=False, choices=['Enabled', 'Disabled'],
default='Disabled', type='str'),
dns_idrac_name=dict(required=False, default=None, type='str'),
dns_domain_from_dhcp=dict(required=False,
choices=['Disabled', 'Enabled'],
default='Disabled', type='str'),
dns_domain_name=dict(required=False, default=None, type='str'),
# Auto-Config Settings
nic_auto_config=dict(required=False,
choices=['Disabled', 'Enable Once', 'Enable Once After Reset'],
default='Disabled', type='str'),
# IPv4 Settings
ipv4_enable=dict(required=False, choices=['Enabled', 'Disabled'],
default='Enabled', type='str'),
ipv4_dhcp_enable=dict(required=False, choices=['Enabled', 'Disabled'],
default='Disabled', type='str'),
ipv4_static=dict(required=False, default=None, type='str'),
ipv4_static_gw=dict(required=False, default=None, type='str'),
ipv4_static_mask=dict(required=False, default=None, type='str'),
ipv4_dns_from_dhcp=dict(required=False,
choices=['Enabled', 'Disabled'],
default='Disabled', type='str'),
ipv4_preferred_dns=dict(required=False, default=None, type='str'),
ipv4_alternate_dns=dict(required=False, default=None, type='str'),
# VLAN Settings
vlan_enable=dict(required=False, choices=['Enabled', 'Disabled'],
default='Disabled', type='str'),
vlan_id=dict(required=False, default=None, type='int'),
vlan_priority=dict(required=False, default=None, type='int'),
),
supports_check_mode=True)
if not HAS_OMSDK:
module.fail_json(msg="Dell EMC OpenManage Python SDK required for this module")
# Connect to iDRAC
idrac_conn = iDRACConnection(module)
idrac = idrac_conn.connect()
# Setup network share as local mount
if not idrac_conn.setup_nw_share_mount():
module.fail_json(msg="Failed to setup network share local mount point")
# Setup iDRAC NIC
(msg, err) = setup_idrac_nic(idrac, module)
# Disconnect from iDRAC
idrac_conn.disconnect()
if err:
module.fail_json(**msg)
module.exit_json(**msg)
if __name__ == '__main__':
main()
| anupamaloke/Dell-EMC-Ansible-Modules-for-iDRAC | library/dellemc_idrac_nic.py | Python | gpl-3.0 | 16,156 | 0.001795 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Class in Python 2.7 for incorporation of the RPi.GPIO module to control the GPIO channels of Raspberry Pi.
"""
import RPi.GPIO as GPIO
__author__ = ""
__copyright__ = ""
__email__ = ""
__status__ = "Prototype"
class PinsGPIO(object):
gpio = None
def __init__(self):
self.gpio = GPIO
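# Usage sketch (pin number and mode are illustrative; RPi.GPIO requires a
# numbering mode to be set before configuring channels):
#   pins = PinsGPIO()
#   pins.gpio.setmode(pins.gpio.BOARD)
#   pins.gpio.setup(11, pins.gpio.OUT)
#   pins.gpio.output(11, True)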
| pat-odoo/TwoRC522_RPi2-3 | module/gpio.py | Python | gpl-3.0 | 391 | 0.01023 |
from __future__ import print_function
import qt
# Importing vtk initializes vtkPythonMap owned by vtkPythonUtil and prevent
# call to vtkPythonUtil::GetObjectFromPointer() from segfaulting.
# PythonQt internally uses vtkPythonUtil to properly wrap/unwrap VTK objects
from vtk import *
t = _testWrappedVTKQInvokableInstance.getTable()
print(t.GetClassName())
t2 = vtkTable()
_testWrappedVTKQInvokableInstance.setTable(t2)
if _testWrappedVTKQInvokableInstance.getTable() != t2:
qt.QApplication.exit(1)
qt.QApplication.exit(0)
| SINTEFMedtek/CTK | Applications/ctkSimplePythonShell/Testing/Python/wrappedVTKQInvokableTest.py | Python | apache-2.0 | 531 | 0.001883 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: setup
version_added: historical
short_description: Gathers facts about remote hosts
options:
filter:
version_added: "1.1"
description:
- if supplied, only return facts that match this shell-style (fnmatch) wildcard.
required: false
default: '*'
fact_path:
version_added: "1.3"
description:
- path used for local ansible facts (*.fact) - files in this dir
will be run (if executable) and their results be added to ansible_local facts
if a file is not executable it is read.
File/results format can be json or ini-format
required: false
default: '/etc/ansible/facts.d'
description:
- This module is automatically called by playbooks to gather useful
variables about remote hosts that can be used in playbooks. It can also be
executed directly by C(/usr/bin/ansible) to check what variables are
available to a host. Ansible provides many I(facts) about the system,
automatically.
notes:
- More ansible facts will be added with successive releases. If I(facter) or
I(ohai) are installed, variables from these programs will also be snapshotted
into the JSON file for usage in templating. These variables are prefixed
with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are
bubbled up to the caller. Using the ansible facts and choosing to not
install I(facter) and I(ohai) means you can avoid Ruby-dependencies on your
remote systems. (See also M(facter) and M(ohai).)
- The filter option filters only the first level subkey below ansible_facts.
- If the target host is Windows, you will not currently have the ability to use
C(fact_path) or C(filter) as this is provided by a simpler implementation of the module.
Different facts are returned for Windows hosts.
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = """
# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
ansible all -m setup --tree /tmp/facts
# Display only facts regarding memory found by ansible on all hosts and output them.
ansible all -m setup -a 'filter=ansible_*_mb'
# Display only facts returned by facter.
ansible all -m setup -a 'filter=facter_*'
# Display only facts about certain interfaces.
ansible all -m setup -a 'filter=ansible_eth[0-2]'
"""
def run_setup(module):
setup_options = dict(module_setup=True)
facts = ansible_facts(module)
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
# Look for the path to the facter and ohai binary and set
# the variable to that path.
facter_path = module.get_bin_path('facter')
ohai_path = module.get_bin_path('ohai')
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
if facter_path is not None:
rc, out, err = module.run_command(facter_path + " --puppet --json")
facter = True
try:
facter_ds = json.loads(out)
except:
facter = False
if facter:
for (k,v) in facter_ds.items():
setup_options["facter_%s" % k] = v
# ditto for ohai
if ohai_path is not None:
rc, out, err = module.run_command(ohai_path)
ohai = True
try:
ohai_ds = json.loads(out)
except:
ohai = False
if ohai:
for (k,v) in ohai_ds.items():
k2 = "ohai_%s" % k.replace('-', '_')
setup_options[k2] = v
setup_result = { 'ansible_facts': {} }
for (k,v) in setup_options.items():
if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
setup_result['ansible_facts'][k] = v
# hack to keep --verbose from showing all the setup module results
setup_result['verbose_override'] = True
return setup_result
def main():
global module
module = AnsibleModule(
argument_spec = dict(
filter=dict(default="*", required=False),
fact_path=dict(default='/etc/ansible/facts.d', required=False),
),
supports_check_mode = True,
)
data = run_setup(module)
module.exit_json(**data)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
main()
| amandolo/ansible-modules-core | system/setup.py | Python | gpl-3.0 | 5,256 | 0.005327 |
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.testing as npt
import pandas as pd
from deepcpg.data import annotations as annos
def test_join_overlapping():
f = annos.join_overlapping
s, e = f([], [])
assert len(s) == 0
assert len(e) == 0
s = [1, 3, 6]
e = [2, 4, 10]
expect = (s, e)
result = f(s, e)
assert result == expect
x = np.array([[1, 2],
[3, 4], [4, 5],
[6, 8], [8, 8], [8, 9],
[10, 15], [10, 11], [11, 14], [14, 16]]
)
expect = [[1, 2], [3, 5], [6, 9], [10, 16]]
result = np.array(f(x[:, 0], x[:, 1])).T
npt.assert_array_equal(result, expect)
def test_in_which():
f = annos.in_which
ys = [2, 4, 12, 17]
ye = [2, 8, 15, 18]
x = []
expect = []
result = f(x, ys, ye)
npt.assert_array_equal(result, expect)
x = [-1, 3, 9, 19]
expect = [-1, -1, -1, -1]
result = f(x, ys, ye)
npt.assert_array_equal(result, expect)
x = [-1, 2, 2, 3, 4, 8, 15, 16]
expect = [-1, 0, 0, -1, 1, 1, 2, -1]
result = f(x, ys, ye)
npt.assert_array_equal(result, expect)
def test_is_in():
ys = [2, 4, 12, 17]
ye = [2, 8, 15, 18]
x = [-1, 2, 2, 3, 4, 8, 15, 16]
expect = [False, True, True, False, True, True, True, False]
result = annos.is_in(x, ys, ye)
npt.assert_array_equal(result, expect)
def test_distance():
start = [3, 10, 17]
end = [6, 15, 18]
pos = [1, 2, 5, 8, 10, 15, 16, 19]
expect = [2, 1, 0, 2, 0, 0, 1, 1]
start = np.asarray(start)
end = np.asarray(end)
pos = np.asarray(pos)
actual = annos.distance(pos, start, end)
npt.assert_array_equal(actual, expect)
pos = [1, 6, 7, 9]
expect = [2, 0, 1, 1]
start = np.asarray(start)
end = np.asarray(end)
pos = np.asarray(pos)
actual = annos.distance(pos, start, end)
npt.assert_array_equal(actual, expect)
def test_extend_frame():
d = pd.DataFrame({
'chromo': '1',
'start': [2, 3, 3, 1, 1],
'end': [3, 3, 8, 2, 1]
})
d = d.loc[:, ['chromo', 'start', 'end']]
expect = pd.DataFrame({
'chromo': '1',
'start': [1, 2, 3, 1, 1],
'end': [4, 5, 8, 4, 4]
})
expect = expect.loc[:, ['chromo', 'start', 'end']]
actual = annos.extend_len_frame(d, 4)
npt.assert_array_equal(actual.values, expect.values)
def test_group_overlapping():
npt.assert_array_equal(annos.group_overlapping([], []), [])
npt.assert_array_equal(annos.group_overlapping([1], [2]), [0])
s = [1, 5, 7, 11, 13, 16, 22]
e = [3, 8, 9, 15, 17, 20, 24]
g = [0, 1, 1, 2, 2, 2, 3]
a = annos.group_overlapping(s, e)
npt.assert_array_equal(a, g)
| cangermueller/deepcpg | tests/deepcpg/data/test_annos.py | Python | mit | 2,793 | 0 |
from django.contrib import admin
from fluent_contents.admin import PlaceholderFieldAdmin
from .models import PlaceholderFieldTestPage
class PlaceholderFieldTestPageAdmin(PlaceholderFieldAdmin):
"""
Admin interface for the PlaceholderFieldTestPage model.
"""
pass
admin.site.register(PlaceholderFieldTestPage, PlaceholderFieldTestPageAdmin)
| jpotterm/django-fluent-contents | fluent_contents/tests/testapp/admin.py | Python | apache-2.0 | 360 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.utils.logger import system_log
def import_mod(mod_name):
try:
from importlib import import_module
return import_module(mod_name)
except Exception as e:
system_log.error("*" * 30)
system_log.error("Mod Import Error: {}, error: {}", mod_name, e)
system_log.error("*" * 30)
raise
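# Usage sketch (the dotted path is illustrative, not a real mod):
#   mod = import_mod("my_project.mods.my_mod")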
| xclxxl414/rqalpha | rqalpha/utils/package_helper.py | Python | apache-2.0 | 953 | 0 |
from __future__ import unicode_literals
from calendar import timegm
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import force_text, iri_to_uri, smart_text
from django.utils.html import escape
from django.utils.http import http_date
from django.utils import six
from django.utils.timezone import is_naive
def add_domain(domain, url, secure=False):
protocol = 'https' if secure else 'http'
if url.startswith('//'):
# Support network-path reference (see #16753) - RSS requires a protocol
url = '%s:%s' % (protocol, url)
elif not (url.startswith('http://')
or url.startswith('https://')
or url.startswith('mailto:')):
url = iri_to_uri('%s://%s%s' % (protocol, domain, url))
return url
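# For example, add_domain('example.com', '/feed/') returns 'http://example.com/feed/',
# add_domain('example.com', '/feed/', secure=True) returns 'https://example.com/feed/',
# and a network-path reference such as '//cdn.example.com/x' only gains a protocol.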
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(content_type=feedgen.mime_type)
if hasattr(self, 'item_pubdate'):
# if item_pubdate is defined for the feed, set header so as
# ConditionalGetMiddleware is able to send 304 NOT MODIFIED
response['Last-Modified'] = http_date(
timegm(feedgen.latest_post_date().utctimetuple()))
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_text(item))
def item_description(self, item):
return force_text(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__)
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_context_data(self, **kwargs):
"""
Returns a dictionary to use as extra context if either
``self.description_template`` or ``self.item_template`` are used.
Default implementation preserves the old behavior
of using {'obj': item, 'site': current_site} as the context.
"""
return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
current_site = get_current_site(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link, request.is_secure())
feed = self.feed_type(
title = self.__get_dynamic_attr('title', obj),
subtitle = self.__get_dynamic_attr('subtitle', obj),
link = link,
description = self.__get_dynamic_attr('description', obj),
language = settings.LANGUAGE_CODE,
feed_url = add_domain(
current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path,
request.is_secure(),
),
author_name = self.__get_dynamic_attr('author_name', obj),
author_link = self.__get_dynamic_attr('author_link', obj),
author_email = self.__get_dynamic_attr('author_email', obj),
categories = self.__get_dynamic_attr('categories', obj),
feed_copyright = self.__get_dynamic_attr('feed_copyright', obj),
feed_guid = self.__get_dynamic_attr('feed_guid', obj),
ttl = self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
context = self.get_context_data(item=item, site=current_site,
obj=obj, request=request)
if title_tmp is not None:
title = title_tmp.render(RequestContext(request, context))
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(RequestContext(request, context))
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self.__get_dynamic_attr('item_link', item),
request.is_secure(),
)
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url = smart_text(enc_url),
length = smart_text(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type = smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and is_naive(pubdate):
ltz = tzinfo.LocalTimezone(pubdate)
pubdate = pubdate.replace(tzinfo=ltz)
feed.add_item(
title = title,
link = link,
description = description,
unique_id = self.__get_dynamic_attr('item_guid', item, link),
unique_id_is_permalink = self.__get_dynamic_attr(
'item_guid_is_permalink', item),
enclosure = enc,
pubdate = pubdate,
author_name = author_name,
author_email = author_email,
author_link = author_link,
categories = self.__get_dynamic_attr('item_categories', item),
item_copyright = self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
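# Minimal subclass sketch (the Entry model and its fields are illustrative):
#   class LatestEntriesFeed(Feed):
#       title = "Latest entries"
#       link = "/entries/"
#       description = "The most recent entries."
#       def items(self):
#           return Entry.objects.order_by('-pub_date')[:5]
#       def item_pubdate(self, item):
#           return item.pub_date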
| edisonlz/fruit | web_project/base/site-packages/django/contrib/syndication/views.py | Python | apache-2.0 | 8,515 | 0.007634 |
from django.db import models
from django.core.exceptions import ValidationError
from django.db.models.fields.related import ForeignObject
try:
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor
except ImportError:
from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor as ForwardManyToOneDescriptor
from django.utils.encoding import python_2_unicode_compatible
import logging
logger = logging.getLogger(__name__)
# Python 3 fixes.
import sys
if sys.version > '3':
long = int
basestring = (str, bytes)
unicode = str
__all__ = ['Country', 'State', 'Locality', 'Address', 'AddressField']
class InconsistentDictError(Exception):
pass
def _to_python(value):
raw = value.get('raw', '')
country = value.get('country', '')
country_code = value.get('country_code', '')
state = value.get('state', '')
state_code = value.get('state_code', '')
locality = value.get('locality', '')
postal_code = value.get('postal_code', '')
street_number = value.get('street_number', '')
route = value.get('route', '')
formatted = value.get('formatted', '')
latitude = value.get('latitude', None)
longitude = value.get('longitude', None)
# If there is no value (empty raw) then return None.
if not raw:
return None
    # If we have an inconsistent set of values, bail out now.
if (country or state or locality) and not (country and state and locality):
raise InconsistentDictError
# Handle the country.
try:
country_obj = Country.objects.get(name=country)
except Country.DoesNotExist:
if country:
if len(country_code) > Country._meta.get_field('code').max_length:
if country_code != country:
raise ValueError('Invalid country code (too long): %s'%country_code)
country_code = ''
country_obj = Country.objects.create(name=country, code=country_code)
else:
country_obj = None
# Handle the state.
try:
state_obj = State.objects.get(name=state, country=country_obj)
except State.DoesNotExist:
if state:
if len(state_code) > State._meta.get_field('code').max_length:
if state_code != state:
raise ValueError('Invalid state code (too long): %s'%state_code)
state_code = ''
state_obj = State.objects.create(name=state, code=state_code, country=country_obj)
else:
state_obj = None
# Handle the locality.
try:
locality_obj = Locality.objects.get(name=locality, state=state_obj)
except Locality.DoesNotExist:
if locality:
locality_obj = Locality.objects.create(name=locality, postal_code=postal_code, state=state_obj)
else:
locality_obj = None
# Handle the address.
try:
if not (street_number or route or locality):
address_obj = Address.objects.get(raw=raw)
else:
address_obj = Address.objects.get(
street_number=street_number,
route=route,
locality=locality_obj
)
except Address.DoesNotExist:
address_obj = Address(
street_number=street_number,
route=route,
raw=raw,
locality=locality_obj,
formatted=formatted,
latitude=latitude,
longitude=longitude,
)
# If "formatted" is empty try to construct it from other values.
if not address_obj.formatted:
address_obj.formatted = unicode(address_obj)
# Need to save.
address_obj.save()
# Done.
return address_obj
##
## Convert a dictionary to an address.
##
def to_python(value):
# Keep `None`s.
if value is None:
return None
# Is it already an address object?
if isinstance(value, Address):
return value
    # If we have an integer, assume it is a model primary key; Django passes
    # bare primary keys around in several ORM code paths.
elif isinstance(value, (int, long)):
return value
# A string is considered a raw value.
elif isinstance(value, basestring):
obj = Address(raw=value)
obj.save()
return obj
# A dictionary of named address components.
elif isinstance(value, dict):
# Attempt a conversion.
try:
return _to_python(value)
except InconsistentDictError:
return Address.objects.create(raw=value['raw'])
# Not in any of the formats I recognise.
raise ValidationError('Invalid address value.')
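# Illustrative example (added for clarity, not part of the original module):
# ``to_python`` accepts a dict of named components such as the one below; the
# keys mirror those read in ``_to_python`` above and any of them may be empty.
# The address data itself is hypothetical.
#
#   to_python({
#       'raw': '1600 Amphitheatre Parkway, Mountain View, CA 94043, USA',
#       'street_number': '1600',
#       'route': 'Amphitheatre Parkway',
#       'locality': 'Mountain View',
#       'postal_code': '94043',
#       'state': 'California',
#       'state_code': 'CA',
#       'country': 'USA',
#       'country_code': 'US',
#       'latitude': 37.422,
#       'longitude': -122.084,
#   })
#
# If the dict is inconsistent (e.g. a locality without a country), only a bare
# Address(raw=...) record is created instead of the decomposed form.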
##
## A country.
##
@python_2_unicode_compatible
class Country(models.Model):
name = models.CharField(max_length=40, unique=True, blank=True)
code = models.CharField(max_length=2, blank=True) # not unique as there are duplicates (IT)
class Meta:
verbose_name_plural = 'Countries'
ordering = ('name',)
def __str__(self):
return '%s'%(self.name or self.code)
##
## A state. Google refers to this as `administration_level_1`.
##
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=165, blank=True)
code = models.CharField(max_length=3, blank=True)
country = models.ForeignKey(Country, related_name='states')
class Meta:
unique_together = ('name', 'country')
ordering = ('country', 'name')
def __str__(self):
txt = self.to_str()
country = '%s'%self.country
if country and txt:
txt += ', '
txt += country
return txt
def to_str(self):
return '%s'%(self.name or self.code)
##
## A locality (suburb).
##
@python_2_unicode_compatible
class Locality(models.Model):
name = models.CharField(max_length=165, blank=True)
postal_code = models.CharField(max_length=10, blank=True)
state = models.ForeignKey(State, related_name='localities')
class Meta:
verbose_name_plural = 'Localities'
unique_together = ('name', 'state')
ordering = ('state', 'name')
def __str__(self):
txt = '%s'%self.name
state = self.state.to_str() if self.state else ''
if txt and state:
txt += ', '
txt += state
if self.postal_code:
txt += ' %s'%self.postal_code
cntry = '%s'%(self.state.country if self.state and self.state.country else '')
if cntry:
txt += ', %s'%cntry
return txt
##
## An address. If for any reason we are unable to find a matching
## decomposed address we will store the raw address string in `raw`.
##
@python_2_unicode_compatible
class Address(models.Model):
street_number = models.CharField(max_length=20, blank=True)
route = models.CharField(max_length=100, blank=True)
locality = models.ForeignKey(Locality, related_name='addresses', blank=True, null=True)
raw = models.CharField(max_length=200)
formatted = models.CharField(max_length=200, blank=True)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
class Meta:
verbose_name_plural = 'Addresses'
ordering = ('locality', 'route', 'street_number')
# unique_together = ('locality', 'route', 'street_number')
def __str__(self):
if self.formatted != '':
txt = '%s'%self.formatted
elif self.locality:
txt = ''
if self.street_number:
txt = '%s'%self.street_number
if self.route:
if txt:
txt += ' %s'%self.route
locality = '%s'%self.locality
if txt and locality:
txt += ', '
txt += locality
else:
txt = '%s'%self.raw
return txt
def clean(self):
if not self.raw:
raise ValidationError('Addresses may not have a blank `raw` field.')
def as_dict(self):
ad = dict(
street_number=self.street_number,
route=self.route,
raw=self.raw,
formatted=self.formatted,
latitude=self.latitude if self.latitude else '',
longitude=self.longitude if self.longitude else '',
)
if self.locality:
ad['locality'] = self.locality.name
ad['postal_code'] = self.locality.postal_code
if self.locality.state:
ad['state'] = self.locality.state.name
ad['state_code'] = self.locality.state.code
if self.locality.state.country:
ad['country'] = self.locality.state.country.name
ad['country_code'] = self.locality.state.country.code
return ad
class AddressDescriptor(ForwardManyToOneDescriptor):
def __set__(self, inst, value):
super(AddressDescriptor, self).__set__(inst, to_python(value))
##
## A field for addresses in other models.
##
class AddressField(models.ForeignKey):
description = 'An address'
def __init__(self, **kwargs):
kwargs['to'] = 'address.Address'
super(AddressField, self).__init__(**kwargs)
def contribute_to_class(self, cls, name, virtual_only=False):
super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, AddressDescriptor(self))
# def deconstruct(self):
# name, path, args, kwargs = super(AddressField, self).deconstruct()
# del kwargs['to']
# return name, path, args, kwargs
def formfield(self, **kwargs):
from .forms import AddressField as AddressFormField
defaults = dict(form_class=AddressFormField)
defaults.update(kwargs)
return super(AddressField, self).formfield(**defaults)
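# Usage sketch (added for illustration; the Venue model is hypothetical and
# assumes this app is installed as ``address``):
#
#   from django.db import models
#   from address.models import AddressField
#
#   class Venue(models.Model):
#       name = models.CharField(max_length=100)
#       # Assignment accepts an Address instance, a raw string, or a dict of
#       # components; it is routed through AddressDescriptor/to_python above.
#       address = AddressField(blank=True, null=True)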
| jamesaud/se1-group4 | address/models.py | Python | mit | 9,864 | 0.004562 |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SQS
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit sqs credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sqs.keyid: GKTADJGHEIQSXMKKRBJ08H
sqs.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sqs.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto
'''
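# Illustrative sketch (added for clarity, not part of the original module):
# when cross-calling from another execution module, a profile may also be
# passed as a plain dict instead of a pillar key, e.g.
#
#   profile = {'keyid': 'GKTADJGHEIQSXMKKRBJ08H',
#              'key': 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs',
#              'region': 'us-east-1'}
#   __salt__['boto_sqs.exists']('myqueue', profile=profile)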
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
# Import Python libs
import logging
import json
import salt.ext.six as six
log = logging.getLogger(__name__)
# Import third party libs
try:
# pylint: disable=unused-import
import boto
import boto.sqs
# pylint: enable=unused-import
logging.getLogger('boto').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from salt.ext.six import string_types
def __virtual__():
'''
Only load if boto libraries exist.
'''
if not HAS_BOTO:
return (False, 'The boto_sqs module could not be loaded: boto libraries not found')
__utils__['boto.assign_funcs'](__name__, 'sqs', pack=__salt__)
return True
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a queue exists.
CLI example::
salt myminion boto_sqs.exists myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if conn.get_queue(name):
return True
else:
return False
def create(name, region=None, key=None, keyid=None, profile=None):
'''
Create an SQS queue.
CLI example to create a queue::
salt myminion boto_sqs.create myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn.get_queue(name):
try:
conn.create_queue(name)
except boto.exception.SQSError:
msg = 'Failed to create queue {0}'.format(name)
log.error(msg)
return False
log.info('Created queue {0}'.format(name))
return True
def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete an SQS queue.
CLI example to delete a queue::
salt myminion boto_sqs.delete myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
queue_obj = conn.get_queue(name)
if queue_obj:
deleted_queue = conn.delete_queue(queue_obj)
if not deleted_queue:
msg = 'Failed to delete queue {0}'.format(name)
log.error(msg)
return False
return True
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if attributes are set on an SQS queue.
CLI example::
salt myminion boto_sqs.get_attributes myqueue
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {}
queue_obj = conn.get_queue(name)
if not queue_obj:
log.error('Queue {0} does not exist.'.format(name))
return {}
return conn.get_queue_attributes(queue_obj)
def set_attributes(name, attributes, region=None, key=None, keyid=None,
profile=None):
'''
Set attributes on an SQS queue.
CLI example to set attributes on a queue::
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
'''
ret = True
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
queue_obj = conn.get_queue(name)
if not queue_obj:
log.error('Queue {0} does not exist.'.format(name))
ret = False
if isinstance(attributes, string_types):
attributes = json.loads(attributes)
for attr, val in six.iteritems(attributes):
attr_set = queue_obj.set_attribute(attr, val)
if not attr_set:
msg = 'Failed to set attribute {0} = {1} on queue {2}'
log.error(msg.format(attr, val, name))
ret = False
else:
msg = 'Set attribute {0} = {1} on queue {2}'
log.info(msg.format(attr, val, name))
return ret
| stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/modules/boto_sqs.py | Python | apache-2.0 | 5,122 | 0.000976 |
import subprocess
import sys
import re
try:
# Make terminal colors work on windows
import colorama
colorama.init()
except ImportError:
pass
def add_nanopb_builders(env):
'''Add the necessary builder commands for nanopb tests.'''
# Build command that runs a test program and saves the output
def run_test(target, source, env):
if len(source) > 1:
infile = open(str(source[1]))
else:
infile = None
args = [str(source[0])]
if env.has_key('ARGS'):
args.extend(env['ARGS'])
pipe = subprocess.Popen(args,
stdin = infile,
stdout = open(str(target[0]), 'w'),
stderr = sys.stderr)
result = pipe.wait()
if result == 0:
print '\033[32m[ OK ]\033[0m Ran ' + str(source[0])
else:
print '\033[31m[FAIL]\033[0m Program ' + str(source[0]) + ' returned ' + str(result)
return result
run_test_builder = Builder(action = run_test,
suffix = '.output')
env.Append(BUILDERS = {'RunTest': run_test_builder})
# Build command that decodes a message using protoc
def decode_actions(source, target, env, for_signature):
esc = env['ESCAPE']
dirs = ' '.join(['-I' + esc(env.GetBuildPath(d)) for d in env['PROTOCPATH']])
return '$PROTOC $PROTOCFLAGS %s --decode=%s %s <%s >%s' % (
dirs, env['MESSAGE'], esc(str(source[1])), esc(str(source[0])), esc(str(target[0])))
decode_builder = Builder(generator = decode_actions,
suffix = '.decoded')
env.Append(BUILDERS = {'Decode': decode_builder})
# Build command that encodes a message using protoc
def encode_actions(source, target, env, for_signature):
esc = env['ESCAPE']
dirs = ' '.join(['-I' + esc(env.GetBuildPath(d)) for d in env['PROTOCPATH']])
return '$PROTOC $PROTOCFLAGS %s --encode=%s %s <%s >%s' % (
dirs, env['MESSAGE'], esc(str(source[1])), esc(str(source[0])), esc(str(target[0])))
encode_builder = Builder(generator = encode_actions,
suffix = '.encoded')
env.Append(BUILDERS = {'Encode': encode_builder})
# Build command that asserts that two files be equal
def compare_files(target, source, env):
data1 = open(str(source[0]), 'rb').read()
data2 = open(str(source[1]), 'rb').read()
if data1 == data2:
print '\033[32m[ OK ]\033[0m Files equal: ' + str(source[0]) + ' and ' + str(source[1])
return 0
else:
print '\033[31m[FAIL]\033[0m Files differ: ' + str(source[0]) + ' and ' + str(source[1])
return 1
compare_builder = Builder(action = compare_files,
suffix = '.equal')
env.Append(BUILDERS = {'Compare': compare_builder})
# Build command that checks that each pattern in source2 is found in source1.
def match_files(target, source, env):
data = open(str(source[0]), 'rU').read()
patterns = open(str(source[1]))
for pattern in patterns:
if pattern.strip() and not re.search(pattern.strip(), data, re.MULTILINE):
print '\033[31m[FAIL]\033[0m Pattern not found in ' + str(source[0]) + ': ' + pattern
return 1
else:
print '\033[32m[ OK ]\033[0m All patterns found in ' + str(source[0])
return 0
match_builder = Builder(action = match_files, suffix = '.matched')
env.Append(BUILDERS = {'Match': match_builder})
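# Usage sketch (added for illustration; the file names are hypothetical):
# after calling add_nanopb_builders(env) from an SConscript, the builders
# registered above can be chained like this:
#
#   p = env.Program(["encode_unittests.c"])
#   env.RunTest(p)                                    # run, capture stdout
#   env.Decode(["message.encoded", "alltypes.proto"], MESSAGE="AllTypes")
#   env.Compare(["message.decoded", "message.expected"])
#   env.Match(["build_output.txt", "expected_patterns.txt"])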
| kasbah/slim_looper | src/protocol/nanopb/tests/site_scons/site_init.py | Python | gpl-3.0 | 3,700 | 0.015135 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import render_template, request, redirect, session, flash, url_for
from functools import wraps
from user import app
import services2db
import log2db
import users
import json
import time
import sys
import asset
reload(sys)
sys.setdefaultencoding('gb18030')
# Login-required decorator (applying it to the login page itself would cause an endless redirect loop)
def login_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if session.get('user') is None:
return redirect('/')
rt = func(*args, **kwargs)
return rt
return wrapper
# Timing decorator
def time_wrapper(func):
@wraps(func)
def wrapper():
print '计时开始:%s' % func.__name__
start = time.time()
rt = func()
print '计时结束:%s:%s' % (func.__name__,time.time() - start)
return rt
return wrapper
# Root URL
@app.route('/')
def index():
if session:
return redirect('/users/')
else:
return render_template('login.html')
# Login page
@app.route('/login/', methods=['POST', 'GET'])
def login():
params = request.args if request.method == 'GET' else request.form
username = params.get('username', '')
password = params.get('password', '')
if users.validate_login(username, password):
print '登录成功'
session['user'] = {'username': username}
return redirect('/users/')
else:
return render_template('login.html', username=username, error=u'用户名或密码错误')
# Logout page
@app.route('/user/logout/')
def logout():
session.clear()
return redirect('/')
# Display user information
@app.route('/users/')
@login_required
def user_list():
return render_template('users.html', user_list=users.get_users())
# Return the add-user template for the dialog page
@app.route('/user/adder/', methods=['POST', 'GET'])
@login_required
def user_create():
return render_template('user_create.html')
# Add a user
@app.route('/user/add/', methods=['POST'])
def user_add():
params = request.args if request.method == 'GET' else request.form
username = params.get('username', '')
password = params.get('password', '')
age = params.get('age', '')
    # Validate the user information
_is_ok, _error = users.validate_add_user(username, password, age)
_status = None
if _is_ok:
if users.add_users(username=username, age=age, password=password):
_status = '添加用户成功!'
else:
_status = '添加用户失败!'
return json.dumps({'is_ok': _is_ok, 'status': _status, 'error': _error})
# Return the update-user template for the dialog page
@app.route('/user/update/', methods=['POST', 'GET'])
@login_required
def user_update():
_id = request.args.get('id', '')
_name = request.args.get('name', '')
# _users = []
# for i in users.get_users():
# if i.get('id') == int(_id):
# _users.append(i)
return render_template('user_update.html', uid=_id, username=_name)
# Update a user
@app.route('/user/upd/', methods=['POST', 'GET'])
def user_upd():
_id = request.form.get('id', '')
_mpassword = request.form.get('mpassword', '')
_upassword = request.form.get('upassword', '')
_age = request.form.get('age', '')
_is_ok, _error = users.validate_update_user(_id, session['user']['username'], _mpassword, _upassword, _age)
_status = None
if _is_ok:
if users.update_users(_id, _upassword, _age):
_status = '用户更新成功!'
else:
_status = '用户更新失败!'
return json.dumps({'is_ok': _is_ok, 'status': _status, 'error': _error})
# Delete a user
@app.route('/user/delete/')
@login_required
def delete_user():
uid = request.args.get('uid', '')
if users.del_users(uid):
return redirect('/users/')
else:
return '用户删除失败'
# Display log information
@app.route('/logs/', methods=['POST', 'GET'])
@time_wrapper
@login_required
def logs():
files = request.files.get('files')
if files:
# print files.filename
files.save('./access.txt')
log_files = 'access.txt'
if log2db.log2db(log_files=log_files, fetch=False):
return redirect('/logs/')
else:
return '日志写入数据库失败!'
else:
topn = request.form.get('topn', 10)
topn = int(topn) if str(topn).isdigit() else 10
        rt_list = log2db.log2db(topn=topn) # read data
return render_template('logs.html', rt_list=rt_list)
# Display domain management information
@app.route('/services/', methods=['POST', 'GET'])
@login_required
def service_manage():
params = request.args if request.method == 'GET' else request.form
_url = params.get('url', 'Null')
_username = params.get('username', 'Null')
_password = params.get('password', 'Null')
_func = params.get('func', 'Null')
    # Add domain management information
if _url != 'Null':
if services2db.add_service(_url, _username, _password, _func):
return redirect('/services/')
else:
return '添加信息失败!'
    # Query domain management information
else:
service_list = services2db.get_service()
return render_template('services.html', service_list=service_list)
# Update domain management information
@app.route('/services/update/', methods=['POST'])
def update_service():
params = request.args if request.method == 'GET' else request.form
_id = params.get('id', '')
_url = params.get('url', '')
_username = params.get('username', '')
_password = params.get('password', '')
_func = params.get('func', '')
_is_ok = services2db.update_service(_url, _username, _password, _func, _id)
return json.dumps({'is_ok': _is_ok})
# Delete domain management information
@app.route('/services/del/')
@login_required
def serviceDel():
uid = request.args.get('id', '')
if services2db.servicedel(uid):
return redirect('/services/')
else:
return '域名管理信息删除失败!'
# Display asset information
@app.route('/assets/')
@login_required
def asset_list():
_asset_list = []
for i in asset.get_list():
_rt_list = asset.get_by_id(i.get('idc_id'))
i['idc_id'] = _rt_list[0][1]
_asset_list.append(i)
return render_template('assets.html', asset_list=_asset_list)
# Return the create-asset template for the dialog page
@app.route('/asset/create/', methods=['POST', 'GET'])
@login_required
def asset_create():
return render_template('asset_create.html', idcs=asset.get_idc())
# Add asset information
@app.route('/asset/add/', methods=['POST', 'GET'])
@login_required
def asset_add():
lists = ['sn','ip','hostname','idc_id','purchase_date','warranty','vendor','model','admin','business','os','cpu','ram','disk']
asset_dict = {}
for i in lists:
asset_dict['_'+i] = request.form.get(i, '')
# 检查资产信息
_is_ok, _error = asset.validate_create(asset_dict)
status = None
if _is_ok:
if asset.create(asset_dict):
status = '添加资产成功!'
else:
status = '添加资产失败!'
return json.dumps({'is_ok': _is_ok, 'status': status, 'error': _error})
# Delete asset information
@app.route('/asset/delete/')
@login_required
def asset_del():
uid = request.args.get('id', '')
if asset.delete(uid):
return redirect('/assets/')
else:
return '资产删除失败!'
# Return the update-asset template for the dialog page
@app.route('/asset/update/', methods=['POST', 'GET'])
@login_required
def asset_update():
_id = request.args.get('id', '')
_asset_list = []
for i in asset.get_list():
if i.get('id') == int(_id):
_asset_list.append(i)
return render_template('asset_update.html', asset_list=_asset_list, idcs=asset.get_idc())
# Update asset information
@app.route('/asset/upd/', methods=['POST', 'GET'])
@login_required
def asset_upd():
_id = request.form.get('id', '')
assets = ['sn','ip','hostname','idc_id','purchase_date','warranty','vendor','model','admin','business','os','cpu','ram','disk']
asset_dict = {}
for i in assets:
asset_dict['_'+i] = request.form.get(i, '')
    # Validate the asset information
_is_ok, _error = asset.validate_update(asset_dict)
_status = None
if _is_ok:
if asset.update(asset_dict,_id):
_status = '更新资产成功!'
else:
_status = '更新资产失败!'
return json.dumps({'is_ok': _is_ok, 'status': _status, 'error': _error}) | 51reboot/actual_09_homework | 09/tanshuai/cmdb_v6/user/views.py | Python | mit | 8,533 | 0.006442 |
import distutils.version
try:
import greenlet
getcurrent = greenlet.greenlet.getcurrent
GreenletExit = greenlet.greenlet.GreenletExit
preserves_excinfo = (distutils.version.LooseVersion(greenlet.__version__)
>= distutils.version.LooseVersion('0.3.2'))
greenlet = greenlet.greenlet
except ImportError, e:
raise
try:
from py.magic import greenlet
getcurrent = greenlet.getcurrent
GreenletExit = greenlet.GreenletExit
preserves_excinfo = False
except ImportError:
try:
from stackless import greenlet
getcurrent = greenlet.getcurrent
GreenletExit = greenlet.GreenletExit
preserves_excinfo = False
except ImportError:
try:
from support.stacklesss import greenlet, getcurrent, GreenletExit
preserves_excinfo = False
(greenlet, getcurrent, GreenletExit) # silence pyflakes
except ImportError, e:
raise ImportError("Unable to find an implementation of greenlet.")
| ioram7/keystone-federado-pgid2013 | build/eventlet/eventlet/support/greenlets.py | Python | apache-2.0 | 1,085 | 0.003687 |
from mapwidgets.widgets import GooglePointFieldWidget
from miot.models import PointOfInterest, Page, Profile
from django import forms
class PointOfInterestForm(forms.ModelForm):
'''The form for a point of interest.'''
class Meta:
model = PointOfInterest
fields = ("name", "featured_image", "position", "tags", "active", "category")
widgets = {
'position': GooglePointFieldWidget,
}
class PageForm(forms.ModelForm):
'''The form for a page.'''
class Meta:
model = Page
fields = ("title", "content", "template")
class EstimoteAppForm(forms.ModelForm):
'''The form for a profile update (estimote credentials).'''
class Meta:
model = Profile
fields = ("estimote_app_id", "estimote_app_token")
| Ishydo/miot | miot/forms.py | Python | mit | 791 | 0.005057 |
import os
import string
#Enter your username and password below within double quotes
# eg. username="username" and password="password"
username="username"
password="password"
com="wget -O - https://"+username+":"+password+"@mail.google.com/mail/feed/atom --no-check-certificate"
temp=os.popen(com)
msg=temp.read()
index=string.find(msg,"<fullcount>")
index2=string.find(msg,"</fullcount>")
fc=int(msg[index+11:index2])
if fc==0:
print "0 new"
else:
print str(fc)+" new"
| maximilianofaccone/puppy-siberian | root/.conky/gmail.py | Python | gpl-3.0 | 480 | 0.03125 |
from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/')
def index():
# return 'hello flask'
# return '<input type="button" value="click me!!">'
# return '<input type="text">'
# return '<input type="password">'
# return '<input type="date">'
# return '<input type="color">'
# return '<input type="checkbox">'
return render_template('index.html')
# @app.route('/reboot')
@app.route('/reboot', methods=['GET'])
def reboot():
# http://10.1.1.8:9092/reboot?name=abcb
name = request.args.get('name')
age = request.args.get('age')
# print type(request.args)
# print request.args
# http://10.1.1.8:9092/reboot?name=abcb&age=15
return 'hello reboot, name: %s, age: %s' % (name, age)
@app.route('/login')
def login():
user = request.args.get('user')
pwd = request.args.get('pwd')
res = ''
lines = []
user_dict = {}
try:
with open('user.txt') as f:
lines = f.readlines()
except:
return -1
for line in lines:
line = line.strip()
name = line.split(' ')[0]
passwd = line.split(' ')[1]
user_dict[name] = passwd
if user in user_dict:
if str(pwd) == user_dict[user]:
res = "yes, Login."
else:
res = "password is wrong, %s, %s" % (pwd, user_dict[user])
else:
res = "user name not exist."
# if user == 'admin' and pwd == 'admin':
# res = 'ok'
# else:
# res = 'no'
return res
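# Note (added for clarity): login() above expects a plain-text "user.txt" in
# the working directory with one "username password" pair per line, e.g.
# (hypothetical entries):
#
#   admin admin
#   tom 123456
#
# A matching request would then be: /login?user=admin&pwd=admin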
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9092, debug=True)
| seerjk/reboot06 | 06/flask_web.py | Python | mit | 1,609 | 0.003108 |
# -*- coding: utf-8 -*-
"""The LVM path specification resolver helper implementation."""
# This is necessary to prevent a circular import.
import dfvfs.file_io.lvm_file_io
import dfvfs.vfs.lvm_file_system
from dfvfs.lib import definitions
from dfvfs.resolver import resolver
from dfvfs.resolver import resolver_helper
class LVMResolverHelper(resolver_helper.ResolverHelper):
"""Class that implements the Logical Volume Manager (LVM) resolver helper."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_LVM
def NewFileObject(self, resolver_context):
"""Creates a new file-like object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
Returns:
The file-like object (instance of file_io.FileIO).
"""
return dfvfs.file_io.lvm_file_io.LVMFile(resolver_context)
def NewFileSystem(self, resolver_context):
"""Creates a new file system object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
Returns:
The file system object (instance of vfs.FileSystem).
"""
return dfvfs.vfs.lvm_file_system.LVMFileSystem(resolver_context)
resolver.Resolver.RegisterHelper(LVMResolverHelper())
| dc3-plaso/dfvfs | dfvfs/resolver/lvm_resolver_helper.py | Python | apache-2.0 | 1,206 | 0.003317 |
"""Gopher protocol client interface."""
__all__ = ["send_selector","send_query"]
# Default selector, host and port
DEF_SELECTOR = '1/'
DEF_HOST = 'gopher.micro.umn.edu'
DEF_PORT = 70
# Recognized file types
A_TEXT = '0'
A_MENU = '1'
A_CSO = '2'
A_ERROR = '3'
A_MACBINHEX = '4'
A_PCBINHEX = '5'
A_UUENCODED = '6'
A_INDEX = '7'
A_TELNET = '8'
A_BINARY = '9'
A_DUPLICATE = '+'
A_SOUND = 's'
A_EVENT = 'e'
A_CALENDAR = 'c'
A_HTML = 'h'
A_TN3270 = 'T'
A_MIME = 'M'
A_IMAGE = 'I'
A_WHOIS = 'w'
A_QUERY = 'q'
A_GIF = 'g'
A_HTML = 'h' # HTML file
A_WWW = 'w' # WWW address
A_PLUS_IMAGE = ':'
A_PLUS_MOVIE = ';'
A_PLUS_SOUND = '<'
_names = dir()
_type_to_name_map = {}
def type_to_name(gtype):
"""Map all file types to strings; unknown types become TYPE='x'."""
global _type_to_name_map
if _type_to_name_map=={}:
for name in _names:
if name[:2] == 'A_':
_type_to_name_map[eval(name)] = name[2:]
if gtype in _type_to_name_map:
return _type_to_name_map[gtype]
return 'TYPE=' + `gtype`
# Names for characters and strings
CRLF = '\r\n'
TAB = '\t'
def send_selector(selector, host, port = 0):
"""Send a selector to a given host and port, return a file with the reply."""
import socket
if not port:
i = host.find(':')
if i >= 0:
host, port = host[:i], int(host[i+1:])
if not port:
port = DEF_PORT
elif type(port) == type(''):
port = int(port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.sendall(selector + CRLF)
s.shutdown(1)
return s.makefile('rb')
def send_query(selector, query, host, port = 0):
"""Send a selector and a query string."""
return send_selector(selector + '\t' + query, host, port)
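# Usage sketch (added for illustration; the host is only an example of a
# public gopher server):
#
#   f = send_selector('1/', 'gopher.floodgap.com')
#   for entry in get_directory(f):
#       print entry
#
# send_selector() returns a file-like object wrapping the raw reply, which the
# get_directory()/get_textfile()/get_binary() helpers below know how to parse.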
def path_to_selector(path):
"""Takes a path as returned by urlparse and returns the appropriate selector."""
if path=="/":
return "/"
else:
return path[2:] # Cuts initial slash and data type identifier
def path_to_datatype_name(path):
"""Takes a path as returned by urlparse and maps it to a string.
See section 3.4 of RFC 1738 for details."""
if path=="/":
# No way to tell, although "INDEX" is likely
return "TYPE='unknown'"
else:
return type_to_name(path[1])
# The following functions interpret the data returned by the gopher
# server according to the expected type, e.g. textfile or directory
def get_directory(f):
"""Get a directory in the form of a list of entries."""
list = []
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if not line:
print '(Empty line from server)'
continue
gtype = line[0]
parts = line[1:].split(TAB)
if len(parts) < 4:
print '(Bad line from server:', `line`, ')'
continue
if len(parts) > 4:
if parts[4:] != ['+']:
print '(Extra info from server:',
print parts[4:], ')'
else:
parts.append('')
parts.insert(0, gtype)
list.append(parts)
return list
def get_textfile(f):
"""Get a text file as a list of lines, with trailing CRLF stripped."""
list = []
get_alt_textfile(f, list.append)
return list
def get_alt_textfile(f, func):
"""Get a text file and pass each line to a function, with trailing CRLF stripped."""
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if line[:2] == '..':
line = line[1:]
func(line)
def get_binary(f):
"""Get a binary file as one solid data block."""
data = f.read()
return data
def get_alt_binary(f, func, blocksize):
"""Get a binary file and pass each block to a function."""
while 1:
data = f.read(blocksize)
if not data:
break
func(data)
def test():
"""Trivial test program."""
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], '')
selector = DEF_SELECTOR
type = selector[0]
host = DEF_HOST
if args:
host = args[0]
args = args[1:]
if args:
type = args[0]
args = args[1:]
if len(type) > 1:
type, selector = type[0], type
else:
selector = ''
if args:
selector = args[0]
args = args[1:]
query = ''
if args:
query = args[0]
args = args[1:]
if type == A_INDEX:
f = send_query(selector, query, host)
else:
f = send_selector(selector, host)
if type == A_TEXT:
list = get_textfile(f)
for item in list: print item
elif type in (A_MENU, A_INDEX):
list = get_directory(f)
for item in list: print item
else:
data = get_binary(f)
print 'binary data:', len(data), 'bytes:', `data[:100]`[:40]
# Run the test when run as script
if __name__ == '__main__':
test()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.3/Lib/gopherlib.py | Python | mit | 5,564 | 0.010065 |
# coding=utf-8
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
from models import WorkflowBase, State
from utils import get_wf_dict_value
def create_transition_method(transition_name, transition_condition=''):
def transition_method(self, user, comment=None):
transition = transition_name.lower()
checker_name = "check_%s" % transition.replace(' ', '_')
# default conditional method
checker = getattr(self, checker_name, None)
# specific conditional method
condition_method = getattr(self, transition_condition, None)
checked = (not checker or checker(user) is True) and (not condition_method or condition_method(user) is True)
return self.do_transition(transition_name, user, comment) if checked else checked
return transition_method
def create_state_method(state_name):
def state_method(self):
try:
state = State.objects.get(name=state_name, workflow=self.get_workflow())
except State.DoesNotExist:
return False
return self.get_state() == state
return state_method
def create_manager_state_method(state_name):
def manager_state_method(self):
queryset_method = getattr(self.get_queryset(), state_name.lower(), None)
return queryset_method() if queryset_method else self.get_queryset()
return manager_state_method
def create_queryset_state_method(state_name):
def queryset_state_method(self):
return self.filter(current_state__name=state_name)
return queryset_state_method
def create_manager_get_queryset_method(manager, queryset_mixin):
def manager_get_queryset_method(self):
original_queryset = manager.get_queryset()
queryset_class = original_queryset.__class__
class ExtendedQuerySet(queryset_mixin, queryset_class):
pass
new_queryset = ExtendedQuerySet(self.model, using=self._db)
new_queryset.query = original_queryset.query.clone()
return new_queryset
return manager_get_queryset_method
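# Illustrative sketch (added for clarity; the app, model and state names are
# hypothetical): workflow_enabled below reads its configuration from
# settings.WORKFLOWS, keyed by the dotted path of the decorated model, e.g.
#
#   WORKFLOWS = {
#       'myapp.models.Task': {
#           'name': 'task_workflow',
#           'initial_state': {'name': 'Draft'},
#           'states': [{'name': 'Draft'}, {'name': 'Published'}],
#           'transitions': [{'name': 'Publish'}],
#       },
#   }
#
#   @workflow_enabled
#   class Task(models.Model):
#       ...
#
# The decorated model then gains is_draft/is_published properties, a
# do_publish(user) transition method, and Task.objects.draft()/.published()
# manager shortcuts built by the helpers above.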
def workflow_enabled(cls):
if models.Model not in cls.__mro__:
raise ImproperlyConfigured(_('The decorator "workflow_enabled" only is applied to subclasses of Django Model'))
bases = list(cls.__bases__)
if not WorkflowBase in bases:
bases.insert(0, WorkflowBase)
cls.__bases__ = tuple(bases)
current_state = models.ForeignKey(State, verbose_name=_(u"State"), name='current_state', null=True, blank=True)
current_state.contribute_to_class(cls=cls, name='current_state')
workflows_settings = getattr(settings, 'WORKFLOWS', {})
wf_item = workflows_settings.get("%s.%s" % (cls.__module__, cls.__name__), None)
try:
wf_name = wf_item['name']
except KeyError:
raise ImproperlyConfigured('The attribute or key (name), must be specified in the workflow configuration.')
# building transition methods
transitions = get_wf_dict_value(wf_item, 'transitions', wf_name)
for transition in transitions:
name = get_wf_dict_value(transition, 'name', wf_name, 'transitions')
condition = transition.get('condition', '')
# building method name
method_name = "do_%s" % name.lower().replace(' ', '_')
# building method
cls_transition_method = getattr(cls, method_name, None)
if not cls_transition_method:
setattr(cls, method_name, create_transition_method(name, condition))
class CustomQuerySetMixin(object):
pass
class CustomManagerMixin(object):
def get_queryset(self):
return CustomQuerySetMixin(self.model, using=self._db)
cls._default_manager = CustomManagerMixin()
# building state methods
initial_state = get_wf_dict_value(wf_item, 'initial_state', wf_name)
initial_state_name = get_wf_dict_value(initial_state, 'name', wf_name, 'initial_state')
# building instance method
instance_method_name = "is_%s" % initial_state_name.lower().replace(' ', '_')
cls_instance_method = getattr(cls, instance_method_name, None)
if not cls_instance_method:
setattr(cls, instance_method_name, property(create_state_method(initial_state_name)))
# building manager method
manager_method_name = "%s" % initial_state_name.lower().replace(' ', '_')
cls_manager_method = getattr(CustomManagerMixin, manager_method_name, None)
if not cls_manager_method:
setattr(CustomManagerMixin, manager_method_name, create_manager_state_method(initial_state_name))
cls_queryset_method = getattr(CustomQuerySetMixin, manager_method_name, None)
if not cls_queryset_method:
setattr(CustomQuerySetMixin, manager_method_name, create_queryset_state_method(initial_state_name))
states = get_wf_dict_value(wf_item, 'states', wf_name)
for state in states:
state_name = get_wf_dict_value(state, 'name', wf_name, 'states')
# building method
method_name = "is_%s" % state_name.lower().replace(' ', '_')
cls_state_method = getattr(cls, method_name, None)
if not cls_state_method:
setattr(cls, method_name, property(create_state_method(state_name)))
# building manager method
manager_method_name = "%s" % state_name.lower().replace(' ', '_')
cls_manager_method = getattr(CustomManagerMixin, manager_method_name, None)
if not cls_manager_method:
setattr(CustomManagerMixin, manager_method_name, create_manager_state_method(state_name))
cls_queryset_method = getattr(CustomQuerySetMixin, manager_method_name, None)
if not cls_queryset_method:
setattr(CustomQuerySetMixin, manager_method_name, create_queryset_state_method(state_name))
# extending manager
cls._meta.concrete_managers.sort()
managers = [(mgr_name, manager) for order, mgr_name, manager in cls._meta.concrete_managers]
setattr(cls, '_default_manager', None) # clean the default manager
setattr(cls._meta, 'concrete_managers', []) # clean the managers
for mgr_name, manager in managers:
class ExtendedManager(CustomManagerMixin, manager.__class__):
pass
setattr(ExtendedManager, 'get_queryset', create_manager_get_queryset_method(manager, CustomQuerySetMixin))
cls.add_to_class(mgr_name, ExtendedManager())
return cls | suselrd/django-wflow | workflows/decorators.py | Python | bsd-3-clause | 6,481 | 0.003857 |
__version__ = '0.5.0.dev0+git'
| bjodah/pycompilation | pycompilation/_release.py | Python | bsd-2-clause | 31 | 0 |
# -*- coding: utf-8 -*-
import logging
import os
logger = logging.getLogger(__name__)
class KerberosCacheMiddleware(object):
"""Reloads the KRB5CCNAME environmental variable from the session for potential use in future
LDAP requests.
For a login request, the KRB5CCNAME environmental variable has
already been set in the authentication backend, but for all other
requests, it must be reset from the Kerberos cache stored in a
user's session. Otherwise all requests to a a particular Gunicorn
worker would use the Kerberos cache of the user who most recently
logged in through that worker.
The environmental variable must be set by middleware so it is
available for requests to any view and so each view does not have
to load the environmental variable. The LDAP wrapper
(intranet.db.ldap_db) cannot set the environmental variable because
it does not have access to the current session (request.session).
"""
def process_request(self, request):
"""Propogate KRB5CCNAME session variable to the environmental variable."""
if "KRB5CCNAME" in request.session:
# It is important to check that the environmental variable
            # matches the session variable, because environment variables
            # persist on the worker between requests.
if "KRB5CCNAME" in os.environ:
if os.environ["KRB5CCNAME"] != request.session["KRB5CCNAME"]:
logger.debug("Reloading KRB5CCNAME environmental variable from session.")
os.environ["KRB5CCNAME"] = request.session["KRB5CCNAME"]
else:
logger.debug("KRB5CCNAME environmental variable not set - setting it to KRB5CCNAME from session vars.")
os.environ["KRB5CCNAME"] = request.session["KRB5CCNAME"]
return None
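# Usage note (added for illustration; the exact setting name depends on the
# project's Django version): the middleware is activated by listing its dotted
# path in the middleware settings, e.g.
#
#   MIDDLEWARE_CLASSES = (
#       ...,
#       'intranet.middleware.environment.KerberosCacheMiddleware',
#   )
#
# so every request restores the per-user Kerberos cache before LDAP queries run.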
| jacobajit/ion | intranet/middleware/environment.py | Python | gpl-2.0 | 1,848 | 0.002165 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import json
import random
import urllib
import logging
import argparse
import coloredlogs
from pyquery import PyQuery
BASEDIR = os.path.dirname(os.path.abspath(__file__))
OUTPUTDIR = os.path.join(BASEDIR, 'data/output')
coloredlogs.install()
class Topic(object):
"""Topic class is used for representing Topic on Zhihu"""
def __init__(self, name, id_):
"""Init topic object with name and id
:name: name of topic
:id_: id of topic
"""
self._name = name
self._id = id_
def __unicode__(self):
return '[topic: %s (%d)]' % (self.name, self.id)
def __repr__(self):
return unicode(self)
@property
def name(self):
return self._name
@property
def id(self):
return self._id
@property
def url(self):
return 'http://www.zhihu.com/topic/%d/questions' % self._id
@property
def filepath(self):
return os.path.join(OUTPUTDIR, '%d.json' % self.id)
@property
def finished(self):
return os.path.exists(self.filepath)
def url_for_page(self, page_number):
if page_number <= 1: return self.url
return self.url + '?' + urllib.urlencode({'page': page_number})
def get_question(self, item):
subtopicdom = item.children('.subtopic a')
subtopic = subtopicdom.text().strip()
subtopicid = int(subtopicdom.attr('href').split('/')[2]) if subtopicdom.attr('href') else self.id
titledom = item.children('.question-item-title a')
title = titledom.text().strip()
questionid = int(titledom.attr('href').split('/')[2])
logging.debug('question: %s(%d)' % (title, questionid))
return {
'id': questionid,
'title': title,
'subtopic': {
'title': subtopic,
'id': subtopicid,
},
}
def get_questions(self, page):
logging.info('processing: %s (page %d)' % (self, page))
url = self.url_for_page(page)
logging.debug('fetching: %s' % url)
items = PyQuery(url)('.feed-item')
return [self.get_question(PyQuery(item)) for item in items]
def persist(self, count=400):
if self.finished:
logging.info("skipped %s" % self)
return
page = 1
questions = []
logging.info("start fetching %s" % self)
while len(questions) < count and page < 100:
try:
questions.extend(self.get_questions(page))
except Exception, e:
logging.error("failed to fetch and parse %s(page %d)" % (self, page))
logging.exception(e)
logging.debug("skipped %s(page %d)" % (self, page))
finally:
page += 1
wait = random.randint(5, 20)
logging.debug('wait for %d seconds' % wait)
time.sleep(wait)
if len(questions) == 0:
logging.error("failed to fetch or parse %s" % self)
return
obj = {
'id': self.id,
'name': self.name,
'questions': questions,
}
logging.info('saving data for %s' % self)
logging.debug('writing path: %s' % self.filepath)
with open(self.filepath, 'w') as f:
json.dump(obj, f)
def readtopics(path):
topics = []
with open(path) as f:
for l in f.readlines():
l = l.decode('utf8').strip()
if not l: continue
topicpair = l.split()
topics.append((topicpair[0], int(topicpair[1])))
return topics
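# Note (added for clarity): the topics file passed on the command line is a
# plain-text list with one whitespace-separated "name id" pair per line, e.g.
# (hypothetical values):
#
#   machine-learning 19559450
#   python 19552832
#
# Each pair becomes a Topic(name, id) that is crawled and written to
# data/output/<id>.json.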
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="The file which contains the topics to be processed")
args = parser.parse_args()
if args.filename.strip():
if not os.path.isdir(OUTPUTDIR):
logging.debug('making output directory: %s' % OUTPUTDIR)
os.mkdir(OUTPUTDIR)
topics = readtopics(args.filename.strip())
logging.info('%d topics to process' % len(topics))
for tname, tid in topics:
topic = Topic(tname, tid)
topic.persist()
| shanzi/thesiscode | topiccrawler.py | Python | bsd-2-clause | 4,289 | 0.001632 |
# -*- coding: utf-8 -*-
#
# spherepy documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 7 21:35:42 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import mock
MOCK_MODULES = ['numpy','six','six.moves','matplotlib','_csphi']
for mod in MOCK_MODULES:
sys.modules[mod] = mock.Mock()
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
#print(sys.path)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'spherepy'
copyright = u'2015, Randy Direen, James Direen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_sidebars = {
'**': ['localtoc.html', 'searchbox.html'],
'using/windows': ['windowssidebar.html', 'searchbox.html'],
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {
# "collapsiblesidebar": "true"
#}
html_theme_options = {
'navbar_title': "SpherePy",
'navbar_site_name': "Site",
'navbar_links': [
("DireenTech", "http://www.direentech.com", True),
],
'navbar_sidebarrel': False,
'navbar_pagenav': True,
'navbar_pagenav_name': "This Page",
'globaltoc_depth': 2,
'globaltoc_includehidden': "true",
'navbar_class': "navbar",
'source_link_position': "nfooter",
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = "_static/logo_spherepy.png"
#PUT COOL PICTURE NEXT TO SPHEREPY AT TOP LEFT
#html_logo = "_static/icon_spherepy.ico"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/icon_spherepy.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'spherepydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'spherepy.tex', u'spherepy Documentation',
u'Randy Direen, James Direen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'spherepy', u'spherepy Documentation',
[u'Randy Direen, James Direen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'spherepy', u'spherepy Documentation',
u'Randy Direen, James Direen', 'spherepy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| rdireen/spherepy | documentation/source/conf.py | Python | gpl-3.0 | 9,496 | 0.008951 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for relu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_relu_tests(options):
"""Make a set of tests to do relu."""
# Chose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
"fully_quantize": [True, False],
"input_range": [(-8, 8)]
}]
def build_graph(parameters):
input_tensor = tf.compat.v1.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.relu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
min_value, max_value = parameters["input_range"]
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value, max_value)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| arborh/tensorflow | tensorflow/lite/testing/op_tests/relu.py | Python | apache-2.0 | 2,072 | 0.002896 |
from AppKit import NSDocument
from PyObjCTools import AppHelper
from tinyTextEditorDocumentWindow import TinyTextEditorDocumentWindow
class TinyTextEditorDocument(NSDocument):
def init(self):
self = super(TinyTextEditorDocument, self).init()
self.vanillaWindowController = TinyTextEditorDocumentWindow()
self.vanillaWindowController.assignToDocument(self)
return self
def readFromFile_ofType_(self, path, tp):
# refer to the NSDocument reference for information about this method
f = open(path, 'rb')
text = f.read()
f.close()
self.vanillaWindowController.setText(text)
return True
def writeWithBackupToFile_ofType_saveOperation_(self, fileName, fileType, operation):
# refer to the NSDocument reference for information about this method
text = self.vanillaWindowController.getText()
f = open(fileName, 'wb')
f.write(text)
f.close()
return True
if __name__ == "__main__":
AppHelper.runEventLoop() | bitforks/vanilla | Demos/TinyTextEditor/TinyTextEditor.py | Python | mit | 1,067 | 0.005623 |
#
#
# Copyright 2011,2013 Luis Ariel Vega Soliz, Uremix (http://www.uremix.org) and contributors.
#
#
# This file is part of UADH (Uremix App Developer Helper).
#
# UADH is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UADH is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with UADH. If not, see <http://www.gnu.org/licenses/>.
#
#
'''
Created on 08/09/2012
@author: Luis Ariel Vega Soliz (ariel.vega@uremix.org)
@contact: Uremix Team (http://uremix.org)
''' | arielvega/uremix-app-developer-helper | src/uadh/gui/tkinter/__init__.py | Python | gpl-3.0 | 936 | 0.003205 |
#ImportModules
import ShareYourSystem as SYS
#Define and set with #key dict for the KeyVariable
MySetter=SYS.SetterClass(
).set(
'MyStr',
'HelloStr'
).set(
{'#key':"MyStr"},
"hello"
)
#Define and set with a #get in the value
MySetter.set(
"FirstCloneMyStr",
'#get:MyStr'
)
#Define and set with a recursive #get in the value
MySetter.set(
"FirstCloneHelloStr",
'#get:#get:MyStr'
)
#Define and set with a #value dict for the ValueVariable
MySetter.set(
"RedirectStr",
{'#value':'MyStr'}
)
#Define and set with a #value dict for the ValueVariable
MySetter.set(
"MyDict",
{'#value':{'MyInt':0}}
)
#Define and set with a #value:#get dict for the ValueVariable
MySetter.set(
"SecondCloneStr",
{'#value:#get':'MyStr'}
)
#Define and set with a #value:#map@get dict for the ValueVariable
MySetter.set(
"MyList",
{'#value:#map@get':['MyStr','MyInt','#direct:FooStr']}
)
#Define and set with a #value:#map@get dict for the ValueVariable
MySetter.set(
MySetter.MyList.append,
{'#value':'MyStr'}
)
#Define and set with a #value:#map@get dict for the ValueVariable
MySetter.set(
MySetter.MyList.append,
{'#value:#map@get':['MyInt']}
)
#print
print('MySetter is ')
SYS._print(MySetter)
| Ledoux/ShareYourSystem | Pythonlogy/build/lib/ShareYourSystem/Standards/Itemizers/Setter/11_ExampleDoc.py | Python | mit | 1,233 | 0.042985 |