text stringlengths 6–947k | repo_name stringlengths 5–100 | path stringlengths 4–231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6–947k | score float64 0–0.34 |
---|---|---|---|---|---|---
# -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class ModelExportDocumentItem(models.Model):
_description = 'Model Export Document Item'
_name = "clv.model_export.document_item"
_order = "sequence"
name = fields.Char(string='Alias', index=False, required=False)
model_export_id = fields.Many2one(
comodel_name='clv.model_export',
string='Model Export',
ondelete='restrict'
)
document_item_id = fields.Many2one(
comodel_name='clv.document.item',
string='Document Item',
ondelete='restrict',
domain="[('document_type_id','!=','False')]"
)
document_item_code = fields.Char(
string='Item Code',
related='document_item_id.code',
store=False
)
document_item_document_type_id = fields.Many2one(
string='Item Type',
related='document_item_id.document_type_id',
store=True
)
document_item_name = fields.Char(
string='Item',
related='document_item_id.name',
store=False
)
sequence = fields.Integer(
string='Sequence',
default=10
)
model_export_display = fields.Boolean(string='Display in Export', default=True)
class ModelExport(models.Model):
_inherit = 'clv.model_export'
use_document_items = fields.Boolean(string='Use Document Items', default=False)
model_export_document_item_ids = fields.One2many(
comodel_name='clv.model_export.document_item',
inverse_name='model_export_id',
string='Model Export Document Items'
)
count_model_export_document_items = fields.Integer(
string='Model Export Document Items (count)',
compute='_compute_count_model_export_document_item',
store=True
)
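    # Stored computed field: Odoo recomputes it through the @api.depends
    # trigger below whenever the One2many changes, and storing it makes the
    # count searchable and groupable.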
@api.depends('model_export_document_item_ids')
def _compute_count_model_export_document_item(self):
for r in self:
r.count_model_export_document_items = len(r.model_export_document_item_ids)
| CLVsol/clvsol_odoo_addons | clv_document_export/models/model_export_document_item.py | Python | agpl-3.0 | 2,203 | 0.001362 |
# -*- coding: utf-8 -*-
"""
A dummy executor that processes Lemonade jobs and only returns fake statuses and
data. Used to test Stand clients in an integration test.
"""
import datetime
import json
import logging
import logging.config
import random
import eventlet
import socketio
from flask_script import Manager
# Logging configuration
from sqlalchemy import and_
from stand.factory import create_app, create_redis_store
from stand.models import Job, StatusExecution, db, JobStep, JobStepLog
app = create_app(log_level=logging.WARNING)
redis_store = create_redis_store(app)
manager = Manager(app)
MESSAGES = [
"The greatest discovery of my generation is that a human being can alter "
"his life by altering his attitudes of mind.",
"Human beings, by changing the inner attitudes of their minds, can change "
"the outer aspects of their lives.",
"Complaining is good for you as long as you're not complaining to the "
"person you're complaining about.",
"Education is what survives when what has been learned has been forgotten.",
"A man's character is his fate.",
"The farther behind I leave the past, the closer I am to forging my own "
"character.",
"All our dreams can come true, if we have the courage to pursue them.",
"Always remember that you are absolutely unique. Just like everyone else. ",
"A woman's mind is cleaner than a man's: She changes it more often. ",
"I can resist everything except temptation. "
]
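# Simulated lifecycle: pop a job description from the Redis list
# 'queue_start', emit 'update job'/'update task' (and 'task result') events
# to the job's room in the '/stand' namespace, and mirror the status
# transitions into the database.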
@manager.command
def simulate():
logging.config.fileConfig('logging_config.ini')
logger = logging.getLogger(__name__)
# ap = argparse.ArgumentParser()
# ap.add_argument('-c', '')
mgr = socketio.RedisManager(app.config.get('REDIS_URL'), 'job_output')
statuses = [StatusExecution.RUNNING,
# StatusExecution.CANCELED, StatusExecution.ERROR,
# StatusExecution.PENDING, StatusExecution.INTERRUPTED,
StatusExecution.WAITING, StatusExecution.COMPLETED]
while True:
try:
_, job_json = redis_store.blpop('queue_start')
job = json.loads(job_json)
logger.debug('Simulating workflow %s with job %s',
job.get('workflow_id'), job.get('job_id'))
eventlet.sleep(3)
for k in ['job_id']:
if k in job:
logger.info('Room for %s', k)
room = str(job[k])
mgr.emit('update job',
data={'message': random.choice(MESSAGES),
'status': StatusExecution.RUNNING,
'id': job['workflow_id']},
room=room, namespace="/stand")
job_entity = Job.query.get(job.get('job_id'))
job_entity.status = StatusExecution.RUNNING
job_entity.finished = datetime.datetime.utcnow()
db.session.add(job_entity)
db.session.commit()
for task in job.get('workflow', {}).get('tasks', []):
if task['operation']['id'] == 25: # comment
continue
job_step_entity = JobStep.query.filter(and_(
JobStep.job_id == job.get('job_id'),
JobStep.task_id == task['id'])).first()
# Updates task in database
try:
job_step_entity.status = StatusExecution.RUNNING
job_step_entity.logs.append(JobStepLog(
level='WARNING', date=datetime.datetime.now(),
message=random.choice(MESSAGES)))
db.session.add(job_step_entity)
db.session.commit()
except Exception as ex:
logger.error(ex)
for k in ['job_id']:
if k in job:
logger.info('Room for %s and task %s', k,
task.get('id'))
room = str(job[k])
mgr.emit('update task',
data={'message': random.choice(MESSAGES),
'status': random.choice(statuses[:-2]),
'id': task.get('id')}, room=room,
namespace="/stand")
eventlet.sleep(random.randint(2, 5))
for k in ['job_id']:
if k in job:
room = str(job[k])
mgr.emit('update task',
data={'message': random.choice(MESSAGES),
'status': StatusExecution.COMPLETED,
'id': task.get('id')}, room=room,
namespace="/stand")
# Updates task in database
try:
# Visualizations
if task['operation']['id'] in [35, 68, 69, 70, 71]:
# import pdb
# pdb.set_trace()
for k in ['job_id']:
room = str(job[k])
mgr.emit('task result',
data={'msg': 'Result generated',
'status': StatusExecution.COMPLETED,
'id': task['id'],
'task': {'id': task['id']},
'title': 'Table with results',
'type': 'VISUALIZATION',
'operation': {
'id': task['operation']['id']},
'operation_id':
task['operation']['id']},
room=room,
namespace="/stand")
#
# result = JobResult(task_id=task['id'],
# title="Table with results",
# operation_id=task['operation']['id'],
# type=ResultType.VISUALIZATION, )
# logger.info('Result created for job %s', job['job_id'])
# job_entity.results.append(result)
job_step_entity.status = StatusExecution.COMPLETED
job_step_entity.logs.append(JobStepLog(
level='WARNING', date=datetime.datetime.now(),
message=random.choice(MESSAGES)))
db.session.add(job_step_entity)
except Exception as ex:
logger.error(ex)
# eventlet.sleep(5)
for k in ['job_id']:
if k in job:
logger.info('Room for %s', k)
room = str(job[k])
mgr.emit('update job',
data={'message': random.choice(MESSAGES),
'status': StatusExecution.COMPLETED,
'finished': job_entity.finished.isoformat(),
'id': job['job_id']},
room=room, namespace="/stand")
if job_entity:
job_entity.status = StatusExecution.COMPLETED
db.session.add(job_entity)
db.session.commit()
except KeyError as ke:
logger.error('Invalid json? KeyError: %s', ke)
raise
except Exception as ex:
            logger.error(ex)
raise
logger.info('Simulation finished')
if __name__ == "__main__":
manager.run()
| eubr-bigsea/stand | stand/util/dummy_executor.py | Python | apache-2.0 | 7,975 | 0.000502 |
# -*- Mode: python; coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
#
# Copyright (C) 2012 - fossfreedom
# Copyright (C) 2012 - Agustin Carrasco
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
from gi.repository import GObject
from gi.repository import GLib
from coverart_widgets import AbstractView
class ListShowingPolicy(GObject.Object):
    '''
    Policy that mostly takes care of how and when things should be shown on
    the view that makes use of the `AlbumsModel`.
    '''
def __init__(self, list_view):
super(ListShowingPolicy, self).__init__()
self.counter = 0
self._has_initialised = False
def initialise(self, album_manager):
if self._has_initialised:
return
self._has_initialised = True
class ListView(AbstractView):
__gtype_name__ = "ListView"
name = 'listview'
use_plugin_window = False
def __init__(self):
super(ListView, self).__init__()
self.view = self
self._has_initialised = False
self.show_policy = ListShowingPolicy(self)
def initialise(self, source):
if self._has_initialised:
return
self._has_initialised = True
self.view_name = "list_view"
super(ListView, self).initialise(source)
# self.album_manager = source.album_manager
self.shell = source.shell
def switch_to_view(self, source, album):
self.initialise(source)
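        # Defer the page-tree selection to the GTK main loop instead of
        # selecting synchronously during the view switch.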
GLib.idle_add(self.shell.props.display_page_tree.select,
self.shell.props.library_source)
def get_selected_objects(self):
'''
finds what has been selected
returns an array of `Album`
'''
return []
| fossfreedom/coverart-browser | coverart_listview.py | Python | gpl-3.0 | 2,367 | 0 |
letras = []
i = 1
while i <= 10:
    letras.append(input("letter: "))
    i += 1
i = 0
cont = 0
while i <= 9:
    if letras[i] not in 'aeiou':  # anything that is not a lowercase vowel counts
        cont += 1
    i += 1
print("%d consonants were read" % cont)
| andersonsilvade/python_C | Python32/aulas/letras.py | Python | mit | 199 | 0.045226 |
#!/cshome/pranjali/410/CMPUT410-Lab6-Django/v1/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| pranner/CMPUT410-Lab6-Django | v1/bin/django-admin.py | Python | apache-2.0 | 164 | 0 |
# -*- coding: utf8 -*-
#
# National Bureau of Statistics of China: administrative division codes (source text):
#
# http://www.stats.gov.cn/tjsj/tjbz/xzqhdm/
#
from __future__ import print_function
from collections import OrderedDict
class Area(object):
level = None
def __init__(self, code, name, parent=None):
'''use unicode name'''
self.code = int(code)
try:
self.name = name.decode('utf8')
except UnicodeEncodeError:
self.name = name
self.parent = parent
def __unicode__(self):
return u'%6d %1d %6d %s' % (self.code,
self.level, self.parent.code, self.name)
def __str__(self):
return unicode(self).encode('utf8')
class ContainerArea(Area):
sub_unit_kls = None
def __init__(self, code, name, parent=None):
super(ContainerArea, self).__init__(code, name, parent)
self.subunits = OrderedDict()
def add_sub_unit(self, code, name):
code = int(code)
unit = self.sub_unit_kls(code, name, self)
self.subunits[code] = unit
def get_sub_unit(self, code):
code = int(code)
return self.subunits[code]
def __unicode__(self):
ret = []
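        # Two placeholder rows exist in the official table: u'市辖区'
        # ("municipal districts") is printed under its parent city's name,
        # and bare u'县' ("county") rows are not printed themselves
        # (their sub-units still are).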
if self.name == u'市辖区':
ret.append(u'%6d %1d %6d %s' % (self.code,
self.level, self.parent.code, self.parent.name))
elif self.name != u'县':
ret.append(u'%6d %1d %6d %s' % (self.code,
self.level, self.parent.code, self.name))
for code, unit in self.subunits.items():
ret.append(unicode(unit))
return u'\n'.join(ret)
class County(Area):
level = 3
class City(ContainerArea):
level = 2
sub_unit_kls = County
class Province(ContainerArea):
level = 1
sub_unit_kls = City
class Nation(ContainerArea):
sub_unit_kls = Province
def __init__(self):
super(Nation, self).__init__(0, u'中国', None)
def load(self, istream):
for line in istream:
self.parse_entry(line)
def parse_entry(self, line):
line = line.strip()
# ignore comments
if not line or line.startswith('#'):
return
code, name = line.split()
prov_part = int(code[:2])
city_part = int(code[2:4])
county_part = int(code[4:])
if county_part == 0:
if city_part == 0: # province
self.add_sub_unit(code, name)
else: # city
prov_code = prov_part * 10000
prov = self.get_sub_unit(prov_code)
prov.add_sub_unit(code, name)
else:
prov_code = prov_part * 10000
prov = self.get_sub_unit(prov_code)
city_code = prov_code + city_part * 100
city = prov.get_sub_unit(city_code)
city.add_sub_unit(code, name)
def __unicode__(self):
return u'\n'.join([unicode(u) for u in self.subunits.values()])
if __name__ == '__main__':
with open('nation.txt') as f:
cn = Nation()
cn.load(f)
print(cn)
| dlutxx/memo | data/nation_parse.py | Python | mit | 3,087 | 0.000984 |
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
import numpy as NP
import matplotlib.pyplot as plt
# Declare variables (use simple, efficient DAG)
x0=SX.sym("x0"); x1=SX.sym("x1")
x = vertcat((x0,x1))
# Control
u = SX.sym("u")
# ODE right hand side
xdot = vertcat([(1 - x1*x1)*x0 - x1 + u, x0])
# Lagrangian function
L = x0*x0 + x1*x1 + u*u
# Costate
lam = SX.sym("lam",2)
# Hamiltonian function
H = inner_prod(lam,xdot) + L
# Costate equations
ldot = -gradient(H,x)
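# Pontryagin's minimum principle: along an optimal trajectory the costates
# satisfy lam_dot = -dH/dx, which is what the line above computes.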
## The control must minimize the Hamiltonian, which is:
print "Hamiltonian: ", H
# H is of a convex quadratic form in u: H = u*u + p*u + q, let's get the coefficient p
p = gradient(H,u) # this gives us 2*u + p
p = substitute(p,u,0) # replace u with zero: gives us p
# H's unconstrained minimizer is: u = -p/2
u_opt = -p/2
# We must constrain u to the interval [-0.75, 1.0]; convexity of H ensures that the optimum is attained at the bound when u_opt is outside the interval
u_opt = min(u_opt,1.0)
u_opt = max(u_opt,-0.75)
print "optimal control: ", u_opt
# Augment f with lam_dot and substitute in the value for the optimal control
f = vertcat((xdot,ldot))
f = substitute(f,u,u_opt)
# Create the right hand side function
rhs_in = daeIn(x=vertcat((x,lam)))
rhs = SXFunction(rhs_in,daeOut(ode=f))
# Create an integrator (CVodes)
I = Integrator("cvodes", rhs)
I.setOption("abstol",1e-8) # abs. tolerance
I.setOption("reltol",1e-8) # rel. tolerance
I.setOption("t0",0.0)
I.setOption("tf",10.0)
I.init()
# The initial state
x_init = NP.array([0.,1.])
# The initial costate
l_init = MX.sym("l_init",2)
# The initial condition for the shooting
X = vertcat((x_init,l_init))
# Call the integrator
X, = integratorOut(I.call(integratorIn(x0=X)),"xf")
# Costate at the final time should be zero (cf. Bryson and Ho)
lam_f = X[2:4]
g = lam_f
# Formulate root-finding problem
rfp = MXFunction([l_init],[g])
# Select a solver for the root-finding problem
Solver = "nlp"
#Solver = "newton"
#Solver = "kinsol"
# Allocate an implict solver
solver = ImplicitFunction(Solver, rfp)
if Solver=="nlp":
solver.setOption("nlp_solver", "ipopt")
solver.setOption("nlp_solver_options",{"hessian_approximation":"limited-memory"})
elif Solver=="newton":
solver.setOption("linear_solver",CSparse)
elif Solver=="kinsol":
solver.setOption("linear_solver_type","user_defined")
solver.setOption("linear_solver",CSparse)
solver.setOption("max_iter",1000)
# Initialize the solver
solver.init()
# Pass initial guess
#solver.setInput([ 0, 0], "x0")
# Solve the problem
solver.evaluate()
# Retrieve the optimal solution
l_init_opt = NP.array(solver.output().data())
# Time grid for visualization
tgrid = NP.linspace(0,10,100)
# Output functions
output_fcn = SXFunction(rhs_in,[x0,x1,u_opt])
# Simulator to get optimal state and control trajectories
simulator = Simulator(I, output_fcn, tgrid)
simulator.init()
# Pass initial conditions to the simulator
simulator.setInput(NP.concatenate((x_init,l_init_opt)),"x0")
# Simulate to get the trajectories
simulator.evaluate()
# Get optimal control
x_opt = simulator.getOutput(0).T
y_opt = simulator.getOutput(1).T
u_opt = simulator.getOutput(2).T
# Plot the results
plt.figure(1)
plt.clf()
plt.plot(tgrid,x_opt,'--')
plt.plot(tgrid,y_opt,'-')
plt.plot(tgrid,u_opt,'-.')
plt.title("Van der Pol optimization - indirect single shooting")
plt.xlabel('time')
plt.legend(['x trajectory','y trajectory','u trajectory'])
plt.grid()
plt.show()
| ghorn/debian-casadi | docs/examples/python/vdp_indirect_single_shooting.py | Python | lgpl-3.0 | 4,479 | 0.013842 |
from tests.unit.dataactcore.factories.staging import AppropriationFactory
from tests.unit.dataactcore.factories.domain import SF133Factory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'a26_appropriations'
_TAS = 'a26_appropriations_tas'
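# Per the rule under test, SF-133 lines 1540 and 1640 carry the GTAS contract
# authority amounts that determine whether ContractAuthorityAmountTotal_CPE
# must be provided in the appropriation file.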
def test_column_headers(database):
expected_subset = {'row_number', 'contract_authority_amount_cpe',
'lines', 'amounts'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_success(database):
""" Tests that ContractAuthorityAmountTotal_CPE is provided if TAS has contract authority value
provided in GTAS """
tas = "".join([_TAS, "_success"])
sf1 = SF133Factory(tas=tas, period=1, fiscal_year=2016, line=1540, amount=1)
sf2 = SF133Factory(tas=tas, period=1, fiscal_year=2016, line=1640, amount=1)
ap = AppropriationFactory(tas=tas, contract_authority_amount_cpe=1)
assert number_of_errors(_FILE, database, models=[sf1, sf2, ap]) == 0
def test_failure(database):
""" Tests that ContractAuthorityAmountTotal_CPE is not provided if TAS has contract authority value
provided in GTAS """
tas = "".join([_TAS, "_failure"])
sf1 = SF133Factory(tas=tas, period=1, fiscal_year=2016, line=1540, amount=1)
sf2 = SF133Factory(tas=tas, period=1, fiscal_year=2016, line=1640, amount=1)
ap = AppropriationFactory(tas=tas, contract_authority_amount_cpe=0)
assert number_of_errors(_FILE, database, models=[sf1, sf2, ap]) == 1
| chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | tests/unit/dataactvalidator/test_a26_appropriations.py | Python | cc0-1.0 | 1,546 | 0.003881 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import os
import time
import urllib, urllib2
import xml
from conary import conarycfg, versions
from conary.repository import errors
from conary.lib import digestlib, sha1helper, tracelog
from conary.dbstore import sqlerrors
from conary.repository.netrepos import items, versionops, accessmap
from conary.server.schema import resetTable
# FIXME: remove these compatibility error classes later
UserAlreadyExists = errors.UserAlreadyExists
GroupAlreadyExists = errors.GroupAlreadyExists
MAX_ENTITLEMENT_LENGTH = 255
nameCharacterSet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-\\@'
class UserAuthorization:
def __init__(self, db, pwCheckUrl = None, cacheTimeout = None):
self.db = db
self.pwCheckUrl = pwCheckUrl
self.cacheTimeout = cacheTimeout
self.pwCache = {}
def addUserByMD5(self, cu, user, salt, password):
for letter in user:
if letter not in nameCharacterSet:
raise errors.InvalidName(user)
try:
cu.execute("INSERT INTO Users (userName, salt, password) "
"VALUES (?, ?, ?)",
(user, cu.binary(salt), cu.binary(password)))
uid = cu.lastrowid
except sqlerrors.ColumnNotUnique:
raise errors.UserAlreadyExists, 'user: %s' % user
# make sure we don't conflict with another entry based on case; this
# avoids races from other processes adding case differentiated
# duplicates
cu.execute("SELECT userId FROM Users WHERE LOWER(userName)=LOWER(?)",
user)
if len(cu.fetchall()) > 1:
raise errors.UserAlreadyExists, 'user: %s' % user
return uid
def changePassword(self, cu, user, salt, password):
if self.pwCheckUrl:
raise errors.CannotChangePassword
cu.execute("UPDATE Users SET password=?, salt=? WHERE userName=?",
cu.binary(password), cu.binary(salt), user)
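    # Password validation: either defer to an external pwCheckUrl service
    # (an HTTP GET returning a small XML document), or compare
    # md5(salt + supplied password) against the stored hex digest. Positive
    # results may be cached for cacheTimeout seconds.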
def _checkPassword(self, user, salt, password, challenge, remoteIp = None):
if challenge is ValidPasswordToken:
# Short-circuit for shim-using code that does its own
# authentication, e.g. through one-time tokens or session
# data.
return True
if self.cacheTimeout:
cacheEntry = sha1helper.sha1String("%s%s" % (user, challenge))
timeout = self.pwCache.get(cacheEntry, None)
if timeout is not None and time.time() < timeout:
return True
if self.pwCheckUrl:
try:
url = "%s?user=%s;password=%s" \
% (self.pwCheckUrl, urllib.quote(user),
urllib.quote(challenge))
if remoteIp is not None:
url += ';remote_ip=%s' % urllib.quote(remoteIp)
f = urllib2.urlopen(url)
xmlResponse = f.read()
except:
return False
p = PasswordCheckParser()
p.parse(xmlResponse)
isValid = p.validPassword()
else:
m = digestlib.md5()
m.update(salt)
m.update(challenge)
isValid = m.hexdigest() == password
if isValid and self.cacheTimeout:
# cacheEntry is still around from above
self.pwCache[cacheEntry] = time.time() + self.cacheTimeout
return isValid
def deleteUser(self, cu, user):
userId = self.getUserIdByName(user)
# First delete the user from all the groups
sql = "DELETE from UserGroupMembers WHERE userId=?"
cu.execute(sql, userId)
# Now delete the user itself
sql = "DELETE from Users WHERE userId=?"
cu.execute(sql, userId)
def getAuthorizedRoles(self, cu, user, password, allowAnonymous = True,
remoteIp = None):
"""
Given a user and password, return the list of roles that are
authorized via these credentials
"""
if isinstance(user, ValidUser):
# Short-circuit for shim-using code that knows what roles
# it wants.
roles = set()
if '*' in user.roles:
cu.execute("SELECT userGroupId FROM UserGroups")
else:
roles = set([x for x in user.roles if isinstance(x, int)])
names = set([x for x in user.roles if not isinstance(x, int)])
if not names:
return roles
places = ', '.join('?' for x in names)
cu.execute("""SELECT userGroupId FROM UserGroups
WHERE userGroup IN ( %s )""" % (places,), *names)
roles.update(x[0] for x in cu)
return roles
cu.execute("""
SELECT Users.salt, Users.password, UserGroupMembers.userGroupId,
Users.userName, UserGroups.canMirror
FROM Users
JOIN UserGroupMembers USING(userId)
JOIN UserGroups USING(userGroupId)
WHERE Users.userName = ? OR Users.userName = 'anonymous'
""", user)
result = [ x for x in cu ]
if not result:
return set()
canMirror = (sum(x[4] for x in result) > 0)
# each user can only appear once (by constraint), so we only
# need to validate the password once. we don't validate the
# password for 'anonymous'. Using a bad password still allows
# anonymous access
userPasswords = [ x for x in result if x[3] != 'anonymous' ]
# mirror users do not have an anonymous fallback
if userPasswords and canMirror:
allowAnonymous = False
if not allowAnonymous:
result = userPasswords
if userPasswords and not self._checkPassword(
user,
cu.frombinary(userPasswords[0][0]),
cu.frombinary(userPasswords[0][1]),
password, remoteIp):
result = [ x for x in result if x[3] == 'anonymous' ]
return set(x[2] for x in result)
def getRolesByUser(self, user):
cu = self.db.cursor()
cu.execute("""SELECT userGroup FROM Users
JOIN UserGroupMembers USING (userId)
JOIN UserGroups USING (userGroupId)
WHERE Users.userName = ?""", user)
return [ x[0] for x in cu ]
def getUserIdByName(self, userName):
cu = self.db.cursor()
cu.execute("SELECT userId FROM Users WHERE userName=?", userName)
ret = cu.fetchall()
if len(ret):
return ret[0][0]
raise errors.UserNotFound(userName)
def getUserList(self):
cu = self.db.cursor()
cu.execute("SELECT userName FROM Users")
return [ x[0] for x in cu ]
class EntitlementAuthorization:
def __init__(self, entCheckUrl = None, cacheTimeout = None):
self.entCheckUrl = entCheckUrl
self.cacheTimeout = cacheTimeout
self.cache = {}
def getAuthorizedRoles(self, cu, serverName, remoteIp,
entitlementClass, entitlement):
"""
Given an entitlement, return the list of roles that the
credentials authorize.
"""
cacheEntry = sha1helper.sha1String("%s%s%s" % (
serverName, entitlementClass, entitlement))
roleIds, timeout, autoRetry = \
self.cache.get(cacheEntry, (None, None, None))
if (timeout is not None) and time.time() < timeout:
return roleIds
elif (timeout is not None):
del self.cache[cacheEntry]
if autoRetry is not True:
raise errors.EntitlementTimeout([entitlement])
if self.entCheckUrl:
if entitlementClass is not None:
url = "%s?server=%s;class=%s;key=%s" \
% (self.entCheckUrl, urllib.quote(serverName),
urllib.quote(entitlementClass),
urllib.quote(entitlement))
else:
url = "%s?server=%s;key=%s" \
% (self.entCheckUrl, urllib.quote(serverName),
urllib.quote(entitlement))
if remoteIp is not None:
url += ';remote_ip=%s' % urllib.quote(remoteIp)
try:
f = urllib2.urlopen(url)
xmlResponse = f.read()
except Exception, e:
return set()
p = conarycfg.EntitlementParser()
try:
p.parse(xmlResponse)
except:
return set()
if p['server'] != serverName:
return set()
entitlementClass = p['class']
entitlement = p['key']
entitlementRetry = p['retry']
if p['timeout'] is None:
entitlementTimeout = self.cacheTimeout
else:
entitlementTimeout = p['timeout']
if entitlementTimeout is None:
entitlementTimeout = -1
# look up entitlements
cu.execute("""
SELECT userGroupId FROM Entitlements
JOIN EntitlementAccessMap USING (entGroupId)
WHERE entitlement=?
""", entitlement)
roleIds = set(x[0] for x in cu)
if self.entCheckUrl:
# cacheEntry is still set from the cache check above
self.cache[cacheEntry] = (roleIds,
time.time() + entitlementTimeout,
entitlementRetry)
return roleIds
class NetworkAuthorization:
def __init__(self, db, serverNameList, cacheTimeout = None, log = None,
passwordURL = None, entCheckURL = None):
"""
@param cacheTimeout: Timeout, in seconds, for authorization cache
entries. If None, no cache is used.
@type cacheTimeout: int
@param passwordURL: URL base to use for an http get request to
externally validate user passwords. When this is specified, the
        passwords in the local database are ignored, and the changePassword()
call is disabled.
@param entCheckURL: URL base for mapping an entitlement received
over the network to an entitlement to check for in the database.
"""
self.serverNameList = serverNameList
self.db = db
self.log = log or tracelog.getLog(None)
self.userAuth = UserAuthorization(
self.db, passwordURL, cacheTimeout = cacheTimeout)
self.entitlementAuth = EntitlementAuthorization(
cacheTimeout = cacheTimeout, entCheckUrl = entCheckURL)
self.items = items.Items(db)
self.ri = accessmap.RoleInstances(db)
def getAuthRoles(self, cu, authToken, allowAnonymous = True):
self.log(4, authToken[0], authToken[2])
# Find what role(s) this user belongs to
# anonymous users should come through as anonymous, not None
assert(authToken[0])
# we need a hashable tuple, a list won't work
authToken = tuple(authToken)
if type(authToken[2]) is not list:
# this code is for compatibility with old callers who
# form up an old (user, pass, entclass, entkey) authToken.
# rBuilder is one such caller.
entList = []
entClass = authToken[2]
entKey = authToken[3]
if entClass is not None and entKey is not None:
entList.append((entClass, entKey))
remoteIp = None
elif len(authToken) == 3:
entList = authToken[2]
remoteIp = None
else:
entList = authToken[2]
remoteIp = authToken[3]
roleSet = self.userAuth.getAuthorizedRoles(
cu, authToken[0], authToken[1],
allowAnonymous = allowAnonymous,
remoteIp = remoteIp)
timedOut = []
for entClass, entKey in entList:
# XXX serverName is passed only for compatibility with the server
# and entitlement class based entitlement design; it's only used
# here during external authentication (used by some rPath
# customers)
try:
rolesFromEntitlement = \
self.entitlementAuth.getAuthorizedRoles(
cu, self.serverNameList[0], remoteIp,
entClass, entKey)
roleSet.update(rolesFromEntitlement)
except errors.EntitlementTimeout, e:
timedOut += e.getEntitlements()
if timedOut:
raise errors.EntitlementTimeout(timedOut)
return roleSet
def batchCheck(self, authToken, troveList, write = False, cu = None):
""" checks access permissions for a set of *existing* troves in the repository """
# troveTupList is a list of (name, VFS) tuples
self.log(3, authToken[0], "entitlements=%s write=%s" %(authToken[2], int(bool(write))),
troveList)
# process/check the troveList, which can be an iterator
checkList = []
for i, (n,v,f) in enumerate(troveList):
h = versions.VersionFromString(v).getHost()
if h not in self.serverNameList:
raise errors.RepositoryMismatch(self.serverNameList, h)
checkList.append((i,n,v,f))
# default to all failing
retlist = [ False ] * len(checkList)
if not authToken[0]:
return retlist
# check groupIds
if cu is None:
cu = self.db.cursor()
try:
groupIds = self.getAuthRoles(cu, authToken)
except errors.InsufficientPermission:
return retlist
if not len(groupIds):
return retlist
resetTable(cu, "tmpNVF")
self.db.bulkload("tmpNVF", checkList, ["idx","name","version", "flavor"],
start_transaction=False)
self.db.analyze("tmpNVF")
writeCheck = ''
if write:
writeCheck = "and ugi.canWrite = 1"
cu.execute("""
select t.idx, i.instanceId
from tmpNVF as t
join Items on t.name = Items.item
join Versions on t.version = Versions.version
join Flavors on t.flavor = Flavors.flavor
join Instances as i on
i.itemId = Items.itemId and
i.versionId = Versions.versionId and
i.flavorId = Flavors.flavorId
join UserGroupInstancesCache as ugi on i.instanceId = ugi.instanceId
where ugi.userGroupId in (%s)
%s""" % (",".join("%d" % x for x in groupIds), writeCheck) )
for i, instanceId in cu:
retlist[i] = True
return retlist
def commitCheck(self, authToken, nameVersionList):
""" checks that we can commit to a list of (name, version) tuples """
self.log(3, authToken[0], "entitlements=%s" % (authToken[2],), nameVersionList)
checkDict = {}
# nameVersionList can actually be an iterator, so we need to keep
# a list of the trove names we're dealing with
troveList = []
# first check that we handle all the labels we're asked about
for i, (n, v) in enumerate(nameVersionList):
label = v.branch().label()
if label.getHost() not in self.serverNameList:
raise errors.RepositoryMismatch(self.serverNameList, label.getHost())
l = checkDict.setdefault(label.asString(), set())
troveList.append(n)
l.add(i)
# default to all failing
retlist = [ False ] * len(troveList)
if not authToken[0]:
return retlist
# check groupIds. this is the same as the self.check() function
cu = self.db.cursor()
try:
groupIds = self.getAuthRoles(cu, authToken)
except errors.InsufficientPermission:
return retlist
if not len(groupIds):
return retlist
# build the query statement for permissions check
stmt = """
select Items.item
from Permissions join Items using (itemId)
"""
where = ["Permissions.canWrite=1"]
where.append("Permissions.userGroupId IN (%s)" %
",".join("%d" % x for x in groupIds))
if len(checkDict):
where.append("""(
Permissions.labelId = 0 OR
Permissions.labelId in (select labelId from Labels where label=?)
)""")
stmt += "WHERE " + " AND ".join(where)
        # we need to test for each label separately in case we have
        # multiple troves living on multiple labels with different
        # permission settings
for label in checkDict.iterkeys():
cu.execute(stmt, label)
patterns = [ x[0] for x in cu ]
for i in checkDict[label]:
for pattern in patterns:
if self.checkTrove(pattern, troveList[i]):
retlist[i] = True
break
return retlist
# checks for group-wide permissions like admin and mirror
def authCheck(self, authToken, admin=False, mirror=False):
self.log(3, authToken[0],
"entitlements=%s admin=%s mirror=%s" %(
authToken[2], int(bool(admin)), int(bool(mirror)) ))
if not authToken[0]:
return False
cu = self.db.cursor()
try:
groupIds = self.getAuthRoles(cu, authToken)
except errors.InsufficientPermission:
return False
if len(groupIds) < 1:
return False
cu.execute("select canMirror, admin from UserGroups "
"where userGroupId in (%s)" %(
",".join("%d" % x for x in groupIds)))
hasAdmin = False
hasMirror = False
for mirrorBit, adminBit in cu.fetchall():
if admin and adminBit:
hasAdmin = True
if mirror and (mirrorBit or adminBit):
hasMirror = True
admin = (not admin) or (admin and hasAdmin)
mirror = (not mirror) or (mirror and hasMirror)
return admin and mirror
def checkPassword(self, authToken):
cu = self.db.cursor()
user = authToken[0]
password = authToken[1]
cu.execute('SELECT salt, password FROM Users WHERE userName=?', user)
rows = cu.fetchall()
if not len(rows):
return False
salt, challenge = rows[0]
salt = cu.frombinary(salt)
challenge = cu.frombinary(challenge)
return self.userAuth._checkPassword(user, salt, challenge, password)
    # a simple call to auth.check(authToken) checks that the role
    # has an entry in the Permissions table - of questionable
    # usefulness since we can't check that permission against the
    # label or the troves
def check(self, authToken, write = False, label = None,
trove = None, remove = False, allowAnonymous = True):
self.log(3, authToken[0],
"entitlements=%s write=%s label=%s trove=%s remove=%s" %(
authToken[2], int(bool(write)), label, trove, int(bool(remove))))
if label and label.getHost() not in self.serverNameList:
raise errors.RepositoryMismatch(self.serverNameList, label.getHost())
if not authToken[0]:
return False
cu = self.db.cursor()
try:
groupIds = self.getAuthRoles(cu, authToken,
allowAnonymous = allowAnonymous)
except errors.InsufficientPermission:
return False
if len(groupIds) < 1:
return False
elif not label and not trove and not remove and not write:
# no more checks to do -- the authentication information is valid
return True
stmt = """
select Items.item
from Permissions join items using (itemId)
"""
params = []
where = []
if len(groupIds):
where.append("Permissions.userGroupId IN (%s)" %
",".join("%d" % x for x in groupIds))
if label:
where.append("""
(
Permissions.labelId = 0 OR
Permissions.labelId in
( select labelId from Labels where Labels.label = ? )
)
""")
params.append(label.asString())
if write:
where.append("Permissions.canWrite=1")
if remove:
where.append("Permissions.canRemove=1")
if where:
stmt += "WHERE " + " AND ".join(where)
self.log(4, stmt, params)
cu.execute(stmt, params)
for (pattern,) in cu:
if self.checkTrove(pattern, trove):
return True
return False
def checkTrove(self, pattern, trove):
return items.checkTrove(pattern, trove)
def addAcl(self, role, trovePattern, label, write = False,
remove = False):
self.log(3, role, trovePattern, label, write, remove)
cu = self.db.cursor()
# these need to show up as 0/1 regardless of what we pass in
write = int(bool(write))
remove = int(bool(remove))
if trovePattern:
itemId = self.items.addPattern(trovePattern)
else:
itemId = 0
# XXX This functionality is available in the TroveStore class
# refactor so that the code is not in two places
if label:
cu.execute("SELECT * FROM Labels WHERE label=?", label)
labelId = cu.fetchone()
if labelId:
labelId = labelId[0]
else:
cu.execute("INSERT INTO Labels (label) VALUES(?)", label)
labelId = cu.lastrowid
else:
labelId = 0
roleId = self._getRoleIdByName(role)
try:
cu.execute("""
INSERT INTO Permissions
(userGroupId, labelId, itemId, canWrite, canRemove)
VALUES (?, ?, ?, ?, ?)""", (
roleId, labelId, itemId, write, remove))
permissionId = cu.lastrowid
except sqlerrors.ColumnNotUnique:
self.db.rollback()
raise errors.PermissionAlreadyExists, "labelId: '%s', itemId: '%s'" %(
labelId, itemId)
self.ri.addPermissionId(permissionId, roleId)
self.db.commit()
def editAcl(self, role, oldTroveId, oldLabelId, troveId, labelId,
write = False, canRemove = False):
self.log(3, role, (oldTroveId, oldLabelId), (troveId, labelId),
write, canRemove)
cu = self.db.cursor()
roleId = self._getRoleIdByName(role)
# these need to show up as 0/1 regardless of what we pass in
write = int(bool(write))
canRemove = int(bool(canRemove))
# find out what permission we're changing
cu.execute("""
select permissionId from Permissions
where userGroupId = ? and labelId = ? and itemId = ?""",
(roleId, oldLabelId, oldTroveId))
ret = cu.fetchall()
        if not ret: # nothing to do
return
permissionId = ret[0][0]
try:
cu.execute("""
UPDATE Permissions
SET labelId = ?, itemId = ?, canWrite = ?, canRemove = ?
WHERE permissionId = ?""", (labelId, troveId, write, canRemove, permissionId))
except sqlerrors.ColumnNotUnique:
self.db.rollback()
raise errors.PermissionAlreadyExists, "labelId: '%s', itemId: '%s'" %(
labelId, troveId)
if oldLabelId != labelId or oldTroveId != troveId:
# a permission has changed the itemId or the labelId...
self.ri.updatePermissionId(permissionId, roleId)
else: # just set the new canWrite flag
self.ri.updateCanWrite(permissionId, roleId)
self.db.commit()
def deleteAcl(self, role, label, item):
self.log(3, role, label, item)
# check the validity of the role
roleId = self._getRoleIdByName(role)
if item is None: item = 'ALL'
if label is None: label = 'ALL'
cu = self.db.cursor()
# lock the Permissions records we are about to delete. This is
# a crude hack for sqlite's lack of "select for update"
cu.execute("""
update Permissions set canWrite=0, canRemove=0
where userGroupId = ?
and labelId = (select labelId from Labels where label=?)
and itemId = (select itemId from Items where item=?)
""", (roleId, label, item))
cu.execute("""
select permissionId from Permissions
where userGroupId = ?
and labelId = (select labelId from Labels where label=?)
and itemId = (select itemId from Items where item=?)
""", (roleId, label, item))
for permissionId, in cu.fetchall():
self.ri.deletePermissionId(permissionId, roleId)
cu.execute("delete from Permissions where permissionId = ?",
permissionId)
self.db.commit()
def addUser(self, user, password):
self.log(3, user)
salt = os.urandom(4)
m = digestlib.md5()
m.update(salt)
m.update(password)
self.addUserByMD5(user, salt, m.hexdigest())
def roleIsAdmin(self, role):
cu = self.db.cursor()
cu.execute("SELECT admin FROM UserGroups WHERE userGroup=?",
role)
ret = cu.fetchall()
if len(ret):
return ret[0][0]
raise errors.RoleNotFound
def roleCanMirror(self, role):
cu = self.db.cursor()
cu.execute("SELECT canMirror FROM UserGroups WHERE userGroup=?",
role)
ret = cu.fetchall()
if len(ret):
return ret[0][0]
raise errors.RoleNotFound
def setAdmin(self, role, admin):
self.log(3, role, admin)
cu = self.db.transaction()
cu.execute("UPDATE userGroups SET admin=? WHERE userGroup=?",
(int(bool(admin)), role))
self.db.commit()
def setUserRoles(self, userName, roleList):
cu = self.db.cursor()
userId = self.userAuth.getUserIdByName(userName)
cu.execute("""DELETE FROM userGroupMembers WHERE userId=?""", userId)
for role in roleList:
self.addRoleMember(role, userName, commit = False)
self.db.commit()
def setMirror(self, role, canMirror):
self.log(3, role, canMirror)
cu = self.db.transaction()
cu.execute("UPDATE userGroups SET canMirror=? WHERE userGroup=?",
(int(bool(canMirror)), role))
self.db.commit()
def _checkValidName(self, name):
for letter in name:
if letter not in nameCharacterSet:
raise errors.InvalidName(name)
def addUserByMD5(self, user, salt, password):
self.log(3, user)
self._checkValidName(user)
cu = self.db.transaction()
try:
uid = self.userAuth.addUserByMD5(cu, user, salt, password)
except:
self.db.rollback()
raise
else:
self.db.commit()
def deleteUserByName(self, user, deleteRole=True):
self.log(3, user)
cu = self.db.cursor()
if deleteRole:
# for historical reasons:
# - if the role of the same name exists
# - and the role is empty or the user is the sole member
# - and the role doesn't have any special permissions
# - and the role doesn't have any acls
# then we attempt to delete it as well
cu.execute("""
select sum(c) from (
select count(*) as c from UserGroups
where userGroup = :user and admin + canMirror > 0
union
select count(*) as c from Users
join UserGroupMembers using(userId)
join UserGroups using(userGroupId) where userGroup = :user and userName != :user
union
select count(*) as c from Permissions
join UserGroups using(userGroupId) where userGroup = :user
union
select count(*) as c from UserGroupTroves
join UserGroups using(userGroupId) where userGroup = :user
) as counters """, {"user": user})
# a !0 sum means this role can't be deleted
if cu.fetchone()[0] == 0:
try:
self.deleteRole(user, False)
except errors.RoleNotFound, e:
pass
self.userAuth.deleteUser(cu, user)
self.db.commit()
def changePassword(self, user, newPassword):
self.log(3, user)
salt = os.urandom(4)
m = digestlib.md5()
m.update(salt)
m.update(newPassword)
cu = self.db.cursor()
self.userAuth.changePassword(cu, user, salt, m.hexdigest())
self.db.commit()
def getRoles(self, user):
cu = self.db.cursor()
cu.execute("""SELECT UserGroups.userGroup
FROM UserGroups, Users, UserGroupMembers
WHERE UserGroups.userGroupId = UserGroupMembers.userGroupId AND
UserGroupMembers.userId = Users.userId AND
Users.userName = ?""", user)
return [row[0] for row in cu]
def getRoleList(self):
cu = self.db.cursor()
cu.execute("SELECT userGroup FROM UserGroups")
return [ x[0] for x in cu ]
def getRoleMembers(self, role):
cu = self.db.cursor()
cu.execute("""SELECT Users.userName FROM UserGroups
JOIN UserGroupMembers USING (userGroupId)
JOIN Users USING (userId)
WHERE userGroup = ? """, role)
return [ x[0] for x in cu ]
def _queryPermsByRole(self, role):
cu = self.db.cursor()
cu.execute("""SELECT Labels.label,
PerItems.item,
canWrite, canRemove
FROM UserGroups
JOIN Permissions USING (userGroupId)
LEFT OUTER JOIN Items AS PerItems ON
PerItems.itemId = Permissions.itemId
LEFT OUTER JOIN Labels ON
Permissions.labelId = Labels.labelId
WHERE userGroup=?""", role)
return cu
def iterPermsByRole(self, role):
cu = self._queryPermsByRole(role)
for row in cu:
yield row
def getPermsByRole(self, roleName):
cu = self._queryPermsByRole(roleName)
results = cu.fetchall()
# reconstruct the dictionary of values (because some
# database engines like PostgreSQL lowercase all column names)
l = []
for result in results:
d = {}
for key in ('label', 'item', 'canWrite', 'canRemove'):
d[key] = result[key]
l.append(d)
return l
def _getRoleIdByName(self, role):
cu = self.db.cursor()
cu.execute("SELECT userGroupId FROM UserGroups WHERE userGroup=?",
role)
ret = cu.fetchall()
if len(ret):
return ret[0][0]
raise errors.RoleNotFound
def _checkDuplicates(self, cu, role):
# check for case insensitive user conflicts -- avoids race with
# other adders on case-differentiated names
cu.execute("SELECT userGroupId FROM UserGroups "
"WHERE LOWER(UserGroup)=LOWER(?)", role)
if len(cu.fetchall()) > 1:
# undo our insert
self.db.rollback()
raise errors.RoleAlreadyExists('role: %s' % role)
def addRole(self, role):
self.log(3, role)
self._checkValidName(role)
cu = self.db.transaction()
try:
cu.execute("INSERT INTO UserGroups (userGroup) VALUES (?)", role)
ugid = cu.lastrowid
except sqlerrors.ColumnNotUnique:
self.db.rollback()
raise errors.RoleAlreadyExists, "role: %s" % role
self._checkDuplicates(cu, role)
self.db.commit()
return ugid
def renameRole(self, oldRole, newRole):
cu = self.db.cursor()
if oldRole == newRole:
return True
try:
cu.execute("UPDATE UserGroups SET userGroup=? WHERE userGroup=?",
(newRole, oldRole))
except sqlerrors.ColumnNotUnique:
self.db.rollback()
raise errors.RoleAlreadyExists("role: %s" % newRole)
self._checkDuplicates(cu, newRole)
self.db.commit()
return True
def updateRoleMembers(self, role, members):
#Do this in a transaction
cu = self.db.cursor()
roleId = self._getRoleIdByName(role)
#First drop all the current members
cu.execute ("DELETE FROM UserGroupMembers WHERE userGroupId=?", roleId)
#now add the new members
for userName in members:
self.addRoleMember(role, userName, commit=False)
self.db.commit()
def addRoleMember(self, role, userName, commit = True):
cu = self.db.cursor()
        # we do this in multiple selects so we can generate the proper
        # exceptions when the names don't exist
roleId = self._getRoleIdByName(role)
userId = self.userAuth.getUserIdByName(userName)
cu.execute("""INSERT INTO UserGroupMembers (userGroupId, userId)
VALUES (?, ?)""", roleId, userId)
if commit:
self.db.commit()
def deleteRole(self, role, commit = True):
self.deleteRoleById(self._getRoleIdByName(role), commit)
def deleteRoleById(self, roleId, commit = True):
cu = self.db.cursor()
cu.execute("DELETE FROM EntitlementAccessMap WHERE userGroupId=?",
roleId)
cu.execute("DELETE FROM Permissions WHERE userGroupId=?", roleId)
cu.execute("DELETE FROM UserGroupMembers WHERE userGroupId=?", roleId)
cu.execute("DELETE FROM UserGroupInstancesCache WHERE userGroupId = ?",
roleId)
cu.execute("DELETE FROM UserGroupTroves WHERE userGroupId = ?", roleId)
cu.execute("DELETE FROM LatestCache WHERE userGroupId = ?", roleId)
#Note, there could be a user left behind with no associated group
#if the group being deleted was created with a user. This user is not
#deleted because it is possible for this user to be a member of
#another group.
cu.execute("DELETE FROM UserGroups WHERE userGroupId=?", roleId)
if commit:
self.db.commit()
def getItemList(self):
cu = self.db.cursor()
cu.execute("SELECT item FROM Items")
return [ x[0] for x in cu ]
def getLabelList(self):
cu = self.db.cursor()
cu.execute("SELECT label FROM Labels")
return [ x[0] for x in cu ]
def __checkEntitlementOwner(self, cu, roleIds, entClass):
"""
Raises an error or returns the group Id.
"""
if not roleIds:
raise errors.InsufficientPermission
# verify that the user has permission to change this entitlement
# group
cu.execute("""
SELECT entGroupId FROM EntitlementGroups
JOIN EntitlementOwners USING (entGroupId)
WHERE
ownerGroupId IN (%s)
AND
entGroup = ?
""" % ",".join(str(x) for x in roleIds), entClass)
entClassIdList = [ x[0] for x in cu ]
if entClassIdList:
assert(max(entClassIdList) == min(entClassIdList))
return entClassIdList[0]
# admins can do everything
cu.execute("select userGroupId from UserGroups "
"where userGroupId in (%s) "
"and admin = 1" % ",".join([str(x) for x in roleIds]))
if not len(cu.fetchall()):
raise errors.InsufficientPermission
cu.execute("SELECT entGroupId FROM EntitlementGroups WHERE "
"entGroup = ?", entClass)
entClassIds = [ x[0] for x in cu ]
if len(entClassIds) == 1:
entClassId = entClassIds[0]
else:
assert(not entClassIds)
entClassId = -1
return entClassId
def deleteEntitlementClass(self, authToken, entClass):
cu = self.db.cursor()
if not self.authCheck(authToken, admin = True):
raise errors.InsufficientPermission
cu.execute("SELECT entGroupId FROM entitlementGroups "
"WHERE entGroup = ?", entClass)
ret = cu.fetchall()
# XXX: should we raise an error here or just go about it silently?
if not len(ret):
raise errors.UnknownEntitlementClass
entClassId = ret[0][0]
cu.execute("DELETE FROM EntitlementAccessMap WHERE entGroupId=?",
entClassId)
cu.execute("DELETE FROM Entitlements WHERE entGroupId=?",
entClassId)
cu.execute("DELETE FROM EntitlementOwners WHERE entGroupId=?",
entClassId)
cu.execute("DELETE FROM EntitlementGroups WHERE entGroupId=?",
entClassId)
self.db.commit()
def addEntitlementKey(self, authToken, entClass, entKey):
cu = self.db.cursor()
# validate the password
roleIds = self.getAuthRoles(cu, authToken)
self.log(2, "entClass=%s entKey=%s" % (entClass, entKey))
if len(entKey) > MAX_ENTITLEMENT_LENGTH:
raise errors.InvalidEntitlement
entClassId = self.__checkEntitlementOwner(cu, roleIds, entClass)
if entClassId == -1:
raise errors.UnknownEntitlementClass
# check for duplicates
cu.execute("SELECT * FROM Entitlements WHERE entGroupId = ? AND entitlement = ?",
(entClassId, entKey))
if len(cu.fetchall()):
raise errors.EntitlementKeyAlreadyExists
cu.execute("INSERT INTO Entitlements (entGroupId, entitlement) VALUES (?, ?)",
(entClassId, entKey))
self.db.commit()
def deleteEntitlementKey(self, authToken, entClass, entKey):
cu = self.db.cursor()
# validate the password
roleIds = self.getAuthRoles(cu, authToken)
self.log(2, "entClass=%s entKey=%s" % (entClass, entKey))
if len(entKey) > MAX_ENTITLEMENT_LENGTH:
raise errors.InvalidEntitlement
entClassId = self.__checkEntitlementOwner(cu, roleIds, entClass)
# if the entitlement doesn't exist, return an error
cu.execute("SELECT * FROM Entitlements WHERE entGroupId = ? AND entitlement = ?",
(entClassId, entKey))
if not len(cu.fetchall()):
raise errors.InvalidEntitlement
cu.execute("DELETE FROM Entitlements WHERE entGroupId=? AND "
"entitlement=?", (entClassId, entKey))
self.db.commit()
def addEntitlementClass(self, authToken, entClass, role):
"""
Adds a new entitlement class to the server, and populates it with
an initial role
"""
cu = self.db.cursor()
if not self.authCheck(authToken, admin = True):
raise errors.InsufficientPermission
self.log(2, "entClass=%s role=%s" % (entClass, role))
# check for duplicate
cu.execute("SELECT entGroupId FROM EntitlementGroups WHERE entGroup = ?",
entClass)
if len(cu.fetchall()):
raise errors.EntitlementClassAlreadyExists
roleId = self._getRoleIdByName(role)
cu.execute("INSERT INTO EntitlementGroups (entGroup) "
"VALUES (?)", entClass)
entClassId = cu.lastrowid
cu.execute("INSERT INTO EntitlementAccessMap (entGroupId, userGroupId) "
"VALUES (?, ?)", entClassId, roleId)
self.db.commit()
def getEntitlementClassOwner(self, authToken, entClass):
"""
Returns the role which owns the entitlement class
"""
if not self.authCheck(authToken, admin = True):
raise errors.InsufficientPermission
cu = self.db.cursor()
cu.execute("""
SELECT userGroup FROM EntitlementGroups
JOIN EntitlementOwners USING (entGroupId)
JOIN UserGroups ON UserGroups.userGroupId = EntitlementOwners.ownerGroupId
WHERE entGroup = ?""", entClass)
ret = cu.fetchall()
if len(ret):
return ret[0][0]
return None
def _getIds(self, cu, entClass, role):
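        """Resolve an entitlement class name and a role name to their database ids."""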
cu.execute("SELECT entGroupId FROM entitlementGroups "
"WHERE entGroup = ?", entClass)
ent = cu.fetchall()
if not len(ent):
raise errors.UnknownEntitlementClass
cu.execute("SELECT userGroupId FROM userGroups "
"WHERE userGroup = ?", role)
user = cu.fetchall()
if not len(user):
raise errors.RoleNotFound
return ent[0][0], user[0][0]
def addEntitlementClassOwner(self, authToken, role, entClass):
"""
Gives the role management permission for the entitlement class.
"""
if not self.authCheck(authToken, admin = True):
raise errors.InsufficientPermission
self.log(2, "role=%s entClass=%s" % (role, entClass))
cu = self.db.cursor()
entClassId, roleId = self._getIds(cu, entClass, role)
cu.execute("INSERT INTO EntitlementOwners (entGroupId, ownerGroupId) "
"VALUES (?, ?)",
(entClassId, roleId))
self.db.commit()
def deleteEntitlementClassOwner(self, authToken, role, entClass):
if not self.authCheck(authToken, admin = True):
raise errors.InsufficientPermission
self.log(2, "role=%s entClass=%s" % (role, entClass))
cu = self.db.cursor()
entClassId, roleId = self._getIds(cu, entClass, role)
cu.execute("DELETE FROM EntitlementOwners WHERE "
"entGroupId=? AND ownerGroupId=?",
entClassId, roleId)
self.db.commit()
def iterEntitlementKeys(self, authToken, entClass):
# validate the password
cu = self.db.cursor()
roleIds = self.getAuthRoles(cu, authToken)
entClassId = self.__checkEntitlementOwner(cu, roleIds, entClass)
cu.execute("SELECT entitlement FROM Entitlements WHERE "
"entGroupId = ?", entClassId)
return [ cu.frombinary(x[0]) for x in cu ]
def listEntitlementClasses(self, authToken):
cu = self.db.cursor()
if self.authCheck(authToken, admin = True):
# admins can see everything
cu.execute("SELECT entGroup FROM EntitlementGroups")
else:
roleIds = self.getAuthRoles(cu, authToken)
if not roleIds:
return []
cu.execute("""SELECT entGroup FROM EntitlementOwners
JOIN EntitlementGroups USING (entGroupId)
WHERE ownerGroupId IN (%s)""" %
",".join([ "%d" % x for x in roleIds ]))
return [ x[0] for x in cu ]
def getEntitlementClassesRoles(self, authToken, classList):
if not self.authCheck(authToken, admin = True):
raise errors.InsufficientPermission
cu = self.db.cursor()
placeholders = ','.join(['?' for x in classList])
names = classList
cu.execute("""SELECT entGroup, userGroup FROM EntitlementGroups
LEFT OUTER JOIN EntitlementAccessMap USING (entGroupId)
LEFT OUTER JOIN UserGroups USING (userGroupId)
WHERE entGroup IN (%s)"""
% (placeholders,), names)
d = {}
for entClass, role in cu:
l = d.setdefault(entClass, [])
if role is not None:
l.append(role)
if len(d) != len(classList):
raise errors.RoleNotFound
return d
def setEntitlementClassesRoles(self, authToken, classInfo):
"""
@param classInfo: Dictionary indexed by entitlement class, each
entry being a list of exactly the roles that entitlement group
should have access to.
@type classInfo: dict
"""
if not self.authCheck(authToken, admin = True):
raise errors.InsufficientPermission
cu = self.db.cursor()
# Get entitlement group ids
placeholders = ','.join(['?' for x in classInfo])
names = classInfo.keys()
cu.execute("""SELECT entGroup, entGroupId FROM EntitlementGroups
WHERE entGroup IN (%s)""" %
(placeholders,), names)
entClassMap = dict(cu)
if len(entClassMap) != len(classInfo):
raise errors.RoleNotFound
# Get user group ids
rolesNeeded = list(set(itertools.chain(*classInfo.itervalues())))
if rolesNeeded:
placeholders = ','.join(['?' for x in rolesNeeded])
cu.execute("""SELECT userGroup, userGroupId FROM UserGroups
WHERE userGroup IN (%s)""" %
(placeholders,), rolesNeeded)
roleMap = dict(cu)
else:
roleMap = {}
if len(roleMap) != len(rolesNeeded):
raise errors.RoleNotFound
# Clear any existing entries for the specified entitlement classes
entClassIds = ','.join(['%d' % x for x in entClassMap.itervalues()])
cu.execute("""DELETE FROM EntitlementAccessMap
WHERE entGroupId IN (%s)""" %
(entClassIds,))
# Add new entries.
for entClass, roles in classInfo.iteritems():
for role in roles:
cu.execute("""INSERT INTO EntitlementAccessMap
(entGroupId, userGroupId) VALUES (?, ?)""",
entClassMap[entClass], roleMap[role])
self.db.commit()
class PasswordCheckParser(dict):
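    """
    Minimal expat-based parser for the XML document returned by an external
    password-check service; it accepts a single <auth valid="..."/> element
    and exposes the result through validPassword().
    """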
def StartElementHandler(self, name, attrs):
if name not in [ 'auth' ]:
raise SyntaxError
val = attrs.get('valid', None)
self.valid = (val == '1' or str(val).lower() == 'true')
def EndElementHandler(self, name):
pass
def CharacterDataHandler(self, data):
if data.strip():
self.valid = False
def parse(self, s):
return self.p.Parse(s)
def validPassword(self):
return self.valid
def __init__(self):
self.p = xml.parsers.expat.ParserCreate()
self.p.StartElementHandler = self.StartElementHandler
self.p.EndElementHandler = self.EndElementHandler
self.p.CharacterDataHandler = self.CharacterDataHandler
self.valid = False
dict.__init__(self)
class ValidPasswordTokenType(object):
"""
Type of L{ValidPasswordToken}, a token used in lieu of a password in
authToken to represent a user that has been authorized by other
means (e.g. a one-time token).
For example, a script that needs to perform some operation from a
particular user's viewpoint, but has direct access to the database
via a shim client, may use L{ValidPasswordToken} instead of a
password in authToken to bypass password checks while still adhering
to the user's own capabilities and limitations.
This type should be instantiated exactly once (as
L{ValidPasswordToken}).
"""
__slots__ = ()
def __str__(self):
return '<Valid Password>'
def __repr__(self):
return 'ValidPasswordToken'
ValidPasswordToken = ValidPasswordTokenType()
class ValidUser(object):
"""
Object used in lieu of a username in authToken to represent an imaginary
user with a given set of roles.
For example, a script that needs to perform a repository operation with a
particular set of permissions, but has direct access to the database via
a shim client, may use an instance of L{ValidUser} instead of a username
in authToken to bypass username and password checks while still adhering
to the limitations of the specified set of roles.
The set of roles is given as a list containing role names, or integer
roleIds. Mixing of names and IDs is allowed. Additionally, a role of '*'
will entitle the user to all roles in the repository; if no arguments are
given this is the default.
"""
__slots__ = ('roles', 'username')
def __init__(self, *roles, **kwargs):
if not roles:
roles = ['*']
if isinstance(roles[0], (list, tuple)):
roles = roles[0]
self.roles = frozenset(roles)
self.username = kwargs.pop('username', None)
if kwargs:
raise TypeError("Unexpected keyword argument %s" %
(kwargs.popitem()[0]))
def __str__(self):
if self.username:
user_fmt = '%r ' % (self.username,)
else:
user_fmt = ''
if '*' in self.roles:
return '<User %swith all roles>' % (user_fmt,)
else:
return '<User %swith roles %s>' % (user_fmt,
', '.join(unicode(x) for x in self.roles))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, sorted(self.roles))
def __reduce__(self):
# Be pickleable, but don't actually pickle the object as it could
# then cross a RPC boundary and become a security vulnerability. Plus,
# it would confuse logcat.
if self.username:
return str, (str(self.username),)
else:
return str, (str(self),)
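# Illustrative construction (a hedged sketch; the exact authToken tuple shape
# used by shim clients is an assumption here, not confirmed by this file):
#     authToken = ('alice', ValidPasswordToken)   # real user, skip password check
#     authToken = (ValidUser('admin'), None)      # imaginary user with one role
#     authToken = (ValidUser(), None)             # imaginary user with all roles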
|
fedora-conary/conary
|
conary/repository/netrepos/netauth.py
|
Python
|
apache-2.0
| 50,922 | 0.003613 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Image utilities
Created by: Rui Carmo
License: MIT (see LICENSE for details)
"""
import struct    # needed by get_info below
import StringIO  # needed for JPEG parsing in get_info (Python 2)
from operator import itemgetter
def linear_partition(seq, k):
if k <= 0:
return []
n = len(seq) - 1
if k > n:
return map(lambda x: [x], seq)
table, solution = linear_partition_table(seq, k)
k, ans = k-2, []
while k >= 0:
ans = [[seq[i] for i in xrange(solution[n-1][k]+1, n+1)]] + ans
n, k = solution[n-1][k], k-1
return [[seq[i] for i in xrange(0, n+1)]] + ans
def linear_partition_table(seq, k):
n = len(seq)
table = [[0] * k for x in xrange(n)]
solution = [[0] * (k-1) for x in xrange(n-1)]
for i in xrange(n):
table[i][0] = seq[i] + (table[i-1][0] if i else 0)
for j in xrange(k):
table[0][j] = seq[0]
for i in xrange(1, n):
for j in xrange(1, k):
table[i][j], solution[i-1][j-1] = min(
((max(table[x][j-1], table[i][0]-table[x][0]), x) for x in xrange(i)),
key=itemgetter(0))
return (table, solution)
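# Illustrative usage (a hedged sketch): split a weight sequence into k
# contiguous runs so that the heaviest run is as light as possible.
#     parts = linear_partition([9, 2, 6, 3, 8, 5, 8, 1, 7, 3, 4], 3)
#     # an optimal answer keeps every group sum at or below 21,
#     # e.g. [[9, 2, 6, 3], [8, 5, 8], [1, 7, 3, 4]]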
def get_info(data):
"""Parses a small buffer and attempts to return basic image metadata"""
data = str(data)
size = len(data)
height = -1
width = -1
content_type = ''
# handle GIFs
if (size >= 10) and data[:6] in ('GIF87a', 'GIF89a'):
# Check to see if content_type is correct
content_type = 'image/gif'
w, h = struct.unpack("<HH", data[6:10])
width = int(w)
height = int(h)
# See PNG 2. Edition spec (http://www.w3.org/TR/PNG/)
# Bytes 0-7 are below, 4-byte chunk length, then 'IHDR'
# and finally the 4-byte width, height
elif ((size >= 24) and data.startswith('\211PNG\r\n\032\n')
and (data[12:16] == 'IHDR')):
content_type = 'image/png'
w, h = struct.unpack(">LL", data[16:24])
width = int(w)
height = int(h)
# Maybe this is for an older PNG version.
elif (size >= 16) and data.startswith('\211PNG\r\n\032\n'):
# Check to see if we have the right content type
content_type = 'image/png'
w, h = struct.unpack(">LL", data[8:16])
width = int(w)
height = int(h)
# Check for a JPEG
elif (size >= 4):
jpeg = StringIO.StringIO(data)
b = jpeg.read(4)
if b.startswith('\xff\xd8\xff\xe0'):
content_type = 'image/jpeg'
bs = jpeg.tell()
b = jpeg.read(2)
bl = (ord(b[0]) << 8) + ord(b[1])
b = jpeg.read(4)
if b.startswith("JFIF"):
bs += bl
while(bs < len(data)):
jpeg.seek(bs)
b = jpeg.read(4)
bl = (ord(b[2]) << 8) + ord(b[3])
if bl >= 7 and b[0] == '\xff' and b[1] == '\xc0':
jpeg.read(1)
b = jpeg.read(4)
height = (ord(b[0]) << 8) + ord(b[1])
width = (ord(b[2]) << 8) + ord(b[3])
break
bs = bs + bl + 2
return width, height, content_type
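# Illustrative usage (a hedged sketch; 'logo.png' is a placeholder path, and
# this relies on the struct/StringIO imports added at the top of the module):
#     f = open('logo.png', 'rb')
#     width, height, content_type = get_info(f.read(64))  # a short prefix suffices
#     f.close()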
|
Velmont/digital-signage-server
|
lib/utils/imagekit.py
|
Python
|
mit
| 4,036 | 0.006442 |
#!/usr/bin/env python3
import os, sys
import time
from ..utils import (std, strip_site, MGet, urlparse, HTMLParser)
class FileDownloader(MGet):
def __init__(self, info = {}):
self.last_len = 0
self.alt_prog = 0.0
def getLocalFilesize(self, filename):
tmp_name = self.temp_name(filename)
if os.path.exists(filename): return os.path.getsize(os.path.join('.', filename))
elif os.path.exists(tmp_name): return os.path.getsize(os.path.join('.', tmp_name))
else: return None
def flush_bar (self, result = []):
line = "".join(["%s" % x for x in result])
if self.info.get('newline'): sys.stdout.write('\n')
else: sys.stdout.write('\r')
if self.last_len: sys.stdout.write('\b' * self.last_len)
sys.stdout.write("\r")
sys.stdout.write(line)
sys.stdout.flush()
self.last_len = len(line)
def _progress_bar(self, s_dif = None, progress = None, bytes = None, dif = None, width = 46):
width = self.get_term_width() - width
data_len = (self.cursize - self.resume_len)
quit_size = (self.quit_size / 100.0)
if progress > quit_size: quit_size = progress
prog = int(progress * width)
prog_bal = width - int(progress * width)
if self.quit_size != 100.0:
expect = int(quit_size * width)
ex_bal = int((width - expect) - 2)
ex_prog_bal = int(expect - int(progress * width))
progress_bar = "["+"="*(prog)+">"+" "*(ex_prog_bal)+"]["+" "*(ex_bal)+"] "
else:
progress_bar = "["+"="*(prog)+">"+" "*(prog_bal)+"] "
_res = ["%-6s" % ("{0:.1f}%".format(float(progress) * 100.0)), progress_bar,
"%-12s " % ("{:02,}".format(self.cursize)),
"%9s " % (self.calc_speed(dif,bytes).decode()),
"eta "+ self.calc_eta(s_dif, bytes, data_len, self.remaining).decode()]
self.flush_bar (_res)
def progress_bar_2(self, s_dif = None, progress = None, bytes = None, dif = None, width = 48):
width = self.get_term_width() - width
prog = int(self.alt_prog * width)
prog_bal = width - int(self.alt_prog * width)
progress_bar = "[" + " " * (prog) + "<=>" + " " * (prog_bal) + "] "
_res = [ "(^_^) " if int(self.alt_prog * 10) in list(range(0, 10, 4)) else "(0_0) ",
progress_bar, "%-12s " % ("{:02,}".format(self.cursize)),
"%9s%12s" % (self.calc_speed(dif,bytes).decode()," ")]
self.flush_bar (_res)
if self.alt_prog < 0.1: self.reach_end = False
if self.alt_prog == 1.0: self.reach_end = True
if self.alt_prog < 1.0 and not self.reach_end: self.alt_prog += 0.1
else: self.alt_prog -= 0.1
def _progress(self): return self.get_progress(self.cursize, self.filesize)
def temp_name (self, filename):
if self.info.get('nopart', False) or\
(os.path.exists(filename) and not os.path.isfile(filename)):
return filename
return filename + ".part"
def undo_temp_name (self, filename):
if filename.endswith (".part"): return filename[:-len(".part")]
return filename
def try_rename (self, old_filename, new_filename):
try:
if old_filename == new_filename: return
os.rename (old_filename, new_filename)
except (IOError, OSError) as err:
common.report ('Unable to rename file: %s' % str(err))
class MyHTMLParser(HTMLParser):
def __init__(self, html, tag = {}, hostname = None):
HTMLParser.__init__(self)
self.data = {}
self.start_tag = tag
self.hostname = hostname
self.html = html
def load(self):
self.feed(self.html)
self.close()
def handle_starttag(self, tag, attrs):
if tag not in self.start_tag: return
for name, value in attrs:
if name in self.name or value in self.value:
hostname, site = strip_site(value)
if hostname in std.site_list:
self.data[self.hostname] = value
def get_result(self, tag, name=None, value=None):
self.start_tag = tag
self.name = name or ''
self.value = value or ''
self.load()
if self.hostname in self.data:
return self.data[self.hostname]
else: return
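# Illustrative usage (a hedged sketch; std.site_list and strip_site come from
# the ..utils import at the top of this module):
#     parser = MyHTMLParser(html_text, hostname='example.com')
#     link = parser.get_result('a', name='href', value='')
#     # returns a matching link for the hostname, or None if nothing matched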
|
FrogLogics/mget
|
build/lib.linux-x86_64-3.5/Mget/downloader/common.py
|
Python
|
gpl-2.0
| 3,835 | 0.041982 |
# [\0] #
# #
# This code is confidential and proprietary, All rights reserved. #
# #
# Tamar Labs 2015. #
# #
# @author: Adam Lev-Libfeld (adam@tamarlabs.com) #
# #
from __future__ import absolute_import, print_function, unicode_literals
from kombu import simple, Connection
from streamparse.bolt import Bolt
#import common.logger
from external.actions import ActionDB
class ActionBolt(Bolt):
def initialize(self, storm_conf, context):
        self.actionDB = ActionDB()  # bind to the instance so process() can reach it
def process(self, tup):
try:
action_num = tup.values[0]
            data = tup.values[1]
            if action_num < len(self.actionDB.actions):
                data = self.actionDB.actions[action_num](data)
                self.emit([action_num+1, data], stream="next_action")
            else:
                # no further action: pass the payload straight to the output queue
                self.emit([data], stream="output_to_queue")
except:
import sys, traceback
msg = "Unexpected ActionBolt (action: %d) error:%s" % (action_num, "\n".join(traceback.format_exception(*sys.exc_info())))
#self.logger.error(msg)
|
daTokenizer/quickstorm
|
storm/virtualenvs/_resources/resources/bolts/action_bolt.py
|
Python
|
apache-2.0
| 1,292 | 0.023994 |
"""Constants for the pi_hole intergration."""
from datetime import timedelta
DOMAIN = "pi_hole"
CONF_LOCATION = "location"
CONF_SLUG = "slug"
DEFAULT_LOCATION = "admin"
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "Pi-Hole"
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = True
SERVICE_DISABLE = "disable"
SERVICE_DISABLE_ATTR_DURATION = "duration"
SERVICE_DISABLE_ATTR_NAME = "name"
SERVICE_ENABLE = "enable"
SERVICE_ENABLE_ATTR_NAME = SERVICE_DISABLE_ATTR_NAME
ATTR_BLOCKED_DOMAINS = "domains_blocked"
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
SENSOR_DICT = {
"ads_blocked_today": ["Ads Blocked Today", "ads", "mdi:close-octagon-outline"],
"ads_percentage_today": [
"Ads Percentage Blocked Today",
"%",
"mdi:close-octagon-outline",
],
"clients_ever_seen": ["Seen Clients", "clients", "mdi:account-outline"],
"dns_queries_today": [
"DNS Queries Today",
"queries",
"mdi:comment-question-outline",
],
"domains_being_blocked": ["Domains Blocked", "domains", "mdi:block-helper"],
"queries_cached": ["DNS Queries Cached", "queries", "mdi:comment-question-outline"],
"queries_forwarded": [
"DNS Queries Forwarded",
"queries",
"mdi:comment-question-outline",
],
"unique_clients": ["DNS Unique Clients", "clients", "mdi:account-outline"],
"unique_domains": ["DNS Unique Domains", "domains", "mdi:domain"],
}
SENSOR_LIST = list(SENSOR_DICT)
|
leppa/home-assistant
|
homeassistant/components/pi_hole/const.py
|
Python
|
apache-2.0
| 1,452 | 0.002066 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Filters
=======
Filter bank construction
------------------------
.. autosummary::
:toctree: generated/
mel
chroma
constant_q
semitone_filterbank
Window functions
----------------
.. autosummary::
:toctree: generated/
window_bandwidth
get_window
Miscellaneous
-------------
.. autosummary::
:toctree: generated/
constant_q_lengths
cq_to_chroma
mr_frequencies
window_sumsquare
diagonal_filter
"""
import warnings
import numpy as np
import scipy
import scipy.signal
import scipy.ndimage
from numba import jit
from ._cache import cache
from . import util
from .util.exceptions import ParameterError
from .core.convert import note_to_hz, hz_to_midi, midi_to_hz, hz_to_octs
from .core.convert import fft_frequencies, mel_frequencies
__all__ = [
"mel",
"chroma",
"constant_q",
"constant_q_lengths",
"cq_to_chroma",
"window_bandwidth",
"get_window",
"mr_frequencies",
"semitone_filterbank",
"window_sumsquare",
"diagonal_filter",
]
# Dictionary of window function bandwidths
WINDOW_BANDWIDTHS = {
"bart": 1.3334961334912805,
"barthann": 1.4560255965133932,
"bartlett": 1.3334961334912805,
"bkh": 2.0045975283585014,
"black": 1.7269681554262326,
"blackharr": 2.0045975283585014,
"blackman": 1.7269681554262326,
"blackmanharris": 2.0045975283585014,
"blk": 1.7269681554262326,
"bman": 1.7859588613860062,
"bmn": 1.7859588613860062,
"bohman": 1.7859588613860062,
"box": 1.0,
"boxcar": 1.0,
"brt": 1.3334961334912805,
"brthan": 1.4560255965133932,
"bth": 1.4560255965133932,
"cosine": 1.2337005350199792,
"flat": 2.7762255046484143,
"flattop": 2.7762255046484143,
"flt": 2.7762255046484143,
"halfcosine": 1.2337005350199792,
"ham": 1.3629455320350348,
"hamm": 1.3629455320350348,
"hamming": 1.3629455320350348,
"han": 1.50018310546875,
"hann": 1.50018310546875,
"hanning": 1.50018310546875,
"nut": 1.9763500280946082,
"nutl": 1.9763500280946082,
"nuttall": 1.9763500280946082,
"ones": 1.0,
"par": 1.9174603174603191,
"parz": 1.9174603174603191,
"parzen": 1.9174603174603191,
"rect": 1.0,
"rectangular": 1.0,
"tri": 1.3331706523555851,
"triang": 1.3331706523555851,
"triangle": 1.3331706523555851,
}
@cache(level=10)
def mel(
sr,
n_fft,
n_mels=128,
fmin=0.0,
fmax=None,
htk=False,
norm="slaney",
dtype=np.float32,
):
"""Create a Mel filter-bank.
This produces a linear transformation matrix to project
FFT bins onto Mel-frequency bins.
Parameters
----------
sr : number > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use ``fmax = sr / 2.0``
htk : bool [scalar]
use HTK formula instead of Slaney
norm : {None, 'slaney', or number} [scalar]
If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization).
        If numeric, use `librosa.util.normalize` to normalize each filter to unit l_p norm.
See `librosa.util.normalize` for a full description of supported norm values
(including `+-np.inf`).
Otherwise, leave all the triangles aiming for a peak value of 1.0
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
See also
--------
librosa.util.normalize
Notes
-----
This function caches at level 10.
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8KHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> img = librosa.display.specshow(melfb, x_axis='linear', ax=ax)
>>> ax.set(ylabel='Mel filter', title='Mel filter bank')
>>> fig.colorbar(img, ax=ax)
"""
if fmax is None:
fmax = float(sr) / 2
# Initialize the weights
n_mels = int(n_mels)
weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype)
# Center freqs of each FFT bin
fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)
# 'Center freqs' of mel bands - uniformly spaced between limits
mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)
fdiff = np.diff(mel_f)
ramps = np.subtract.outer(mel_f, fftfreqs)
for i in range(n_mels):
# lower and upper slopes for all bins
lower = -ramps[i] / fdiff[i]
upper = ramps[i + 2] / fdiff[i + 1]
# .. then intersect them with each other and zero
weights[i] = np.maximum(0, np.minimum(lower, upper))
if norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (mel_f[2 : n_mels + 2] - mel_f[:n_mels])
weights *= enorm[:, np.newaxis]
else:
weights = util.normalize(weights, norm=norm, axis=-1)
# Only check weights if f_mel[0] is positive
if not np.all((mel_f[:-2] == 0) | (weights.max(axis=1) > 0)):
# This means we have an empty channel somewhere
warnings.warn(
"Empty filters detected in mel frequency basis. "
"Some channels will produce empty responses. "
"Try increasing your sampling rate (and fmax) or "
"reducing n_mels."
)
return weights
@cache(level=10)
def chroma(
sr,
n_fft,
n_chroma=12,
tuning=0.0,
ctroct=5.0,
octwidth=2,
norm=2,
base_c=True,
dtype=np.float32,
):
"""Create a chroma filter bank.
This creates a linear transformation matrix to project
FFT bins onto chroma bins (i.e. pitch classes).
Parameters
----------
sr : number > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
number of FFT bins
n_chroma : int > 0 [scalar]
number of chroma bins
tuning : float
Tuning deviation from A440 in fractions of a chroma bin.
ctroct : float > 0 [scalar]
octwidth : float > 0 or None [scalar]
``ctroct`` and ``octwidth`` specify a dominance window:
a Gaussian weighting centered on ``ctroct`` (in octs, A0 = 27.5Hz)
and with a gaussian half-width of ``octwidth``.
Set ``octwidth`` to `None` to use a flat weighting.
norm : float > 0 or np.inf
Normalization factor for each filter
base_c : bool
If True, the filter bank will start at 'C'.
If False, the filter bank will start at 'A'.
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
Chroma filter matrix
See Also
--------
librosa.util.normalize
librosa.feature.chroma_stft
Notes
-----
This function caches at level 10.
Examples
--------
Build a simple chroma filter bank
>>> chromafb = librosa.filters.chroma(22050, 4096)
array([[ 1.689e-05, 3.024e-04, ..., 4.639e-17, 5.327e-17],
[ 1.716e-05, 2.652e-04, ..., 2.674e-25, 3.176e-25],
...,
[ 1.578e-05, 3.619e-04, ..., 8.577e-06, 9.205e-06],
[ 1.643e-05, 3.355e-04, ..., 1.474e-10, 1.636e-10]])
Use quarter-tones instead of semitones
>>> librosa.filters.chroma(22050, 4096, n_chroma=24)
array([[ 1.194e-05, 2.138e-04, ..., 6.297e-64, 1.115e-63],
[ 1.206e-05, 2.009e-04, ..., 1.546e-79, 2.929e-79],
...,
[ 1.162e-05, 2.372e-04, ..., 6.417e-38, 9.923e-38],
[ 1.180e-05, 2.260e-04, ..., 4.697e-50, 7.772e-50]])
Equally weight all octaves
>>> librosa.filters.chroma(22050, 4096, octwidth=None)
array([[ 3.036e-01, 2.604e-01, ..., 2.445e-16, 2.809e-16],
[ 3.084e-01, 2.283e-01, ..., 1.409e-24, 1.675e-24],
...,
[ 2.836e-01, 3.116e-01, ..., 4.520e-05, 4.854e-05],
[ 2.953e-01, 2.888e-01, ..., 7.768e-10, 8.629e-10]])
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> img = librosa.display.specshow(chromafb, x_axis='linear', ax=ax)
>>> ax.set(ylabel='Chroma filter', title='Chroma filter bank')
>>> fig.colorbar(img, ax=ax)
"""
wts = np.zeros((n_chroma, n_fft))
# Get the FFT bins, not counting the DC component
frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]
frqbins = n_chroma * hz_to_octs(
frequencies, tuning=tuning, bins_per_octave=n_chroma
)
# make up a value for the 0 Hz bin = 1.5 octaves below bin 1
# (so chroma is 50% rotated from bin 1, and bin width is broad)
frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))
binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1], 1.0), [1]))
D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype="d")).T
n_chroma2 = np.round(float(n_chroma) / 2)
# Project into range -n_chroma/2 .. n_chroma/2
# add on fixed offset of 10*n_chroma to ensure all values passed to
# rem are positive
D = np.remainder(D + n_chroma2 + 10 * n_chroma, n_chroma) - n_chroma2
# Gaussian bumps - 2*D to make them narrower
wts = np.exp(-0.5 * (2 * D / np.tile(binwidthbins, (n_chroma, 1))) ** 2)
# normalize each column
wts = util.normalize(wts, norm=norm, axis=0)
# Maybe apply scaling for fft bins
if octwidth is not None:
wts *= np.tile(
np.exp(-0.5 * (((frqbins / n_chroma - ctroct) / octwidth) ** 2)),
(n_chroma, 1),
)
if base_c:
wts = np.roll(wts, -3 * (n_chroma // 12), axis=0)
# remove aliasing columns, copy to ensure row-contiguity
return np.ascontiguousarray(wts[:, : int(1 + n_fft / 2)], dtype=dtype)
def __float_window(window_spec):
"""Decorator function for windows with fractional input.
This function guarantees that for fractional ``x``, the following hold:
1. ``__float_window(window_function)(x)`` has length ``np.ceil(x)``
2. all values from ``np.floor(x)`` are set to 0.
For integer-valued ``x``, there should be no change in behavior.
"""
def _wrap(n, *args, **kwargs):
"""The wrapped window"""
n_min, n_max = int(np.floor(n)), int(np.ceil(n))
window = get_window(window_spec, n_min)
if len(window) < n_max:
window = np.pad(window, [(0, n_max - len(window))], mode="constant")
window[n_min:] = 0.0
return window
return _wrap
@cache(level=10)
def constant_q(
sr,
fmin=None,
n_bins=84,
bins_per_octave=12,
window="hann",
filter_scale=1,
pad_fft=True,
norm=1,
dtype=np.complex64,
gamma=0,
**kwargs,
):
r"""Construct a constant-Q basis.
This function constructs a filter bank similar to Morlet wavelets,
where complex exponentials are windowed to different lengths
such that the number of cycles remains fixed for all frequencies.
By default, a Hann window (rather than the Gaussian window of Morlet wavelets)
is used, but this can be controlled by the ``window`` parameter.
Frequencies are spaced geometrically, increasing by a factor of
``(2**(1./bins_per_octave))`` at each successive band.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
window : string, tuple, number, or function
Windowing function to apply to filters.
filter_scale : float > 0 [scalar]
Scale of filter windows.
Small values (<1) use shorter windows for higher temporal resolution.
pad_fft : boolean
Center-pad all filters up to the nearest integral power of 2.
By default, padding is done with zeros, but this can be overridden
by setting the ``mode=`` field in *kwargs*.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See librosa.util.normalize
gamma : number >= 0
Bandwidth offset for variable-Q transforms.
``gamma=0`` produces a constant-Q filterbank.
dtype : np.dtype
The data type of the output basis.
By default, uses 64-bit (single precision) complex floating point.
kwargs : additional keyword arguments
Arguments to `np.pad()` when ``pad==True``.
Returns
-------
filters : np.ndarray, ``len(filters) == n_bins``
``filters[i]`` is ``i``\ th time-domain CQT basis filter
lengths : np.ndarray, ``len(lengths) == n_bins``
The (fractional) length of each filter
Notes
-----
This function caches at level 10.
See Also
--------
constant_q_lengths
librosa.cqt
librosa.vqt
librosa.util.normalize
Examples
--------
Use a shorter window for each filter
>>> basis, lengths = librosa.filters.constant_q(22050, filter_scale=0.5)
Plot one octave of filters in time and frequency
>>> import matplotlib.pyplot as plt
>>> basis, lengths = librosa.filters.constant_q(22050)
>>> fig, ax = plt.subplots(nrows=2, figsize=(10, 6))
>>> notes = librosa.midi_to_note(np.arange(24, 24 + len(basis)))
>>> for i, (f, n) in enumerate(zip(basis, notes[:12])):
... f_scale = librosa.util.normalize(f) / 2
... ax[0].plot(i + f_scale.real)
... ax[0].plot(i + f_scale.imag, linestyle=':')
>>> ax[0].set(yticks=np.arange(len(notes[:12])), yticklabels=notes[:12],
... ylabel='CQ filters',
... title='CQ filters (one octave, time domain)',
... xlabel='Time (samples at 22050 Hz)')
>>> ax[0].legend(['Real', 'Imaginary'])
>>> F = np.abs(np.fft.fftn(basis, axes=[-1]))
>>> # Keep only the positive frequencies
>>> F = F[:, :(1 + F.shape[1] // 2)]
>>> librosa.display.specshow(F, x_axis='linear', y_axis='cqt_note', ax=ax[1])
>>> ax[1].set(ylabel='CQ filters', title='CQ filter magnitudes (frequency domain)')
"""
if fmin is None:
fmin = note_to_hz("C1")
# Pass-through parameters to get the filter lengths
lengths = constant_q_lengths(
sr,
fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
window=window,
filter_scale=filter_scale,
gamma=gamma,
)
freqs = fmin * (2.0 ** (np.arange(n_bins, dtype=float) / bins_per_octave))
# Build the filters
filters = []
for ilen, freq in zip(lengths, freqs):
# Build the filter: note, length will be ceil(ilen)
sig = np.exp(
np.arange(-ilen // 2, ilen // 2, dtype=float) * 1j * 2 * np.pi * freq / sr
)
# Apply the windowing function
sig = sig * __float_window(window)(len(sig))
# Normalize
sig = util.normalize(sig, norm=norm)
filters.append(sig)
# Pad and stack
max_len = max(lengths)
if pad_fft:
max_len = int(2.0 ** (np.ceil(np.log2(max_len))))
else:
max_len = int(np.ceil(max_len))
filters = np.asarray(
[util.pad_center(filt, max_len, **kwargs) for filt in filters], dtype=dtype
)
return filters, np.asarray(lengths)
@cache(level=10)
def constant_q_lengths(
sr, fmin, n_bins=84, bins_per_octave=12, window="hann", filter_scale=1, gamma=0
):
r"""Return length of each filter in a constant-Q basis.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin.
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
window : str or callable
Window function to use on filters
filter_scale : float > 0 [scalar]
        Resolution of filter windows. Larger values use longer windows.
    gamma : number >= 0
        Bandwidth offset for variable-Q lengths; ``gamma=0`` gives constant-Q behavior.
Returns
-------
lengths : np.ndarray
The length of each filter.
Notes
-----
This function caches at level 10.
See Also
--------
constant_q
librosa.cqt
"""
if fmin <= 0:
raise ParameterError("fmin must be positive")
if bins_per_octave <= 0:
raise ParameterError("bins_per_octave must be positive")
if filter_scale <= 0:
raise ParameterError("filter_scale must be positive")
if n_bins <= 0 or not isinstance(n_bins, (int, np.integer)):
raise ParameterError("n_bins must be a positive integer")
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
alpha = 2.0 ** (1.0 / bins_per_octave) - 1.0
Q = float(filter_scale) / alpha
# Compute the frequencies
freq = fmin * (2.0 ** (np.arange(n_bins, dtype=float) / bins_per_octave))
if freq[-1] * (1 + 0.5 * window_bandwidth(window) / Q) > sr / 2.0:
raise ParameterError("Filter pass-band lies beyond Nyquist")
# Convert frequencies to filter lengths
lengths = Q * sr / (freq + gamma / alpha)
return lengths
@cache(level=10)
def cq_to_chroma(
n_input,
bins_per_octave=12,
n_chroma=12,
fmin=None,
window=None,
base_c=True,
dtype=np.float32,
):
"""Construct a linear transformation matrix to map Constant-Q bins
onto chroma bins (i.e., pitch classes).
Parameters
----------
n_input : int > 0 [scalar]
Number of input components (CQT bins)
bins_per_octave : int > 0 [scalar]
How many bins per octave in the CQT
n_chroma : int > 0 [scalar]
Number of output bins (per octave) in the chroma
fmin : None or float > 0
Center frequency of the first constant-Q channel.
Default: 'C1' ~= 32.7 Hz
window : None or np.ndarray
If provided, the cq_to_chroma filter bank will be
convolved with ``window``.
base_c : bool
If True, the first chroma bin will start at 'C'
If False, the first chroma bin will start at 'A'
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
cq_to_chroma : np.ndarray [shape=(n_chroma, n_input)]
Transformation matrix: ``Chroma = np.dot(cq_to_chroma, CQT)``
Raises
------
ParameterError
If ``n_input`` is not an integer multiple of ``n_chroma``
Notes
-----
This function caches at level 10.
Examples
--------
Get a CQT, and wrap bins to chroma
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> CQT = np.abs(librosa.cqt(y, sr=sr))
>>> chroma_map = librosa.filters.cq_to_chroma(CQT.shape[0])
>>> chromagram = chroma_map.dot(CQT)
>>> # Max-normalize each time step
>>> chromagram = librosa.util.normalize(chromagram, axis=0)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=3, sharex=True)
>>> imgcq = librosa.display.specshow(librosa.amplitude_to_db(CQT,
... ref=np.max),
... y_axis='cqt_note', x_axis='time',
... ax=ax[0])
>>> ax[0].set(title='CQT Power')
>>> ax[0].label_outer()
>>> librosa.display.specshow(chromagram, y_axis='chroma', x_axis='time',
... ax=ax[1])
>>> ax[1].set(title='Chroma (wrapped CQT)')
>>> ax[1].label_outer()
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> imgchroma = librosa.display.specshow(chroma, y_axis='chroma', x_axis='time', ax=ax[2])
>>> ax[2].set(title='librosa.feature.chroma_stft')
"""
# How many fractional bins are we merging?
n_merge = float(bins_per_octave) / n_chroma
if fmin is None:
fmin = note_to_hz("C1")
if np.mod(n_merge, 1) != 0:
raise ParameterError(
"Incompatible CQ merge: "
"input bins must be an "
"integer multiple of output bins."
)
# Tile the identity to merge fractional bins
cq_to_ch = np.repeat(np.eye(n_chroma), n_merge, axis=1)
# Roll it left to center on the target bin
cq_to_ch = np.roll(cq_to_ch, -int(n_merge // 2), axis=1)
# How many octaves are we repeating?
    n_octaves = np.ceil(float(n_input) / bins_per_octave)
# Repeat and trim
cq_to_ch = np.tile(cq_to_ch, int(n_octaves))[:, :n_input]
# What's the note number of the first bin in the CQT?
# midi uses 12 bins per octave here
midi_0 = np.mod(hz_to_midi(fmin), 12)
if base_c:
# rotate to C
roll = midi_0
else:
# rotate to A
roll = midi_0 - 9
# Adjust the roll in terms of how many chroma we want out
# We need to be careful with rounding here
roll = int(np.round(roll * (n_chroma / 12.0)))
# Apply the roll
cq_to_ch = np.roll(cq_to_ch, roll, axis=0).astype(dtype)
if window is not None:
cq_to_ch = scipy.signal.convolve(cq_to_ch, np.atleast_2d(window), mode="same")
return cq_to_ch
@cache(level=10)
def window_bandwidth(window, n=1000):
"""Get the equivalent noise bandwidth of a window function.
Parameters
----------
window : callable or string
A window function, or the name of a window function.
Examples:
- scipy.signal.hann
- 'boxcar'
n : int > 0
The number of coefficients to use in estimating the
window bandwidth
Returns
-------
bandwidth : float
The equivalent noise bandwidth (in FFT bins) of the
given window function
Notes
-----
This function caches at level 10.
See Also
--------
get_window
"""
if hasattr(window, "__name__"):
key = window.__name__
else:
key = window
if key not in WINDOW_BANDWIDTHS:
win = get_window(window, n)
WINDOW_BANDWIDTHS[key] = n * np.sum(win ** 2) / np.sum(np.abs(win)) ** 2
return WINDOW_BANDWIDTHS[key]
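# Illustrative usage (a hedged sketch): common window names hit the lookup
# table above, so no estimation pass is required.
#     >>> window_bandwidth('hann')
#     1.50018310546875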
@cache(level=10)
def get_window(window, Nx, fftbins=True):
"""Compute a window function.
This is a wrapper for `scipy.signal.get_window` that additionally
supports callable or pre-computed windows.
Parameters
----------
window : string, tuple, number, callable, or list-like
The window specification:
- If string, it's the name of the window function (e.g., `'hann'`)
- If tuple, it's the name of the window function and any parameters
(e.g., `('kaiser', 4.0)`)
- If numeric, it is treated as the beta parameter of the `'kaiser'`
window, as in `scipy.signal.get_window`.
- If callable, it's a function that accepts one integer argument
(the window length)
- If list-like, it's a pre-computed window of the correct length `Nx`
Nx : int > 0
The length of the window
fftbins : bool, optional
If True (default), create a periodic window for use with FFT
If False, create a symmetric window for filter design applications.
Returns
-------
get_window : np.ndarray
A window of length `Nx` and type `window`
See Also
--------
scipy.signal.get_window
Notes
-----
This function caches at level 10.
Raises
------
ParameterError
        If `window` is supplied as a vector of length != `Nx`,
or is otherwise mis-specified.
"""
if callable(window):
return window(Nx)
elif isinstance(window, (str, tuple)) or np.isscalar(window):
# TODO: if we add custom window functions in librosa, call them here
return scipy.signal.get_window(window, Nx, fftbins=fftbins)
elif isinstance(window, (np.ndarray, list)):
if len(window) == Nx:
return np.asarray(window)
raise ParameterError(
"Window size mismatch: " "{:d} != {:d}".format(len(window), Nx)
)
else:
raise ParameterError("Invalid window specification: {}".format(window))
@cache(level=10)
def _multirate_fb(
center_freqs=None,
sample_rates=None,
Q=25.0,
passband_ripple=1,
stopband_attenuation=50,
ftype="ellip",
flayout="sos",
):
r"""Helper function to construct a multirate filterbank.
A filter bank consists of multiple band-pass filters which divide the input signal
into subbands. In the case of a multirate filter bank, the band-pass filters
operate with resampled versions of the input signal, e.g. to keep the length
of a filter constant while shifting its center frequency.
This implementation uses `scipy.signal.iirdesign` to design the filters.
Parameters
----------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Samplerate for each filter (used for multirate filterbank).
Q : float
        Q factor (influences the filter bandwidth).
passband_ripple : float
The maximum loss in the passband (dB)
See `scipy.signal.iirdesign` for details.
stopband_attenuation : float
The minimum attenuation in the stopband (dB)
See `scipy.signal.iirdesign` for details.
ftype : str
The type of IIR filter to design
See `scipy.signal.iirdesign` for details.
flayout : string
Valid `output` argument for `scipy.signal.iirdesign`.
- If `ba`, returns numerators/denominators of the transfer functions,
used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, returns a series of second-order filters,
used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
- If `zpk`, returns zeros, poles, and system gains of the transfer functions.
Returns
-------
filterbank : list [shape=(n,), dtype=float]
Each list entry comprises the filter coefficients for a single filter.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Samplerate for each filter.
Notes
-----
This function caches at level 10.
See Also
--------
scipy.signal.iirdesign
Raises
------
ParameterError
If ``center_freqs`` is ``None``.
If ``sample_rates`` is ``None``.
If ``center_freqs.shape`` does not match ``sample_rates.shape``.
"""
if center_freqs is None:
raise ParameterError("center_freqs must be provided.")
if sample_rates is None:
raise ParameterError("sample_rates must be provided.")
if center_freqs.shape != sample_rates.shape:
raise ParameterError(
"Number of provided center_freqs and sample_rates must be equal."
)
nyquist = 0.5 * sample_rates
filter_bandwidths = center_freqs / float(Q)
filterbank = []
for cur_center_freq, cur_nyquist, cur_bw in zip(
center_freqs, nyquist, filter_bandwidths
):
passband_freqs = [
cur_center_freq - 0.5 * cur_bw,
cur_center_freq + 0.5 * cur_bw,
] / cur_nyquist
stopband_freqs = [
cur_center_freq - cur_bw,
cur_center_freq + cur_bw,
] / cur_nyquist
cur_filter = scipy.signal.iirdesign(
passband_freqs,
stopband_freqs,
passband_ripple,
stopband_attenuation,
analog=False,
ftype=ftype,
output=flayout,
)
filterbank.append(cur_filter)
return filterbank, sample_rates
@cache(level=10)
def mr_frequencies(tuning):
r"""Helper function for generating center frequency and sample rate pairs.
This function will return center frequency and corresponding sample rates
to obtain similar pitch filterbank settings as described in [#]_.
Instead of starting with MIDI pitch `A0`, we start with `C0`.
.. [#] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
tuning : float [scalar]
Tuning deviation from A440, measure as a fraction of the equally
tempered semitone (1/12 of an octave).
Returns
-------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rate for each filter, used for multirate filterbank.
Notes
-----
This function caches at level 10.
See Also
--------
librosa.filters.semitone_filterbank
"""
center_freqs = midi_to_hz(np.arange(24 + tuning, 109 + tuning))
sample_rates = np.asarray(
len(np.arange(0, 36)) * [882, ]
+ len(np.arange(36, 70)) * [4410, ]
+ len(np.arange(70, 85)) * [22050, ]
)
return center_freqs, sample_rates
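# Illustrative usage (a hedged sketch): with tuning=0.0 the bank covers MIDI
# notes 24 through 108 (85 filters) at three sample rates.
#     center_freqs, sample_rates = mr_frequencies(tuning=0.0)
#     assert len(center_freqs) == 85
#     assert sorted(set(sample_rates)) == [882, 4410, 22050]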
def semitone_filterbank(
center_freqs=None, tuning=0.0, sample_rates=None, flayout="ba", **kwargs
):
r"""Construct a multi-rate bank of infinite-impulse response (IIR)
band-pass filters at user-defined center frequencies and sample rates.
By default, these center frequencies are set equal to the 88 fundamental
frequencies of the grand piano keyboard, according to a pitch tuning standard
of A440, that is, note A above middle C set to 440 Hz. The center frequencies
are tuned to the twelve-tone equal temperament, which means that they grow
exponentially at a rate of 2**(1/12), that is, twelve notes per octave.
The A440 tuning can be changed by the user while keeping twelve-tone equal
temperament. While A440 is currently the international standard in the music
industry (ISO 16), some orchestras tune to A441-A445, whereas baroque musicians
tune to A415.
See [#]_ for details.
.. [#] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
tuning : float [scalar]
Tuning deviation from A440 as a fraction of a semitone (1/12 of an octave
in equal temperament).
sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rates of each filter in the multirate filterbank.
flayout : string
- If `ba`, the standard difference equation is used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, a series of second-order filters is used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
kwargs : additional keyword arguments
Additional arguments to the private function `_multirate_fb()`.
Returns
-------
filterbank : list [shape=(n,), dtype=float]
Each list entry contains the filter coefficients for a single filter.
fb_sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rate for each filter.
See Also
--------
librosa.cqt
librosa.iirt
librosa.filters.mr_frequencies
scipy.signal.iirdesign
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import scipy.signal
>>> semitone_filterbank, sample_rates = librosa.filters.semitone_filterbank()
>>> fig, ax = plt.subplots()
>>> for cur_sr, cur_filter in zip(sample_rates, semitone_filterbank):
... w, h = scipy.signal.freqz(cur_filter[0], cur_filter[1], worN=2000)
... ax.semilogx((cur_sr / (2 * np.pi)) * w, 20 * np.log10(abs(h)))
>>> ax.set(xlim=[20, 10e3], ylim=[-60, 3], title='Magnitude Responses of the Pitch Filterbank',
... xlabel='Log-Frequency (Hz)', ylabel='Magnitude (dB)')
"""
if (center_freqs is None) and (sample_rates is None):
center_freqs, sample_rates = mr_frequencies(tuning)
filterbank, fb_sample_rates = _multirate_fb(
center_freqs=center_freqs, sample_rates=sample_rates, flayout=flayout, **kwargs
)
return filterbank, fb_sample_rates
@jit(nopython=True, cache=True)
def __window_ss_fill(x, win_sq, n_frames, hop_length): # pragma: no cover
"""Helper function for window sum-square calculation."""
n = len(x)
n_fft = len(win_sq)
for i in range(n_frames):
sample = i * hop_length
x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]
def window_sumsquare(
window,
n_frames,
hop_length=512,
win_length=None,
n_fft=2048,
dtype=np.float32,
norm=None,
):
"""Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing observations
in short-time Fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches ``n_fft``.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=``(n_fft + hop_length * (n_frames - 1))``
The sum-squared envelope of the window function
Examples
--------
For a fixed frame length (2048), compare modulation effects for a Hann window
at different hop lengths:
>>> n_frames = 50
>>> wss_256 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=256)
>>> wss_512 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=512)
>>> wss_1024 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=1024)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=3, sharey=True)
>>> ax[0].plot(wss_256)
>>> ax[0].set(title='hop_length=256')
>>> ax[1].plot(wss_512)
>>> ax[1].set(title='hop_length=512')
>>> ax[2].plot(wss_1024)
>>> ax[2].set(title='hop_length=1024')
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length)
win_sq = util.normalize(win_sq, norm=norm) ** 2
win_sq = util.pad_center(win_sq, n_fft)
# Fill the envelope
__window_ss_fill(x, win_sq, n_frames, hop_length)
return x
@cache(level=10)
def diagonal_filter(window, n, slope=1.0, angle=None, zero_mean=False):
"""Build a two-dimensional diagonal filter.
This is primarily used for smoothing recurrence or self-similarity matrices.
Parameters
----------
window : string, tuple, number, callable, or list-like
The window function to use for the filter.
See `get_window` for details.
Note that the window used here should be non-negative.
n : int > 0
the length of the filter
slope : float
The slope of the diagonal filter to produce
angle : float or None
If given, the slope parameter is ignored,
and angle directly sets the orientation of the filter (in radians).
Otherwise, angle is inferred as `arctan(slope)`.
zero_mean : bool
If True, a zero-mean filter is used.
Otherwise, a non-negative averaging filter is used.
This should be enabled if you want to enhance paths and suppress
blocks.
Returns
-------
kernel : np.ndarray, shape=[(m, m)]
The 2-dimensional filter kernel
Notes
-----
This function caches at level 10.
"""
if angle is None:
angle = np.arctan(slope)
win = np.diag(get_window(window, n, fftbins=False))
if not np.isclose(angle, np.pi / 4):
win = scipy.ndimage.rotate(
win, 45 - angle * 180 / np.pi, order=5, prefilter=False
)
np.clip(win, 0, None, out=win)
win /= win.sum()
if zero_mean:
win -= win.mean()
return win
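# Illustrative usage (a hedged sketch): a 5x5 averaging kernel along the main
# diagonal, e.g. for smoothing a self-similarity matrix R via
# scipy.ndimage.convolve(R, kernel).
#     kernel = diagonal_filter('hann', 5, slope=1.0)
#     assert kernel.shape == (5, 5)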
|
bmcfee/librosa
|
librosa/filters.py
|
Python
|
isc
| 37,764 | 0.001059 |
# coding: utf-8
import numpy as np
from chainer import cuda
from builtins import range
class Experience:
def __init__(self, use_gpu=0, data_size=10**5, replay_size=32, hist_size=1, initial_exploration=10**3, dim=10240):
self.use_gpu = use_gpu
self.data_size = data_size
self.replay_size = replay_size
self.hist_size = hist_size
# self.initial_exploration = 10
self.initial_exploration = initial_exploration
self.dim = dim
self.d = [np.zeros((self.data_size, self.hist_size, self.dim), dtype=np.uint8),
np.zeros(self.data_size, dtype=np.uint8),
np.zeros((self.data_size, 1), dtype=np.int8),
np.zeros((self.data_size, self.hist_size, self.dim), dtype=np.uint8),
np.zeros((self.data_size, 1), dtype=np.bool)]
def stock(self, time, state, action, reward, state_dash, episode_end_flag):
data_index = time % self.data_size
        if episode_end_flag is True:
            self.d[0][data_index] = state
            self.d[1][data_index] = action
            self.d[2][data_index] = reward
            # record the terminal flag so replay() can see the episode boundary
            self.d[4][data_index] = True
else:
self.d[0][data_index] = state
self.d[1][data_index] = action
self.d[2][data_index] = reward
self.d[3][data_index] = state_dash
self.d[4][data_index] = episode_end_flag
def replay(self, time):
replay_start = False
if self.initial_exploration < time:
replay_start = True
# Pick up replay_size number of samples from the Data
if time < self.data_size: # during the first sweep of the History Data
replay_index = np.random.randint(0, time, (self.replay_size, 1))
else:
replay_index = np.random.randint(0, self.data_size, (self.replay_size, 1))
s_replay = np.ndarray(shape=(self.replay_size, self.hist_size, self.dim), dtype=np.float32)
a_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.uint8)
r_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.float32)
s_dash_replay = np.ndarray(shape=(self.replay_size, self.hist_size, self.dim), dtype=np.float32)
episode_end_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.bool)
for i in range(self.replay_size):
s_replay[i] = np.asarray(self.d[0][replay_index[i]], dtype=np.float32)
a_replay[i] = self.d[1][replay_index[i]]
r_replay[i] = self.d[2][replay_index[i]]
s_dash_replay[i] = np.array(self.d[3][replay_index[i]], dtype=np.float32)
episode_end_replay[i] = self.d[4][replay_index[i]]
if self.use_gpu >= 0:
s_replay = cuda.to_gpu(s_replay)
s_dash_replay = cuda.to_gpu(s_dash_replay)
return replay_start, s_replay, a_replay, r_replay, s_dash_replay, episode_end_replay
else:
return replay_start, 0, 0, 0, 0, False
def end_episode(self, time, last_state, action, reward):
self.stock(time, last_state, action, reward, last_state, True)
replay_start, s_replay, a_replay, r_replay, s_dash_replay, episode_end_replay = \
self.replay(time)
return replay_start, s_replay, a_replay, r_replay, s_dash_replay, episode_end_replay
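# Illustrative training-loop usage (a hedged sketch; `state`, `state_dash`,
# `total_steps` and `q_net` are hypothetical names supplied by the agent code):
#     exp = Experience(use_gpu=-1, dim=10240)        # use_gpu=-1 keeps data on CPU
#     for t in range(total_steps):
#         exp.stock(t, state, action, reward, state_dash, False)
#         started, s, a, r, s_dash, done = exp.replay(t)
#         if started:
#             q_net.update(s, a, r, s_dash, done)    # hypothetical learner hook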
|
wbap/hackathon-2017-sample
|
agent/ml/experience.py
|
Python
|
apache-2.0
| 3,373 | 0.004447 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-16 21:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('user', '0002_profile_validated'),
]
operations = [
migrations.CreateModel(
name='EmailValidationToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('token', models.CharField(max_length=100, unique=True)),
('expire', models.DateTimeField()),
('consumed', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
ava-project/ava-website
|
website/apps/user/migrations/0003_emailvalidationtoken.py
|
Python
|
mit
| 973 | 0.002055 |
# -*- coding: utf-8 -*-
print '''<!DOCTYPE html><html>'''
incluir(data,"head")
print '''<body>'''
incluir(data,"header")
print '''</body></html>'''
|
ZerpaTechnology/AsenZor
|
apps/votSys/user/vistas/templates/inicio.py
|
Python
|
lgpl-3.0
| 147 | 0.020408 |
# Copyright 2017 onwards LabsLand Experimentia S.L.
# This software is licensed under the GNU AGPL v3:
# GNU Affero General Public License version 3 (see the file LICENSE)
# Read in the documentation about the license
from __future__ import unicode_literals, print_function, division
from .redis_manager import RedisManager
|
weblabdeusto/weblablib
|
weblablib/backends/__init__.py
|
Python
|
agpl-3.0
| 326 | 0 |
from django.conf.urls import patterns, url
urlpatterns = patterns('cityproblems.accounts.views',
url(r'^register/$', 'register', name="accounts_register"),
url(r'^profile/edit/$', 'accounts_profile_edit', name="accounts_profile_edit"),
url(r'^profile/$', 'accounts_profile_view', name="accounts_profile_view"),
url(r'^profile/(\w+)/$', 'accounts_profile_view', name="accounts_profile_view"),
url(r'^send_email_confirm_link/$', 'accounts_send_email_confirm_link', name="accounts_send_email_confirm_link"),
url(r'^send_passwd_reset_link/$', 'accounts_send_passwd_reset_link', name="accounts_send_passwd_reset_link"),
url(r'^process_email_confirm/(\d+)/$', 'accounts_process_email_confirm', name="accounts_process_email_confirm"),
url(r'^passwd_reset/(\d+)/$', 'accounts_passwd_reset', name="accounts_passwd_reset"),
url(r'^passwd_change/$', 'accounts_passwd_change', name="accounts_passwd_change"),
)
urlpatterns += patterns('',
url(r'^logout/$',
'django.contrib.auth.views.logout',
{'next_page': '/'}, name="logout"),
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'accounts_login.html'}, name="login"),
)
|
SimplyCo/cityproblems
|
cityproblems/accounts/urls.py
|
Python
|
mit
| 1,489 | 0.006716 |
from math import radians, cos, sin, asin, sqrt
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
# 6367 km is the radius of the Earth
km = 6367 * c
return km
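# Illustrative usage (a hedged sketch):
#     km = haversine(2.35, 48.85, -0.13, 51.51)  # Paris -> London, roughly 344 km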
|
anduslim/codex
|
codex_project/actors/haversine.py
|
Python
|
mit
| 554 | 0.001805 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals, absolute_import
import sys
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
import pytest
from espwrap.base import batch
from espwrap.adaptors.sendgrid_v3 import SendGridMassEmail, _HTTP_EXC_MSG
from espwrap.adaptors.sendgrid_common import breakdown_recipients
from python_http_client.exceptions import BadRequestsError # this is a dependency of sendgrid-python
if sys.version_info < (3,):
range = xrange
API_KEY = 'unit_test'
def test_breakdown_recipients():
me = SendGridMassEmail(API_KEY)
# This function will split straight up duplicates
me.add_recipient(name='Test', email='test@test.com')
me.add_recipient(name='Test', email='test@test.com')
    # It will also split off aliases whose base address we've already seen
me.add_recipient(name='Test', email='test+1@test.com')
broken = breakdown_recipients(me.get_recipients())
# So it should send as three separate batches
assert len(broken) == 3
def test_delimiters():
me = SendGridMassEmail(API_KEY)
start = '*|'
end = '|*'
me.set_variable_delimiters(start, end)
delimiters = me.get_variable_delimiters(True)
assert delimiters.get('start') == start
assert delimiters.get('end') == end
assert me.get_variable_delimiters() == (start, end)
def test_add_tags():
me = SendGridMassEmail(API_KEY)
with pytest.raises(Exception) as err:
me.add_tags(*[str(tag) for tag in range(11)])
assert 'Too many tags' in str(err)
me.add_tags(*[str(tag) for tag in range(9)])
with pytest.raises(Exception) as err:
me.add_tags(*['foo', 'bar'])
assert 'limit' in str(err)
me.add_tags('tenth')
def test_message_construction():
me = SendGridMassEmail(API_KEY)
template_name = 'test template'
ip_pool = 'testPool'
company_name = 'UnitTest Spam Corp the Second'
tags = ['unit', 'test', 'for', 'the', 'win']
webhook_data = {
'm_id': '56f2c1341a89ddc8c04d5407',
'env': 'local',
'p_id': '56f2c1571a89ddc8c04d540a',
}
me.set_reply_to_addr('custsupport@spam.com')
me.set_from_addr('donotreply@spam.com')
me.add_recipients([
{
'name': 'Josh',
'email': 'spam@spam.com',
'merge_vars': {
'CUSTOMER_NAME': 'Josh',
},
},
{
'name': 'Jim',
'email': 'spam2@spam.com',
'merge_vars': {
'CUSTOMER_NAME': 'Jim',
'SOMETHING_UNIQUE': 'tester',
},
},
{
'name': '姓',
'email': 'test@test.com',
'merge_vars': {
'CUSTOMER_NAME': '姓',
'SOMETHING_UNIQUE': '独特'
}
}
])
me.add_global_merge_vars(COMPANY_NAME=company_name)
me.set_variable_delimiters('*|', '|*')
me.set_ip_pool(ip_pool)
me.set_template_name(template_name)
me.enable_click_tracking()
me.enable_open_tracking()
me.set_webhook_data(webhook_data)
me.add_tags(*tags)
me.set_subject('test subject')
delims = me.get_variable_delimiters()
grouped_recipients = batch(list(me.recipients), me.partition)
for grp in grouped_recipients:
# Note: The order of recipients in this test case is reversed from what's listed above
to_send = breakdown_recipients(grp)
message = me.message_constructor(to_send)
message_dict = message.get()
print (message_dict)
assert set(message_dict['categories']) == set(tags)
assert message_dict['tracking_settings']['open_tracking']['enable'] == True
assert message_dict['tracking_settings']['click_tracking']['enable'] == True
print(message_dict['personalizations'])
assert message_dict['personalizations'][0]['to'][0]['name'] == '姓'
assert message_dict['personalizations'][0]['to'][0]['email'] == 'test@test.com'
assert message_dict['personalizations'][1]['to'][0]['name'] == 'Jim'
assert message_dict['personalizations'][1]['to'][0]['email'] == 'spam2@spam.com'
assert message_dict['personalizations'][2]['to'][0]['name'] == 'Josh'
assert message_dict['personalizations'][2]['to'][0]['email'] == 'spam@spam.com'
company_name_key = delims[0] + 'COMPANY_NAME' + delims[1]
assert message_dict['personalizations'][0]['substitutions'][company_name_key] == 'UnitTest Spam Corp the Second'
assert message_dict['personalizations'][1]['substitutions'][company_name_key] == 'UnitTest Spam Corp the Second'
customer_name_key = delims[0] + 'CUSTOMER_NAME' + delims[1]
# assert message_dict['personalizations'][0]['substitutions'][customer_name_key] == '姓'
assert message_dict['personalizations'][1]['substitutions'][customer_name_key] == 'Jim'
assert message_dict['personalizations'][2]['substitutions'][customer_name_key] == 'Josh'
something_unique_key = delims[0] + 'SOMETHING_UNIQUE' + delims[1]
# assert message_dict['personalizations'][0]['substitutions'][something_unique_key] == '独特'
assert something_unique_key not in message_dict['personalizations'][2]['substitutions'].keys()
assert message_dict['ip_pool_name'] == ip_pool
assert message_dict['custom_args']['template_name'] == template_name
def test_send_error_400(caplog):
"""
Test the handling of HTTP 400 Bad Request responses. The Sendgrid V3 API will return data
along with a 400 response that has details on why it was rejected. Make sure this data
makes it back to the caller, pretty pretty please.
"""
subject = 'subject'
resp_status_code = 400
resp_reason = 'main reason for error'
resp_body = 'the body of the response'
me = SendGridMassEmail(API_KEY)
me.subject = subject
me.from_addr = 'noreply@mailinator.com'
me.add_recipient(email='recip@mailinator.com')
with patch('sendgrid.SendGridAPIClient.send') as mock_send:
error = Mock()
error.code = resp_status_code
error.reason = resp_reason
error.read = lambda: resp_body
mock_send.side_effect = BadRequestsError(error)
me.send()
assert mock_send.called, 'It should have made it to the send method in the sendgrid lib.'
assert len(caplog.record_tuples) == 1, 'There should a log message in the exception block.'
severity = caplog.record_tuples[0][1]
msg = caplog.record_tuples[0][2]
assert severity == 40, 'The log should be an Error level log.'
assert msg == _HTTP_EXC_MSG % (subject, resp_status_code, resp_reason, resp_body),\
'The log message should contain details from the response.'
|
SpotOnInc/espwrap
|
tests/test_sendgrid_v3.py
|
Python
|
mit
| 6,880 | 0.003351 |
#!/usr/bin/env python
import os
import re
import sys
group_id = '142394'
def stamp(html):
"""Stamp a Python HTML documentation page with the SourceForge logo"""
def replace(m):
return ('<span class="release-info">%s '
'Hosted on <a href="http://sourceforge.net">'
'<img src="http://sourceforge.net/'
'sflogo.php?group_id=%s&type=1" width="88" height="31"'
'border="0" alt="SourceForge Logo"></a></span>'
% (m.group(1), group_id))
mailRe = re.compile(r'<span class="release-info">(.*)</span>')
## m = mailRe.search(html)
## if m:
## print m.groups()
return re.sub(mailRe, replace, html)
# stamp()
if __name__ == '__main__':
for name in sys.argv[1:]:
html = open(name, 'r').read()
text = stamp(html)
if text != html:
os.remove(name)
file = open(name, 'w')
file.write(text)
file.close()
|
PyQwt/PyQwt5
|
Doc/sourceforge.py
|
Python
|
gpl-2.0
| 990 | 0.005051 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('get_feedback', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='course',
name='feedback_amount',
field=models.DecimalField(default=0, decimal_places=0, max_digits=10),
),
migrations.AlterField(
model_name='course',
name='feedback_FU',
field=models.FloatField(),
),
migrations.AlterField(
model_name='course',
name='feedback_GPA',
field=models.FloatField(),
),
migrations.AlterField(
model_name='course',
name='feedback_easy',
field=models.FloatField(),
),
migrations.AlterField(
model_name='course',
name='feedback_freedom',
field=models.FloatField(),
),
migrations.AlterField(
model_name='course',
name='feedback_knowledgeable',
field=models.FloatField(),
),
]
|
david30907d/feedback_django
|
example/get_feedback/migrations/0002_auto_20160519_0326.py
|
Python
|
mit
| 1,184 | 0.000845 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FormPlugin'
db.create_table(u'cmsplugin_formplugin', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('form_class', self.gf('django.db.models.fields.CharField')(max_length=200)),
('success_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True)),
('post_to_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
))
db.send_create_signal(u'cms_form_plugin', ['FormPlugin'])
def backwards(self, orm):
# Deleting model 'FormPlugin'
db.delete_table(u'cmsplugin_formplugin')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'cms_form_plugin.formplugin': {
'Meta': {'object_name': 'FormPlugin', 'db_table': "u'cmsplugin_formplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'form_class': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'post_to_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'success_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['cms_form_plugin']
|
metzlar/cms-form-plugin
|
cms_form_plugin/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 3,411 | 0.007916 |
import datetime as dt
from io import StringIO
from locale import LC_CTYPE, getlocale, setlocale
from django.contrib.auth.models import Permission, User
from django.contrib.gis.geos import Point
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, override_settings
from model_mommy import mommy
from enhydris.admin.station import LatLonField, LatLonWidget, TimeseriesInlineAdminForm
from enhydris.models import Station, Timeseries
class LatLonWidgetTestCase(TestCase):
def test_decompress_value(self):
result = LatLonWidget().decompress(Point(12.34567891234567, -23.456789123456))
self.assertAlmostEqual(result[0], 12.345679, places=13)
self.assertAlmostEqual(result[1], -23.456789, places=13)
def test_decompress_none(self):
result = LatLonWidget().decompress(None)
self.assertIsNone(result[0])
self.assertIsNone(result[1])
class LatLonFieldTestCase(TestCase):
def test_compress(self):
self.assertEqual(
LatLonField().compress([12.345678, -23.456789]),
"SRID=4326;POINT(12.345678 -23.456789)",
)
class StationPermsTestCaseBase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
alice = User.objects.create_user(
username="alice", password="topsecret", is_staff=True, is_superuser=True
)
bob = User.objects.create_user(
username="bob", password="topsecret", is_staff=True, is_superuser=False
)
charlie = User.objects.create_user(
username="charlie", password="topsecret", is_staff=True, is_superuser=False
)
User.objects.create_user(
username="david", password="topsecret", is_staff=True, is_superuser=False
)
elaine = User.objects.create_user(
username="elaine", password="topsecret", is_staff=True, is_superuser=False
)
cls.azanulbizar = mommy.make(
Station, name="Azanulbizar", creator=bob, maintainers=[]
)
cls.barazinbar = mommy.make(
Station, name="Barazinbar", creator=bob, maintainers=[charlie]
)
cls.calenardhon = mommy.make(
Station, name="Calenardhon", creator=alice, maintainers=[]
)
po = Permission.objects
elaine.user_permissions.add(po.get(codename="add_station"))
elaine.user_permissions.add(po.get(codename="change_station"))
elaine.user_permissions.add(po.get(codename="delete_station"))
class CommonTests:
"""Tests that will run both for ENHYDRIS_USERS_CAN_ADD_CONTENT=True and False.
    Below we have two TestCase subclasses (actually StationPermsTestCaseBase
subclasses); one of them overrides setting ENHYDRIS_USERS_CAN_ADD_CONTENT to True,
and the other one to False. This is a mixin containing tests that should have the
same results in both cases.
"""
def test_station_list_has_all_stations_for_superuser(self):
self.client.login(username="alice", password="topsecret")
response = self.client.get("/admin/enhydris/station/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Azanulbizar")
self.assertContains(response, "Barazinbar")
self.assertContains(response, "Calenardhon")
def test_station_list_has_all_stations_for_user_with_model_permissions(self):
self.client.login(username="elaine", password="topsecret")
response = self.client.get("/admin/enhydris/station/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Azanulbizar")
self.assertContains(response, "Barazinbar")
self.assertContains(response, "Calenardhon")
def test_station_list_has_nothing_when_user_does_not_have_permissions(self):
self.client.login(username="david", password="topsecret")
response = self.client.get("/admin/enhydris/station/")
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Azanulbizar")
self.assertNotContains(response, "Barazinbar")
self.assertNotContains(response, "Calenardhon")
def test_station_detail_is_inaccessible_if_user_does_not_have_perms(self):
self.client.login(username="david", password="topsecret")
response = self.client.get(
"/admin/enhydris/station/{}/change/".format(self.barazinbar.id)
)
self.assertEqual(response.status_code, 302)
@override_settings(ENHYDRIS_USERS_CAN_ADD_CONTENT=True)
class StationPermsTestCaseWhenUsersCanAddContent(StationPermsTestCaseBase, CommonTests):
def test_station_list_has_created_stations(self):
self.client.login(username="bob", password="topsecret")
response = self.client.get("/admin/enhydris/station/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Azanulbizar")
self.assertContains(response, "Barazinbar")
self.assertNotContains(response, "Calenardhon")
def test_station_list_has_maintained_stations(self):
self.client.login(username="charlie", password="topsecret")
response = self.client.get("/admin/enhydris/station/")
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Azanulbizar")
self.assertContains(response, "Barazinbar")
self.assertNotContains(response, "Calenardhon")
def test_station_detail_has_creator_and_maintainers_for_superuser(self):
self.client.login(username="alice", password="topsecret")
response = self.client.get(
"/admin/enhydris/station/{}/change/".format(self.azanulbizar.id)
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Creator")
self.assertContains(response, "Maintainers")
def test_station_detail_has_creator_and_maintainers_for_user_with_model_perms(self):
self.client.login(username="elaine", password="topsecret")
response = self.client.get(
"/admin/enhydris/station/{}/change/".format(self.azanulbizar.id)
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Creator")
self.assertContains(response, "Maintainers")
def test_station_detail_has_only_maintainers_for_creator(self):
self.client.login(username="bob", password="topsecret")
response = self.client.get(
"/admin/enhydris/station/{}/change/".format(self.azanulbizar.id)
)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Creator")
self.assertContains(response, "Maintainers")
def test_station_detail_has_neither_creator_nor_maintainers_for_maintainer(self):
self.client.login(username="charlie", password="topsecret")
response = self.client.get(
"/admin/enhydris/station/{}/change/".format(self.barazinbar.id)
)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Creator")
self.assertNotContains(response, "Maintainers")
def test_add_station_has_creator_and_maintainers_for_superuser(self):
self.client.login(username="alice", password="topsecret")
response = self.client.get("/admin/enhydris/station/add/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Creator")
self.assertContains(response, "Maintainers")
def test_add_station_has_creator_and_maintainers_for_user_with_model_perms(self):
self.client.login(username="elaine", password="topsecret")
response = self.client.get("/admin/enhydris/station/add/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Creator")
self.assertContains(response, "Maintainers")
def test_add_station_has_only_maintainers_for_user_without_model_perms(self):
self.client.login(username="bob", password="topsecret")
response = self.client.get("/admin/enhydris/station/add/")
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Creator")
self.assertContains(response, "Maintainers")
@override_settings(ENHYDRIS_USERS_CAN_ADD_CONTENT=False)
class StationPermsTestCaseWhenUsersCannotAddCont(StationPermsTestCaseBase, CommonTests):
def test_station_list_is_empty_even_if_user_is_creator(self):
self.client.login(username="bob", password="topsecret")
response = self.client.get("/admin/enhydris/station/")
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Azanulbizar")
self.assertNotContains(response, "Barazinbar")
self.assertNotContains(response, "Calenardhon")
def test_station_list_is_empty_even_if_user_is_maintainer(self):
self.client.login(username="charlie", password="topsecret")
response = self.client.get("/admin/enhydris/station/")
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Azanulbizar")
self.assertNotContains(response, "Barazinbar")
self.assertNotContains(response, "Calenardhon")
def test_station_detail_does_not_have_creator_and_maintainers(self):
self.client.login(username="elaine", password="topsecret")
response = self.client.get(
"/admin/enhydris/station/{}/change/".format(self.azanulbizar.id)
)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Creator")
self.assertNotContains(response, "Maintainers")
def test_add_station_has_no_creator_or_maintainers_for_user_with_model_perms(self):
self.client.login(username="elaine", password="topsecret")
response = self.client.get("/admin/enhydris/station/add/")
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Creator")
self.assertNotContains(response, "Maintainers")
class TimeseriesInlineAdminFormRefusesToAppendIfNotInOrderTestCase(TestCase):
def setUp(self):
station = mommy.make(Station)
self.timeseries = mommy.make(
Timeseries, gentity=station, time_zone__utc_offset=0
)
self.timeseries.set_data(StringIO("2019-01-01 00:30,25,\n"))
self.data = {
"replace_or_append": "APPEND",
"gentity": station.id,
"unit_of_measurement": self.timeseries.unit_of_measurement.id,
"variable": self.timeseries.variable.id,
"time_zone": self.timeseries.time_zone.id,
}
self.files = {
"data": SimpleUploadedFile(
"mytimeseries.csv", b"2005-12-01 18:35,7,\n2019-04-09 13:36,0,\n"
)
}
self.form = TimeseriesInlineAdminForm(
data=self.data, files=self.files, instance=self.timeseries
)
def test_form_is_not_valid(self):
self.assertFalse(self.form.is_valid())
def test_form_errors(self):
self.assertIn(
"the first record of the time series to append is earlier than the last "
"record of the existing time series",
self.form.errors["__all__"][0],
)
class TimeseriesInlineAdminFormAcceptsAppendingIfInOrderTestCase(TestCase):
def setUp(self):
station = mommy.make(Station)
self.timeseries = mommy.make(
Timeseries, gentity=station, time_zone__utc_offset=0
)
self.timeseries.set_data(StringIO("2019-01-01 00:30,25,\n"))
self.data = {
"replace_or_append": "APPEND",
"gentity": station.id,
"unit_of_measurement": self.timeseries.unit_of_measurement.id,
"variable": self.timeseries.variable.id,
"time_zone": self.timeseries.time_zone.id,
}
self.files = {
"data": SimpleUploadedFile("mytimeseries.csv", b"2019-04-09 13:36,0,\n")
}
self.form = TimeseriesInlineAdminForm(
data=self.data, files=self.files, instance=self.timeseries
)
self.form.save()
def test_form_is_valid(self):
self.assertTrue(self.form.is_valid())
def test_data_length(self):
self.assertEqual(len(self.timeseries.get_data().data), 2)
def test_first_record(self):
self.assertEqual(
self.timeseries.get_data().data.index[0], dt.datetime(2019, 1, 1, 0, 30)
)
def test_second_record(self):
self.assertEqual(
self.timeseries.get_data().data.index[1], dt.datetime(2019, 4, 9, 13, 36)
)
class TimeseriesInlineAdminFormAcceptsReplacingTestCase(TestCase):
def setUp(self):
station = mommy.make(Station)
self.timeseries = mommy.make(
Timeseries, gentity=station, time_zone__utc_offset=0
)
self.timeseries.set_data(StringIO("2019-01-01 00:30,25,\n"))
self.data = {
"replace_or_append": "REPLACE",
"gentity": station.id,
"unit_of_measurement": self.timeseries.unit_of_measurement.id,
"variable": self.timeseries.variable.id,
"time_zone": self.timeseries.time_zone.id,
}
self.files = {
"data": SimpleUploadedFile(
"mytimeseries.csv", b"2005-12-01 18:35,7,\n2019-04-09 13:36,0,\n"
)
}
self.form = TimeseriesInlineAdminForm(
data=self.data, files=self.files, instance=self.timeseries
)
self.form.save()
def test_form_is_valid(self):
self.assertTrue(self.form.is_valid())
def test_data_length(self):
self.assertEqual(len(self.timeseries.get_data().data), 2)
def test_first_record(self):
self.assertEqual(
self.timeseries.get_data().data.index[0], dt.datetime(2005, 12, 1, 18, 35)
)
def test_second_record(self):
self.assertEqual(
self.timeseries.get_data().data.index[1], dt.datetime(2019, 4, 9, 13, 36)
)
class TimeseriesUploadFileWithUnicodeHeadersTestCase(TestCase):
def setUp(self):
station = mommy.make(Station)
self.timeseries = mommy.make(
Timeseries, gentity=station, time_zone__utc_offset=0
)
self.data = {
"replace_or_append": "REPLACE",
"gentity": station.id,
"unit_of_measurement": self.timeseries.unit_of_measurement.id,
"variable": self.timeseries.variable.id,
"time_zone": self.timeseries.time_zone.id,
}
self.files = {
"data": SimpleUploadedFile(
"mytimeseries.csv",
"Station=Πάπιγκο\n\n2019-04-09 13:36,0,\n".encode("utf-8"),
)
}
try:
# We check that the file is read without problem even if the locale
# is set to C (i.e. ascii only)
saved_locale = getlocale(LC_CTYPE)
setlocale(LC_CTYPE, "C")
self.form = TimeseriesInlineAdminForm(
data=self.data, files=self.files, instance=self.timeseries
)
self.form.save()
finally:
setlocale(LC_CTYPE, saved_locale)
def test_form_is_valid(self):
self.assertTrue(self.form.is_valid())
def test_data_length(self):
self.assertEqual(len(self.timeseries.get_data().data), 1)
def test_first_record(self):
self.assertEqual(
self.timeseries.get_data().data.index[0], dt.datetime(2019, 4, 9, 13, 36)
)
class TimeseriesInlineAdminFormProcessWithoutFileTestCase(TestCase):
def setUp(self):
station = mommy.make(Station)
self.timeseries = mommy.make(
Timeseries, gentity=station, time_zone__utc_offset=0
)
self.timeseries.set_data(StringIO("2019-01-01 00:30,25,\n"))
self.data = {
"replace_or_append": "REPLACE",
"gentity": station.id,
"unit_of_measurement": self.timeseries.unit_of_measurement.id,
"variable": self.timeseries.variable.id,
"time_zone": self.timeseries.time_zone.id,
}
self.form = TimeseriesInlineAdminForm(data=self.data, instance=self.timeseries)
def test_form_is_valid(self):
self.assertTrue(self.form.is_valid())
def test_form_saves_without_exception(self):
self.form.save()
|
kickapoo/enhydris
|
enhydris/tests/admin/test_station.py
|
Python
|
agpl-3.0
| 16,487 | 0.00182 |
#!/usr/bin/env python3
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pytest
from pysisyphus.calculators.AnaPot import AnaPot
from pysisyphus.calculators.MullerBrownSympyPot import MullerBrownPot
from pysisyphus.calculators.PySCF import PySCF
from pysisyphus.calculators import Gaussian16, Turbomole
from pysisyphus.constants import BOHR2ANG
from pysisyphus.helpers import geom_loader
from pysisyphus.irc import *
from pysisyphus.testing import using
@pytest.fixture
def this_dir(request):
return Path(request.module.__file__).parents[0]
def assert_anapot_irc(irc):
fc = irc.all_coords[0]
bc = irc.all_coords[-1]
forward_ref = np.array((-1.0527, 1.0278, 0.))
backward_ref = np.array((1.941, 3.8543, 0.))
forward_diff = np.linalg.norm(fc - forward_ref)
backward_diff = np.linalg.norm(bc - backward_ref)
assert forward_diff == pytest.approx(0.05, abs=0.1)
assert backward_diff == pytest.approx(0.05, abs=0.1)
def plot_irc(irc, title=None):
geom = irc.geometry
calc = geom.calculator
levels = np.linspace(-3, 4, 120)
calc.plot()
ax = calc.ax
ax.plot(*irc.all_coords.T[:2], "ro-")
if title:
ax.set_title(title)
plt.show()
@pytest.mark.parametrize(
"irc_cls, mod_kwargs, ref", [
(DampedVelocityVerlet, {"v0": 0.1, "max_cycles": 400,}, None),
(Euler, {"step_length": 0.05,}, None),
(EulerPC, {}, None),
(GonzalezSchlegel, {}, None),
(IMKMod, {}, None),
(RK4, {}, None),
(LQA, {}, None),
]
)
def test_anapot_irc(irc_cls, mod_kwargs, ref):
geom = AnaPot().get_geom((0.61173, 1.49297, 0.))
kwargs = {
"step_length": 0.1,
"rms_grad_thresh": 1e-2,
}
kwargs.update(**mod_kwargs)
irc = irc_cls(geom, **kwargs)
irc.run()
# geom.calculator.plot_irc(irc, show=True)
assert_anapot_irc(irc)
@pytest.mark.parametrize(
"step_length", [
(0.1),
(0.2),
(0.3),
(0.4),
]
)
def test_imk(step_length):
geom = AnaPot().get_geom((0.61173, 1.49297, 0.))
irc_kwargs = {
"step_length": step_length,
"rms_grad_thresh": 1e-2,
"corr_first": True,
"corr_first_energy": True,
"corr_bisec_size": 0.0025,
"corr_bisec_energy": True,
}
irc = IMKMod(geom, **irc_kwargs)
irc.run()
# plot_irc(irc, irc.__class__.__name__)
assert_anapot_irc(irc)
@pytest.mark.parametrize(
"calc_cls, kwargs_", [
pytest.param(PySCF,
{"basis": "321g", },
marks=using("pyscf")
),
pytest.param(Gaussian16,
{"route": "HF/3-21G"},
marks=using("gaussian16")
),
pytest.param(Turbomole,
{"control_path": "./hf_abstr_control_path", "pal": 1},
marks=using("turbomole")
),
]
)
def test_hf_abstraction_dvv(calc_cls, kwargs_, this_dir):
geom = geom_loader("lib:hfabstraction_hf321g_displ_forward.xyz")
calc_kwargs = {
"pal": 2,
}
calc_kwargs.update(kwargs_)
if "control_path" in calc_kwargs:
calc_kwargs["control_path"] = this_dir / calc_kwargs["control_path"]
print("Using", calc_cls)
calc = calc_cls(**calc_kwargs)
geom.set_calculator(calc)
irc_kwargs = {
"dt0": 0.5,
"v0": 0.04,
"downhill": True,
"max_cycles": 150,
}
dvv = DampedVelocityVerlet(geom, **irc_kwargs)
dvv.run()
c3d = geom.coords3d * BOHR2ANG
def bond(i,j): return np.linalg.norm(c3d[i]-c3d[j])
assert bond(2, 7) == pytest.approx(0.93, abs=0.01)
assert bond(4, 7) == pytest.approx(2.42, abs=0.01)
assert bond(2, 0) == pytest.approx(2.23, abs=0.01)
@using("pyscf")
@pytest.mark.parametrize(
"irc_cls, irc_kwargs, fw_cycle, bw_cycle",
[
(EulerPC, {"hessian_recalc": 10, "dump_dwi": False,}, 30, 38),
# (EulerPC, {"hessian_recalc": 10, "corr_func": "scipy",}, 19, 23),
]
)
def test_hcn_irc(irc_cls, irc_kwargs, fw_cycle, bw_cycle):
geom = geom_loader("lib:hcn_iso_hf_sto3g_ts_opt.xyz")
calc = PySCF(
basis="sto3g",
)
geom.set_calculator(calc)
irc = irc_cls(geom, **irc_kwargs, rms_grad_thresh=1e-4)
irc.run()
# approx. +- 0.5 kJ/mol
ref_energies = [pytest.approx(en) for en in (-91.6444238, -91.67520895)]
assert irc.forward_energies[0] in ref_energies
assert irc.backward_energies[-1] in ref_energies
@pytest.mark.parametrize(
"scipy_method",
[
(None),
("RK45"),
("DOP853"),
]
)
def test_eulerpc_scipy(scipy_method):
geom = AnaPot().get_geom((0.61173, 1.49297, 0.))
kwargs = {
"step_length": 0.2,
"rms_grad_thresh": 1e-2,
"corr_func": "scipy",
"scipy_method": scipy_method,
}
irc = EulerPC(geom, **kwargs)
irc.run()
# plot_irc(irc, irc.__class__.__name__)
assert_anapot_irc(irc)
@using("pyscf")
@pytest.mark.parametrize(
"hessian_init, ref_cycle", [
("calc", 28),
pytest.param("fischer", 0, marks=pytest.mark.xfail),
pytest.param("unit", 0, marks=pytest.mark.xfail),
("lindh", 28),
("simple", 28),
("swart", 28),
]
)
def test_downhill_irc_model_hessian(hessian_init, ref_cycle):
geom = geom_loader("lib:hcn_downhill_model_hessian.xyz")
calc = PySCF(basis="sto3g", pal=2)
geom.set_calculator(calc)
irc_kwargs = {
"hessian_init": hessian_init,
"rms_grad_thresh": 5e-3,
"downhill": True,
}
irc = EulerPC(geom, **irc_kwargs)
irc.run()
assert irc.downhill_energies[-1] == pytest.approx(-91.67517096968325)
assert irc.downhill_cycle == ref_cycle
# @pytest.mark.skip()
@pytest.mark.parametrize(
"step_length", [
0.1,
0.2,
0.3,
# 0.4 # requires hessian_recalc=1
]
)
def test_mb_gs2(step_length):
calc = MullerBrownPot()
geom = calc.get_saddle(0)
irc_kwargs = {
"step_length": step_length,
"line_search": False,
# "hessian_recalc": 1,
}
irc = GonzalezSchlegel(geom, **irc_kwargs)
irc.run()
# calc.plot_irc(irc, show=True, title=f"length {step_length:.2f}")
assert irc.forward_is_converged
assert irc.backward_is_converged
@using("pyscf")
@pytest.mark.parametrize(
"step_length", [
0.1,
0.2,
0.3,
# 0.4, # sometimes fails in the CI
]
)
def test_hcn_iso_gs2(step_length):
geom = geom_loader("lib:hcn_iso_hf_sto3g_ts_opt.xyz")
calc = PySCF(basis="sto3g", verbose=0)
geom.set_calculator(calc)
irc_kwargs = {
"step_length": step_length,
"displ_energy": 0.0005,
}
irc = GonzalezSchlegel(geom, **irc_kwargs)
irc.run()
assert irc.forward_is_converged
assert irc.backward_is_converged
@pytest.mark.parametrize(
"step_length", [
0.1,
0.2,
# 0.3,
# 0.4,
]
)
def test_mb_eulerpc(step_length):
calc = MullerBrownPot()
geom = calc.get_saddle(0)
irc_kwargs = {
"step_length": step_length,
# Using Scipy here takes forever...
# "corr_func": "scipy",
# "scipy_method": "BDF",
}
irc = EulerPC(geom, **irc_kwargs)
irc.run()
# calc.plot_irc(irc, show=True, title=f"length {step_length:.2f}")
forward_coords = irc.all_coords[0]
backward_coords = irc.all_coords[-1]
assert np.linalg.norm(forward_coords - (-0.558, 1.441, 0.0)) <= 2e-2
assert np.linalg.norm(backward_coords - (-0.050, 0.466, 0.0)) <= 5e-3
|
eljost/pysisyphus
|
tests/test_irc/test_irc.py
|
Python
|
gpl-3.0
| 7,603 | 0.000921 |
"""Support for IKEA Tradfri."""
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json
from .const import (
CONF_IMPORT_GROUPS, CONF_IDENTITY, CONF_HOST, CONF_KEY, CONF_GATEWAY_ID)
from . import config_flow  # noqa pylint: disable=unused-import
REQUIREMENTS = ['pytradfri[async]==6.0.1']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'tradfri'
CONFIG_FILE = '.tradfri_psk.conf'
KEY_GATEWAY = 'tradfri_gateway'
KEY_API = 'tradfri_api'
CONF_ALLOW_TRADFRI_GROUPS = 'allow_tradfri_groups'
DEFAULT_ALLOW_TRADFRI_GROUPS = False
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_ALLOW_TRADFRI_GROUPS,
default=DEFAULT_ALLOW_TRADFRI_GROUPS): cv.boolean,
})
}, extra=vol.ALLOW_EXTRA)
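# Illustrative configuration.yaml entry accepted by CONFIG_SCHEMA above
# (the host address is made up):
#
#   tradfri:
#     host: 192.168.0.10
#     allow_tradfri_groups: false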
async def async_setup(hass, config):
"""Set up the Tradfri component."""
conf = config.get(DOMAIN)
if conf is None:
return True
configured_hosts = [entry.data['host'] for entry in
hass.config_entries.async_entries(DOMAIN)]
legacy_hosts = await hass.async_add_executor_job(
load_json, hass.config.path(CONFIG_FILE))
for host, info in legacy_hosts.items():
if host in configured_hosts:
continue
info[CONF_HOST] = host
info[CONF_IMPORT_GROUPS] = conf[CONF_ALLOW_TRADFRI_GROUPS]
hass.async_create_task(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT},
data=info
))
host = conf.get(CONF_HOST)
import_groups = conf[CONF_ALLOW_TRADFRI_GROUPS]
if host is None or host in configured_hosts or host in legacy_hosts:
return True
hass.async_create_task(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT},
data={CONF_HOST: host, CONF_IMPORT_GROUPS: import_groups}
))
return True
async def async_setup_entry(hass, entry):
"""Create a gateway."""
# host, identity, key, allow_tradfri_groups
from pytradfri import Gateway, RequestError # pylint: disable=import-error
from pytradfri.api.aiocoap_api import APIFactory
factory = APIFactory(
entry.data[CONF_HOST],
psk_id=entry.data[CONF_IDENTITY],
psk=entry.data[CONF_KEY],
loop=hass.loop
)
async def on_hass_stop(event):
"""Close connection when hass stops."""
await factory.shutdown()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_hass_stop)
api = factory.request
gateway = Gateway()
try:
gateway_info = await api(gateway.get_gateway_info())
except RequestError:
_LOGGER.error("Tradfri setup failed.")
return False
hass.data.setdefault(KEY_API, {})[entry.entry_id] = api
hass.data.setdefault(KEY_GATEWAY, {})[entry.entry_id] = gateway
dev_reg = await hass.helpers.device_registry.async_get_registry()
dev_reg.async_get_or_create(
config_entry_id=entry.entry_id,
connections=set(),
identifiers={
(DOMAIN, entry.data[CONF_GATEWAY_ID])
},
manufacturer='IKEA',
name='Gateway',
# They just have 1 gateway model. Type is not exposed yet.
model='E1526',
sw_version=gateway_info.firmware_version,
)
hass.async_create_task(hass.config_entries.async_forward_entry_setup(
entry, 'light'
))
hass.async_create_task(hass.config_entries.async_forward_entry_setup(
entry, 'sensor'
))
hass.async_create_task(hass.config_entries.async_forward_entry_setup(
entry, 'switch'
))
return True
|
HydrelioxGitHub/home-assistant
|
homeassistant/components/tradfri/__init__.py
|
Python
|
apache-2.0
| 3,856 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class ReissueCertificateOrderRequest(ProxyOnlyResource):
"""Class representing certificate reissue request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param key_size: Certificate Key Size.
:type key_size: int
:param delay_existing_revoke_in_hours: Delay in hours to revoke existing
certificate after the new certificate is issued.
:type delay_existing_revoke_in_hours: int
:param csr: Csr to be used for re-key operation.
:type csr: str
:param is_private_key_external: Should we change the ASC type (from
managed private key to external private key and vice versa).
:type is_private_key_external: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'key_size': {'key': 'properties.keySize', 'type': 'int'},
'delay_existing_revoke_in_hours': {'key': 'properties.delayExistingRevokeInHours', 'type': 'int'},
'csr': {'key': 'properties.csr', 'type': 'str'},
'is_private_key_external': {'key': 'properties.isPrivateKeyExternal', 'type': 'bool'},
}
def __init__(self, kind=None, key_size=None, delay_existing_revoke_in_hours=None, csr=None, is_private_key_external=None):
super(ReissueCertificateOrderRequest, self).__init__(kind=kind)
self.key_size = key_size
self.delay_existing_revoke_in_hours = delay_existing_revoke_in_hours
self.csr = csr
self.is_private_key_external = is_private_key_external
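# Minimal construction sketch; the values below are illustrative, not taken
# from a real certificate order:
#
#   request = ReissueCertificateOrderRequest(
#       key_size=2048,
#       delay_existing_revoke_in_hours=24,
#       csr='-----BEGIN CERTIFICATE REQUEST-----...',
#       is_private_key_external=False,
#   )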
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/reissue_certificate_order_request.py
|
Python
|
mit
| 2,522 | 0.00119 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import os
import configparser
from fabric.context_managers import cd
from fabric.decorators import task
from fabric.api import env
from fabric.operations import put, run, local
from fabric.utils import abort
from zip_dir.utils import create_zip_archive
ini_parser = configparser.ConfigParser()
# Example of "deploy.ini"
# =======================
# [remote]
# host : 80.xxx.xxx.xx
# user : john
# key_filename : ~/.ssh/id_rsa.private
ini_parser.read("deploy.ini")
remote_section = ini_parser["remote"]
env.hosts = [remote_section["host"]]
env.user = remote_section["user"]
env.key_filename = os.path.normpath(remote_section["key_filename"])
APP_BASE_DIR = '/var/www/gtasksapp_com/www/app'
DIST_ZIP_FILENAME = "dist.zip"
DIST_DIRECTORY_NAME = "dist"
def create_tmp_if_doesnt_exist():
if not os.path.isdir(".tmp"):
os.mkdir(".tmp")
@task()
def build_app():
local("grunt")
@task()
def grunt_clean():
local("grunt clean")
@task(alias='app')
def deploy_app():
"""Deploy app"""
create_tmp_if_doesnt_exist()
current_path = os.path.dirname(os.path.realpath(__file__))
dist_path = os.path.join(current_path, DIST_DIRECTORY_NAME)
if not os.path.isdir(dist_path) or not os.listdir(dist_path):
abort("Dist path doesn't exist or dist directory is empty")
create_zip_archive(dist_path, os.path.join(".tmp", DIST_ZIP_FILENAME))
run("mkdir -p {0}".format(APP_BASE_DIR))
put(os.path.join(".tmp", DIST_ZIP_FILENAME), APP_BASE_DIR)
with cd(APP_BASE_DIR):
run("unzip -o {0}".format(DIST_ZIP_FILENAME))
run("rm {0}".format(DIST_ZIP_FILENAME))
grunt_clean()
@task(alias='landing')
def deploy_landing_page():
"""Deploy landing page"""
create_tmp_if_doesnt_exist()
current_path = os.path.dirname(os.path.realpath(__file__))
dist_path = os.path.join(current_path, "landing_page")
create_zip_archive(dist_path, ".tmp/landing_page.zip")
put(".tmp/landing_page.zip", "/var/www/gtasksapp_com/www/")
with cd("/var/www/gtasksapp_com/www/"):
run("unzip -o {0}".format("landing_page.zip"))
run("rm {0}".format("landing_page.zip"))
grunt_clean()
@task(alias='all')
def deploy_all():
"""Deploy all"""
build_app()
deploy_app()
deploy_landing_page()
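# Typical invocations (assuming Fabric 1.x and a populated deploy.ini):
#
#   fab app      # deploy an existing ./dist build
#   fab landing  # deploy the landing page only
#   fab all      # build the app, then deploy everything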
|
illagrenan/gtask-client
|
fabfile.py
|
Python
|
mit
| 2,356 | 0.000424 |
# Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
from timelinelib.db.utils import safe_locking
from timelinelib.repositories.dbwrapper import DbWrapperEventRepository
from timelinelib.wxgui.dialogs.editcontainer.view import EditContainerDialog
from timelinelib.wxgui.dialogs.editevent.controller import EditEventDialogController
from timelinelib.wxgui.framework import Dialog
from timelinelib.wxgui.utils import _set_focus_and_select
import timelinelib.wxgui.utils as gui_utils
class EditEventDialog(Dialog):
"""
<BoxSizerVertical>
<StaticBoxSizerVertical label="$(properties_label)" border="ALL" proportion="1">
<FlexGridSizer name="grid_sizer" columns="2" growableColumns="1" border="ALL" proportion="1">
%s
</FlexGridSizer>
</StaticBoxSizerVertical>
<CheckBox
name="add_more_checkbox"
label="$(add_more_label)"
border="LEFT|RIGHT|BOTTOM"
/>
<BoxSizerHorizontal border="LEFT|RIGHT|BOTTOM">
<TwoStateButton
initial_state_label="$(enlarge)"
second_state_label="$(reduce)"
event_EVT_INITIAL_STATE_CLICKED="on_enlarge_click"
event_EVT_SECOND_STATE_CLICKED="on_reduce_click"
/>
<StretchSpacer />
<DialogButtonsOkCancelSizer
event_EVT_BUTTON__ID_OK="on_ok_clicked"
/>
</BoxSizerHorizontal>
</BoxSizerVertical>
"""
TIME_DETAILS_ROW = """
<StaticText align="ALIGN_CENTER_VERTICAL" label="$(when_label)" />
<BoxSizerHorizontal>
<TimePicker
name="start_time"
time_type="$(time_type)"
config="$(config)"
/>
<Spacer />
<StaticText
label="$(to_label)"
name="to_label"
align="ALIGN_CENTER_VERTICAL"
/>
<Spacer />
<TimePicker
name="end_time"
time_type="$(time_type)"
config="$(config)"
/>
</BoxSizerHorizontal>
"""
CHECKBOX_ROW = """
<Spacer />
<FlexGridSizer rows="1">
<CheckBox
name="period_checkbox"
event_EVT_CHECKBOX="on_period_checkbox_changed"
label="$(period_checkbox_text)" />
<CheckBox
name="show_time_checkbox"
event_EVT_CHECKBOX="on_show_time_checkbox_changed"
label="$(show_time_checkbox_text)"
/>
<CheckBox
name="fuzzy_checkbox"
label="$(fuzzy_checkbox_text)"
/>
<CheckBox
name="locked_checkbox"
event_EVT_CHECKBOX="on_locked_checkbox_changed"
label="$(locked_checkbox_text)"
/>
<CheckBox
name="ends_today_checkbox"
label="$(ends_today_checkbox_text)"
/>
</FlexGridSizer>
"""
TEXT_FIELD_ROW = """
<StaticText align="ALIGN_CENTER_VERTICAL" label="$(text_label)" />
<TextCtrl name="name" />
"""
CATEGORY_LISTBOX_ROW = """
<StaticText align="ALIGN_CENTER_VERTICAL" label="$(category_label)" />
<CategoryChoice
name="category_choice"
allow_add="True"
allow_edit="True"
timeline="$(db)"
align="ALIGN_LEFT"
/>
"""
CONTAINER_LISTBOX_ROW = """
<StaticText align="ALIGN_CENTER_VERTICAL" label="$(container_label)" />
<ContainerChoice
name="container_choice"
event_EVT_CONTAINER_CHANGED="on_container_changed"
db="$(db)"
align="ALIGN_LEFT"
/>
"""
NOTEBOOK_ROW = """
<Spacer />
<Notebook name="notebook" style="BK_DEFAULT">
<DescriptionEditor
name="description"
notebookLabel="$(page_description)"
editor="$(self)"
proportion="1"
/>
<IconEditor
name="icon"
notebookLabel="$(page_icon)"
editor="$(self)"
proportion="1"
/>
<AlertEditor
name="alert"
notebookLabel="$(page_alert)"
editor="$(self)"
proportion="1"
/>
<HyperlinkEditor
name="hyperlink"
notebookLabel="$(page_hyperlink)"
editor="$(self)"
proportion="1"
/>
<ProgressEditor
name="progress"
notebookLabel="$(page_progress)"
editor="$(self)"
proportion="1"
/>
</Notebook>
"""
def __init__(self, parent, config, title, db, start=None, end=None, event=None):
self.timeline = db
self.config = config
self.start = start
self.event = event
self._insert_rows_in_correct_order_in_xml()
Dialog.__init__(self, EditEventDialogController, parent, {
"self": self,
"db": db,
"time_type": db.get_time_type(),
"config": config,
"properties_label": _("Event Properties"),
"when_label": _("When:"),
"period_checkbox_text": _("Period"),
"show_time_checkbox_text": _("Show time"),
"fuzzy_checkbox_text": _("Fuzzy"),
"locked_checkbox_text": _("Locked"),
"ends_today_checkbox_text": _("Ends today"),
"to_label": _("to"),
"text_label": _("Text:"),
"category_label": _("Category:"),
"container_label": _("Container:"),
"page_description": _("Description"),
"page_icon": _("Icon"),
"page_alert": _("Alert"),
"page_hyperlink": _("Hyperlink"),
"page_progress": _("Progress"),
"add_more_label": _("Add more events after this one"),
"enlarge": _("&Enlarge"),
"reduce": _("&Reduce"),
}, title=title, style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
self.controller.on_init(
config,
db.get_time_type(),
DbWrapperEventRepository(db),
db,
start,
end,
event)
self._make_row_with_notebook_growable()
self.SetMinSize((800, -1))
self.Fit()
self.SetMinSize(self.GetSize())
def GetStart(self):
return self.start_time.get_value()
def SetStart(self, value):
self.start_time.set_value(value)
def GetEnd(self):
return self.end_time.get_value()
def SetEnd(self, value):
self.end_time.set_value(value)
def GetShowPeriod(self):
return self.period_checkbox.GetValue()
def SetShowPeriod(self, value):
self.period_checkbox.SetValue(value)
self.ShowToTime(value)
def ShowToTime(self, show):
self.to_label.Show(show)
self.end_time.Show(show)
def GetShowTime(self):
return self.show_time_checkbox.GetValue()
def SetShowTime(self, value):
if self.timeline.get_time_type().is_date_time_type():
self.show_time_checkbox.SetValue(value)
self.start_time.show_time(value)
self.end_time.show_time(value)
else:
self.show_time_checkbox.Hide()
def GetFuzzy(self):
return self.fuzzy_checkbox.GetValue()
def SetFuzzy(self, value):
self.fuzzy_checkbox.SetValue(value)
def GetLocked(self):
return self.locked_checkbox.GetValue()
def SetLocked(self, value):
self.locked_checkbox.SetValue(value)
def EnableLocked(self, value):
self.locked_checkbox.Enable(value)
def GetEndsToday(self):
return self.ends_today_checkbox.GetValue()
def SetEndsToday(self, value):
self.ends_today_checkbox.SetValue(value)
def EnableEndsToday(self, value):
self.ends_today_checkbox.Enable(value)
def GetName(self):
return self.name.GetValue().strip()
def SetName(self, value):
self.name.SetValue(value)
def GetCategory(self):
return self.category_choice.GetSelectedCategory()
def SetCategory(self, value):
self.category_choice.Populate(select=value)
def GetContainer(self):
return self.container_choice.GetSelectedContainer()
def SetContainer(self, value):
self.container_choice.Fill(value)
def GetEventData(self):
event_data = {}
for data_id, editor in self._get_event_data():
data = editor.get_data()
if data is not None:
event_data[data_id] = editor.get_data()
return event_data
def SetEventData(self, event_data):
for data_id, editor in self._get_event_data():
if data_id in event_data:
data = event_data[data_id]
if data is not None:
editor.set_data(data)
def ClearEventData(self):
for _, editor in self._get_event_data():
editor.clear_data()
def IsAddMoreChecked(self):
return self.add_more_checkbox.GetValue()
def SetShowAddMoreCheckbox(self, value):
self.add_more_checkbox.Show(value)
self.add_more_checkbox.SetValue(False)
self.SetSizerAndFit(self.GetSizer())
def SetFocusOnFirstControl(self):
control = {
"0": self.start_time,
"1": self.period_checkbox,
"2": self.name,
"3": self.category_choice,
"4": self.container_choice,
":": self.notebook,
}[self.config.event_editor_tab_order[0]]
_set_focus_and_select(control)
def DisplayInvalidStart(self, message):
self._display_invalid_input(message, self.start_time)
def DisplayInvalidEnd(self, message):
self._display_invalid_input(message, self.end_time)
def _display_invalid_input(self, message, control):
self.DisplayErrorMessage(message)
_set_focus_and_select(control)
def _get_event_data(self):
return [
("description", self.description),
("alert", self.alert),
("icon", self.icon),
("hyperlink", self.hyperlink),
("progress", self.progress),
]
def _insert_rows_in_correct_order_in_xml(self):
rows_by_key = {
"0": self.TIME_DETAILS_ROW,
"1": self.CHECKBOX_ROW,
"2": self.TEXT_FIELD_ROW,
"3": self.CATEGORY_LISTBOX_ROW,
"4": self.CONTAINER_LISTBOX_ROW,
":": self.NOTEBOOK_ROW,
}
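        # Each character of config.event_editor_tab_order (e.g. "01234:")
        # selects one row template above; their order fixes the dialog layout.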
placeholder_content = "".join(rows_by_key[key] for key in self.config.event_editor_tab_order)
self.__doc__ = self.__doc__ % placeholder_content
def _make_row_with_notebook_growable(self):
self.grid_sizer.AddGrowableRow(self.config.event_editor_tab_order.index(":"))
def open_event_editor_for(parent, config, db, handle_db_error, event):
def create_event_editor():
if event.is_container():
title = _("Edit Container")
return EditContainerDialog(parent, title, db, event)
else:
return EditEventDialog(
parent, config, _("Edit Event"), db, event=event)
def edit_function():
gui_utils.show_modal(create_event_editor, handle_db_error)
safe_locking(parent, edit_function)
def open_create_event_editor(parent, config, db, handle_db_error, start=None, end=None):
def create_event_editor():
label = _("Create Event")
return EditEventDialog(parent, config, label, db, start, end)
def edit_function():
gui_utils.show_modal(create_event_editor, handle_db_error)
safe_locking(parent, edit_function)
|
ezequielpereira/Time-Line
|
timelinelib/wxgui/dialogs/editevent/view.py
|
Python
|
gpl-3.0
| 12,695 | 0.000788 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import sh
import shutil
import stat
import tempfile
from dlrn.config import ConfigOptions
from dlrn import db
from dlrn.drivers.kojidriver import KojiBuildDriver
from dlrn.tests import base
from six.moves import configparser
from time import localtime
from time import strftime
def _mocked_listdir(directory):
return ['python-pysaml2-3.0-1a.el7.centos.src.rpm']
def _mocked_time():
return float(1533293385.545039)
def _mocked_call(*args, **kwargs):
if args[0] == '/usr/bin/git log':
return '1 2'
return True
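# _mocked_call stands in for every sh.Command invocation: a fake "git log"
# answers with a "<hash> <timestamp>" pair, anything else simply succeeds.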
@mock.patch('sh.restorecon', create=True)
@mock.patch('sh.env', create=True)
@mock.patch('os.listdir', side_effect=_mocked_listdir)
class TestDriverKoji(base.TestCase):
def setUp(self):
super(TestDriverKoji, self).setUp()
config = configparser.RawConfigParser()
config.read("projects.ini")
config.set("DEFAULT", "build_driver",
"dlrn.drivers.kojidriver.KojiBuildDriver")
self.config = ConfigOptions(config)
self.config.koji_krb_principal = 'test@example.com'
self.config.koji_krb_keytab = '/home/test/test.keytab'
self.config.koji_scratch_build = True
self.config.koji_build_target = 'build-target'
self.temp_dir = tempfile.mkdtemp()
self.config.datadir = self.temp_dir
# Create fake build log
with open("%s/kojibuild.log" % self.temp_dir, 'a') as fp:
fp.write("Created task: 1234")
# In the rhpkg case, we need to create a full dir structure
self.rhpkg_extra_dir = "%s/repos/12/34/1234567890abcdef_1_12345678"\
% self.temp_dir
os.makedirs(self.rhpkg_extra_dir)
with open("%s/rhpkgbuild.log"
% self.rhpkg_extra_dir, 'a') as fp:
fp.write("Created task: 5678")
# Another full-dir structure for the long extended hash test
self.rhpkg_extra_dir_2 = (
"%s/repos/12/34/1234567890abcdef_1_12345678_abcdefgh" %
self.temp_dir)
os.makedirs(self.rhpkg_extra_dir_2)
with open("%s/rhpkgbuild.log"
% self.rhpkg_extra_dir_2, 'a') as fp:
fp.write("Created task: 5678")
# Another full-dir structure for the long extended hash test
# with downstream driver
self.rhpkg_extra_dir_3 = (
"%s/repos/12/34/1234567890abcdef_fedcba09_1_1" %
self.temp_dir)
os.makedirs(self.rhpkg_extra_dir_3)
with open("%s/rhpkgbuild.log"
% self.rhpkg_extra_dir_3, 'a') as fp:
fp.write("Created task: 5678")
# Create a fake rhpkg binary
with open("%s/rhpkg" % self.temp_dir, 'a') as fp:
fp.write("true")
os.chmod("%s/rhpkg" % self.temp_dir,
stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
os.environ['PATH'] = self.temp_dir + ':' + os.environ['PATH']
def tearDown(self):
super(TestDriverKoji, self).tearDown()
shutil.rmtree(self.temp_dir)
def test_build_package(self, ld_mock, env_mock, rc_mock):
driver = KojiBuildDriver(cfg_options=self.config)
driver.build_package(output_directory=self.temp_dir)
expected = [mock.call(['koji',
'--principal', self.config.koji_krb_principal,
'--keytab', self.config.koji_krb_keytab,
'build', '--wait',
self.config.koji_build_target,
'%s/python-pysaml2-3.0-1a.el7.centos.src.rpm' %
self.temp_dir],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
scratch=True,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'}),
mock.call(['koji', 'download-task', '--logs', '1234'],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'})]
# 1- koji build (handled by env_mock)
# 2- koji download (handled by env_mock)
# 3- restorecon (handled by rc_mock)
self.assertEqual(env_mock.call_count, 2)
self.assertEqual(rc_mock.call_count, 1)
self.assertEqual(env_mock.call_args_list, expected)
def test_build_package_no_scratch(self, ld_mock, env_mock, rc_mock):
self.config.koji_scratch_build = False
driver = KojiBuildDriver(cfg_options=self.config)
driver.build_package(output_directory=self.temp_dir)
expected = [mock.call(['koji',
'--principal', self.config.koji_krb_principal,
'--keytab', self.config.koji_krb_keytab,
'build', '--wait',
self.config.koji_build_target,
'%s/python-pysaml2-3.0-1a.el7.centos.src.rpm' %
self.temp_dir],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
scratch=False,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'}),
mock.call(['koji', 'download-task', '--logs', '1234'],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'})]
# 1- koji build (handled by env_mock)
# 2- koji download (handled by env_mock)
# 3- restorecon (handled by rc_mock)
self.assertEqual(env_mock.call_count, 2)
self.assertEqual(rc_mock.call_count, 1)
self.assertEqual(env_mock.call_args_list, expected)
def test_build_package_brew(self, ld_mock, env_mock, rc_mock):
self.config.koji_exe = 'brew'
driver = KojiBuildDriver(cfg_options=self.config)
driver.build_package(output_directory=self.temp_dir)
expected = [mock.call(['brew',
'--principal', self.config.koji_krb_principal,
'--keytab', self.config.koji_krb_keytab,
'build', '--wait',
self.config.koji_build_target,
'%s/python-pysaml2-3.0-1a.el7.centos.src.rpm' %
self.temp_dir],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
scratch=True,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'}),
mock.call(['brew', 'download-task', '--logs', '1234'],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'})]
# 1- koji build (handled by env_mock)
# 2- koji download (handled by env_mock)
# 3- restorecon (handled by rc_mock)
self.assertEqual(env_mock.call_count, 2)
self.assertEqual(rc_mock.call_count, 1)
self.assertEqual(env_mock.call_args_list, expected)
def test_build_package_nokrb(self, ld_mock, env_mock, rc_mock):
self.config.koji_krb_principal = None
self.config.koji_krb_keytab = None
driver = KojiBuildDriver(cfg_options=self.config)
driver.build_package(output_directory=self.temp_dir)
expected = [mock.call(['koji',
'build', '--wait',
self.config.koji_build_target,
'%s/python-pysaml2-3.0-1a.el7.centos.src.rpm' %
self.temp_dir],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
scratch=True,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'}),
mock.call(['koji', 'download-task', '--logs', '1234'],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'})]
# 1- koji build (handled by env_mock)
# 2- koji download (handled by env_mock)
# 3- restorecon (handled by rc_mock)
self.assertEqual(env_mock.call_count, 2)
self.assertEqual(rc_mock.call_count, 1)
self.assertEqual(env_mock.call_args_list, expected)
@mock.patch('os.rename')
@mock.patch.object(sh.Command, '__call__', autospec=True,
side_effect=_mocked_call)
@mock.patch('dlrn.drivers.kojidriver.time', side_effect=_mocked_time)
@mock.patch('sh.kinit', create=True)
def test_build_package_rhpkg(self, ki_mock, tm_mock, rh_mock, rn_mock,
ld_mock, env_mock, rc_mock):
self.config.koji_use_rhpkg = True
commit = db.Commit(dt_commit=123, project_name='python-pysaml2',
commit_hash='1234567890abcdef',
distro_hash='1234567890abcdef',
extended_hash='1234567890abcdef',
dt_distro=123,
dt_extended=123)
driver = KojiBuildDriver(cfg_options=self.config)
driver.build_package(output_directory=self.temp_dir,
package_name='python-pysaml2',
commit=commit)
expected_env = [mock.call(['koji', 'download-task', '--logs', '5678'],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
_cwd=self.rhpkg_extra_dir,
_env={'PATH': '/usr/bin/'})]
pkg_date = strftime("%Y-%m-%d-%H%M%S", localtime(_mocked_time()))
expected_rh = [mock.call('%s/rhpkg' % self.temp_dir, 'import',
'--skip-diff',
'%s/python-pysaml2-3.0-1a.el7.centos.src'
'.rpm' % self.temp_dir),
mock.call('%s/rhpkg' % self.temp_dir, 'commit', '-p',
'-m',
'DLRN build at %s\n\n'
'Source SHA: 1234567890abcdef\n'
'Dist SHA: 1234567890abcdef\n'
'NVR: python-pysaml2-3.0-1a.el7.centos\n' %
pkg_date),
mock.call('/usr/bin/git log', '--pretty=format:%H %ct',
'-1', '.'),
mock.call('%s/rhpkg' % self.temp_dir, 'build',
'--skip-nvr-check', scratch=True)]
        # 1- kinit (handled by ki_mock)
        # 2- rhpkg import (handled by rh_mock)
        # 3- rhpkg commit (handled by rh_mock)
        # 4- git log (handled by rh_mock)
        # 5- rename (handled by rn_mock)
        # 6- rhpkg build (handled by rh_mock)
        # 7- koji download (handled by env_mock)
        # 8- restorecon (handled by rc_mock)
self.assertEqual(ki_mock.call_count, 1)
self.assertEqual(rh_mock.call_count, 4)
self.assertEqual(env_mock.call_count, 1)
self.assertEqual(rc_mock.call_count, 1)
self.assertEqual(rn_mock.call_count, 1)
self.assertEqual(env_mock.call_args_list, expected_env)
self.assertEqual(rh_mock.call_args_list, expected_rh)
@mock.patch('os.rename')
@mock.patch.object(sh.Command, '__call__', autospec=True,
side_effect=_mocked_call)
@mock.patch('dlrn.drivers.kojidriver.time', side_effect=_mocked_time)
@mock.patch('sh.kinit', create=True)
def test_build_package_rhpkg_longexthash(self, ki_mock, tm_mock, rh_mock,
rn_mock, ld_mock, env_mock,
rc_mock):
self.config.koji_use_rhpkg = True
commit = db.Commit(dt_commit=123, project_name='python-pysaml2',
commit_hash='1234567890abcdef',
distro_hash='fedcba0987654321',
extended_hash='123456789012345678901234567890'
'1234567890_abcdefghijabcdefghij'
'abcdefghijabcdefghij',
dt_distro=123,
dt_extended=123)
driver = KojiBuildDriver(cfg_options=self.config)
driver.build_package(output_directory=self.temp_dir,
package_name='python-pysaml2',
commit=commit)
expected_env = [mock.call(['koji', 'download-task', '--logs', '5678'],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
_cwd=self.rhpkg_extra_dir_2,
_env={'PATH': '/usr/bin/'})]
pkg_date = strftime("%Y-%m-%d-%H%M%S", localtime(_mocked_time()))
expected_rh = [mock.call('%s/rhpkg' % self.temp_dir, 'import',
'--skip-diff',
'%s/python-pysaml2-3.0-1a.el7.centos.src'
'.rpm' % self.temp_dir),
mock.call('%s/rhpkg' % self.temp_dir, 'commit', '-p',
'-m',
'DLRN build at %s\n\n'
'Source SHA: 1234567890abcdef\n'
'Dist SHA: fedcba0987654321\n'
'NVR: python-pysaml2-3.0-1a.el7.centos\n' %
pkg_date),
mock.call('/usr/bin/git log', '--pretty=format:%H %ct',
'-1', '.'),
mock.call('%s/rhpkg' % self.temp_dir, 'build',
'--skip-nvr-check', scratch=True)]
expected_rn = [mock.call(self.temp_dir, self.rhpkg_extra_dir_2)]
        # 1- kinit (handled by ki_mock)
        # 2- rhpkg import (handled by rh_mock)
        # 3- rhpkg commit (handled by rh_mock)
        # 4- git log (handled by rh_mock)
        # 5- rename (handled by rn_mock)
        # 6- rhpkg build (handled by rh_mock)
        # 7- koji download (handled by env_mock)
        # 8- restorecon (handled by rc_mock)
self.assertEqual(ki_mock.call_count, 1)
self.assertEqual(rh_mock.call_count, 4)
self.assertEqual(env_mock.call_count, 1)
self.assertEqual(rc_mock.call_count, 1)
self.assertEqual(rn_mock.call_count, 1)
self.assertEqual(env_mock.call_args_list, expected_env)
self.assertEqual(rh_mock.call_args_list, expected_rh)
self.assertEqual(rn_mock.call_args_list, expected_rn)
@mock.patch('os.rename')
@mock.patch.object(sh.Command, '__call__', autospec=True,
side_effect=_mocked_call)
@mock.patch('dlrn.drivers.kojidriver.time', side_effect=_mocked_time)
@mock.patch('sh.kinit', create=True)
def test_build_package_rhpkg_longexthash_ds(self, ki_mock, tm_mock,
rh_mock, rn_mock, ld_mock,
env_mock, rc_mock):
self.config.koji_use_rhpkg = True
self.config.pkginfo_driver = (
'dlrn.drivers.downstream.DownstreamInfoDriver')
self.config.use_upstream_spec = False
commit = db.Commit(dt_commit=123, project_name='python-pysaml2',
commit_hash='1234567890abcdef',
distro_hash='fedcba0987654321',
extended_hash='123456789012345678901234567890'
'1234567890_abcdefghijabcdefghij'
'abcdefghijabcdefghij',
dt_distro=123,
dt_extended=123)
driver = KojiBuildDriver(cfg_options=self.config)
driver.build_package(output_directory=self.temp_dir,
package_name='python-pysaml2',
commit=commit)
expected_env = [mock.call(['koji', 'download-task', '--logs', '5678'],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
_cwd=self.rhpkg_extra_dir_3,
_env={'PATH': '/usr/bin/'})]
pkg_date = strftime("%Y-%m-%d-%H%M%S", localtime(_mocked_time()))
expected_rh = [mock.call('%s/rhpkg' % self.temp_dir, 'import',
'--skip-diff',
'%s/python-pysaml2-3.0-1a.el7.centos.src'
'.rpm' % self.temp_dir),
mock.call('%s/rhpkg' % self.temp_dir, 'commit', '-p',
'-m',
'DLRN build at %s\n\n'
'Source SHA: 1234567890abcdef\n'
'Dist SHA: fedcba0987654321\n'
'NVR: python-pysaml2-3.0-1a.el7.centos\n' %
pkg_date),
mock.call('/usr/bin/git log', '--pretty=format:%H %ct',
'-1', '.'),
mock.call('/usr/bin/git pull'),
mock.call('/usr/bin/git log', '--pretty=format:%H %ct',
'-1', '.'),
mock.call('%s/rhpkg' % self.temp_dir, 'build',
'--skip-nvr-check', scratch=True)]
expected_rn = [mock.call(self.temp_dir, self.rhpkg_extra_dir_3)]
        # 1- kinit (handled by ki_mock)
# 2- rhpkg import (handled by rh_mock)
# 3- rhpkg commit (handled by rh_mock)
# 4- git log (handled by rh_mock)
# 5- git pull (handled by rh_mock)
# 6- git log (handled by rh_mock)
# 7- rename (handled by rn_mock)
# 8- rhpkg build (handled by rh_mock)
# 9- koji download (handled by env_mock)
# 10- restorecon (handled by rc_mock)
self.assertEqual(ki_mock.call_count, 1)
self.assertEqual(rh_mock.call_count, 6)
self.assertEqual(env_mock.call_count, 1)
self.assertEqual(rc_mock.call_count, 1)
self.assertEqual(rn_mock.call_count, 1)
self.assertEqual(env_mock.call_args_list, expected_env)
self.assertEqual(rh_mock.call_args_list, expected_rh)
self.assertEqual(rn_mock.call_args_list, expected_rn)
def test_write_mock_config(self, ld_mock, env_mock, rc_mock):
self.config.koji_build_target = 'foo-target'
self.config.koji_arch = 'aarch64'
self.config.fetch_mock_config = True
self.config.mock_base_packages = 'foo bar'
driver = KojiBuildDriver(cfg_options=self.config)
output_file = os.path.join(self.temp_dir, 'dlrn-1.cfg')
# Create sample downloaded config file
with open(output_file, "w") as fp:
fp.write("config_opts['root'] = 'dlrn-centos7-x86_64-1'\n")
fp.write("config_opts['chroot_setup_cmd'] = 'install abc def\n")
fp.write("'''")
expected = "config_opts['chroot_setup_cmd'] = 'install foo bar'\n"
driver.write_mock_config(output_file)
with open(output_file, "r") as fp:
for line in fp.readlines():
if line.startswith("config_opts['chroot_setup_cmd']"):
self.assertEqual(expected, line)
self.assertEqual(env_mock.call_count, 1)
def test_write_mock_config_pkg_mgr(self, ld_mock, env_mock, rc_mock):
self.config.koji_build_target = 'foo-target'
self.config.koji_arch = 'aarch64'
self.config.fetch_mock_config = True
self.config.mock_base_packages = 'foo bar'
self.config.mock_package_manager = 'apt'
driver = KojiBuildDriver(cfg_options=self.config)
output_file = os.path.join(self.temp_dir, 'dlrn-1.cfg')
# Create sample downloaded config file
with open(output_file, "w") as fp:
fp.write("config_opts['root'] = 'dlrn-centos7-x86_64-1'\n")
fp.write("config_opts['chroot_setup_cmd'] = 'install abc def\n")
fp.write("'''")
expected = "config_opts['package_manager'] = 'apt'\n"
driver.write_mock_config(output_file)
with open(output_file, "r") as fp:
for line in fp.readlines():
if line.startswith("config_opts['package_manager']"):
self.assertEqual(expected, line)
self.assertEqual(env_mock.call_count, 1)
def test_additional_tags(self, ld_mock, env_mock, rc_mock):
self.config.koji_add_tags = ['foo', 'bar']
self.config.koji_exe = 'brew'
driver = KojiBuildDriver(cfg_options=self.config)
driver.build_package(output_directory=self.temp_dir)
expected = [mock.call(['brew',
'--principal', self.config.koji_krb_principal,
'--keytab', self.config.koji_krb_keytab,
'build', '--wait',
self.config.koji_build_target,
'%s/python-pysaml2-3.0-1a.el7.centos.src.rpm' %
self.temp_dir],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
scratch=True,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'}),
mock.call(['brew', 'tag-build', 'foo',
'python-pysaml2-3.0-1a.el7.centos'],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'}),
mock.call(['brew', 'tag-build', 'bar',
'python-pysaml2-3.0-1a.el7.centos'],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'}),
mock.call(['brew', 'download-task', '--logs', '1234'],
_err=driver._process_koji_output,
_out=driver._process_koji_output,
_cwd=self.temp_dir,
_env={'PATH': '/usr/bin/'})]
# 1- koji build (handled by env_mock)
# 2 and 3- koji tag (handled by env_mock)
# 4- koji download (handled by env_mock)
# 5- restorecon (handled by rc_mock)
self.assertEqual(env_mock.call_count, 4)
self.assertEqual(rc_mock.call_count, 1)
self.assertEqual(env_mock.call_args_list, expected)
|
openstack-packages/DLRN
|
dlrn/tests/test_driver_koji.py
|
Python
|
apache-2.0
| 24,497 | 0 |
from collections import defaultdict
from config import main
import heapq
class UserPreference:
def __init__(self):
self.results = []
self.list1 = []
self.list2 = []
self.list3 = []
self.list4 = []
self.categories = []
self.sold_average = []
self.bought_average = []
def get_preferences(self, user):
        # Reset all variables
self.results = []
self.list1 = []
self.list2 = []
self.list3 = []
self.list4 = []
self.categories = []
self.sold_average = []
self.bought_average = []
self.frequency_based(user+'.csv')
return self.results
def frequency_based(self, user):
        with open(main.path+'data/user/'+user, "r") as fp:
            lines = fp.readlines()
for i in range(len(lines)):
lines[i] = lines[i].strip()
for i in range(1,len(lines)):
self.list1 = lines[i].split(",")
self.list2.append(self.list1)
self.list3.append(self.list1[3])
d = defaultdict(int)
for i in self.list3:
d[i] += 1
result = max(iter(d.items()), key=lambda x: x[1])
self.results.append(result[0])
self.deviation_based(result[0])
# STANDARD DEVIATION APPROACH
def deviation_based(self,freq_cat):
for i in range(0,len(self.list2)):
self.categories.append(self.list2[i][3])
self.categories = list(set(self.categories))
i = 0
for item in self.list2:
self.list4.append(self.categories.index(item[3]))
self.sold_average = [0]*len(self.categories)
self.bought_average = [0]*len(self.categories)
s_average = []
b_average = []
s=[0]*len(self.categories)
b=[0]*len(self.categories)
for item in self.list2:
cat = item[3]
ind = self.categories.index(cat)
if item[4] == 'sold':
self.sold_average[ind]+= int(float(item[5]))
else:
self.bought_average[ind]+= int(float(item[5]))
for x in self.list4:
if self.list2[i][3] == self.categories[x]:
if self.list2[i][4] == 'sold':
s[x]+=1
if self.list2[i][4] == 'bought':
b[x]+=1
i+=1
for i in range(len(self.categories)):
if s[i]!=0:
s_average.append(self.sold_average[i]/s[i])
else:
s_average.append(0)
for i in range(len(self.categories)):
if b[i]!=0:
b_average.append(self.bought_average[i]/b[i])
else:
b_average.append(0)
deviation = []
for i in range(len(self.categories)):
deviation.append(s_average[i]-b_average[i])
max_category = max(deviation)
max2_category = heapq.nlargest(2, deviation)
        # The original compared the numeric deviation itself to the category
        # name, which can never match; compare the categories instead.
        top_category = self.categories[deviation.index(max_category)]
        if top_category == freq_cat:
            self.results.append(top_category)
        else:
            self.results.append(self.categories[deviation.index(max2_category[1])])
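# --- Illustrative usage (not part of the module) ---
# Assumes a CSV at <main.path>data/user/<user>.csv with a header row and
# records whose column 3 is the category, column 4 is 'sold'/'bought' and
# column 5 is the amount (the columns frequency_based/deviation_based read):
#
#   pref = UserPreference()
#   picks = pref.get_preferences('alice')   # reads alice.csv
#   print(picks)  # [frequency-based category, deviation-based category]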
|
adarshdec23/Market
|
core/preference/main.py
|
Python
|
apache-2.0
| 3,319 | 0.008738 |
from time import time
from itertools import groupby
from vial import vfunc, vim, dref
from vial.utils import echon, redraw
from os.path import split
MAX_HISTORY_SIZE = 100
VHIST = 'vial_buf_hist'
VLAST = 'vial_last_buf'
def add_to_history(w, bufnr):
history = list(w.vars[VHIST])
history[:] = [r for r in history if r != bufnr][:MAX_HISTORY_SIZE - 1]
history.insert(0, bufnr)
w.vars[VHIST] = history
return history
def check_history(window):
if VHIST not in window.vars:
bufnr = vim.current.buffer.number
history = [r.number for r in vim.buffers if r.number != bufnr]
history.reverse()
history.insert(0, bufnr)
window.vars[VHIST] = history
def win_buf_enter():
w = vim.current.window
bufnr = int(vfunc.expand('<abuf>'))
if not w.vars.get('vial_bufhist_switch', None):
check_history(w)
add_to_history(w, bufnr)
w.vars[VLAST] = 0
else:
w.vars[VLAST] = bufnr, time()
@dref
def moved():
now = time()
w = vim.current.window
lastbuf = w.vars.get(VLAST, None)
if not lastbuf or now - lastbuf[1] > 0.1:
w.vars[VLAST] = 0
vim.command('echo "" | au! vial_bufhist_wait_action')
skey = lambda r: r[1][1]
def jump(dir):
w = vim.current.window
check_history(w)
history = list(w.vars[VHIST])
bufnr = vim.current.buffer.number
now = time()
lastbuf = w.vars.get(VLAST, None)
if not lastbuf or (bufnr == lastbuf[0] and
now - lastbuf[1] > vim.vars['vial_bufhist_timeout']):
history = add_to_history(w, bufnr)
if bufnr not in history:
history = add_to_history(w, bufnr)
names = {r.number: (split(r.name)
if r.name
else ['', '[buf-{}]'.format(r.number)])
for r in vim.buffers if vfunc.buflisted(r.number)}
history[:] = filter(lambda r: r in names, history)
dups = True
while dups:
dups = False
for name, g in groupby(sorted(names.iteritems(), key=skey), skey):
g = list(g)
if len(g) > 1:
dups = True
for nr, (path, _) in g:
p, n = split(path)
names[nr] = p, n + '/' + name
width = vim.vars['vial_bufhist_width']
if width < 0:
width += int(vim.eval('&columns')) - 1
try:
idx = history.index(bufnr)
except ValueError:
return
idx += dir
if idx < 0:
idx = 0
elif idx >= len(history):
idx = len(history) - 1
anr = history[idx]
active = names[anr][1]
before = ' '.join(names[r][1] for r in history[:idx])
after = ' '.join(names[r][1] for r in history[idx+1:])
half = (width - len(active) - 4) / 2
if len(before) < len(after):
blen = min(half, len(before))
alen = width - len(active) - blen - 4
else:
alen = min(half, len(after))
blen = width - len(active) - alen - 4
if len(before) > blen:
before = '...' + before[3-blen:]
if len(after) > alen:
after = after[:alen-3] + '...'
if before: before += ' '
if after: after = ' ' + after
vim.command('let x=&ruler | let y=&showcmd')
vim.command('set noruler noshowcmd')
redraw()
echon(before)
vim.command('echohl CursorLine')
echon(active)
vim.command('echohl None')
echon(after)
vim.command('let &ruler=x | let &showcmd=y')
if anr != bufnr:
w.vars['vial_bufhist_switch'] = 1
vim.command('silent b {}'.format(anr))
w.vars['vial_bufhist_switch'] = 0
vim.command('augroup vial_bufhist_wait_action')
vim.command('au!')
vim.command('au CursorMoved,CursorHold <buffer> python %s()' % moved.ref)
vim.command('augroup END')
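# --- Illustrative sketch (not part of the plugin) ---
# add_to_history() maintains a bounded most-recently-used list: the entered
# buffer moves to the front and anything past MAX_HISTORY_SIZE falls off.
# The same update, isolated from vim window state:
def _mru_update(history, item, max_size=MAX_HISTORY_SIZE):
    history = [r for r in history if r != item][:max_size - 1]
    history.insert(0, item)
    return history
# _mru_update([3, 1, 2], 2) -> [2, 3, 1]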
|
guilhermedallanol/dotfiles
|
vim/plugged/vial/vial/plugins/bufhist/plugin.py
|
Python
|
mit
| 3,812 | 0.000787 |
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
def register_types(module):
root_module = module.get_root()
## udp-echo-client.h: ns3::UdpEchoClient [class]
module.add_class('UdpEchoClient', parent=root_module['ns3::Application'])
## udp-echo-server.h: ns3::UdpEchoServer [class]
module.add_class('UdpEchoServer', parent=root_module['ns3::Application'])
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace addressUtils
nested_module = module.add_cpp_namespace('addressUtils')
register_types_ns3_addressUtils(nested_module)
## Register a nested module for the namespace aodv
nested_module = module.add_cpp_namespace('aodv')
register_types_ns3_aodv(nested_module)
## Register a nested module for the namespace dot11s
nested_module = module.add_cpp_namespace('dot11s')
register_types_ns3_dot11s(nested_module)
## Register a nested module for the namespace dsdv
nested_module = module.add_cpp_namespace('dsdv')
register_types_ns3_dsdv(nested_module)
## Register a nested module for the namespace flame
nested_module = module.add_cpp_namespace('flame')
register_types_ns3_flame(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
## Register a nested module for the namespace olsr
nested_module = module.add_cpp_namespace('olsr')
register_types_ns3_olsr(nested_module)
def register_types_ns3_Config(module):
root_module = module.get_root()
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_addressUtils(module):
root_module = module.get_root()
def register_types_ns3_aodv(module):
root_module = module.get_root()
def register_types_ns3_dot11s(module):
root_module = module.get_root()
def register_types_ns3_dsdv(module):
root_module = module.get_root()
def register_types_ns3_flame(module):
root_module = module.get_root()
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_olsr(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3UdpEchoClient_methods(root_module, root_module['ns3::UdpEchoClient'])
register_Ns3UdpEchoServer_methods(root_module, root_module['ns3::UdpEchoServer'])
return
def register_Ns3UdpEchoClient_methods(root_module, cls):
## udp-echo-client.h: ns3::UdpEchoClient::UdpEchoClient(ns3::UdpEchoClient const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UdpEchoClient const &', 'arg0')])
## udp-echo-client.h: ns3::UdpEchoClient::UdpEchoClient() [constructor]
cls.add_constructor([])
## udp-echo-client.h: uint32_t ns3::UdpEchoClient::GetDataSize() const [member function]
cls.add_method('GetDataSize',
'uint32_t',
[],
is_const=True)
## udp-echo-client.h: static ns3::TypeId ns3::UdpEchoClient::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## udp-echo-client.h: void ns3::UdpEchoClient::SetDataSize(uint32_t dataSize) [member function]
cls.add_method('SetDataSize',
'void',
[param('uint32_t', 'dataSize')])
## udp-echo-client.h: void ns3::UdpEchoClient::SetFill(std::string fill) [member function]
cls.add_method('SetFill',
'void',
[param('std::string', 'fill')])
## udp-echo-client.h: void ns3::UdpEchoClient::SetFill(uint8_t fill, uint32_t dataSize) [member function]
cls.add_method('SetFill',
'void',
[param('uint8_t', 'fill'), param('uint32_t', 'dataSize')])
## udp-echo-client.h: void ns3::UdpEchoClient::SetFill(uint8_t * fill, uint32_t fillSize, uint32_t dataSize) [member function]
cls.add_method('SetFill',
'void',
[param('uint8_t *', 'fill'), param('uint32_t', 'fillSize'), param('uint32_t', 'dataSize')])
## udp-echo-client.h: void ns3::UdpEchoClient::SetRemote(ns3::Ipv4Address ip, uint16_t port) [member function]
cls.add_method('SetRemote',
'void',
[param('ns3::Ipv4Address', 'ip'), param('uint16_t', 'port')])
## udp-echo-client.h: void ns3::UdpEchoClient::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## udp-echo-client.h: void ns3::UdpEchoClient::StartApplication() [member function]
cls.add_method('StartApplication',
'void',
[],
visibility='private', is_virtual=True)
## udp-echo-client.h: void ns3::UdpEchoClient::StopApplication() [member function]
cls.add_method('StopApplication',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3UdpEchoServer_methods(root_module, cls):
## udp-echo-server.h: ns3::UdpEchoServer::UdpEchoServer(ns3::UdpEchoServer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UdpEchoServer const &', 'arg0')])
## udp-echo-server.h: ns3::UdpEchoServer::UdpEchoServer() [constructor]
cls.add_constructor([])
## udp-echo-server.h: static ns3::TypeId ns3::UdpEchoServer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## udp-echo-server.h: void ns3::UdpEchoServer::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## udp-echo-server.h: void ns3::UdpEchoServer::StartApplication() [member function]
cls.add_method('StartApplication',
'void',
[],
visibility='private', is_virtual=True)
## udp-echo-server.h: void ns3::UdpEchoServer::StopApplication() [member function]
cls.add_method('StopApplication',
'void',
[],
visibility='private', is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_Config(module.get_submodule('Config'), root_module)
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module)
register_functions_ns3_aodv(module.get_submodule('aodv'), root_module)
register_functions_ns3_dot11s(module.get_submodule('dot11s'), root_module)
register_functions_ns3_dsdv(module.get_submodule('dsdv'), root_module)
register_functions_ns3_flame(module.get_submodule('flame'), root_module)
register_functions_ns3_internal(module.get_submodule('internal'), root_module)
register_functions_ns3_olsr(module.get_submodule('olsr'), root_module)
return
def register_functions_ns3_Config(module, root_module):
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_addressUtils(module, root_module):
return
def register_functions_ns3_aodv(module, root_module):
return
def register_functions_ns3_dot11s(module, root_module):
return
def register_functions_ns3_dsdv(module, root_module):
return
def register_functions_ns3_flame(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
return
def register_functions_ns3_olsr(module, root_module):
return
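# --- Illustrative sketch (not part of the generated apidefs) ---
# Files like this are consumed by a pybindgen driver that builds the root
# Module, invokes register_types()/register_methods()/register_functions(),
# and then emits C++ wrapper code. A minimal self-contained driver (the
# 'demo.h' header and Demo class are hypothetical) might look like:
#
#   import sys
#   from pybindgen import Module, FileCodeSink
#
#   mod = Module('demo')
#   mod.add_include('"demo.h"')   # hypothetical header
#   klass = mod.add_class('Demo')
#   klass.add_constructor([])
#   mod.generate(FileCodeSink(sys.stdout))
#
# The real ns-3 driver must register ns3::Application and the other parent
# types first, or register_types() above cannot resolve
# root_module['ns3::Application'].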
|
annegabrielle/secure_adhoc_network_ns-3
|
ns3_source_code/ns-3.10/bindings/python/apidefs/gcc-LP64/ns3_module_udp_echo.py
|
Python
|
gpl-2.0
| 8,335 | 0.017876 |
import os
import ntpath
import xlrd
import openpyxl
from openpyxl.utils import coordinate_from_string, column_index_from_string
from openpyxl.utils.exceptions import CellCoordinatesException
class ExcelManager:
"""
Wrapper that opens and operates on .xls, .xlsx or .xlsm excel files. By
default we take in a string representing the excel file path (extension
included), and depending on the file type use xlrd or openpyxl to operate
on it.
The dev facing api is identical for either - internally we use xlrd or
openpyxl methods depending on the file type.
For rows & columns we use 1 based indexing to stay with the more modern
openpyxl (and most users are more familiar with it if they are coming from
an office environment, not a programming one). Be aware of which file type
you are using if you retrieve the sheet object - it could be using zero OR
one based indexing.
Public Variables:
file_path: full file path with extension of the file we are operating on
workbook: openpyxl/xlrd workbook object for this file
sheet: currently in use openpxl/xlrd sheet object for this work book
read_count: number of sheet reads this object has done
write_count: number of sheet writes this object has done
    Public Methods:
    change_sheet - choose which sheet to use (by name or 1-based index)
    row/column - retrieve a full row or column as a list of values
    cell - retrieve an openpyxl/xlrd cell object by row/column or index
    read - retrieve the value from the current sheet at row/column or index
    write - write a value to the current sheet at row/column or index
    save - save the workbook at the initial file path, or a new file path
        if one is specified
    info - return basic information/status of the object
    array - return a 2D list representation of the current sheet
    search - return the first matching (row, column) index, (None, None) if
        there is no match, or a list of all match indexes when many=True
"""
write_count = 0
read_count = 0
sheet_array = None
def __init__(self, file_path, sheet_name=None, sheet_index=None):
self.file_path = file_path
self.__check_file_extension(file_path)
self.__check_file_exists(file_path)
if file_path.endswith('.xls'):
self.__is_xls = True
self.__init_xls(sheet_name, sheet_index)
else:
self.__is_xls = False
self.__init_excel(sheet_name, sheet_index)
def change_sheet(self, *args):
"""
Change the current active sheet object
:param name: sheet name
:param index: sheet index (1 index)
:return: None
"""
if isinstance(args[0], str):
name = args[0]
index = None
elif isinstance(args[0], int):
name = None
index = args[0]
else:
raise ValueError('Specify either the sheet name or sheet index to change sheets')
if self.__is_xls:
self.__select_xls_sheet(name, index - 1 if index else None)
else:
self.__select_excel_sheet(name, index - 1 if index else None)
def row(self, row_index):
"""
Return the row at the specified index
        :param row_index: 1-based index
:return: list of values
"""
self.sheet_array = self.array()
return self.sheet_array[row_index - 1]
def column(self, column_index):
"""
return the column at the specified index
:param column_index: string or (1 based) int index
:return: list of values
"""
if isinstance(column_index, int):
column = column_index - 1
else:
column = column_index_from_string(column_index.upper()) - 1
self.sheet_array = self.array()
return [row[column] for row in self.sheet_array]
def cell(self, *args):
"""
Return the cell at the specified location
:param args: tuple with either a 1 based representation for row/column
or string based index
:return: xlrd/openpyxl cell object
"""
row, column = self.__parse_row_column_from_args(*args)
if self.__is_xls:
return self.__get_xls_cell(row - 1, column - 1) # xlrd is a 1 based index
else:
return self.__get_excel_cell(row, column)
def read(self, *args):
"""
Read the value from the target cell
:param args: tuple with either a 1 based representation for row/column
or string based index
:return: string
"""
self.read_count += 1
value = self.cell(*args).value
return value if value else ''
def write(self, *args, value=None):
"""
Input the value at the specified target
:param args: tuple with either a 1 based representation for row/column
or string based index
:param value:
:return:
"""
if self.__is_xls:
raise TypeError('Writing to a cell is not supported for .xls files')
self.cell(*args).value = value
self.write_count += 1
def save(self, *args):
"""
Save the current sheet either at the original file_path (if none
specified) or at the file_path parameter
:param file_path: new file path to save file
:return: None
"""
if len(args) == 1:
self.__check_file_extension(args[0])
file_path = args[0]
else:
file_path = self.file_path
if self.__is_xls:
raise TypeError('Saving is not supported for .xls files')
else:
self.workbook.save(file_path)
def info(self, string=False):
"""
return basic information about this ExcelWrapper instance
:return: string
"""
sheet_name = self.sheet.name if self.__is_xls else self.sheet.title
if string:
return 'File: {}\nSheet: {}\nReads: {}\nWrites: {}' \
.format(self.file_path, sheet_name, self.read_count, self.write_count)
else:
return {
'file': self.file_path,
'sheet': sheet_name,
'reads': self.read_count,
'writes': self.write_count
}
def array(self):
"""
Return a 2D list representing the spreadsheet
:return: list(list())
"""
if self.__is_xls:
self.sheet_array = self.__xls_to_array()
return self.sheet_array
else:
self.sheet_array = self.__excel_to_array()
return self.sheet_array
def search(self, value, match=1, case_insensitive=True, contains=False, many=False):
"""
Given a value find the 1 based index where that value is located
on the sheet or None if it does not exist. If 'many' is set true then
an empty list is returned if no matches are found
:param value: the value we are searching for
        :param match: if multiple results are found we return only one; this
            1-based parameter selects which of the matches to return
:param case_insensitive: whether or not the search should be case insensitive
:param contains: whether or not the search should use 'in' or equality to
check if the spreadsheet value is a match
:param many: whether or not to return a singular value or a list of values
:return:
"""
indexes = self.__find_indexes(value, case_insensitive=case_insensitive, contains=contains)
if many:
return indexes
try:
match = indexes[match - 1]
return match[0], match[1]
except IndexError:
return None, None
def __find_indexes(self, value, case_insensitive, contains):
"""
Iterate over the 2D list representation of the sheet and determine
if the input value exists based on search parameters
:param value: value we are looking for
:param case_insensitive: whether or not search is case_insensitive
:param contains: use 'in' to find matches
:return:
"""
self.sheet_array = self.array()
indexes = []
for i, row in enumerate(self.sheet_array):
for j, column in enumerate(row):
input_val, column_val = self.__check_case_sensitive(case_insensitive, value, column)
if contains and input_val in column_val:
indexes.append((i + 1, j + 1))
elif input_val == column_val:
indexes.append((i + 1, j + 1))
return indexes
@staticmethod
def __check_case_sensitive(case_insensitive, value, column):
        # Coerce to str so numeric cell values don't break .lower() or 'in'
        column_val = str(column).lower() if case_insensitive else str(column)
        input_val = str(value).lower() if case_insensitive else str(value)
return input_val, column_val
def __parse_row_column_from_args(self, *args):
"""
convert a generic arguments tuple into a 1-based ow/column index. This
is to support both numeric (1, 1) and string (A1) representation of
cells with the same API.
:param args: args tuple
:return: int, int tuple
"""
if len(args) == 1 and isinstance(args[0], str):
row, column = self.__parse_row_column_from_index(args[0])
elif len(args) == 2 and isinstance(args[0], int) and isinstance(args[1], int):
row = args[0]
column = args[1]
else:
raise ValueError('Specify either row and column numbers (1, 1) OR a cell index ("A1")')
return row, column
@staticmethod
def __parse_row_column_from_index(cell_index):
"""
Given a string based excel index return the int based row, column
representation
:param cell_index: string based excel input
:return: row, column ints
"""
try:
xy = coordinate_from_string(cell_index.upper())
row = xy[1]
column = column_index_from_string(xy[0])
return row, column
except CellCoordinatesException:
raise ValueError('The index must be a valid Excel index (A1, E17, etc.)')
def __init_xls(self, sheet_name, sheet_index):
"""
initialize a .xls file with xlrd
"""
self.workbook = xlrd.open_workbook(self.file_path)
self.__select_xls_sheet(sheet_name, sheet_index)
def __select_xls_sheet(self, sheet_name, sheet_index):
"""
change the currently active xlrd sheet
"""
if sheet_name:
self.sheet = self.workbook.sheet_by_name(sheet_name)
elif sheet_index:
self.sheet = self.workbook.sheet_by_index(sheet_index)
else:
self.sheet = self.workbook.sheet_by_index(0)
def __get_xls_cell(self, row, column):
"""
retrieve the xlrd cell object at the specified row/column
:param row: 1-based row index
:param column: 1-based column index
:return: cell object
"""
return self.sheet.cell(row, column)
def __xls_to_array(self):
"""
convert an xlrd sheet to a 2D list of values.
:return:
"""
sheet_array = []
for row in range(1, self.__get_max_rows() + 1):
row_array = []
for column in range(1, self.__get_max_columns() + 1):
value = self.read(row, column)
row_array.append(value)
sheet_array.append(row_array)
return sheet_array
def __init_excel(self, sheet_name, sheet_index):
"""
initialize a .xlsx file with openpyxl
"""
self.workbook = openpyxl.load_workbook(self.file_path)
self.__select_excel_sheet(sheet_name, sheet_index)
def __select_excel_sheet(self, sheet_name, sheet_index):
"""
change the currently active openpyxl sheet
"""
if sheet_name:
self.sheet = self.workbook[sheet_name]
elif sheet_index:
sheet_names = self.workbook.sheetnames
self.sheet = self.workbook[sheet_names[sheet_index]]
else:
sheet_names = self.workbook.sheetnames
self.sheet = self.workbook[(sheet_names[0])]
def __get_excel_cell(self, row, column):
"""
retrieve the openpyxl cell object at the specified row/column
:param row: 1-based row index
:param column: 1-based column index
:return: cell object
"""
return self.sheet.cell(row=row, column=column)
def __excel_to_array(self):
"""
convert an openpyxl sheet to a 2D list of values.
:return:
"""
sheet_array = []
for row in range(1, self.sheet.max_row + 1):
row_array = []
for column in range(1, self.sheet.max_column + 1):
value = self.read(row, column)
row_array.append(value)
sheet_array.append(row_array)
return sheet_array
    def __get_max_rows(self):
        """
        return the number of rows in the current sheet object
        :return: int
        """
        if self.__is_xls:
            return self.sheet.nrows
        return self.sheet.max_row
    def __get_max_columns(self):
        """
        return the number of columns in the current sheet object
        :return: int
        """
        if self.__is_xls:
            return self.sheet.ncols
        return self.sheet.max_column
@staticmethod
def __check_file_extension(file_path):
extensions = ['.xls', '.xlsx', '.xlsm']
if not any(file_path.endswith(extension) for extension in extensions):
raise ValueError("""
No extension found on file path - make sure you include the FULL file path with the extension. Valid
extensions include: {}
""".format(', '.join(extensions)))
@staticmethod
def __check_file_exists(file_path):
"""
Check to see if the input file exists - if not, raise an error
that lists other excel files in the same directory
:param file_path: full file path to excel file
:return:
"""
if not os.path.exists(file_path):
file_name = ntpath.basename(file_path)
file_directory = file_path.replace(file_name, '')
valid_files = [file for file in os.listdir(file_directory) if 'xls' in file]
base_error = 'The file {} was not found. Maybe you were looking for one of these?\n\n'.format(file_name)
raise FileNotFoundError(base_error + '\n'.join(valid_files[:10]))
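# --- Illustrative usage (not part of the class) ---
# Round-trip demo; a throwaway workbook is created first so the example is
# self-contained (the file name is arbitrary):
if __name__ == '__main__':
    _wb = openpyxl.Workbook()
    _wb.save('demo.xlsx')
    xl = ExcelManager('demo.xlsx')
    xl.write('A1', value='hello')      # string index...
    xl.write(2, 1, value='world')      # ...or 1-based row/column
    print(xl.read('A1'), xl.read(2, 1))
    print(xl.search('world'))          # -> (2, 1)
    xl.save()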
|
erik-sn/xlwrap
|
xlwrap.py
|
Python
|
mit
| 14,765 | 0.001422 |
# -*- coding:utf-8 -*-
from bottle import route, run
@route("/")
def access():
return "OK!"
# The default host value is 127.0.0.1
# OK - localhost / 127.0.0.1
# NG - 192.168.0.10 / hostname
# run(port=8080, debug=True, reloader=True)
# run(host="localhost", port=8080, debug=True, reloader=True)
# OK - 192.168.0.10 / hostname
# NG - localhost / 127.0.0.1
run(host="192.168.0.10", port=8080, debug=True, reloader=True)
# run(host="<your hostname>", port=8080, debug=True, reloader=True)
# OK - ALL
# run(host="0.0.0.0", port=8080, debug=True, reloader=True)
|
thinkAmi-sandbox/Bottle-sample
|
lan_access.py
|
Python
|
unlicense
| 589 | 0.005236 |
# GNU Enterprise Common Library - Schema support for MS-SQL
#
# Copyright 2000-2007 Free Software Foundation
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: Behavior.py,v 1.2 2008/11/04 20:14:04 oleg Exp $
"""
Schema support plugin for MS-SQL backends.
"""
__all__ = ['Behavior']
from gnue.common.datasources import GSchema
from gnue.common.datasources.drivers import Base
# =============================================================================
# Behavior class
# =============================================================================
class Behavior (Base.Behavior):
"""
Behavior class for MS-SQL backends.
"""
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__ (self, connection):
Base.Behavior.__init__ (self, connection)
|
HarmonyEnterpriseSolutions/harmony-platform
|
src/gnue/common/datasources/drivers/sql/mssql/Behavior.py
|
Python
|
gpl-2.0
| 1,600 | 0.00625 |
import falcon
import json
class QuoteResource:
def on_get(self, req, resp):
"""Handles GET requests"""
quote = {
'quote': 'I\'ve always been more interested in the future than in the past.',
'author': 'Grace Hopper'
}
resp.body = json.dumps(quote)
api = falcon.API()
api.add_route('/quote', QuoteResource())
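# --- Illustrative way to serve this app (not part of the sample) ---
# falcon.API() returns a WSGI callable, so the stdlib server is enough for a
# quick local check (host/port are arbitrary):
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    make_server('127.0.0.1', 8000, api).serve_forever()
# A GET on http://127.0.0.1:8000/quote then returns the JSON payload above.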
|
lotrekagency/heimdall
|
server/server.py
|
Python
|
mit
| 371 | 0.008086 |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import hashlib
import json
import locale
import random
import StringIO
import time
import threading
import uuid
import unittest
from nose import SkipTest
from ConfigParser import ConfigParser
from test import get_config
from test.functional.swift_test_client import Account, Connection, File, \
ResponseError
from swift.common.constraints import MAX_FILE_SIZE, MAX_META_NAME_LENGTH, \
MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \
MAX_OBJECT_NAME_LENGTH, CONTAINER_LISTING_LIMIT, ACCOUNT_LISTING_LIMIT, \
MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH
default_constraints = dict((
('max_file_size', MAX_FILE_SIZE),
('max_meta_name_length', MAX_META_NAME_LENGTH),
('max_meta_value_length', MAX_META_VALUE_LENGTH),
('max_meta_count', MAX_META_COUNT),
('max_meta_overall_size', MAX_META_OVERALL_SIZE),
('max_object_name_length', MAX_OBJECT_NAME_LENGTH),
('container_listing_limit', CONTAINER_LISTING_LIMIT),
('account_listing_limit', ACCOUNT_LISTING_LIMIT),
('max_account_name_length', MAX_ACCOUNT_NAME_LENGTH),
('max_container_name_length', MAX_CONTAINER_NAME_LENGTH)))
constraints_conf = ConfigParser()
conf_exists = constraints_conf.read('/etc/swift/swift.conf')
# Constraints are set first from the test config, then from
# /etc/swift/swift.conf if it exists. If swift.conf doesn't exist,
# then limit test coverage. This allows SAIO tests to work fine but
# requires remote functional testing to know something about the cluster
# that is being tested.
config = get_config('func_test')
for k in default_constraints:
if k in config:
# prefer what's in test.conf
config[k] = int(config[k])
elif conf_exists:
# swift.conf exists, so use what's defined there (or swift defaults)
# This normally happens when the test is running locally to the cluster
# as in a SAIO.
config[k] = default_constraints[k]
else:
# .functests don't know what the constraints of the tested cluster are,
# so the tests can't reliably pass or fail. Therefore, skip those
# tests.
config[k] = '%s constraint is not defined' % k
web_front_end = config.get('web_front_end', 'integral')
normalized_urls = config.get('normalized_urls', False)
def load_constraint(name):
c = config[name]
if not isinstance(c, int):
raise SkipTest(c)
return c
locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))
def chunks(s, length=3):
i, j = 0, length
while i < len(s):
yield s[i:j]
i, j = j, j + length
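# (illustrative) chunks() splits a string into fixed-size pieces:
#     list(chunks('abcdefg')) -> ['abc', 'def', 'g']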
def timeout(seconds, method, *args, **kwargs):
class TimeoutThread(threading.Thread):
def __init__(self, method, *args, **kwargs):
threading.Thread.__init__(self)
self.method = method
self.args = args
self.kwargs = kwargs
self.exception = None
def run(self):
try:
self.method(*self.args, **self.kwargs)
except Exception as e:
self.exception = e
t = TimeoutThread(method, *args, **kwargs)
t.start()
t.join(seconds)
if t.exception:
raise t.exception
if t.isAlive():
t._Thread__stop()
return True
return False
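# (illustrative) timeout() returns True when the call overran and was
# stopped, False when it finished in time:
#     timeout(1, time.sleep, 5)    -> True
#     timeout(1, time.sleep, 0.1)  -> False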
class Utils(object):
@classmethod
def create_ascii_name(cls, length=None):
return uuid.uuid4().hex
@classmethod
def create_utf8_name(cls, length=None):
if length is None:
length = 15
else:
length = int(length)
utf8_chars = u'\uF10F\uD20D\uB30B\u9409\u8508\u5605\u3703\u1801'\
u'\u0900\uF110\uD20E\uB30C\u940A\u8509\u5606\u3704'\
u'\u1802\u0901\uF111\uD20F\uB30D\u940B\u850A\u5607'\
u'\u3705\u1803\u0902\uF112\uD210\uB30E\u940C\u850B'\
u'\u5608\u3706\u1804\u0903\u03A9\u2603'
return ''.join([random.choice(utf8_chars)
for x in xrange(length)]).encode('utf-8')
create_name = create_ascii_name
class Base(unittest.TestCase):
def setUp(self):
cls = type(self)
if not cls.set_up:
cls.env.setUp()
cls.set_up = True
def assert_body(self, body):
response_body = self.env.conn.response.read()
self.assert_(response_body == body,
'Body returned: %s' % (response_body))
def assert_status(self, status_or_statuses):
self.assert_(self.env.conn.response.status == status_or_statuses or
(hasattr(status_or_statuses, '__iter__') and
self.env.conn.response.status in status_or_statuses),
'Status returned: %d Expected: %s' %
(self.env.conn.response.status, status_or_statuses))
class Base2(object):
def setUp(self):
Utils.create_name = Utils.create_utf8_name
super(Base2, self).setUp()
def tearDown(self):
Utils.create_name = Utils.create_ascii_name
class TestAccountEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.containers = []
for i in range(10):
cont = cls.account.container(Utils.create_name())
if not cont.create():
raise ResponseError(cls.conn.response)
cls.containers.append(cont)
class TestAccountDev(Base):
env = TestAccountEnv
set_up = False
class TestAccountDevUTF8(Base2, TestAccountDev):
set_up = False
class TestAccount(Base):
env = TestAccountEnv
set_up = False
def testNoAuthToken(self):
self.assertRaises(ResponseError, self.env.account.info,
cfg={'no_auth_token': True})
self.assert_status([401, 412])
self.assertRaises(ResponseError, self.env.account.containers,
cfg={'no_auth_token': True})
self.assert_status([401, 412])
def testInvalidUTF8Path(self):
invalid_utf8 = Utils.create_utf8_name()[::-1]
container = self.env.account.container(invalid_utf8)
self.assert_(not container.create(cfg={'no_path_quote': True}))
self.assert_status(412)
self.assert_body('Invalid UTF8 or contains NULL')
def testVersionOnlyPath(self):
self.env.account.conn.make_request('PUT',
cfg={'version_only_path': True})
self.assert_status(412)
self.assert_body('Bad URL')
def testInvalidPath(self):
was_url = self.env.account.conn.storage_url
if (normalized_urls):
self.env.account.conn.storage_url = '/'
else:
self.env.account.conn.storage_url = "/%s" % was_url
self.env.account.conn.make_request('GET')
try:
self.assert_status(404)
finally:
self.env.account.conn.storage_url = was_url
def testPUT(self):
self.env.account.conn.make_request('PUT')
self.assert_status([403, 405])
def testAccountHead(self):
try_count = 0
while try_count < 5:
try_count += 1
info = self.env.account.info()
for field in ['object_count', 'container_count', 'bytes_used']:
self.assert_(info[field] >= 0)
if info['container_count'] == len(self.env.containers):
break
if try_count < 5:
time.sleep(1)
self.assertEquals(info['container_count'], len(self.env.containers))
self.assert_status(204)
def testContainerSerializedInfo(self):
container_info = {}
for container in self.env.containers:
info = {'bytes': 0}
info['count'] = random.randint(10, 30)
for i in range(info['count']):
file_item = container.file(Utils.create_name())
bytes = random.randint(1, 32768)
file_item.write_random(bytes)
info['bytes'] += bytes
container_info[container.name] = info
for format_type in ['json', 'xml']:
for a in self.env.account.containers(
parms={'format': format_type}):
self.assert_(a['count'] >= 0)
self.assert_(a['bytes'] >= 0)
headers = dict(self.env.conn.response.getheaders())
if format_type == 'json':
self.assertEquals(headers['content-type'],
'application/json; charset=utf-8')
elif format_type == 'xml':
self.assertEquals(headers['content-type'],
'application/xml; charset=utf-8')
def testListingLimit(self):
limit = load_constraint('account_listing_limit')
for l in (1, 100, limit / 2, limit - 1, limit, limit + 1, limit * 2):
p = {'limit': l}
if l <= limit:
self.assert_(len(self.env.account.containers(parms=p)) <= l)
self.assert_status(200)
else:
self.assertRaises(ResponseError,
self.env.account.containers, parms=p)
self.assert_status(412)
def testContainerListing(self):
a = sorted([c.name for c in self.env.containers])
for format_type in [None, 'json', 'xml']:
b = self.env.account.containers(parms={'format': format_type})
if isinstance(b[0], dict):
b = [x['name'] for x in b]
self.assertEquals(a, b)
def testInvalidAuthToken(self):
hdrs = {'X-Auth-Token': 'bogus_auth_token'}
self.assertRaises(ResponseError, self.env.account.info, hdrs=hdrs)
self.assert_status(401)
def testLastContainerMarker(self):
for format_type in [None, 'json', 'xml']:
containers = self.env.account.containers({'format': format_type})
self.assertEquals(len(containers), len(self.env.containers))
self.assert_status(200)
containers = self.env.account.containers(
parms={'format': format_type, 'marker': containers[-1]})
self.assertEquals(len(containers), 0)
if format_type is None:
self.assert_status(204)
else:
self.assert_status(200)
def testMarkerLimitContainerList(self):
for format_type in [None, 'json', 'xml']:
for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
'abc123', 'mnop', 'xyz']:
limit = random.randint(2, 9)
containers = self.env.account.containers(
parms={'format': format_type,
'marker': marker,
'limit': limit})
self.assert_(len(containers) <= limit)
if containers:
if isinstance(containers[0], dict):
containers = [x['name'] for x in containers]
self.assert_(locale.strcoll(containers[0], marker) > 0)
def testContainersOrderedByName(self):
for format_type in [None, 'json', 'xml']:
containers = self.env.account.containers(
parms={'format': format_type})
if isinstance(containers[0], dict):
containers = [x['name'] for x in containers]
self.assertEquals(sorted(containers, cmp=locale.strcoll),
containers)
class TestAccountUTF8(Base2, TestAccount):
set_up = False
class TestAccountNoContainersEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
class TestAccountNoContainers(Base):
env = TestAccountNoContainersEnv
set_up = False
def testGetRequest(self):
for format_type in [None, 'json', 'xml']:
self.assert_(not self.env.account.containers(
parms={'format': format_type}))
if format_type is None:
self.assert_status(204)
else:
self.assert_status(200)
class TestAccountNoContainersUTF8(Base2, TestAccountNoContainers):
set_up = False
class TestContainerEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.file_count = 10
cls.file_size = 128
cls.files = list()
for x in range(cls.file_count):
file_item = cls.container.file(Utils.create_name())
file_item.write_random(cls.file_size)
cls.files.append(file_item.name)
class TestContainerDev(Base):
env = TestContainerEnv
set_up = False
class TestContainerDevUTF8(Base2, TestContainerDev):
set_up = False
class TestContainer(Base):
env = TestContainerEnv
set_up = False
def testContainerNameLimit(self):
limit = load_constraint('max_container_name_length')
for l in (limit - 100, limit - 10, limit - 1, limit,
limit + 1, limit + 10, limit + 100):
cont = self.env.account.container('a' * l)
if l <= limit:
self.assert_(cont.create())
self.assert_status(201)
else:
self.assert_(not cont.create())
self.assert_status(400)
def testFileThenContainerDelete(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
file_item = cont.file(Utils.create_name())
self.assert_(file_item.write_random())
self.assert_(file_item.delete())
self.assert_status(204)
self.assert_(file_item.name not in cont.files())
self.assert_(cont.delete())
self.assert_status(204)
self.assert_(cont.name not in self.env.account.containers())
def testFileListingLimitMarkerPrefix(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
files = sorted([Utils.create_name() for x in xrange(10)])
for f in files:
file_item = cont.file(f)
self.assert_(file_item.write_random())
for i in xrange(len(files)):
f = files[i]
for j in xrange(1, len(files) - i):
self.assert_(cont.files(parms={'limit': j, 'marker': f}) ==
files[i + 1: i + j + 1])
self.assert_(cont.files(parms={'marker': f}) == files[i + 1:])
self.assert_(cont.files(parms={'marker': f, 'prefix': f}) == [])
self.assert_(cont.files(parms={'prefix': f}) == [f])
def testPrefixAndLimit(self):
load_constraint('container_listing_limit')
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
prefix_file_count = 10
limit_count = 2
prefixs = ['alpha/', 'beta/', 'kappa/']
prefix_files = {}
for prefix in prefixs:
prefix_files[prefix] = []
for i in range(prefix_file_count):
file_item = cont.file(prefix + Utils.create_name())
file_item.write()
prefix_files[prefix].append(file_item.name)
for format_type in [None, 'json', 'xml']:
for prefix in prefixs:
files = cont.files(parms={'prefix': prefix})
self.assertEquals(files, sorted(prefix_files[prefix]))
for format_type in [None, 'json', 'xml']:
for prefix in prefixs:
files = cont.files(parms={'limit': limit_count,
'prefix': prefix})
self.assertEquals(len(files), limit_count)
for file_item in files:
self.assert_(file_item.startswith(prefix))
def testCreate(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
self.assert_status(201)
self.assert_(cont.name in self.env.account.containers())
def testContainerFileListOnContainerThatDoesNotExist(self):
for format_type in [None, 'json', 'xml']:
container = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, container.files,
parms={'format': format_type})
self.assert_status(404)
def testUtf8Container(self):
valid_utf8 = Utils.create_utf8_name()
invalid_utf8 = valid_utf8[::-1]
container = self.env.account.container(valid_utf8)
self.assert_(container.create(cfg={'no_path_quote': True}))
self.assert_(container.name in self.env.account.containers())
self.assertEquals(container.files(), [])
self.assert_(container.delete())
container = self.env.account.container(invalid_utf8)
self.assert_(not container.create(cfg={'no_path_quote': True}))
self.assert_status(412)
self.assertRaises(ResponseError, container.files,
cfg={'no_path_quote': True})
self.assert_status(412)
def testCreateOnExisting(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
self.assert_status(201)
self.assert_(cont.create())
self.assert_status(202)
def testSlashInName(self):
if Utils.create_name == Utils.create_utf8_name:
cont_name = list(unicode(Utils.create_name(), 'utf-8'))
else:
cont_name = list(Utils.create_name())
cont_name[random.randint(2, len(cont_name) - 2)] = '/'
cont_name = ''.join(cont_name)
if Utils.create_name == Utils.create_utf8_name:
cont_name = cont_name.encode('utf-8')
cont = self.env.account.container(cont_name)
self.assert_(not cont.create(cfg={'no_path_quote': True}),
'created container with name %s' % (cont_name))
self.assert_status(404)
self.assert_(cont.name not in self.env.account.containers())
def testDelete(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
self.assert_status(201)
self.assert_(cont.delete())
self.assert_status(204)
self.assert_(cont.name not in self.env.account.containers())
def testDeleteOnContainerThatDoesNotExist(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(not cont.delete())
self.assert_status(404)
def testDeleteOnContainerWithFiles(self):
cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create())
file_item = cont.file(Utils.create_name())
file_item.write_random(self.env.file_size)
self.assert_(file_item.name in cont.files())
self.assert_(not cont.delete())
self.assert_status(409)
def testFileCreateInContainerThatDoesNotExist(self):
file_item = File(self.env.conn, self.env.account, Utils.create_name(),
Utils.create_name())
self.assertRaises(ResponseError, file_item.write)
self.assert_status(404)
def testLastFileMarker(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files({'format': format_type})
self.assertEquals(len(files), len(self.env.files))
self.assert_status(200)
files = self.env.container.files(
parms={'format': format_type, 'marker': files[-1]})
self.assertEquals(len(files), 0)
if format_type is None:
self.assert_status(204)
else:
self.assert_status(200)
def testContainerFileList(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files(parms={'format': format_type})
self.assert_status(200)
if isinstance(files[0], dict):
files = [x['name'] for x in files]
for file_item in self.env.files:
self.assert_(file_item in files)
for file_item in files:
self.assert_(file_item in self.env.files)
def testMarkerLimitFileList(self):
for format_type in [None, 'json', 'xml']:
for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
'abc123', 'mnop', 'xyz']:
limit = random.randint(2, self.env.file_count - 1)
files = self.env.container.files(parms={'format': format_type,
'marker': marker,
'limit': limit})
if not files:
continue
if isinstance(files[0], dict):
files = [x['name'] for x in files]
self.assert_(len(files) <= limit)
if files:
if isinstance(files[0], dict):
files = [x['name'] for x in files]
self.assert_(locale.strcoll(files[0], marker) > 0)
def testFileOrder(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files(parms={'format': format_type})
if isinstance(files[0], dict):
files = [x['name'] for x in files]
self.assertEquals(sorted(files, cmp=locale.strcoll), files)
def testContainerInfo(self):
info = self.env.container.info()
self.assert_status(204)
self.assertEquals(info['object_count'], self.env.file_count)
self.assertEquals(info['bytes_used'],
self.env.file_count * self.env.file_size)
def testContainerInfoOnContainerThatDoesNotExist(self):
container = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, container.info)
self.assert_status(404)
def testContainerFileListWithLimit(self):
for format_type in [None, 'json', 'xml']:
files = self.env.container.files(parms={'format': format_type,
'limit': 2})
self.assertEquals(len(files), 2)
def testTooLongName(self):
cont = self.env.account.container('x' * 257)
self.assert_(not cont.create(),
'created container with name %s' % (cont.name))
self.assert_status(400)
def testContainerExistenceCachingProblem(self):
cont = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, cont.files)
self.assert_(cont.create())
cont.files()
cont = self.env.account.container(Utils.create_name())
self.assertRaises(ResponseError, cont.files)
self.assert_(cont.create())
file_item = cont.file(Utils.create_name())
file_item.write_random()
class TestContainerUTF8(Base2, TestContainer):
set_up = False
class TestContainerPathsEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.file_size = 8
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.files = [
'/file1',
'/file A',
'/dir1/',
'/dir2/',
'/dir1/file2',
'/dir1/subdir1/',
'/dir1/subdir2/',
'/dir1/subdir1/file2',
'/dir1/subdir1/file3',
'/dir1/subdir1/file4',
'/dir1/subdir1/subsubdir1/',
'/dir1/subdir1/subsubdir1/file5',
'/dir1/subdir1/subsubdir1/file6',
'/dir1/subdir1/subsubdir1/file7',
'/dir1/subdir1/subsubdir1/file8',
'/dir1/subdir1/subsubdir2/',
'/dir1/subdir1/subsubdir2/file9',
'/dir1/subdir1/subsubdir2/file0',
'file1',
'dir1/',
'dir2/',
'dir1/file2',
'dir1/subdir1/',
'dir1/subdir2/',
'dir1/subdir1/file2',
'dir1/subdir1/file3',
'dir1/subdir1/file4',
'dir1/subdir1/subsubdir1/',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file6',
'dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir2/',
'dir1/subdir1/subsubdir2/file9',
'dir1/subdir1/subsubdir2/file0',
'dir1/subdir with spaces/',
'dir1/subdir with spaces/file B',
'dir1/subdir+with{whatever/',
'dir1/subdir+with{whatever/file D',
]
stored_files = set()
for f in cls.files:
file_item = cls.container.file(f)
if f.endswith('/'):
file_item.write(hdrs={'Content-Type': 'application/directory'})
else:
file_item.write_random(cls.file_size,
hdrs={'Content-Type':
'application/directory'})
if (normalized_urls):
nfile = '/'.join(filter(None, f.split('/')))
if (f[-1] == '/'):
nfile += '/'
stored_files.add(nfile)
else:
stored_files.add(f)
cls.stored_files = sorted(stored_files)
class TestContainerPaths(Base):
env = TestContainerPathsEnv
set_up = False
def testTraverseContainer(self):
found_files = []
found_dirs = []
def recurse_path(path, count=0):
if count > 10:
raise ValueError('too deep recursion')
for file_item in self.env.container.files(parms={'path': path}):
self.assert_(file_item.startswith(path))
if file_item.endswith('/'):
recurse_path(file_item, count + 1)
found_dirs.append(file_item)
else:
found_files.append(file_item)
recurse_path('')
for file_item in self.env.stored_files:
if file_item.startswith('/'):
self.assert_(file_item not in found_dirs)
self.assert_(file_item not in found_files)
elif file_item.endswith('/'):
self.assert_(file_item in found_dirs)
self.assert_(file_item not in found_files)
else:
self.assert_(file_item in found_files)
self.assert_(file_item not in found_dirs)
found_files = []
found_dirs = []
recurse_path('/')
for file_item in self.env.stored_files:
if not file_item.startswith('/'):
self.assert_(file_item not in found_dirs)
self.assert_(file_item not in found_files)
elif file_item.endswith('/'):
self.assert_(file_item in found_dirs)
self.assert_(file_item not in found_files)
else:
self.assert_(file_item in found_files)
self.assert_(file_item not in found_dirs)
def testContainerListing(self):
for format_type in (None, 'json', 'xml'):
files = self.env.container.files(parms={'format': format_type})
if isinstance(files[0], dict):
files = [str(x['name']) for x in files]
self.assertEquals(files, self.env.stored_files)
for format_type in ('json', 'xml'):
for file_item in self.env.container.files(parms={'format':
format_type}):
self.assert_(int(file_item['bytes']) >= 0)
self.assert_('last_modified' in file_item)
if file_item['name'].endswith('/'):
self.assertEquals(file_item['content_type'],
'application/directory')
def testStructure(self):
def assert_listing(path, file_list):
files = self.env.container.files(parms={'path': path})
self.assertEquals(sorted(file_list, cmp=locale.strcoll), files)
if not normalized_urls:
assert_listing('/', ['/dir1/', '/dir2/', '/file1', '/file A'])
assert_listing('/dir1',
['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
assert_listing('/dir1/',
['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
assert_listing('/dir1/subdir1',
['/dir1/subdir1/subsubdir2/', '/dir1/subdir1/file2',
'/dir1/subdir1/file3', '/dir1/subdir1/file4',
'/dir1/subdir1/subsubdir1/'])
assert_listing('/dir1/subdir2', [])
assert_listing('', ['file1', 'dir1/', 'dir2/'])
else:
assert_listing('', ['file1', 'dir1/', 'dir2/', 'file A'])
assert_listing('dir1', ['dir1/file2', 'dir1/subdir1/',
'dir1/subdir2/', 'dir1/subdir with spaces/',
'dir1/subdir+with{whatever/'])
assert_listing('dir1/subdir1',
['dir1/subdir1/file4', 'dir1/subdir1/subsubdir2/',
'dir1/subdir1/file2', 'dir1/subdir1/file3',
'dir1/subdir1/subsubdir1/'])
assert_listing('dir1/subdir1/subsubdir1',
['dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir1/file6'])
assert_listing('dir1/subdir1/subsubdir1/',
['dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir1/file6'])
assert_listing('dir1/subdir with spaces/',
['dir1/subdir with spaces/file B'])
class TestFileEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.file_size = 128
class TestFileDev(Base):
env = TestFileEnv
set_up = False
class TestFileDevUTF8(Base2, TestFileDev):
set_up = False
class TestFile(Base):
env = TestFileEnv
set_up = False
def testCopy(self):
# makes sure to test encoded characters
source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
file_item = self.env.container.file(source_filename)
metadata = {}
for i in range(1):
metadata[Utils.create_ascii_name()] = Utils.create_name()
data = file_item.write_random()
file_item.sync_metadata(metadata)
dest_cont = self.env.account.container(Utils.create_name())
self.assert_(dest_cont.create())
# copy both from within and across containers
for cont in (self.env.container, dest_cont):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.copy('%s%s' % (prefix, cont), dest_filename)
self.assert_(dest_filename in cont.files())
file_item = cont.file(dest_filename)
self.assert_(data == file_item.read())
self.assert_(file_item.initialize())
self.assert_(metadata == file_item.metadata)
def testCopy404s(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assert_(dest_cont.create())
for prefix in ('', '/'):
# invalid source container
source_cont = self.env.account.container(Utils.create_name())
file_item = source_cont.file(source_filename)
self.assert_(not file_item.copy(
'%s%s' % (prefix, self.env.container),
Utils.create_name()))
self.assert_status(404)
self.assert_(not file_item.copy('%s%s' % (prefix, dest_cont),
Utils.create_name()))
self.assert_status(404)
# invalid source object
file_item = self.env.container.file(Utils.create_name())
self.assert_(not file_item.copy(
'%s%s' % (prefix, self.env.container),
Utils.create_name()))
self.assert_status(404)
self.assert_(not file_item.copy('%s%s' % (prefix, dest_cont),
Utils.create_name()))
self.assert_status(404)
# invalid destination container
file_item = self.env.container.file(source_filename)
self.assert_(not file_item.copy(
'%s%s' % (prefix, Utils.create_name()),
Utils.create_name()))
def testCopyNoDestinationHeader(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
file_item = self.env.container.file(source_filename)
self.assert_(not file_item.copy(Utils.create_name(),
Utils.create_name(),
cfg={'no_destination': True}))
self.assert_status(412)
def testCopyDestinationSlashProblems(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
# no slash
self.assert_(not file_item.copy(Utils.create_name(),
Utils.create_name(),
cfg={'destination': Utils.create_name()}))
self.assert_status(412)
def testCopyFromHeader(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
metadata = {}
for i in range(1):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item.metadata = metadata
data = file_item.write_random()
dest_cont = self.env.account.container(Utils.create_name())
self.assert_(dest_cont.create())
# copy both from within and across containers
for cont in (self.env.container, dest_cont):
# copy both with and without initial slash
for prefix in ('', '/'):
dest_filename = Utils.create_name()
file_item = cont.file(dest_filename)
file_item.write(hdrs={'X-Copy-From': '%s%s/%s' % (
prefix, self.env.container.name, source_filename)})
self.assert_(dest_filename in cont.files())
file_item = cont.file(dest_filename)
self.assert_(data == file_item.read())
self.assert_(file_item.initialize())
self.assert_(metadata == file_item.metadata)
def testCopyFromHeader404s(self):
source_filename = Utils.create_name()
file_item = self.env.container.file(source_filename)
file_item.write_random()
for prefix in ('', '/'):
# invalid source container
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From': '%s%s/%s' %
(prefix,
Utils.create_name(), source_filename)})
self.assert_status(404)
# invalid source object
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From': '%s%s/%s' %
(prefix,
self.env.container.name, Utils.create_name())})
self.assert_status(404)
# invalid destination container
dest_cont = self.env.account.container(Utils.create_name())
file_item = dest_cont.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write,
hdrs={'X-Copy-From': '%s%s/%s' %
(prefix,
self.env.container.name, source_filename)})
self.assert_status(404)
def testNameLimit(self):
limit = load_constraint('max_object_name_length')
for l in (1, 10, limit / 2, limit - 1, limit, limit + 1, limit * 2):
file_item = self.env.container.file('a' * l)
if l <= limit:
self.assert_(file_item.write())
self.assert_status(201)
else:
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
def testQuestionMarkInName(self):
if Utils.create_name == Utils.create_ascii_name:
file_name = list(Utils.create_name())
file_name[random.randint(2, len(file_name) - 2)] = '?'
file_name = "".join(file_name)
else:
file_name = Utils.create_name(6) + '?' + Utils.create_name(6)
file_item = self.env.container.file(file_name)
self.assert_(file_item.write(cfg={'no_path_quote': True}))
self.assert_(file_name not in self.env.container.files())
self.assert_(file_name.split('?')[0] in self.env.container.files())
def testDeleteThen404s(self):
file_item = self.env.container.file(Utils.create_name())
self.assert_(file_item.write_random())
self.assert_status(201)
self.assert_(file_item.delete())
self.assert_status(204)
file_item.metadata = {Utils.create_ascii_name(): Utils.create_name()}
for method in (file_item.info,
file_item.read,
file_item.sync_metadata,
file_item.delete):
self.assertRaises(ResponseError, method)
self.assert_status(404)
def testBlankMetadataName(self):
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = {'': Utils.create_name()}
self.assertRaises(ResponseError, file_item.write_random)
self.assert_status(400)
def testMetadataNumberLimit(self):
number_limit = load_constraint('max_meta_count')
size_limit = load_constraint('max_meta_overall_size')
for i in (number_limit - 10, number_limit - 1, number_limit,
number_limit + 1, number_limit + 10, number_limit + 100):
j = size_limit / (i * 2)
size = 0
metadata = {}
while len(metadata.keys()) < i:
key = Utils.create_ascii_name()
val = Utils.create_name()
if len(key) > j:
key = key[:j]
val = val[:j]
size += len(key) + len(val)
metadata[key] = val
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = metadata
if i <= number_limit:
self.assert_(file_item.write())
self.assert_status(201)
self.assert_(file_item.sync_metadata())
self.assert_status((201, 202))
else:
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
file_item.metadata = {}
self.assert_(file_item.write())
self.assert_status(201)
file_item.metadata = metadata
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(400)
def testContentTypeGuessing(self):
file_types = {'wav': 'audio/x-wav', 'txt': 'text/plain',
'zip': 'application/zip'}
container = self.env.account.container(Utils.create_name())
self.assert_(container.create())
for i in file_types.keys():
file_item = container.file(Utils.create_name() + '.' + i)
file_item.write('', cfg={'no_content_type': True})
file_types_read = {}
for i in container.files(parms={'format': 'json'}):
file_types_read[i['name'].split('.')[1]] = i['content_type']
self.assertEquals(file_types, file_types_read)
def testRangedGets(self):
file_length = 10000
range_size = file_length / 10
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random(file_length)
for i in range(0, file_length, range_size):
range_string = 'bytes=%d-%d' % (i, i + range_size - 1)
hdrs = {'Range': range_string}
self.assert_(data[i: i + range_size] == file_item.read(hdrs=hdrs),
range_string)
range_string = 'bytes=-%d' % (i)
hdrs = {'Range': range_string}
if i == 0:
# RFC 2616 14.35.1
# "If a syntactically valid byte-range-set includes ... at
# least one suffix-byte-range-spec with a NON-ZERO
# suffix-length, then the byte-range-set is satisfiable.
                # Otherwise, the byte-range-set is unsatisfiable."
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(416)
else:
self.assertEquals(file_item.read(hdrs=hdrs), data[-i:])
range_string = 'bytes=%d-' % (i)
hdrs = {'Range': range_string}
self.assert_(file_item.read(hdrs=hdrs) == data[i - file_length:],
range_string)
range_string = 'bytes=%d-%d' % (file_length + 1000, file_length + 2000)
hdrs = {'Range': range_string}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(416)
range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000)
hdrs = {'Range': range_string}
self.assert_(file_item.read(hdrs=hdrs) == data[-1000:], range_string)
        # a Range header without the "bytes=" prefix is syntactically
        # invalid and is ignored, so the whole entity is returned
        range_string = '0-4'
        hdrs = {'Range': range_string}
        self.assert_(file_item.read(hdrs=hdrs) == data, range_string)
# RFC 2616 14.35.1
# "If the entity is shorter than the specified suffix-length, the
# entire entity-body is used."
range_string = 'bytes=-%d' % (file_length + 10)
hdrs = {'Range': range_string}
self.assert_(file_item.read(hdrs=hdrs) == data, range_string)
def testRangedGetsWithLWSinHeader(self):
#Skip this test until webob 1.2 can tolerate LWS in Range header.
file_length = 10000
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random(file_length)
for r in ('BYTES=0-999', 'bytes = 0-999', 'BYTES = 0 - 999',
'bytes = 0 - 999', 'bytes=0 - 999', 'bytes=0-999 '):
self.assert_(file_item.read(hdrs={'Range': r}) == data[0:1000])
def testFileSizeLimit(self):
limit = load_constraint('max_file_size')
tsecs = 3
for i in (limit - 100, limit - 10, limit - 1, limit, limit + 1,
limit + 10, limit + 100):
file_item = self.env.container.file(Utils.create_name())
if i <= limit:
self.assert_(timeout(tsecs, file_item.write,
cfg={'set_content_length': i}))
else:
self.assertRaises(ResponseError, timeout, tsecs,
file_item.write,
cfg={'set_content_length': i})
def testNoContentLengthForPut(self):
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.write, 'testing',
cfg={'no_content_length': True})
self.assert_status(411)
def testDelete(self):
file_item = self.env.container.file(Utils.create_name())
file_item.write_random(self.env.file_size)
self.assert_(file_item.name in self.env.container.files())
self.assert_(file_item.delete())
self.assert_(file_item.name not in self.env.container.files())
def testBadHeaders(self):
file_length = 100
# no content type on puts should be ok
file_item = self.env.container.file(Utils.create_name())
file_item.write_random(file_length, cfg={'no_content_type': True})
self.assert_status(201)
        # non-numeric content length must be rejected
self.assertRaises(ResponseError, file_item.write_random, file_length,
hdrs={'Content-Length': 'X'},
cfg={'no_content_length': True})
self.assert_status(400)
# bad request types
#for req in ('LICK', 'GETorHEAD_base', 'container_info',
# 'best_response'):
for req in ('LICK', 'GETorHEAD_base'):
self.env.account.conn.make_request(req)
self.assert_status(405)
# bad range headers
self.assert_(len(file_item.read(hdrs={'Range': 'parsecs=8-12'})) ==
file_length)
self.assert_status(200)
def testMetadataLengthLimits(self):
key_limit = load_constraint('max_meta_name_length')
value_limit = load_constraint('max_meta_value_length')
lengths = [[key_limit, value_limit], [key_limit, value_limit + 1],
[key_limit + 1, value_limit], [key_limit, 0],
[key_limit, value_limit * 10],
[key_limit * 10, value_limit]]
for l in lengths:
metadata = {'a' * l[0]: 'b' * l[1]}
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = metadata
if l[0] <= key_limit and l[1] <= value_limit:
self.assert_(file_item.write())
self.assert_status(201)
self.assert_(file_item.sync_metadata())
else:
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
file_item.metadata = {}
self.assert_(file_item.write())
self.assert_status(201)
file_item.metadata = metadata
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(400)
def testEtagWayoff(self):
file_item = self.env.container.file(Utils.create_name())
hdrs = {'etag': 'reallylonganddefinitelynotavalidetagvalue'}
self.assertRaises(ResponseError, file_item.write_random, hdrs=hdrs)
self.assert_status(422)
def testFileCreate(self):
for i in range(10):
file_item = self.env.container.file(Utils.create_name())
data = file_item.write_random()
self.assert_status(201)
self.assert_(data == file_item.read())
self.assert_status(200)
def testHead(self):
file_name = Utils.create_name()
content_type = Utils.create_name()
file_item = self.env.container.file(file_name)
file_item.content_type = content_type
file_item.write_random(self.env.file_size)
md5 = file_item.md5
file_item = self.env.container.file(file_name)
info = file_item.info()
self.assert_status(200)
self.assertEquals(info['content_length'], self.env.file_size)
self.assertEquals(info['etag'], md5)
self.assertEquals(info['content_type'], content_type)
self.assert_('last_modified' in info)
def testDeleteOfFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.delete)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.delete)
self.assert_status(404)
def testHeadOnFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.info)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.info)
self.assert_status(404)
def testMetadataOnPost(self):
file_item = self.env.container.file(Utils.create_name())
file_item.write_random(self.env.file_size)
for i in range(10):
metadata = {}
for j in range(10):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item.metadata = metadata
self.assert_(file_item.sync_metadata())
self.assert_status((201, 202))
file_item = self.env.container.file(file_item.name)
self.assert_(file_item.initialize())
self.assert_status(200)
self.assertEquals(file_item.metadata, metadata)
def testGetContentType(self):
file_name = Utils.create_name()
content_type = Utils.create_name()
file_item = self.env.container.file(file_name)
file_item.content_type = content_type
file_item.write_random()
file_item = self.env.container.file(file_name)
file_item.read()
self.assertEquals(content_type, file_item.content_type)
def testGetOnFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.read)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
self.assertRaises(ResponseError, file_item.read)
self.assert_status(404)
def testPostOnFileThatDoesNotExist(self):
# in container that exists
file_item = self.env.container.file(Utils.create_name())
file_item.metadata['Field'] = 'Value'
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(404)
# in container that does not exist
container = self.env.account.container(Utils.create_name())
file_item = container.file(Utils.create_name())
file_item.metadata['Field'] = 'Value'
self.assertRaises(ResponseError, file_item.sync_metadata)
self.assert_status(404)
def testMetadataOnPut(self):
for i in range(10):
metadata = {}
for j in range(10):
metadata[Utils.create_ascii_name()] = Utils.create_name()
file_item = self.env.container.file(Utils.create_name())
file_item.metadata = metadata
file_item.write_random(self.env.file_size)
file_item = self.env.container.file(file_item.name)
self.assert_(file_item.initialize())
self.assert_status(200)
self.assertEquals(file_item.metadata, metadata)
def testSerialization(self):
container = self.env.account.container(Utils.create_name())
self.assert_(container.create())
files = []
for i in (0, 1, 10, 100, 1000, 10000):
files.append({'name': Utils.create_name(),
'content_type': Utils.create_name(), 'bytes': i})
write_time = time.time()
for f in files:
file_item = container.file(f['name'])
file_item.content_type = f['content_type']
file_item.write_random(f['bytes'])
f['hash'] = file_item.md5
f['json'] = False
f['xml'] = False
write_time = time.time() - write_time
for format_type in ['json', 'xml']:
for file_item in container.files(parms={'format': format_type}):
found = False
for f in files:
if f['name'] != file_item['name']:
continue
self.assertEquals(file_item['content_type'],
f['content_type'])
self.assertEquals(int(file_item['bytes']), f['bytes'])
d = datetime.strptime(
file_item['last_modified'].split('.')[0],
"%Y-%m-%dT%H:%M:%S")
lm = time.mktime(d.timetuple())
if 'last_modified' in f:
self.assertEquals(f['last_modified'], lm)
else:
f['last_modified'] = lm
f[format_type] = True
found = True
self.assert_(found, 'Unexpected file %s found in '
'%s listing' % (file_item['name'], format_type))
headers = dict(self.env.conn.response.getheaders())
if format_type == 'json':
self.assertEquals(headers['content-type'],
'application/json; charset=utf-8')
elif format_type == 'xml':
self.assertEquals(headers['content-type'],
'application/xml; charset=utf-8')
lm_diff = max([f['last_modified'] for f in files]) -\
min([f['last_modified'] for f in files])
self.assert_(lm_diff < write_time + 1, 'Diff in last '
'modified times should be less than time to write files')
for f in files:
for format_type in ['json', 'xml']:
self.assert_(f[format_type], 'File %s not found in %s listing'
% (f['name'], format_type))
def testStackedOverwrite(self):
file_item = self.env.container.file(Utils.create_name())
for i in range(1, 11):
data = file_item.write_random(512)
file_item.write(data)
self.assert_(file_item.read() == data)
def testTooLongName(self):
file_item = self.env.container.file('x' * 1025)
self.assertRaises(ResponseError, file_item.write)
self.assert_status(400)
def testZeroByteFile(self):
file_item = self.env.container.file(Utils.create_name())
self.assert_(file_item.write(''))
self.assert_(file_item.name in self.env.container.files())
self.assert_(file_item.read() == '')
def testEtagResponse(self):
file_item = self.env.container.file(Utils.create_name())
data = StringIO.StringIO(file_item.write_random(512))
etag = File.compute_md5sum(data)
headers = dict(self.env.conn.response.getheaders())
self.assert_('etag' in headers.keys())
header_etag = headers['etag'].strip('"')
self.assertEquals(etag, header_etag)
def testChunkedPut(self):
if (web_front_end == 'apache2'):
raise SkipTest()
data = File.random_data(10000)
etag = File.compute_md5sum(data)
for i in (1, 10, 100, 1000):
file_item = self.env.container.file(Utils.create_name())
for j in chunks(data, i):
file_item.chunked_write(j)
self.assert_(file_item.chunked_write())
self.assert_(data == file_item.read())
info = file_item.info()
self.assertEquals(etag, info['etag'])
class TestFileUTF8(Base2, TestFile):
set_up = False
class TestDloEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
# avoid getting a prefix that stops halfway through an encoded
# character
prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
cls.segment_prefix = prefix
for letter in ('a', 'b', 'c', 'd', 'e'):
file_item = cls.container.file("%s/seg_lower%s" % (prefix, letter))
file_item.write(letter * 10)
file_item = cls.container.file("%s/seg_upper%s" % (prefix, letter))
file_item.write(letter.upper() * 10)
man1 = cls.container.file("man1")
man1.write('man1-contents',
hdrs={"X-Object-Manifest": "%s/%s/seg_lower" %
(cls.container.name, prefix)})
man1 = cls.container.file("man2")
man1.write('man2-contents',
hdrs={"X-Object-Manifest": "%s/%s/seg_upper" %
(cls.container.name, prefix)})
manall = cls.container.file("manall")
manall.write('manall-contents',
hdrs={"X-Object-Manifest": "%s/%s/seg" %
(cls.container.name, prefix)})
class TestDlo(Base):
env = TestDloEnv
set_up = False
def test_get_manifest(self):
file_item = self.env.container.file('man1')
file_contents = file_item.read()
self.assertEqual(
file_contents,
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee")
file_item = self.env.container.file('man2')
file_contents = file_item.read()
self.assertEqual(
file_contents,
"AAAAAAAAAABBBBBBBBBBCCCCCCCCCCDDDDDDDDDDEEEEEEEEEE")
file_item = self.env.container.file('manall')
file_contents = file_item.read()
self.assertEqual(
file_contents,
("aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee" +
"AAAAAAAAAABBBBBBBBBBCCCCCCCCCCDDDDDDDDDDEEEEEEEEEE"))
def test_get_manifest_document_itself(self):
file_item = self.env.container.file('man1')
file_contents = file_item.read(parms={'multipart-manifest': 'get'})
self.assertEqual(file_contents, "man1-contents")
def test_get_range(self):
file_item = self.env.container.file('man1')
file_contents = file_item.read(size=25, offset=8)
self.assertEqual(file_contents, "aabbbbbbbbbbccccccccccddd")
file_contents = file_item.read(size=1, offset=47)
self.assertEqual(file_contents, "e")
def test_get_range_out_of_range(self):
file_item = self.env.container.file('man1')
self.assertRaises(ResponseError, file_item.read, size=7, offset=50)
self.assert_status(416)
def test_copy(self):
# Adding a new segment, copying the manifest, and then deleting the
# segment proves that the new object is really the concatenated
# segments and not just a manifest.
f_segment = self.env.container.file("%s/seg_lowerf" %
(self.env.segment_prefix))
f_segment.write('ffffffffff')
try:
man1_item = self.env.container.file('man1')
man1_item.copy(self.env.container.name, "copied-man1")
finally:
# try not to leave this around for other tests to stumble over
f_segment.delete()
file_item = self.env.container.file('copied-man1')
file_contents = file_item.read()
self.assertEqual(
file_contents,
"aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff")
def test_dlo_if_match_get(self):
manifest = self.env.container.file("man1")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.read,
hdrs={'If-Match': 'not-%s' % etag})
self.assert_status(412)
manifest.read(hdrs={'If-Match': etag})
self.assert_status(200)
def test_dlo_if_none_match_get(self):
manifest = self.env.container.file("man1")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.read,
hdrs={'If-None-Match': etag})
self.assert_status(304)
manifest.read(hdrs={'If-None-Match': "not-%s" % etag})
self.assert_status(200)
def test_dlo_if_match_head(self):
manifest = self.env.container.file("man1")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.info,
hdrs={'If-Match': 'not-%s' % etag})
self.assert_status(412)
manifest.info(hdrs={'If-Match': etag})
self.assert_status(200)
def test_dlo_if_none_match_head(self):
manifest = self.env.container.file("man1")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.info,
hdrs={'If-None-Match': etag})
self.assert_status(304)
manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
self.assert_status(200)
class TestDloUTF8(Base2, TestDlo):
set_up = False
class TestFileComparisonEnv(object):
@classmethod
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.file_count = 20
cls.file_size = 128
cls.files = list()
for x in range(cls.file_count):
file_item = cls.container.file(Utils.create_name())
file_item.write_random(cls.file_size)
cls.files.append(file_item)
cls.time_old_f1 = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(time.time() - 86400))
cls.time_old_f2 = time.strftime("%A, %d-%b-%y %H:%M:%S GMT",
time.gmtime(time.time() - 86400))
cls.time_old_f3 = time.strftime("%a %b %d %H:%M:%S %Y",
time.gmtime(time.time() - 86400))
cls.time_new = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(time.time() + 86400))
class TestFileComparison(Base):
env = TestFileComparisonEnv
set_up = False
def testIfMatch(self):
for file_item in self.env.files:
hdrs = {'If-Match': file_item.md5}
self.assert_(file_item.read(hdrs=hdrs))
hdrs = {'If-Match': 'bogus'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
def testIfNoneMatch(self):
for file_item in self.env.files:
hdrs = {'If-None-Match': 'bogus'}
self.assert_(file_item.read(hdrs=hdrs))
hdrs = {'If-None-Match': file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
def testIfModifiedSince(self):
for file_item in self.env.files:
hdrs = {'If-Modified-Since': self.env.time_old_f1}
self.assert_(file_item.read(hdrs=hdrs))
hdrs = {'If-Modified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
def testIfUnmodifiedSince(self):
for file_item in self.env.files:
hdrs = {'If-Unmodified-Since': self.env.time_new}
self.assert_(file_item.read(hdrs=hdrs))
hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
def testIfMatchAndUnmodified(self):
for file_item in self.env.files:
hdrs = {'If-Match': file_item.md5,
'If-Unmodified-Since': self.env.time_new}
self.assert_(file_item.read(hdrs=hdrs))
hdrs = {'If-Match': 'bogus',
'If-Unmodified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
hdrs = {'If-Match': file_item.md5,
'If-Unmodified-Since': self.env.time_old_f3}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
def testLastModified(self):
file_name = Utils.create_name()
content_type = Utils.create_name()
file = self.env.container.file(file_name)
file.content_type = content_type
resp = file.write_random_return_resp(self.env.file_size)
put_last_modified = resp.getheader('last-modified')
file = self.env.container.file(file_name)
info = file.info()
self.assert_('last_modified' in info)
last_modified = info['last_modified']
self.assertEqual(put_last_modified, info['last_modified'])
hdrs = {'If-Modified-Since': last_modified}
self.assertRaises(ResponseError, file.read, hdrs=hdrs)
self.assert_status(304)
hdrs = {'If-Unmodified-Since': last_modified}
self.assert_(file.read(hdrs=hdrs))
class TestFileComparisonUTF8(Base2, TestFileComparison):
set_up = False
class TestSloEnv(object):
slo_enabled = None # tri-state: None initially, then True/False
@classmethod
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
if cls.slo_enabled is None:
status = cls.conn.make_request('GET', '/info',
cfg={'verbatim_path': True})
if not (200 <= status <= 299):
# Can't tell if SLO is enabled or not since we're running
# against an old cluster, so let's skip the tests instead of
# possibly having spurious failures.
cls.slo_enabled = False
else:
# Don't bother looking for ValueError here. If something is
# responding to a GET /info request with invalid JSON, then
# the cluster is broken and a test failure will let us know.
cluster_info = json.loads(cls.conn.response.read())
cls.slo_enabled = 'slo' in cluster_info
if not cls.slo_enabled:
return
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
seg_info = {}
for letter, size in (('a', 1024 * 1024),
('b', 1024 * 1024),
('c', 1024 * 1024),
('d', 1024 * 1024),
('e', 1)):
seg_name = "seg_%s" % letter
file_item = cls.container.file(seg_name)
file_item.write(letter * size)
seg_info[seg_name] = {
'size_bytes': size,
'etag': file_item.md5,
'path': '/%s/%s' % (cls.container.name, seg_name)}
file_item = cls.container.file("manifest-abcde")
file_item.write(
json.dumps([seg_info['seg_a'], seg_info['seg_b'],
seg_info['seg_c'], seg_info['seg_d'],
seg_info['seg_e']]),
parms={'multipart-manifest': 'put'})
file_item = cls.container.file('manifest-cd')
cd_json = json.dumps([seg_info['seg_c'], seg_info['seg_d']])
file_item.write(cd_json, parms={'multipart-manifest': 'put'})
cd_etag = hashlib.md5(seg_info['seg_c']['etag'] +
seg_info['seg_d']['etag']).hexdigest()
file_item = cls.container.file("manifest-bcd-submanifest")
file_item.write(
json.dumps([seg_info['seg_b'],
{'etag': cd_etag,
'size_bytes': (seg_info['seg_c']['size_bytes'] +
seg_info['seg_d']['size_bytes']),
'path': '/%s/%s' % (cls.container.name,
'manifest-cd')}]),
parms={'multipart-manifest': 'put'})
bcd_submanifest_etag = hashlib.md5(
seg_info['seg_b']['etag'] + cd_etag).hexdigest()
file_item = cls.container.file("manifest-abcde-submanifest")
file_item.write(
json.dumps([
seg_info['seg_a'],
{'etag': bcd_submanifest_etag,
'size_bytes': (seg_info['seg_b']['size_bytes'] +
seg_info['seg_c']['size_bytes'] +
seg_info['seg_d']['size_bytes']),
'path': '/%s/%s' % (cls.container.name,
'manifest-bcd-submanifest')},
seg_info['seg_e']]),
parms={'multipart-manifest': 'put'})
class TestSlo(Base):
env = TestSloEnv
set_up = False
def setUp(self):
super(TestSlo, self).setUp()
if self.env.slo_enabled is False:
raise SkipTest("SLO not enabled")
elif self.env.slo_enabled is not True:
# just some sanity checking
raise Exception(
"Expected slo_enabled to be True/False, got %r" %
(self.env.slo_enabled,))
def test_slo_get_simple_manifest(self):
file_item = self.env.container.file('manifest-abcde')
file_contents = file_item.read()
self.assertEqual(4 * 1024 * 1024 + 1, len(file_contents))
self.assertEqual('a', file_contents[0])
self.assertEqual('a', file_contents[1024 * 1024 - 1])
self.assertEqual('b', file_contents[1024 * 1024])
self.assertEqual('d', file_contents[-2])
self.assertEqual('e', file_contents[-1])
def test_slo_get_nested_manifest(self):
file_item = self.env.container.file('manifest-abcde-submanifest')
file_contents = file_item.read()
self.assertEqual(4 * 1024 * 1024 + 1, len(file_contents))
self.assertEqual('a', file_contents[0])
self.assertEqual('a', file_contents[1024 * 1024 - 1])
self.assertEqual('b', file_contents[1024 * 1024])
self.assertEqual('d', file_contents[-2])
self.assertEqual('e', file_contents[-1])
def test_slo_ranged_get(self):
file_item = self.env.container.file('manifest-abcde')
file_contents = file_item.read(size=1024 * 1024 + 2,
offset=1024 * 1024 - 1)
self.assertEqual('a', file_contents[0])
self.assertEqual('b', file_contents[1])
self.assertEqual('b', file_contents[-2])
self.assertEqual('c', file_contents[-1])
def test_slo_ranged_submanifest(self):
file_item = self.env.container.file('manifest-abcde-submanifest')
file_contents = file_item.read(size=1024 * 1024 + 2,
offset=1024 * 1024 * 2 - 1)
self.assertEqual('b', file_contents[0])
self.assertEqual('c', file_contents[1])
self.assertEqual('c', file_contents[-2])
self.assertEqual('d', file_contents[-1])
def test_slo_etag_is_hash_of_etags(self):
expected_hash = hashlib.md5()
expected_hash.update(hashlib.md5('a' * 1024 * 1024).hexdigest())
expected_hash.update(hashlib.md5('b' * 1024 * 1024).hexdigest())
expected_hash.update(hashlib.md5('c' * 1024 * 1024).hexdigest())
expected_hash.update(hashlib.md5('d' * 1024 * 1024).hexdigest())
expected_hash.update(hashlib.md5('e').hexdigest())
expected_etag = expected_hash.hexdigest()
file_item = self.env.container.file('manifest-abcde')
self.assertEqual(expected_etag, file_item.info()['etag'])
def test_slo_etag_is_hash_of_etags_submanifests(self):
def hd(x):
return hashlib.md5(x).hexdigest()
expected_etag = hd(hd('a' * 1024 * 1024) +
hd(hd('b' * 1024 * 1024) +
hd(hd('c' * 1024 * 1024) +
hd('d' * 1024 * 1024))) +
hd('e'))
file_item = self.env.container.file('manifest-abcde-submanifest')
self.assertEqual(expected_etag, file_item.info()['etag'])
def test_slo_etag_mismatch(self):
file_item = self.env.container.file("manifest-a-bad-etag")
try:
file_item.write(
json.dumps([{
'size_bytes': 1024 * 1024,
'etag': 'not it',
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
parms={'multipart-manifest': 'put'})
except ResponseError as err:
self.assertEqual(400, err.status)
else:
self.fail("Expected ResponseError but didn't get it")
def test_slo_size_mismatch(self):
file_item = self.env.container.file("manifest-a-bad-size")
try:
file_item.write(
json.dumps([{
'size_bytes': 1024 * 1024 - 1,
'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
parms={'multipart-manifest': 'put'})
except ResponseError as err:
self.assertEqual(400, err.status)
else:
self.fail("Expected ResponseError but didn't get it")
def test_slo_copy(self):
file_item = self.env.container.file("manifest-abcde")
file_item.copy(self.env.container.name, "copied-abcde")
copied = self.env.container.file("copied-abcde")
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
self.assertEqual(4 * 1024 * 1024 + 1, len(copied_contents))
def test_slo_copy_the_manifest(self):
file_item = self.env.container.file("manifest-abcde")
file_item.copy(self.env.container.name, "copied-abcde-manifest-only",
parms={'multipart-manifest': 'get'})
copied = self.env.container.file("copied-abcde-manifest-only")
copied_contents = copied.read(parms={'multipart-manifest': 'get'})
try:
json.loads(copied_contents)
except ValueError:
self.fail("COPY didn't copy the manifest (invalid json on GET)")
def test_slo_get_the_manifest(self):
manifest = self.env.container.file("manifest-abcde")
got_body = manifest.read(parms={'multipart-manifest': 'get'})
self.assertEqual('application/json; charset=utf-8',
manifest.content_type)
try:
json.loads(got_body)
except ValueError:
self.fail("GET with multipart-manifest=get got invalid json")
def test_slo_head_the_manifest(self):
manifest = self.env.container.file("manifest-abcde")
got_info = manifest.info(parms={'multipart-manifest': 'get'})
self.assertEqual('application/json; charset=utf-8',
got_info['content_type'])
def test_slo_if_match_get(self):
manifest = self.env.container.file("manifest-abcde")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.read,
hdrs={'If-Match': 'not-%s' % etag})
self.assert_status(412)
manifest.read(hdrs={'If-Match': etag})
self.assert_status(200)
def test_slo_if_none_match_get(self):
manifest = self.env.container.file("manifest-abcde")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.read,
hdrs={'If-None-Match': etag})
self.assert_status(304)
manifest.read(hdrs={'If-None-Match': "not-%s" % etag})
self.assert_status(200)
def test_slo_if_match_head(self):
manifest = self.env.container.file("manifest-abcde")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.info,
hdrs={'If-Match': 'not-%s' % etag})
self.assert_status(412)
manifest.info(hdrs={'If-Match': etag})
self.assert_status(200)
def test_slo_if_none_match_head(self):
manifest = self.env.container.file("manifest-abcde")
etag = manifest.info()['etag']
self.assertRaises(ResponseError, manifest.info,
hdrs={'If-None-Match': etag})
self.assert_status(304)
manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
self.assert_status(200)
class TestSloUTF8(Base2, TestSlo):
set_up = False
class TestObjectVersioningEnv(object):
versioning_enabled = None # tri-state: None initially, then True/False
@classmethod
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config.get('account',
config['username']))
# avoid getting a prefix that stops halfway through an encoded
# character
prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
cls.versions_container = cls.account.container(prefix + "-versions")
if not cls.versions_container.create():
raise ResponseError(cls.conn.response)
cls.container = cls.account.container(prefix + "-objs")
if not cls.container.create(
hdrs={'X-Versions-Location': cls.versions_container.name}):
raise ResponseError(cls.conn.response)
container_info = cls.container.info()
# if versioning is off, then X-Versions-Location won't persist
cls.versioning_enabled = 'versions' in container_info
class TestObjectVersioning(Base):
env = TestObjectVersioningEnv
set_up = False
def setUp(self):
super(TestObjectVersioning, self).setUp()
if self.env.versioning_enabled is False:
raise SkipTest("Object versioning not enabled")
elif self.env.versioning_enabled is not True:
# just some sanity checking
raise Exception(
"Expected versioning_enabled to be True/False, got %r" %
(self.env.versioning_enabled,))
def test_overwriting(self):
container = self.env.container
versions_container = self.env.versions_container
obj_name = Utils.create_name()
versioned_obj = container.file(obj_name)
versioned_obj.write("aaaaa")
self.assertEqual(0, versions_container.info()['object_count'])
versioned_obj.write("bbbbb")
# the old version got saved off
self.assertEqual(1, versions_container.info()['object_count'])
versioned_obj_name = versions_container.files()[0]
self.assertEqual(
"aaaaa", versions_container.file(versioned_obj_name).read())
# if we overwrite it again, there are two versions
versioned_obj.write("ccccc")
self.assertEqual(2, versions_container.info()['object_count'])
# as we delete things, the old contents return
self.assertEqual("ccccc", versioned_obj.read())
versioned_obj.delete()
self.assertEqual("bbbbb", versioned_obj.read())
versioned_obj.delete()
self.assertEqual("aaaaa", versioned_obj.read())
versioned_obj.delete()
self.assertRaises(ResponseError, versioned_obj.read)
class TestObjectVersioningUTF8(Base2, TestObjectVersioning):
set_up = False
if __name__ == '__main__':
unittest.main()
|
gotostack/swift
|
test/functional/tests.py
|
Python
|
apache-2.0
| 82,395 | 0.000158 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'liste' de la commande 'matelot'."""
from primaires.format.fonctions import supprimer_accents
from primaires.format.tableau import Tableau
from primaires.interpreteur.masque.parametre import Parametre
from secondaires.navigation.equipage.postes.hierarchie import ORDRE
class PrmListe(Parametre):
"""Commande 'matelot liste'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "liste", "list")
self.tronquer = True
self.aide_courte = "liste les matelots de l'équipage"
self.aide_longue = \
"Cette commande liste les matelots de votre équipage. " \
"Elle permet d'obtenir rapidement des informations pratiques " \
"sur le nom du matelot ainsi que l'endroit où il se trouve."
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
salle = personnage.salle
if not hasattr(salle, "navire"):
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
navire = salle.navire
equipage = navire.equipage
if not navire.a_le_droit(personnage, "officier"):
personnage << "|err|Vous ne pouvez donner d'ordre sur ce " \
"navire.|ff|"
return
matelots = tuple((m, m.nom_poste) for m in \
equipage.matelots.values())
matelots += tuple(equipage.joueurs.items())
matelots = sorted(matelots, \
key=lambda couple: ORDRE.index(couple[1]), reverse=True)
if len(matelots) == 0:
personnage << "|err|Votre équipage ne comprend aucun matelot.|ff|"
return
tableau = Tableau()
tableau.ajouter_colonne("Nom")
tableau.ajouter_colonne("Poste")
tableau.ajouter_colonne("Affectation")
for matelot, nom_poste in matelots:
nom = matelot.nom
nom_poste = nom_poste.capitalize()
titre = "Aucune"
if hasattr(matelot, "personnage"):
titre = matelot.personnage.salle.titre_court.capitalize()
tableau.ajouter_ligne(nom, nom_poste, titre)
personnage << tableau.afficher()
|
stormi/tsunami
|
src/secondaires/navigation/commandes/matelot/liste.py
|
Python
|
bsd-3-clause
| 3,827 | 0.00131 |
# -*- test-case-name: mamba.test.test_application mamba.test.test_mamba -*-
# Copyright (c) 2012 - Oscar Campos <oscar.campos@member.fsf.org>
# See LICENSE for more details
"""
.. module: app
:platform: Linux
:synopsis: Mamba Application Manager
.. moduleauthor:: Oscar Campos <oscar.campos@member.fsf.org>
"""
import os
import gc
from twisted.web import http
from twisted.internet import address
from twisted.python.logfile import DailyLogFile
from twisted.python.monkey import MonkeyPatcher
from twisted.python import versions, filepath, log
from mamba.utils import borg
from mamba.http import headers
from mamba.core import packages
from mamba import _version as _mamba_version
from mamba.application import controller, model
_app_ver = versions.Version('Application', 0, 1, 0)
_app_project_ver = versions.Version('Project', 0, 1, 0)
class ApplicationError(Exception):
"""ApplicationError raises when an error occurs
"""
class Mamba(borg.Borg):
"""
    This object holds the global configuration for mamba applications; it
    acts as the application's central object and as a central registry.
    It inherits from :class:`~mamba.utils.borg.Borg`, so you can just
    instantiate a new object of this class and it will share all its
    information between instances.
You create an instance of the :class:`~Mamba` class in your main module
or in your `Twisted` `tac` file:
.. sourcecode:: python
from mamba import Mamba
app = Mamba({'name': 'MyApp', 'description': 'My App', ...})
:param options: options to initialize the application with
:type options: dict
"""
def __init__(self, options=None):
"""Mamba constructor"""
super(Mamba, self).__init__()
if hasattr(self, 'initialized') and self.initialized is True:
return
self.monkey_patched = False
self.development = False
self.already_logging = False
self._mamba_ver = _mamba_version.version.short()
self._ver = _app_ver.short()
self._port = 1936
self._log_file = None
self._project_ver = _app_project_ver.short()
self.name = 'Mamba Webservice v%s' % _mamba_version.version.short()
self.description = (
'Mamba %s is a Web applications framework that works '
            'over Twisted using Jinja2 as GUI enhancement. '
'Mamba has been developed by Oscar Campos '
'<oscar.campos@member.fsf.org>' % _mamba_version.version.short()
)
self.language = os.environ.get('LANG', 'en_EN').split('_')[0]
self.lessjs = False
self._parse_options(options)
# monkey patch twisted
self._monkey_patch()
# register log file if any
self._handle_logging()
# PyPy does not implement set_debug method in gc object
if getattr(options, 'debug', False):
if hasattr(gc, 'set_debug'):
gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_INSTANCES)
else:
log.msg(
                    'Debug is set as True but the gc object is lacking '
                    'the set_debug method'
)
self._header = headers.Headers()
self._header.language = self.language
self._header.description = self.description
self.managers = {
'controller': controller.ControllerManager(),
'model': model.ModelManager(),
'packages': packages.PackagesManager()
}
self.initialized = True
def _handle_logging(self):
"""
Start logging to file if there is some file configuration and we
are not running in development mode
"""
if self.development is False and self._log_file is not None:
self.already_logging = True
log.startLogging(DailyLogFile.fromFullPath(self.log_file))
def _parse_options(self, options):
if options is None:
return
for key in dir(options):
if key.startswith('__'):
continue
if key == 'port':
setattr(self, '_port', getattr(options, key))
elif key == 'version':
setattr(self, '_ver', getattr(options, key))
elif key == 'log_file':
if getattr(options, key) is not None:
log_file = 'logs/{}'.format(getattr(options, key))
setattr(self, '_log_file', log_file)
else:
setattr(self, key, getattr(options, key))
def _monkey_patch(self):
"""
Monkeypatch some parts of the twisted library that are waiting
        for bugfix inclusion in the trunk
"""
if not self.monkey_patched:
# add new method
setattr(http.Request, 'getClientProxyIP', getClientProxyIP)
# patch getClientIP
monkey_patcher = MonkeyPatcher(
(http.Request, 'getClientIP', getClientIPPatch)
)
monkey_patcher.patch()
self.monkey_patched = True
@property
def port(self):
return self._port
@port.setter
def port(self, value):
if type(value) is not int:
raise ApplicationError("Int expected, get %s" % (type(value)))
self._port = value
@property
def log_file(self):
return self._log_file if self._log_file is not None else 'service.log'
@log_file.setter
def log_file(self, file):
path = filepath.FilePath(file)
if not filepath.exists(path.dirname()):
raise ApplicationError('%s' % (
                'Given directory %s does not exist' % path.dirname())
)
self._log_file = file
@property
def project_ver(self):
return self._project_ver
@project_ver.setter
def project_ver(self, ver):
if type(ver) is not versions.Version:
raise ApplicationError('%s expected, get %s' % (
'twisted.python.versions.Version', type(ver))
)
self._project_ver = ver
@property
def mamba_ver(self):
return self._mamba_ver
@mamba_ver.setter
def mamba_ver(self, value):
raise ApplicationError("'mamba_ver' is readonly")
@property
def ver(self):
return self._ver
@ver.setter
def ver(self, value):
raise ApplicationError("'ver' is readonly")
def getClientIPPatch(self):
"""
    Return the IP address of the client who submitted this request. If an
    X-Forwarded-For header is present, the first address it lists is
    returned instead.
    If you need the value of Request.client.host you can use the new
    patched method Request.getClientProxyIP() on Request objects.
    :returns: the client IP address
"""
x_forwarded_for = self.getHeader('x-forwarded-for')
if x_forwarded_for is not None:
return x_forwarded_for.split(', ')[0]
return self.getClientProxyIP()
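# Minimal behavioural sketch of the patch above (hypothetical addresses,
# not part of the original module): with the monkey patch applied, a
# request carrying the header "X-Forwarded-For: 203.0.113.7, 10.0.0.2"
# answers request.getClientIP() with '203.0.113.7' (the first forwarded
# hop), while request.getClientProxyIP() below still returns the host of
# the directly connected peer, e.g. the proxy itself.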
def getClientProxyIP(self):
"""
Return the IP address of the client/proxy who submitted the request.
:returns: the client/proxy IP address or None
"""
if isinstance(self.client, address.IPv4Address):
return self.client.host
return None
__all__ = ['Mamba', 'ApplicationError']
|
DamnWidget/mamba
|
mamba/application/app.py
|
Python
|
gpl-3.0
| 7,315 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2013 Zuza Software Foundation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Pootle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pootle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from __future__ import absolute_import
from django.contrib import auth, messages
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse_lazy
from django.http import Http404
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.views.generic import (CreateView, DeleteView, TemplateView,
UpdateView)
from pootle.core.views import SuperuserRequiredMixin
from .forms import agreement_form_factory
from .models import AbstractPage, LegalPage, StaticPage
class PageModelMixin(object):
"""Mixin used to set the view's page model according to the
`page_type` argument caught in a url pattern.
"""
def dispatch(self, request, *args, **kwargs):
self.page_type = kwargs.get('page_type', None)
self.model = {
'legal': LegalPage,
'static': StaticPage,
}.get(self.page_type)
if self.model is None:
raise Http404
return super(PageModelMixin, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(PageModelMixin, self).get_context_data(**kwargs)
ctx.update({
'has_page_model': True,
})
return ctx
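# Illustrative only: this mixin expects `page_type` to be captured by the
# urlconf. A pattern of roughly this shape is assumed (it is not part of
# this module):
#
#     url(r'^(?P<page_type>legal|static)/add/$',
#         PageCreateView.as_view(),
#         name='staticpages.create'),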
class AdminTemplateView(SuperuserRequiredMixin, TemplateView):
template_name = 'staticpages/admin/page_list.html'
def get_context_data(self, **kwargs):
ctx = super(AdminTemplateView, self).get_context_data(**kwargs)
ctx.update({
'legalpages': LegalPage.objects.all(),
'staticpages': StaticPage.objects.all(),
})
return ctx
class PageCreateView(SuperuserRequiredMixin, PageModelMixin, CreateView):
success_url = reverse_lazy('staticpages.admin')
template_name = 'staticpages/admin/page_create.html'
def get_initial(self):
        initial = super(PageCreateView, self).get_initial()
next_page_number = AbstractPage.max_pk() + 1
initial.update({
'title': _('Page Title'),
            'virtual_path': _('page-%d') % next_page_number,
})
return initial
class PageUpdateView(SuperuserRequiredMixin, PageModelMixin, UpdateView):
success_url = reverse_lazy('staticpages.admin')
template_name = 'staticpages/admin/page_update.html'
def get_context_data(self, **kwargs):
ctx = super(PageUpdateView, self).get_context_data(**kwargs)
ctx.update({
'show_delete': True,
'page_type': self.page_type,
})
return ctx
class PageDeleteView(SuperuserRequiredMixin, PageModelMixin, DeleteView):
success_url = reverse_lazy('staticpages.admin')
def display_page(request, virtual_path):
"""Displays an active page defined in `virtual_path`."""
page = None
for page_model in AbstractPage.__subclasses__():
try:
page = page_model.objects.live(request.user).get(
virtual_path=virtual_path,
)
except ObjectDoesNotExist:
pass
if page is None:
raise Http404
if page.url:
return redirect(page.url)
if request.user.is_superuser and not page.active:
msg = _('This page is inactive and visible to administrators '
'only. You can activate it by <a href="%s">editing its '
                'properties</a>') % page.get_edit_url()
messages.warning(request, msg)
template_name = 'staticpages/page_display.html'
if 'HTTP_X_FANCYBOX' in request.META:
template_name = 'staticpages/_body.html'
ctx = {
'page': page,
}
return render_to_response(template_name, ctx, RequestContext(request))
def legal_agreement(request):
"""Displays the pending documents to be agreed by the current user."""
pending_pages = LegalPage.objects.pending_user_agreement(request.user)
form_class = agreement_form_factory(pending_pages, request.user)
if request.method == 'POST':
form = form_class(request.POST)
if form.is_valid():
# The user agreed, let's record the specific agreements
# and redirect to the next page
form.save()
redirect_to = request.POST.get(auth.REDIRECT_FIELD_NAME, '/')
return redirect(redirect_to)
else:
form = form_class()
ctx = {
'form': form,
'next': request.GET.get(auth.REDIRECT_FIELD_NAME, ''),
}
return render_to_response('staticpages/agreement.html', ctx,
RequestContext(request))
|
ttreeagency/PootleTypo3Org
|
pootle/apps/staticpages/views.py
|
Python
|
gpl-2.0
| 5,490 | 0 |
def solve(N, R, P, S):
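    """Recursively build a rock/paper/scissors tournament lineup.

    Commentary added for clarity (not in the original submission): a
    bracket with N rounds has 2**N players, so its lineup is the
    concatenation of two sub-brackets of N - 1 rounds each. The R/P/S
    counts are split as evenly as possible between the two halves and
    each half is solved recursively; N == 1 and N == 2 are enumerated
    directly as base cases, and clearly infeasible counts are rejected
    up front with "IMPOSSIBLE".
    """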
if max([R, P, S]) > 2**(N-1): return "IMPOSSIBLE"
if N > 2 and max([R, P, S]) == 2**(N-1): return "IMPOSSIBLE"
min_val = min([R, P, S])
rep = 2**N//3
if min_val < rep: return "IMPOSSIBLE"
if N == 1:
tmp = ""
if P: tmp += "P"
if R: tmp += "R"
if S: tmp += "S"
return tmp
elif N == 2:
if P == 2: return "PRPS"
elif R == 2: return "PRRS"
else: return "PSRS"
else:
preP = P//2
preR = R//2
preS = S//2
if preP < P-preP: preP += 1
elif preR < R-preR: preR += 1
else: preS += 1
ans = solve(N-1, preR, preP, preS) + solve(N-1, R-preR, P-preP, S-preS)
return ans
for case in range(1, eval(input()) + 1):
N, R, P, S = map(int, input().split())
print("Case #{}: {}".format(case, solve(N, R, P, S)))
|
zuun77/givemegoogletshirts
|
codejam/2016/Round2/q1.py
|
Python
|
apache-2.0
| 877 | 0.013683 |
# -*- coding: utf-8 -*-
# Author: Alexandre Fayolle
# Copyright 2013 Camptocamp SA
# Author: Damien Crier
# Copyright 2015 Camptocamp SA
# © 2015 Eficent Business and IT Consulting Services S.L. -
# Jordi Ballester Alomar
# © 2015 Serpent Consulting Services Pvt. Ltd. - Sudhir Arya
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from . import purchase
from . import invoice
|
SerpentCS/purchase-workflow
|
purchase_order_line_sequence/models/__init__.py
|
Python
|
agpl-3.0
| 413 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pdb
import unittest
from datetime import date
from testing_utils import setupTestDB, fake_response_from_file
from scrapy.http import Response, Request, HtmlResponse
from sgfSpider.dbsgf import DBsgf, DBNewsItem
from sgfSpider.spiders.igokisen import IgokisenSpider
class TestIgokisenSpider(unittest.TestCase):
def setUp(self):
setupTestDB()
self.spider = IgokisenSpider()
def testIgokisenNewsParsing(self):
results = self.spider.parse(fake_response_from_file('Go_Topics.html'))
# there should be 48 items
for x in range(48):
results.next()
dbitems = DBsgf().session.query(DBNewsItem).order_by(DBNewsItem.date).all()
self.assertEqual(len(dbitems), 48)
item = dbitems[7]
self.assertEqual(item.date.strftime('%Y-%m-%d'), '2015-04-02')
self.assertEqual(item.game, 'GS Caltex Cup')
self.assertEqual(item.link, 'file:///var/folders/08/1yh0yp1955z8rg6jdhrps2vw0000gn/T/kr/gs.html')
self.assertEqual(item.nation,'Korea')
self.assertEqual(item.site, 'igokisen')
def testIgokisenGameParsing(self):
results = self.spider.parseTournamentGames(fake_response_from_file('Gosei.html'))
urls = []
# there should be 4 items
urls.extend(results.next()['file_urls'])
urls.extend(results.next()['file_urls'])
urls.extend(results.next()['file_urls'])
urls.extend(results.next()['file_urls'])
self.assertEqual(sorted(urls), [
u'http://igokisen.web.fc2.com/jp/sgf/40goseit1.sgf',
u'http://igokisen.web.fc2.com/jp/sgf/40goseit2.sgf',
u'http://igokisen.web.fc2.com/jp/sgf/40goseit3.sgf',
u'http://igokisen.web.fc2.com/jp/sgf/40goseit4.sgf'
])
if __name__ == '__main__':
unittest.main()
|
dhodges/sgfspider
|
tests/test_igokisen.py
|
Python
|
mit
| 1,778 | 0.006187 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/krl1to5/Work/FULL/Sequence-ToolKit/2016/resources/ui/gensec/process/tl.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_process(object):
def setupUi(self, process):
process.setObjectName("process")
process.resize(680, 164)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(process.sizePolicy().hasHeightForWidth())
process.setSizePolicy(sizePolicy)
process.setMinimumSize(QtCore.QSize(0, 164))
process.setMaximumSize(QtCore.QSize(16777215, 164))
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(process)
self.horizontalLayout_8.setSpacing(12)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.form_area = QtWidgets.QFrame(process)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.form_area.sizePolicy().hasHeightForWidth())
self.form_area.setSizePolicy(sizePolicy)
self.form_area.setFrameShape(QtWidgets.QFrame.Box)
self.form_area.setFrameShadow(QtWidgets.QFrame.Raised)
self.form_area.setObjectName("form_area")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.form_area)
self.verticalLayout_2.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_2.setSpacing(12)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.layout = QtWidgets.QHBoxLayout()
self.layout.setSpacing(15)
self.layout.setObjectName("layout")
self.layout_2 = QtWidgets.QHBoxLayout()
self.layout_2.setObjectName("layout_2")
self.final_temperature_label = QtWidgets.QLabel(self.form_area)
self.final_temperature_label.setObjectName("final_temperature_label")
self.layout_2.addWidget(self.final_temperature_label)
self.final_temperature = QtWidgets.QDoubleSpinBox(self.form_area)
self.final_temperature.setMinimumSize(QtCore.QSize(80, 28))
self.final_temperature.setMaximumSize(QtCore.QSize(80, 16777215))
self.final_temperature.setMaximum(600.0)
self.final_temperature.setObjectName("final_temperature")
self.layout_2.addWidget(self.final_temperature)
self.layout.addLayout(self.layout_2)
self.layout_3 = QtWidgets.QHBoxLayout()
self.layout_3.setObjectName("layout_3")
self.time_at_final_temp_label = QtWidgets.QLabel(self.form_area)
self.time_at_final_temp_label.setObjectName("time_at_final_temp_label")
self.layout_3.addWidget(self.time_at_final_temp_label)
self.time_at_final_temp = QtWidgets.QDoubleSpinBox(self.form_area)
self.time_at_final_temp.setMinimumSize(QtCore.QSize(80, 28))
self.time_at_final_temp.setMaximumSize(QtCore.QSize(80, 16777215))
self.time_at_final_temp.setMaximum(99999.0)
self.time_at_final_temp.setObjectName("time_at_final_temp")
self.layout_3.addWidget(self.time_at_final_temp)
self.layout.addLayout(self.layout_3)
spacerItem = QtWidgets.QSpacerItem(0, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout.addItem(spacerItem)
self.verticalLayout_2.addLayout(self.layout)
self.layout_4 = QtWidgets.QHBoxLayout()
self.layout_4.setSpacing(15)
self.layout_4.setObjectName("layout_4")
self.layout_5 = QtWidgets.QHBoxLayout()
self.layout_5.setObjectName("layout_5")
self.channels_label = QtWidgets.QLabel(self.form_area)
self.channels_label.setObjectName("channels_label")
self.layout_5.addWidget(self.channels_label)
self.channels = QtWidgets.QSpinBox(self.form_area)
self.channels.setMinimumSize(QtCore.QSize(55, 28))
self.channels.setMaximumSize(QtCore.QSize(55, 16777215))
self.channels.setMaximum(512)
self.channels.setObjectName("channels")
self.layout_5.addWidget(self.channels)
self.layout_4.addLayout(self.layout_5)
self.layout_6 = QtWidgets.QHBoxLayout()
self.layout_6.setObjectName("layout_6")
self.heating_rate_label = QtWidgets.QLabel(self.form_area)
self.heating_rate_label.setObjectName("heating_rate_label")
self.layout_6.addWidget(self.heating_rate_label)
self.heating_rate = QtWidgets.QDoubleSpinBox(self.form_area)
self.heating_rate.setMinimumSize(QtCore.QSize(80, 28))
self.heating_rate.setMaximumSize(QtCore.QSize(80, 16777215))
self.heating_rate.setMinimum(0.1)
self.heating_rate.setMaximum(20.0)
self.heating_rate.setObjectName("heating_rate")
self.layout_6.addWidget(self.heating_rate)
self.layout_4.addLayout(self.layout_6)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_4.addItem(spacerItem1)
self.verticalLayout_2.addLayout(self.layout_4)
self.layout_7 = QtWidgets.QHBoxLayout()
self.layout_7.setObjectName("layout_7")
self.save_temp = QtWidgets.QCheckBox(self.form_area)
self.save_temp.setObjectName("save_temp")
self.layout_7.addWidget(self.save_temp)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_7.addItem(spacerItem2)
self.verticalLayout_2.addLayout(self.layout_7)
self.horizontalLayout_8.addWidget(self.form_area)
self.buttons_area = QtWidgets.QFrame(process)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttons_area.sizePolicy().hasHeightForWidth())
self.buttons_area.setSizePolicy(sizePolicy)
self.buttons_area.setMinimumSize(QtCore.QSize(0, 0))
self.buttons_area.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.buttons_area.setFrameShape(QtWidgets.QFrame.Box)
self.buttons_area.setFrameShadow(QtWidgets.QFrame.Raised)
self.buttons_area.setObjectName("buttons_area")
self.verticalLayout = QtWidgets.QVBoxLayout(self.buttons_area)
self.verticalLayout.setContentsMargins(5, 5, 5, 5)
self.verticalLayout.setSpacing(12)
self.verticalLayout.setObjectName("verticalLayout")
self.push_button_accept = QtWidgets.QPushButton(self.buttons_area)
self.push_button_accept.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_accept.setShortcut("Return")
self.push_button_accept.setObjectName("push_button_accept")
self.verticalLayout.addWidget(self.push_button_accept)
self.push_button_cancel = QtWidgets.QPushButton(self.buttons_area)
self.push_button_cancel.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_cancel.setShortcut("Esc")
self.push_button_cancel.setObjectName("push_button_cancel")
self.verticalLayout.addWidget(self.push_button_cancel)
self.push_button_info = QtWidgets.QPushButton(self.buttons_area)
self.push_button_info.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_info.setObjectName("push_button_info")
self.verticalLayout.addWidget(self.push_button_info)
spacerItem3 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem3)
self.horizontalLayout_8.addWidget(self.buttons_area)
self.retranslateUi(process)
QtCore.QMetaObject.connectSlotsByName(process)
def retranslateUi(self, process):
_translate = QtCore.QCoreApplication.translate
process.setWindowTitle(_translate("process", "TL"))
self.final_temperature_label.setText(_translate("process", "Final Temperature (°C)"))
self.time_at_final_temp_label.setText(_translate("process", "Time at final temperature (s)"))
self.channels_label.setText(_translate("process", "Channels"))
self.heating_rate_label.setText(_translate("process", "Heating Rate (°C/s)"))
self.save_temp.setText(_translate("process", "Save Temperature"))
self.push_button_accept.setText(_translate("process", "Accept"))
self.push_button_cancel.setText(_translate("process", "Cancel"))
self.push_button_info.setText(_translate("process", "Information"))
|
carlos-ferras/Sequence-ToolKit
|
view/gensec/dialogs/processes/ui_tl.py
|
Python
|
gpl-3.0
| 8,875 | 0.001691 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <matt@sivel.net>
# (c) 2016, Justin Mayer <https://justinmayer.com/>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# =============================================================================
#
# This script is to be used with vault_password_file or --vault-password-file
# to retrieve the vault password via your OS's native keyring application.
#
# This file *MUST* be saved with executable permissions. Otherwise, Ansible
# will try to parse it as a password file and display: "ERROR! Decryption failed"
#
# The `keyring` Python module is required: https://pypi.python.org/pypi/keyring
#
# By default, this script will store the specified password in the keyring of
# the user that invokes the script. To specify a user keyring, add a [vault]
# section to your ansible.cfg file with a 'username' option. Example:
#
# [vault]
# username = 'ansible-vault'
#
# Another optional setting is for the key name, which allows you to use this
# script to handle multiple project vaults with different passwords:
#
# [vault]
# keyname = 'ansible-vault-yourproject'
#
# You can configure the `vault_password_file` option in ansible.cfg:
#
# [defaults]
# ...
# vault_password_file = /path/to/vault-keyring.py
# ...
#
# To set your password, `cd` to your project directory and run:
#
# python /path/to/vault-keyring.py set
#
# If you choose not to configure the path to `vault_password_file` in
# ansible.cfg, your `ansible-playbook` command might look like:
#
# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
import sys
import getpass
import keyring
import ansible.constants as C
def main():
(parser, config_path) = C.load_config_file()
if parser.has_option('vault', 'username'):
username = parser.get('vault', 'username')
else:
username = getpass.getuser()
if parser.has_option('vault', 'keyname'):
keyname = parser.get('vault', 'keyname')
else:
keyname = 'ansible'
if len(sys.argv) == 2 and sys.argv[1] == 'set':
intro = 'Storing password in "{}" user keyring using key name: {}\n'
sys.stdout.write(intro.format(username, keyname))
password = getpass.getpass()
confirm = getpass.getpass('Confirm password: ')
if password == confirm:
keyring.set_password(keyname, username, password)
else:
sys.stderr.write('Passwords do not match\n')
sys.exit(1)
else:
sys.stdout.write('{}\n'.format(keyring.get_password(keyname,
username)))
sys.exit(0)
if __name__ == '__main__':
main()
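# --- Illustration only (not part of the original script): a minimal sketch
# of reading back the password stored by the 'set' command above, using the
# same defaults (service key 'ansible', current OS user). The function name
# is an assumption for demonstration.
def _example_read_vault_password():
    import getpass
    import keyring
    # keyring.get_password(service, username) returns the stored secret,
    # or None if nothing has been stored under that key yet.
    return keyring.get_password('ansible', getpass.getuser())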
|
GustavoHennig/ansible
|
contrib/vault/vault-keyring.py
|
Python
|
gpl-3.0
| 3,430 | 0.001166 |
# Copyright (c) Microsoft Corporation 2015
from z3 import *
x = Real('x')
y = Real('y')
s = Solver()
s.add(x + y > 5, x > 1, y > 1)
print(s.check())
print(s.model())
|
sccblom/vercors
|
deps/z3/4.4.1/Windows NT/intel/bin/example.py
|
Python
|
mpl-2.0
| 178 | 0 |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2011 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.gtkui.desktop.sync - Glue code between GTK+ UI and sync module
# Thomas Perl <thp@gpodder.org>; 2009-09-05 (based on code from gui.py)
import gtk
import threading
import gpodder
_ = gpodder.gettext
from gpodder import util
from gpodder import sync
from gpodder.liblogger import log
from gpodder.gtkui.desktop.syncprogress import gPodderSyncProgress
from gpodder.gtkui.desktop.deviceplaylist import gPodderDevicePlaylist
class gPodderSyncUI(object):
def __init__(self, config, notification, \
parent_window, show_confirmation, \
update_episode_list_icons, \
update_podcast_list_model, \
preferences_widget, \
episode_selector_class, \
commit_changes_to_database):
self._config = config
self.notification = notification
self.parent_window = parent_window
self.show_confirmation = show_confirmation
self.update_episode_list_icons = update_episode_list_icons
self.update_podcast_list_model = update_podcast_list_model
self.preferences_widget = preferences_widget
self.episode_selector_class = episode_selector_class
self.commit_changes_to_database = commit_changes_to_database
def _filter_sync_episodes(self, channels, only_downloaded=True):
"""Return a list of episodes for device synchronization
If only_downloaded is True, this will skip episodes that
have not been downloaded yet and podcasts that are marked
as "Do not synchronize to my device".
"""
episodes = []
for channel in channels:
if not channel.sync_to_devices and only_downloaded:
log('Skipping channel: %s', channel.title, sender=self)
continue
for episode in channel.get_all_episodes():
if episode.was_downloaded(and_exists=True) or \
not only_downloaded:
episodes.append(episode)
return episodes
def _show_message_unconfigured(self):
title = _('No device configured')
message = _('Please set up your device in the preferences dialog.')
self.notification(message, title, widget=self.preferences_widget)
def _show_message_cannot_open(self):
title = _('Cannot open device')
message = _('Please check the settings in the preferences dialog.')
self.notification(message, title, widget=self.preferences_widget)
def on_synchronize_episodes(self, channels, episodes=None, force_played=True):
if self._config.device_type == 'ipod' and not sync.gpod_available:
title = _('Cannot sync to iPod')
message = _('Please install python-gpod and restart gPodder.')
self.notification(message, title, important=True)
return
elif self._config.device_type == 'mtp' and not sync.pymtp_available:
title = _('Cannot sync to MTP device')
message = _('Please install libmtp and restart gPodder.')
self.notification(message, title, important=True)
return
device = sync.open_device(self._config)
if device is not None:
def after_device_sync_callback(device, successful_sync):
if device.cancelled:
log('Cancelled by user.', sender=self)
elif successful_sync:
title = _('Device synchronized')
message = _('Your device has been synchronized.')
self.notification(message, title)
else:
title = _('Error closing device')
message = _('Please check settings and permission.')
self.notification(message, title, important=True)
# Update the UI to reflect changes from the sync process
episode_urls = set()
channel_urls = set()
for episode in episodes:
episode_urls.add(episode.url)
channel_urls.add(episode.channel.url)
util.idle_add(self.update_episode_list_icons, episode_urls)
util.idle_add(self.update_podcast_list_model, channel_urls)
util.idle_add(self.commit_changes_to_database)
device.register('post-done', after_device_sync_callback)
if device is None:
return self._show_message_unconfigured()
if not device.open():
return self._show_message_cannot_open()
if self._config.device_type == 'ipod':
#update played episodes and delete if requested
for channel in channels:
if channel.sync_to_devices:
allepisodes = [e for e in channel.get_all_episodes() \
if e.was_downloaded(and_exists=True)]
device.update_played_or_delete(channel, allepisodes, \
self._config.ipod_delete_played_from_db)
if self._config.ipod_purge_old_episodes:
device.purge()
if episodes is None:
force_played = False
episodes = self._filter_sync_episodes(channels)
def check_free_space():
# "Will we add this episode to the device?"
def will_add(episode):
# If already on-device, it won't take up any space
if device.episode_on_device(episode):
return False
# Might not be synced if it's played already
if not force_played and \
self._config.only_sync_not_played and \
episode.is_played:
return False
# In all other cases, we expect the episode to be
# synchronized to the device, so "answer" positive
return True
# "What is the file size of this episode?"
def file_size(episode):
filename = episode.local_filename(create=False)
if filename is None:
return 0
return util.calculate_size(str(filename))
# Calculate total size of sync and free space on device
total_size = sum(file_size(e) for e in episodes if will_add(e))
free_space = max(device.get_free_space(), 0)
if total_size > free_space:
title = _('Not enough space left on device')
message = _('You need to free up %s.\nDo you want to continue?') \
% (util.format_filesize(total_size-free_space),)
if not self.show_confirmation(message, title):
device.cancel()
device.close()
return
# Finally start the synchronization process
gPodderSyncProgress(self.parent_window, device=device)
def sync_thread_func():
device.add_tracks(episodes, force_played=force_played)
device.close()
threading.Thread(target=sync_thread_func).start()
# This function is used to remove files from the device
def cleanup_episodes():
# 'only_sync_not_played' must be used or else all the
# played tracks will be copied then immediately deleted
if self._config.mp3_player_delete_played and \
self._config.only_sync_not_played:
all_episodes = self._filter_sync_episodes(channels, \
only_downloaded=False)
episodes_on_device = device.get_all_tracks()
for local_episode in all_episodes:
episode = device.episode_on_device(local_episode)
if episode is None:
continue
if local_episode.state == gpodder.STATE_DELETED \
or (local_episode.is_played and \
not local_episode.is_locked):
log('Removing episode from device: %s',
episode.title, sender=self)
device.remove_track(episode)
# When this is done, start the callback in the UI code
util.idle_add(check_free_space)
# This will run the following chain of actions:
# 1. Remove old episodes (in worker thread)
# 2. Check for free space (in UI thread)
# 3. Sync the device (in UI thread)
threading.Thread(target=cleanup_episodes).start()
def on_cleanup_device(self):
columns = (
('title', None, None, _('Episode')),
('podcast', None, None, _('Podcast')),
('filesize', 'length', int, _('Size')),
('modified', 'modified_sort', int, _('Copied')),
('playcount_str', 'playcount', int, _('Play count')),
('released', None, None, _('Released')),
)
device = sync.open_device(self._config)
if device is None:
return self._show_message_unconfigured()
if not device.open():
return self._show_message_cannot_open()
tracks = device.get_all_tracks()
if tracks:
def remove_tracks_callback(tracks):
title = _('Delete podcasts from device?')
message = _('Do you really want to remove these episodes from your device? Episodes in your library will not be deleted.')
if tracks and self.show_confirmation(message, title):
gPodderSyncProgress(self.parent_window, device=device)
def cleanup_thread_func():
device.remove_tracks(tracks)
if not device.close():
title = _('Error closing device')
message = _('There has been an error closing your device.')
self.notification(message, title, important=True)
threading.Thread(target=cleanup_thread_func).start()
wanted_columns = []
for key, sort_name, sort_type, caption in columns:
want_this_column = False
for track in tracks:
if getattr(track, key) is not None:
want_this_column = True
break
if want_this_column:
wanted_columns.append((key, sort_name, sort_type, caption))
title = _('Remove podcasts from device')
instructions = _('Select episodes to remove from your device.')
self.episode_selector_class(self.parent_window, title=title, \
instructions=instructions, \
episodes=tracks, columns=wanted_columns, \
stock_ok_button=gtk.STOCK_DELETE, \
callback=remove_tracks_callback, \
tooltip_attribute=None, \
_config=self._config)
else:
device.close()
title = _('No files on device')
            message = _('The device contains no files to be removed.')
self.notification(message, title)
def on_manage_device_playlist(self):
if self._config.device_type == 'ipod' and not sync.gpod_available:
title = _('Cannot manage iPod playlist')
message = _('This feature is not available for iPods.')
self.notification(message, title)
return
elif self._config.device_type == 'mtp' and not sync.pymtp_available:
title = _('Cannot manage MTP device playlist')
message = _('This feature is not available for MTP devices.')
self.notification(message, title)
return
device = sync.open_device(self._config)
if device is None:
return self._show_message_unconfigured()
if not device.open():
return self._show_message_cannot_open()
gPodderDevicePlaylist(self.parent_window, \
device=device, \
_config=self._config)
device.close()
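# --- Illustration only: a generic, hedged sketch of the threading pattern
# used throughout this class (heavy work in a worker thread, results handed
# back to the GTK main loop via an idle callback). gPodder's util.idle_add
# plays the role of the idle_add parameter here; everything else below is an
# assumption for demonstration, not gPodder API.
def _example_run_in_background(heavy_task, on_done, idle_add):
    import threading
    def worker():
        result = heavy_task()      # runs off the main loop
        idle_add(on_done, result)  # on_done(result) runs in the main loop
    threading.Thread(target=worker).start()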
|
elelay/gPodderAsRSSReader
|
src/gpodder/gtkui/desktop/sync.py
|
Python
|
gpl-3.0
| 13,131 | 0.003427 |
import re
from modularodm import Q
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import PermissionDenied, ValidationError, NotFound, MethodNotAllowed, NotAuthenticated
from rest_framework.status import HTTP_204_NO_CONTENT
from rest_framework.response import Response
from framework.auth.oauth_scopes import CoreScopes
from framework.postcommit_tasks.handlers import enqueue_postcommit_task
from api.base import generic_bulk_views as bulk_views
from api.base import permissions as base_permissions
from api.base.exceptions import InvalidModelValueError, JSONAPIException, Gone
from api.base.filters import ODMFilterMixin, ListFilterMixin
from api.base.views import JSONAPIBaseView
from api.base.parsers import (
JSONAPIRelationshipParser,
JSONAPIRelationshipParserForRegularJSON,
JSONAPIMultipleRelationshipsParser,
JSONAPIMultipleRelationshipsParserForRegularJSON,
)
from api.base.exceptions import RelationshipPostMakesNoChanges, EndpointNotImplementedError
from api.base.pagination import CommentPagination, NodeContributorPagination, MaxSizePagination
from api.base.utils import get_object_or_error, is_bulk_request, get_user_auth, is_truthy
from api.base.settings import ADDONS_OAUTH, API_BASE
from api.caching.tasks import ban_url
from api.addons.views import AddonSettingsMixin
from api.files.serializers import FileSerializer
from api.comments.serializers import NodeCommentSerializer, CommentCreateSerializer
from api.comments.permissions import CanCommentOrPublic
from api.users.views import UserMixin
from api.wikis.serializers import NodeWikiSerializer
from api.base.views import LinkedNodesRelationship, BaseContributorDetail, BaseContributorList, BaseNodeLinksDetail, BaseNodeLinksList, BaseLinkedList
from api.base.throttling import (
UserRateThrottle,
NonCookieAuthThrottle,
AddContributorThrottle,
)
from api.nodes.filters import NodePreprintsFilterMixin
from api.nodes.serializers import (
NodeSerializer,
ForwardNodeAddonSettingsSerializer,
NodeAddonSettingsSerializer,
NodeLinksSerializer,
NodeForksSerializer,
NodeDetailSerializer,
NodeProviderSerializer,
DraftRegistrationSerializer,
DraftRegistrationDetailSerializer,
NodeContributorsSerializer,
NodeContributorDetailSerializer,
NodeInstitutionsRelationshipSerializer,
NodeAlternativeCitationSerializer,
NodeContributorsCreateSerializer,
NodeViewOnlyLinkSerializer,
NodeViewOnlyLinkUpdateSerializer,
NodeCitationSerializer,
NodeCitationStyleSerializer
)
from api.nodes.utils import get_file_object
from api.citations.utils import render_citation
from api.addons.serializers import NodeAddonFolderSerializer
from api.registrations.serializers import RegistrationSerializer
from api.institutions.serializers import InstitutionSerializer
from api.identifiers.serializers import NodeIdentifierSerializer
from api.identifiers.views import IdentifierList
from api.nodes.permissions import (
IsAdmin,
IsPublic,
AdminOrPublic,
ContributorOrPublic,
RegistrationAndPermissionCheckForPointers,
ContributorDetailPermissions,
ReadOnlyIfRegistration,
IsAdminOrReviewer,
WriteOrPublicForRelationshipInstitutions,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
from api.logs.serializers import NodeLogSerializer
from api.preprints.serializers import PreprintSerializer
from website.addons.wiki.model import NodeWikiPage
from website.exceptions import NodeStateError
from website.util.permissions import ADMIN
from website.models import Node, Pointer, Comment, NodeLog, Institution, DraftRegistration, PrivateLink, PreprintService
from website.files.models import FileNode
from framework.auth.core import User
from api.base.utils import default_node_list_query, default_node_permission_query
class NodeMixin(object):
"""Mixin with convenience methods for retrieving the current node based on the
current URL. By default, fetches the current node based on the node_id kwarg.
"""
serializer_class = NodeSerializer
node_lookup_url_kwarg = 'node_id'
def get_node(self, check_object_permissions=True):
node = get_object_or_error(
Node,
self.kwargs[self.node_lookup_url_kwarg],
display_name='node'
)
# Nodes that are folders/collections are treated as a separate resource, so if the client
# requests a collection through a node endpoint, we return a 404
if node.is_collection or node.is_registration:
raise NotFound
# May raise a permission denied
if check_object_permissions:
self.check_object_permissions(self.request, node)
return node
class DraftMixin(object):
serializer_class = DraftRegistrationSerializer
def get_draft(self, draft_id=None):
node_id = self.kwargs['node_id']
if draft_id is None:
draft_id = self.kwargs['draft_id']
draft = get_object_or_error(DraftRegistration, draft_id)
if not draft.branched_from._id == node_id:
raise ValidationError('This draft registration is not created from the given node.')
if self.request.method not in drf_permissions.SAFE_METHODS:
registered_and_deleted = draft.registered_node and draft.registered_node.is_deleted
if draft.registered_node and not draft.registered_node.is_deleted:
raise PermissionDenied('This draft has already been registered and cannot be modified.')
if draft.is_pending_review:
raise PermissionDenied('This draft is pending review and cannot be modified.')
if draft.requires_approval and draft.is_approved and (not registered_and_deleted):
raise PermissionDenied('This draft has already been approved and cannot be modified.')
self.check_object_permissions(self.request, draft)
return draft
class WaterButlerMixin(object):
path_lookup_url_kwarg = 'path'
provider_lookup_url_kwarg = 'provider'
def get_file_item(self, item):
attrs = item['attributes']
file_node = FileNode.resolve_class(
attrs['provider'],
FileNode.FOLDER if attrs['kind'] == 'folder'
else FileNode.FILE
).get_or_create(self.get_node(check_object_permissions=False), attrs['path'])
file_node.update(None, attrs, user=self.request.user)
self.check_object_permissions(self.request, file_node)
return file_node
def fetch_from_waterbutler(self):
node = self.get_node(check_object_permissions=False)
path = self.kwargs[self.path_lookup_url_kwarg]
provider = self.kwargs[self.provider_lookup_url_kwarg]
return self.get_file_object(node, path, provider)
def get_file_object(self, node, path, provider, check_object_permissions=True):
obj = get_file_object(node=node, path=path, provider=provider, request=self.request)
if provider == 'osfstorage':
if check_object_permissions:
self.check_object_permissions(self.request, obj)
return obj
class NodeList(JSONAPIBaseView, bulk_views.BulkUpdateJSONAPIView, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, NodePreprintsFilterMixin, WaterButlerMixin):
"""Nodes that represent projects and components. *Writeable*.
Paginated list of nodes ordered by their `date_modified`. Each resource contains the full representation of the
node, meaning additional requests to an individual node's detail view are not necessary. Registrations and withdrawn
registrations cannot be accessed through this endpoint (see registration endpoints instead).
<!--- Copied Spiel from NodeDetail -->
On the front end, nodes are considered 'projects' or 'components'. The difference between a project and a component
is that a project is the top-level node, and components are children of the project. There is also a [category
field](/v2/#osf-node-categories) that includes 'project' as an option. The categorization essentially determines
which icon is displayed by the node in the front-end UI and helps with search organization. Top-level nodes may have
a category other than project, and children nodes may have a category of project.
##Node Attributes
<!--- Copied Attributes from NodeDetail -->
OSF Node entities have the "nodes" `type`.
name type description
=================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
registration boolean is this a registration? (always false - may be deprecated in future versions)
fork boolean is this node a fork of another node?
public boolean has this node been made publicly-visible?
preprint boolean is this a preprint?
collection boolean is this a collection? (always false - may be deprecated in future versions)
node_license object details of the license applied to the node
year string date range of the license
copyright_holders array of strings holders of the applied license
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Creating New Nodes
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "nodes", # required
"attributes": {
"title": {title}, # required
"category": {category}, # required
"description": {description}, # optional
"tags": [{tag1}, {tag2}], # optional
"public": true|false # optional
"template_from": {node_id} # optional
}
}
}
Success: 201 CREATED + node representation
New nodes are created by issuing a POST request to this endpoint. The `title` and `category` fields are
mandatory. `category` must be one of the [permitted node categories](/v2/#osf-node-categories). `public` defaults
to false. All other fields not listed above will be ignored. If the node creation is successful the API will
return a 201 response with the representation of the new node in the body. For the new node's canonical URL, see
the `/links/self` field of the response.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
+ `view_only=<Str>` -- Allow users with limited access keys to access this node. Note that some keys are anonymous,
so using the view_only key will cause user-related information to no longer serialize. This includes blank ids for
users and contributors and missing serializer fields and relationships.
Nodes may be filtered by their `id`, `title`, `category`, `description`, `public`, `tags`, `date_created`, `date_modified`,
`root`, `parent`, 'preprint', and `contributors`. Most are string fields and will be filtered using simple substring matching. `public`
and `preprint` are boolean values, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true`
or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_BASE_READ]
required_write_scopes = [CoreScopes.NODE_BASE_WRITE]
model_class = Node
serializer_class = NodeSerializer
view_category = 'nodes'
view_name = 'node-list'
ordering = ('-date_modified', ) # default ordering
# overrides NodePreprintsFilterMixin
def get_default_odm_query(self):
user = self.request.user
base_query = default_node_list_query()
permissions_query = default_node_permission_query(user)
return base_query & permissions_query
# overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView
def get_queryset(self):
# For bulk requests, queryset is formed from request body.
if is_bulk_request(self.request):
query = Q('_id', 'in', [node['id'] for node in self.request.data])
auth = get_user_auth(self.request)
nodes = Node.find(query)
# If skip_uneditable=True in query_params, skip nodes for which the user
# does not have EDIT permissions.
if is_truthy(self.request.query_params.get('skip_uneditable', False)):
has_permission = []
for node in nodes:
if node.can_edit(auth):
has_permission.append(node)
query = Q('_id', 'in', [node._id for node in has_permission])
return Node.find(query)
for node in nodes:
if not node.can_edit(auth):
raise PermissionDenied
return nodes
else:
query = self.get_query_from_request()
return Node.find(query)
# overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView, BulkDestroyJSONAPIView
def get_serializer_class(self):
"""
Use NodeDetailSerializer which requires 'id'
"""
if self.request.method in ('PUT', 'PATCH', 'DELETE'):
return NodeDetailSerializer
else:
return NodeSerializer
# overrides ListBulkCreateJSONAPIView
def perform_create(self, serializer):
"""Create a node.
:param serializer:
"""
# On creation, make sure that current user is the creator
user = self.request.user
serializer.save(creator=user)
# overrides BulkDestroyJSONAPIView
def allow_bulk_destroy_resources(self, user, resource_list):
"""User must have admin permissions to delete nodes."""
if is_truthy(self.request.query_params.get('skip_uneditable', False)):
return any([node.has_permission(user, ADMIN) for node in resource_list])
return all([node.has_permission(user, ADMIN) for node in resource_list])
def bulk_destroy_skip_uneditable(self, resource_object_list, user, object_type):
"""
If skip_uneditable=True in query_params, skip the resources for which the user does not have
admin permissions and delete the remaining resources
"""
allowed = []
skipped = []
if not is_truthy(self.request.query_params.get('skip_uneditable', False)):
return None
for resource in resource_object_list:
if resource.has_permission(user, ADMIN):
allowed.append(resource)
else:
skipped.append({'id': resource._id, 'type': object_type})
return {'skipped': skipped, 'allowed': allowed}
# Overrides BulkDestroyJSONAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
try:
instance.remove_node(auth=auth)
except NodeStateError as err:
raise ValidationError(err.message)
instance.save()
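# --- Illustration only: a hedged client-side sketch of the "Creating New
# Nodes" action documented in the NodeList docstring above. The base URL,
# bearer token, attribute values, and the application/vnd.api+json content
# type are assumptions for demonstration; the payload shape follows the
# docstring. Updating works the same way with a PATCH against the created
# node's canonical (/links/self) URL.
def _example_create_node(base_url, token):
    import json
    import requests  # assumption: requests is available client-side
    payload = {
        'data': {
            'type': 'nodes',                   # required
            'attributes': {
                'title': 'My project',         # required
                'category': 'project',         # required
                'public': False,               # optional
            },
        }
    }
    resp = requests.post(
        '{}/v2/nodes/'.format(base_url),
        data=json.dumps(payload),
        headers={
            'Authorization': 'Bearer {}'.format(token),
            'Content-Type': 'application/vnd.api+json',
        },
    )
    resp.raise_for_status()  # a successful create returns 201 CREATED
    # the canonical URL of the new node, per the docstring above
    return resp.json()['data']['links']['self']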
class NodeDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, NodeMixin, WaterButlerMixin):
"""Details about a given node (project or component). *Writeable*.
A registration or withdrawn registration cannot be accessed through this endpoint. See Registration Detail endpoint.
On the front end, nodes are considered 'projects' or 'components'. The difference between a project and a component
is that a project is the top-level node, and components are children of the project. There is also a [category
field](/v2/#osf-node-categories) that includes 'project' as an option. The categorization essentially determines
which icon is displayed by the node in the front-end UI and helps with search organization. Top-level nodes may have
a category other than project, and children nodes may have a category of project.
###Permissions
Nodes that are made public will give read-only access to everyone. Private nodes require explicit read
permission. Write and admin access are the same for public and private nodes. Administrators on a parent node have
implicit read permissions for all child nodes.
##Attributes
OSF Node entities have the "nodes" `type`.
name type description
=================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
registration boolean is this a registration? (always false - may be deprecated in future versions)
fork boolean is this node a fork of another node?
public boolean has this node been made publicly-visible?
collection boolean is this a collection? (always false - may be deprecated in future versions)
node_license object details of the license applied to the node
year string date range of the license
copyright_holders array of strings holders of the applied license
##Relationships
###Children
List of nodes that are children of this node. New child nodes may be added through this endpoint.
###Comments
List of comments on this node. New comments can be left on the node through this endpoint.
###Contributors
List of users who are contributors to this node. Contributors may have "read", "write", or "admin" permissions.
A node must always have at least one "admin" contributor. Contributors may be added via this endpoint.
###Draft Registrations
List of draft registrations of the current node.
###Files
List of top-level folders (actually cloud-storage providers) associated with this node. This is the starting point
for accessing the actual files stored within this node.
###Forked From
If this node was forked from another node, the canonical endpoint of the node that was forked from will be
available in the `/forked_from/links/related/href` key. Otherwise, it will be null.
###Logs
List of read-only log actions pertaining to the node.
###Node Links
List of links (pointers) to other nodes on the OSF. Node links can be added through this endpoint.
###Parent
If this node is a child node of another node, the parent's canonical endpoint will be available in the
`/parent/links/related/href` key. Otherwise, it will be null.
###Registrations
List of registrations of the current node.
###Root
Returns the top-level node associated with the current node. If the current node is the top-level node, the root is
the current node.
##Links
self: the canonical api endpoint of this node
html: this node's page on the OSF website
##Actions
###Update
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "nodes", # required
"id": {node_id}, # required
"attributes": {
"title": {title}, # mandatory
"category": {category}, # mandatory
"description": {description}, # optional
"tags": [{tag1}, {tag2}], # optional
"public": true|false # optional
}
}
}
Success: 200 OK + node representation
To update a node, issue either a PUT or a PATCH request against the `/links/self` URL. The `title` and `category`
fields are mandatory if you PUT and optional if you PATCH. The `tags` parameter must be an array of strings.
Non-string values will be accepted and stringified, but we make no promises about the stringification output. So
don't do that.
###Delete
Method: DELETE
URL: /links/self
Params: <none>
Success: 204 No Content
To delete a node, issue a DELETE request against `/links/self`. A successful delete will return a 204 No Content
response. Attempting to delete a node you do not own will result in a 403 Forbidden.
##Query Params
+ `view_only=<Str>` -- Allow users with limited access keys to access this node. Note that some keys are anonymous, so using the view_only key will cause user-related information to no longer serialize. This includes blank ids for users and contributors and missing serializer fields and relationships.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
ExcludeWithdrawals,
)
required_read_scopes = [CoreScopes.NODE_BASE_READ]
required_write_scopes = [CoreScopes.NODE_BASE_WRITE]
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
serializer_class = NodeDetailSerializer
view_category = 'nodes'
view_name = 'node-detail'
# overrides RetrieveUpdateDestroyAPIView
def get_object(self):
return self.get_node()
# overrides RetrieveUpdateDestroyAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = self.get_object()
try:
node.remove_node(auth=auth)
except NodeStateError as err:
raise ValidationError(err.message)
node.save()
class NodeContributorsList(BaseContributorList, bulk_views.BulkUpdateJSONAPIView, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, NodeMixin):
"""Contributors (users) for a node.
Contributors are users who can make changes to the node or, in the case of private nodes,
have read access to the node. Contributors are divided between 'bibliographic' and 'non-bibliographic'
contributors. From a permissions standpoint, both are the same, but bibliographic contributors
are included in citations, while non-bibliographic contributors are not included in citations.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed and the id for
the contributor will be an empty string.
##Node Contributor Attributes
<!--- Copied Attributes from NodeContributorDetail -->
`type` is "contributors"
name type description
======================================================================================================
bibliographic boolean Whether the user will be included in citations for this node. Default is true.
permission string User permission level. Must be "read", "write", or "admin". Default is "write".
unregistered_contributor string Contributor's assigned name if contributor hasn't yet claimed account
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Relationships
###Users
This endpoint shows the contributor user detail and is automatically embedded.
##Actions
###Adding Contributors
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "contributors", # required
"attributes": {
"bibliographic": true|false, # optional
"permission": "read"|"write"|"admin" # optional
},
"relationships": {
"users": {
"data": {
"type": "users", # required
"id": "{user_id}" # required
}
}
}
}
}
Success: 201 CREATED + node contributor representation
Add a contributor to a node by issuing a POST request to this endpoint. This effectively creates a relationship
between the node and the user. Besides the top-level type, there are optional "attributes" which describe the
relationship between the node and the user. `bibliographic` is a boolean and defaults to `true`. `permission` must
be a [valid OSF permission key](/v2/#osf-node-permission-keys) and defaults to `"write"`. A relationship object
with a "data" member, containing the user `type` and user `id` must be included. The id must be a valid user id.
All other fields not listed above will be ignored. If the request is successful the API will return
a 201 response with the representation of the new node contributor in the body. For the new node contributor's
canonical URL, see the `/links/self` field of the response.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
NodeContributors may be filtered by `bibliographic`, or `permission` attributes. `bibliographic` is a boolean, and
can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true` or `false` in
the query will cause the match to fail regardless.
+ `profile_image_size=<Int>` -- Modifies `/links/profile_image_url` of the user entities so that it points to
the user's profile image scaled to the given size in pixels. If left blank, the size depends on the image provider.
#This Request/Response
"""
permission_classes = (
AdminOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CONTRIBUTORS_READ]
required_write_scopes = [CoreScopes.NODE_CONTRIBUTORS_WRITE]
model_class = User
throttle_classes = (AddContributorThrottle, UserRateThrottle, NonCookieAuthThrottle, )
pagination_class = NodeContributorPagination
serializer_class = NodeContributorsSerializer
view_category = 'nodes'
view_name = 'node-contributors'
ordering = ('index',) # default ordering
# overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView, BulkDeleteJSONAPIView
def get_serializer_class(self):
"""
Use NodeContributorDetailSerializer which requires 'id'
"""
        if self.request.method in ('PUT', 'PATCH', 'DELETE'):
return NodeContributorDetailSerializer
elif self.request.method == 'POST':
return NodeContributorsCreateSerializer
else:
return NodeContributorsSerializer
# overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView
def get_queryset(self):
queryset = self.get_queryset_from_request()
# If bulk request, queryset only contains contributors in request
if is_bulk_request(self.request):
contrib_ids = []
for item in self.request.data:
try:
                    # contributor ids are formatted '<node_id>-<user_id>'; keep the user part
                    contrib_ids.append(item['id'].split('-')[1])
except AttributeError:
raise ValidationError('Contributor identifier not provided.')
except IndexError:
raise ValidationError('Contributor identifier incorrectly formatted.')
queryset[:] = [contrib for contrib in queryset if contrib._id in contrib_ids]
return queryset
# Overrides BulkDestroyJSONAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = self.get_node()
if len(node.visible_contributors) == 1 and node.get_visible(instance):
raise ValidationError('Must have at least one visible contributor')
if instance not in node.contributors:
raise NotFound('User cannot be found in the list of contributors.')
removed = node.remove_contributor(instance, auth)
if not removed:
raise ValidationError('Must have at least one registered admin contributor')
# Overrides BulkDestroyJSONAPIView
def get_requested_resources(self, request, request_data):
requested_ids = []
for data in request_data:
try:
                # contributor ids are formatted '<node_id>-<user_id>'; keep the user part
                requested_ids.append(data['id'].split('-')[1])
except IndexError:
raise ValidationError('Contributor identifier incorrectly formatted.')
resource_object_list = User.find(Q('_id', 'in', requested_ids))
for resource in resource_object_list:
if getattr(resource, 'is_deleted', None):
raise Gone
if len(resource_object_list) != len(request_data):
raise ValidationError({'non_field_errors': 'Could not find all objects to delete.'})
return resource_object_list
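# --- Illustration only: a hedged sketch of the "Adding Contributors" action
# documented in the NodeContributorsList docstring above. Note the JSON-API
# relationships block carrying the user id; URL, token, and content type are
# assumptions, as in the node-creation sketch above.
def _example_add_contributor(base_url, node_id, user_id, token):
    import json
    import requests
    payload = {
        'data': {
            'type': 'contributors',
            'attributes': {
                'bibliographic': True,     # optional, defaults to true
                'permission': 'write',     # optional, defaults to "write"
            },
            'relationships': {
                'users': {'data': {'type': 'users', 'id': user_id}},
            },
        }
    }
    return requests.post(
        '{}/v2/nodes/{}/contributors/'.format(base_url, node_id),
        data=json.dumps(payload),
        headers={
            'Authorization': 'Bearer {}'.format(token),
            'Content-Type': 'application/vnd.api+json',
        },
    )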
class NodeContributorDetail(BaseContributorDetail, generics.RetrieveUpdateDestroyAPIView, NodeMixin, UserMixin):
"""Detail of a contributor for a node. *Writeable*.
Contributors are users who can make changes to the node or, in the case of private nodes,
have read access to the node. Contributors are divided between 'bibliographic' and 'non-bibliographic'
contributors. From a permissions standpoint, both are the same, but bibliographic contributors
are included in citations, while non-bibliographic contributors are not included in citations.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed and the id for
the contributor will be an empty string.
Contributors can be viewed, removed, and have their permissions and bibliographic status changed via this
endpoint.
##Attributes
`type` is "contributors"
name type description
======================================================================================================
bibliographic boolean Whether the user will be included in citations for this node. Default is true.
permission string User permission level. Must be "read", "write", or "admin". Default is "write".
unregistered_contributor string Contributor's assigned name if contributor hasn't yet claimed account
index integer The position in the list of contributors reflected in the bibliography. Zero Indexed.
##Relationships
###Users
This endpoint shows the contributor user detail.
##Links
self: the canonical api endpoint of this contributor
html: the contributing user's page on the OSF website
profile_image: a url to the contributing user's profile image
##Actions
###Update Contributor
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "contributors", # required
"id": {contributor_id}, # required
"attributes": {
"bibliographic": true|false, # optional
"permission": "read"|"write"|"admin" # optional
"index": "0" # optional
}
}
}
        Success:       200 OK + node contributor representation
To update a contributor's bibliographic preferences, order in the bibliography,
or access permissions for the node, issue a PUT request to the
`self` link. Since this endpoint has no mandatory attributes, PUT and PATCH are functionally the same. If the given
user is not already in the contributor list, a 404 Not Found error will be returned. A node must always have at
least one admin, and any attempt to downgrade the permissions of a sole admin will result in a 400 Bad Request
error.
###Remove Contributor
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 No Content
To remove a contributor from a node, issue a DELETE request to the `self` link. Attempting to remove the only admin
from a node will result in a 400 Bad Request response. This request will only remove the relationship between the
node and the user, not the user itself.
##Query Params
+ `profile_image_size=<Int>` -- Modifies `/links/profile_image_url` so that it points the image scaled to the given
size in pixels. If left blank, the size depends on the image provider.
#This Request/Response
"""
permission_classes = (
ContributorDetailPermissions,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CONTRIBUTORS_READ]
required_write_scopes = [CoreScopes.NODE_CONTRIBUTORS_WRITE]
serializer_class = NodeContributorDetailSerializer
view_category = 'nodes'
view_name = 'node-contributor-detail'
# overrides DestroyAPIView
def perform_destroy(self, instance):
node = self.get_node()
auth = get_user_auth(self.request)
if len(node.visible_contributors) == 1 and node.get_visible(instance):
raise ValidationError('Must have at least one visible contributor')
removed = node.remove_contributor(instance, auth)
if not removed:
raise ValidationError('Must have at least one registered admin contributor')
class NodeDraftRegistrationsList(JSONAPIBaseView, generics.ListCreateAPIView, NodeMixin):
"""Draft registrations of the current node.
<!--- Copied partially from NodeDraftRegistrationDetail -->
Draft registrations contain the supplemental registration questions that accompany a registration. A registration
is a frozen version of the project that can never be edited or deleted but can be withdrawn.
Your original project remains editable but will now have the registration linked to it.
###Permissions
Users must have admin permission on the node in order to view or create a draft registration.
##Draft Registration Attributes
Draft Registrations have the "draft_registrations" `type`.
name type description
===========================================================================
registration_supplement string id of registration_schema, must be an active schema
registration_metadata dictionary dictionary of question ids and responses from registration schema
datetime_initiated iso8601 timestamp timestamp that the draft was created
datetime_updated iso8601 timestamp timestamp when the draft was last updated
##Relationships
###Branched From
Node that the draft is branched from. The node endpoint is available in `/branched_from/links/related/href`.
###Initiator
User who initiated the draft registration. The user endpoint is available in `/initiator/links/related/href`.
##Registration Schema
Detailed registration schema. The schema endpoint is available in `/registration_schema/links/related/href`.
##Actions
###Create Draft Registration
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "draft_registrations", # required
"attributes": {
"registration_supplement": {schema_id}, # required
"registration_metadata": {"question_id": {"value": "question response"}} # optional
}
}
}
        Success: 201 CREATED + draft representation
To create a draft registration, issue a POST request to the `self` link. Registration supplement must be the id of an
active registration schema. Registration metadata is not required on the creation of the draft. If registration metadata is included,
it must be a dictionary with keys as question ids in the registration supplement, and values as nested dictionaries
matching the specific format in the registration schema. See registration schema endpoints for specifics. If question
is multiple-choice, question response must exactly match one of the possible choices.
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
#This request/response
"""
permission_classes = (
IsAdmin,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_DRAFT_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_DRAFT_REGISTRATIONS_WRITE]
serializer_class = DraftRegistrationSerializer
view_category = 'nodes'
view_name = 'node-draft-registrations'
# overrides ListCreateAPIView
def get_queryset(self):
node = self.get_node()
drafts = DraftRegistration.find(Q('branched_from', 'eq', node))
return [draft for draft in drafts if not draft.registered_node or draft.registered_node.is_deleted]
# overrides ListBulkCreateJSONAPIView
def perform_create(self, serializer):
user = self.request.user
serializer.save(initiator=user, node=self.get_node())
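# --- Illustration only: a hedged sketch of the "Create Draft Registration"
# action documented above. The schema id and the question id used in the
# metadata are assumptions; real question ids come from the chosen
# registration schema, and metadata may be omitted entirely at creation.
def _example_create_draft_registration(base_url, node_id, schema_id, token):
    import json
    import requests
    payload = {
        'data': {
            'type': 'draft_registrations',
            'attributes': {
                'registration_supplement': schema_id,  # must be an active schema
                # responses are nested dicts keyed by schema question id
                'registration_metadata': {
                    'q1': {'value': 'example response'},
                },
            },
        }
    }
    return requests.post(
        '{}/v2/nodes/{}/draft_registrations/'.format(base_url, node_id),
        data=json.dumps(payload),
        headers={
            'Authorization': 'Bearer {}'.format(token),
            'Content-Type': 'application/vnd.api+json',
        },
    )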
class NodeDraftRegistrationDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, DraftMixin):
"""Details about a given draft registration. *Writeable*.
Draft registrations contain the supplemental registration questions that accompany a registration. A registration
is a frozen version of the project that can never be edited or deleted but can be withdrawn. Answer the questions
in the draft registration with PUT/PATCH requests until you are ready to submit. Final submission will include sending the
draft registration id as part of a POST request to the Node Registrations endpoint.
###Permissions
Users must have admin permission on the node in order to view, update, or delete a draft registration.
##Attributes
Draft Registrations have the "draft_registrations" `type`.
name type description
===========================================================================
registration_supplement string id of registration_schema, must be an active schema
registration_metadata dictionary dictionary of question ids and responses from registration schema
datetime_initiated iso8601 timestamp timestamp that the draft was created
datetime_updated iso8601 timestamp timestamp when the draft was last updated
##Relationships
###Branched From
Node that the draft is branched from. The node endpoint is available in `/branched_from/links/related/href`.
###Initiator
User who initiated the draft registration. The user endpoint is available in `/initiator/links/related/href`.
##Registration Schema
Detailed registration schema. The schema endpoint is available in `/registration_schema/links/related/href`.
##Actions
###Update Draft Registration
Method: PUT/PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"id": {draft_registration_id}, # required
"type": "draft_registrations", # required
"attributes": {
"registration_metadata": {"question_id": {"value": "question response"}} # optional
}
}
}
Success: 200 OK + draft representation
To update a draft registration, issue a PUT/PATCH request to the `self` link. Registration supplement cannot be updated
after the draft registration has been created. Registration metadata is required. It must be a dictionary with
keys as question ids in the registration form, and values as nested dictionaries matching the specific format in the
registration schema. See registration schema endpoints for specifics. If question is multiple-choice, question response
must exactly match one of the possible choices.
###Delete Draft Registration
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 No Content
To delete a draft registration, issue a DELETE request to the `self` link. This request will remove the draft completely.
A draft that has already been registered cannot be deleted.
##Query Params
+ `view_only=<Str>` -- Allow users with limited access keys to access this node. Note that some keys are anonymous,
so using the view_only key will cause user-related information to no longer serialize. This includes blank ids for users and contributors and missing serializer fields and relationships.
#This Request/Response
"""
permission_classes = (
IsAdminOrReviewer,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope
)
required_read_scopes = [CoreScopes.NODE_DRAFT_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_DRAFT_REGISTRATIONS_WRITE]
serializer_class = DraftRegistrationDetailSerializer
view_category = 'nodes'
view_name = 'node-draft-registration-detail'
def get_object(self):
return self.get_draft()
def perform_destroy(self, draft):
DraftRegistration.remove_one(draft)
class NodeRegistrationsList(JSONAPIBaseView, generics.ListCreateAPIView, NodeMixin, DraftMixin):
"""Registrations of the current node.
Registrations are read-only snapshots of a project that can never be edited or deleted but can be withdrawn. This view
is a list of all the registrations and withdrawn registrations of the current node. To create a registration, first
create a draft registration and answer the required supplemental registration questions. Then, submit a POST request
to this endpoint with the draft registration id in the body of the request.
<!--- Copied from RegistrationList -->
A withdrawn registration will display a limited subset of information, namely, title, description,
date_created, registration, withdrawn, date_registered, withdrawal_justification, and registration supplement. All
other fields will be displayed as null. Additionally, the only relationships permitted to be accessed for a withdrawn
registration are the contributors - other relationships will return a 403. Each resource contains the full representation
of the registration, meaning additional requests to an individual registrations's detail view are not necessary.
<!--- Copied Attributes from RegistrationList -->
##Registration Attributes
Registrations have the "registrations" `type`.
name type description
=======================================================================================================
title string title of the registered project or component
description string description of the registered node
        category                        string             node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the registered node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
fork boolean is this project a fork?
registration boolean is this node a registration? (always true - may be deprecated in future versions)
collection boolean is this registered node a collection? (always false - may be deprecated in future versions)
public boolean has this registration been made publicly-visible?
withdrawn boolean has this registration been withdrawn?
date_registered iso8601 timestamp timestamp that the registration was created
embargo_end_date iso8601 timestamp when the embargo on this registration will be lifted (if applicable)
withdrawal_justification string reasons for withdrawing the registration
pending_withdrawal boolean is this registration pending withdrawal?
pending_withdrawal_approval boolean is this registration pending approval?
pending_embargo_approval boolean is the associated Embargo awaiting approval by project admins?
registered_meta dictionary registration supplementary information
registration_supplement string registration template
##Actions
###Create Registration
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "registrations", # required
"attributes": {
"draft_registration": {draft_registration_id}, # required, write-only
"registration_choice": one of ['embargo', 'immediate'], # required, write-only
"lift_embargo": format %Y-%m-%dT%H:%M:%S' # required if registration_choice is 'embargo'
}
}
}
        Success: 201 CREATED + draft representation
To create a registration, issue a POST request to the `self` link. 'draft_registration' must be the id of a completed
draft registration created for the current node. All required supplemental questions in the draft registration must
have been answered. Registration choice should be 'embargo' if you wish to add an embargo date to the registration.
Registrations can have embargo periods for up to four years. 'lift_embargo' should be the embargo end date.
When the embargo expires, the registration will be made public. If 'immediate' is selected as the "registration_choice",
the registration will be made public once it is approved.
##Relationships
###Registered from
The registration is branched from this node.
###Registered by
The registration was initiated by this user.
##Registration Schema
Detailed registration schema. The schema endpoint is available in `/registration_schema/links/related/href`.
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
#This request/response
"""
permission_classes = (
AdminOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
serializer_class = RegistrationSerializer
view_category = 'nodes'
view_name = 'node-registrations'
# overrides ListCreateAPIView
# TODO: Filter out withdrawals by default
def get_queryset(self):
nodes = self.get_node().registrations_all
auth = get_user_auth(self.request)
registrations = [node for node in nodes if node.can_view(auth)]
return registrations
# overrides ListCreateJSONAPIView
def perform_create(self, serializer):
"""Create a registration from a draft.
"""
# On creation, make sure that current user is the creator
draft_id = self.request.data.get('draft_registration', None)
draft = self.get_draft(draft_id)
serializer.save(draft=draft)
class NodeChildrenList(JSONAPIBaseView, bulk_views.ListBulkCreateJSONAPIView, NodeMixin, NodePreprintsFilterMixin):
"""Children of the current node. *Writeable*.
This will get the next level of child nodes for the selected node if the current user has read access for those
nodes. Creating a node via this endpoint will behave the same as the [node list endpoint](/v2/nodes/), but the new
node will have the selected node set as its parent.
##Node Attributes
<!--- Copied Attributes from NodeDetail -->
OSF Node entities have the "nodes" `type`.
name type description
=================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
registration boolean is this a registration? (always false - may be deprecated in future versions)
fork boolean is this node a fork of another node?
public boolean has this node been made publicly-visible?
collection boolean is this a collection? (always false - may be deprecated in future versions)
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Create Child Node
<!--- Copied Creating New Node from NodeList -->
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "nodes", # required
"attributes": {
"title": {title}, # required
"category": {category}, # required
"description": {description}, # optional
"tags": [{tag1}, {tag2}] # optional
}
}
}
Success: 201 CREATED + node representation
To create a child node of the current node, issue a POST request to this endpoint. The `title` and `category`
fields are mandatory. `category` must be one of the [permitted node categories](/v2/#osf-node-categories). If the
node creation is successful the API will return a 201 response with the representation of the new node in the body.
For the new node's canonical URL, see the `/links/self` field of the response.
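As a hedged sketch (node id and token are placeholders), the same request from Python might look like:

    import requests

    payload = {
        'data': {
            'type': 'nodes',
            'attributes': {
                'title': 'Analysis component',
                'category': 'analysis',
            }
        }
    }
    resp = requests.post('https://api.osf.io/v2/nodes/abcde/children/',
                         json=payload, headers={'Authorization': 'Bearer <token>'})
    new_node_url = resp.json()['data']['links']['self']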
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
<!--- Copied Query Params from NodeList -->
Nodes may be filtered by their `id`, `title`, `category`, `description`, `public`, `tags`, `date_created`, `date_modified`,
`root`, `parent`, and `contributors`. Most are string fields and will be filtered using simple substring matching. `public`
is a boolean, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true`
or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
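For example, a filtered listing could be requested as follows (a sketch; the node id is a placeholder):

    import requests

    params = {'filter[title]': 'reproducibility', 'filter[public]': 'true', 'page': 1}
    resp = requests.get('https://api.osf.io/v2/nodes/abcde/children/', params=params)
    titles = [child['attributes']['title'] for child in resp.json()['data']]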
#This Request/Response
"""
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.NODE_CHILDREN_READ]
required_write_scopes = [CoreScopes.NODE_CHILDREN_WRITE]
serializer_class = NodeSerializer
view_category = 'nodes'
view_name = 'node-children'
# overrides NodePreprintsFilterMixin
def get_default_odm_query(self):
return default_node_list_query()
# overrides ListBulkCreateJSONAPIView
def get_queryset(self):
node = self.get_node()
req_query = self.get_query_from_request()
query = (
Q('_id', 'in', [e._id for e in node.nodes if e.primary]) &
req_query
)
nodes = Node.find(query)
auth = get_user_auth(self.request)
return sorted([each for each in nodes if each.can_view(auth)], key=lambda n: n.date_modified, reverse=True)
# overrides ListBulkCreateJSONAPIView
def perform_create(self, serializer):
user = self.request.user
serializer.save(creator=user, parent=self.get_node())
class NodeCitationDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeMixin):
""" The node citation for a node in CSL format *read only*
##Note
**This API endpoint is under active development, and is subject to change in the future**
##NodeCitationDetail Attributes
name type description
=================================================================================
id string unique ID for the citation
title string title of project or component
author list list of authors for the work
publisher              string   publisher - almost always 'Open Science Framework'
type string type of citation - web
doi string doi of the resource
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeCitationSerializer
view_category = 'nodes'
view_name = 'node-citation'
def get_object(self):
node = self.get_node()
auth = get_user_auth(self.request)
if not node.is_public and not node.can_view(auth):
raise PermissionDenied if auth.user else NotAuthenticated
return node.csl
class NodeCitationStyleDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeMixin):
""" The node citation for a node in a specific style's format *read only*
##Note
**This API endpoint is under active development, and is subject to change in the future**
##NodeCitationDetail Attributes
name type description
=================================================================================
citation string complete citation for a node in the given style
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeCitationStyleSerializer
view_category = 'nodes'
view_name = 'node-citation'
def get_object(self):
node = self.get_node()
auth = get_user_auth(self.request)
if not node.is_public and not node.can_view(auth):
raise PermissionDenied if auth.user else NotAuthenticated
style = self.kwargs.get('style_id')
try:
citation = render_citation(node=node, style=style)
except ValueError as err: # style requested could not be found
csl_name = re.findall('[a-zA-Z]+\.csl', err.message)[0]
raise NotFound('{} is not a known style.'.format(csl_name))
return {'citation': citation, 'id': style}
# TODO: Make NodeLinks filterable. They currently aren't filterable because we can't
# currently query on a Pointer's node's attributes.
# e.g. Pointer.find(Q('node.title', 'eq', ...)) doesn't work
class NodeLinksList(BaseNodeLinksList, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, NodeMixin):
"""Node Links to other nodes. *Writeable*.
Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
Node Links are a direct reference to the node that they point to.
##Node Link Attributes
`type` is "node_links"
None
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Relationships
### Target Node
This endpoint shows the target node detail and is automatically embedded.
##Actions
###Adding Node Links
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "node_links", # required
"relationships": {
"nodes": {
"data": {
"type": "nodes", # required
"id": "{target_node_id}", # required
}
}
}
}
}
Success: 201 CREATED + node link representation
To add a node link (a pointer to another node), issue a POST request to this endpoint. This effectively creates a
relationship between the node and the target node. The target node must be described as a relationship object with
a "data" member, containing the nodes `type` and the target node `id`.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
base_permissions.TokenHasScope,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
model_class = Pointer
serializer_class = NodeLinksSerializer
view_category = 'nodes'
view_name = 'node-pointers'
def get_queryset(self):
return [
pointer for pointer in
self.get_node().nodes_pointer
if not pointer.node.is_deleted
]
# Overrides BulkDestroyJSONAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = get_object_or_error(
Node,
self.kwargs[self.node_lookup_url_kwarg],
display_name='node'
)
if node.is_registration:
raise MethodNotAllowed(method=self.request.method)
node = self.get_node()
try:
node.rm_pointer(instance, auth=auth)
except ValueError as err: # pointer doesn't belong to node
raise ValidationError(err.message)
node.save()
# overrides ListCreateAPIView
def get_parser_context(self, http_request):
"""
Tells parser that we are creating a relationship
"""
res = super(NodeLinksList, self).get_parser_context(http_request)
res['is_relationship'] = True
return res
class NodeLinksDetail(BaseNodeLinksDetail, generics.RetrieveDestroyAPIView, NodeMixin):
"""Node Link details. *Writeable*.
Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
Node Links are a direct reference to the node that they point to.
##Attributes
`type` is "node_links"
None
##Links
*None*
##Relationships
###Target node
This endpoint shows the target node detail and is automatically embedded.
##Actions
###Remove Node Link
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 No Content
To remove a node link from a node, issue a DELETE request to the `self` link. This request will remove the
relationship between the node and the target node, not the nodes themselves.
##Query Params
*None*.
#This Request/Response
"""
permission_classes = (
base_permissions.TokenHasScope,
drf_permissions.IsAuthenticatedOrReadOnly,
RegistrationAndPermissionCheckForPointers,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
serializer_class = NodeLinksSerializer
view_category = 'nodes'
view_name = 'node-pointer-detail'
node_link_lookup_url_kwarg = 'node_link_id'
# overrides RetrieveAPIView
def get_object(self):
node_link = get_object_or_error(
Pointer,
self.kwargs[self.node_link_lookup_url_kwarg],
'node link'
)
self.check_object_permissions(self.request, node_link)
return node_link
# overrides DestroyAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = self.get_node()
pointer = self.get_object()
try:
node.rm_pointer(pointer, auth=auth)
except ValueError as err: # pointer doesn't belong to node
raise NotFound(err.message)
node.save()
class NodeForksList(JSONAPIBaseView, generics.ListCreateAPIView, NodeMixin, NodePreprintsFilterMixin):
"""Forks of the current node. *Writeable*.
Paginated list of the current node's forks ordered by their `forked_date`. Forks are copies of projects that you can
change without affecting the original project. When creating a fork, your fork will only contain public components or those
for which you are a contributor. Private components that you do not have access to will not be forked.
##Node Fork Attributes
<!--- Copied Attributes from NodeDetail with exception of forked_date-->
OSF Node Fork entities have the "nodes" `type`.
name type description
===============================================================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
registration                    boolean            has this project been registered? (always false)
collection                      boolean            is this node a collection? (always false)
fork                            boolean            is this node a fork of another node? (always true)
public boolean has this node been made publicly-visible?
forked_date iso8601 timestamp timestamp when the node was forked
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings List of strings representing the permissions for the current user on this node
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Create Node Fork
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "nodes", # required
"attributes": {
"title": {title} # optional
}
}
}
Success: 201 CREATED + node representation
To create a fork of the current node, issue a POST request to this endpoint. The `title` field is optional, with the
default title being 'Fork of ' + the current node's title. If the fork's creation is successful the API will return a
201 response with the representation of the forked node in the body. For the new fork's canonical URL, see the `/links/self`
field of the response.
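A sketch of the fork request in Python (node id and token are placeholders; omit `title` to accept the default):

    import requests

    payload = {'data': {'type': 'nodes', 'attributes': {'title': 'My fork'}}}
    resp = requests.post('https://api.osf.io/v2/nodes/abcde/forks/',
                         json=payload, headers={'Authorization': 'Bearer <token>'})
    fork_url = resp.json()['data']['links']['self']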
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
<!--- Copied Query Params from NodeList -->
Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, `tags`, `date_created`,
`date_modified`, `root`, `parent`, and `contributors`. Most are string fields and will be filtered using simple
substring matching. Others are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`.
Note that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
permission_classes = (
IsPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.NODE_FORKS_READ, CoreScopes.NODE_BASE_READ]
required_write_scopes = [CoreScopes.NODE_FORKS_WRITE]
serializer_class = NodeForksSerializer
view_category = 'nodes'
view_name = 'node-forks'
# overrides ListCreateAPIView
def get_queryset(self):
all_forks = self.get_node().forks.sort('-forked_date')
auth = get_user_auth(self.request)
return [node for node in all_forks if node.can_view(auth)]
# overrides ListCreateAPIView
def perform_create(self, serializer):
serializer.save(node=self.get_node())
# overrides ListCreateAPIView
def get_parser_context(self, http_request):
"""
Tells parser that attributes are not required in request
"""
res = super(NodeForksList, self).get_parser_context(http_request)
res['attributes_required'] = False
return res
class NodeFilesList(JSONAPIBaseView, generics.ListAPIView, WaterButlerMixin, ListFilterMixin, NodeMixin):
"""Files attached to a node for a given provider. *Read-only*.
This gives a list of all of the files and folders that are attached to your project for the given storage provider.
If the provider is not "osfstorage", the metadata for the files in the storage will be retrieved and cached whenever
this endpoint is accessed. To see the cached metadata, GET the endpoint for the file directly (available through
its `/links/info` attribute).
When a create/update/delete action is performed against the file or folder, the action is handled by an external
service called WaterButler. The WaterButler response format differs slightly from the OSF's.
<!--- Copied from FileDetail.Spiel -->
###Waterbutler Entities
When an action is performed against a WaterButler endpoint, it will generally respond with a file entity, a folder
entity, or no content.
####File Entity
name type description
==========================================================================================================
name string name of the file
path string unique identifier for this file entity for this
project and storage provider. may not end with '/'
materialized string the full path of the file relative to the storage
root. may not end with '/'
kind string "file"
etag string etag - http caching identifier w/o wrapping quotes
modified timestamp last modified timestamp - format depends on provider
contentType string MIME-type when available
provider string id of provider e.g. "osfstorage", "s3", "googledrive".
equivalent to addon_short_name on the OSF
size integer size of file in bytes
current_version integer current file version
current_user_can_comment boolean Whether the current user is allowed to post comments
tags array of strings list of tags that describes the file (osfstorage only)
extra object may contain additional data beyond what's described here,
depending on the provider
version integer version number of file. will be 1 on initial upload
hashes object
md5 string md5 hash of file
sha256 string SHA-256 hash of file
####Folder Entity
name type description
======================================================================
name string name of the folder
path string unique identifier for this folder entity for this
project and storage provider. must end with '/'
materialized string the full path of the folder relative to the storage
root. must end with '/'
kind string "folder"
etag string etag - http caching identifier w/o wrapping quotes
extra object varies depending on provider
##File Attributes
<!--- Copied Attributes from FileDetail -->
For an OSF File entity, the `type` is "files" regardless of whether the entity is actually a file or folder. They
can be distinguished by the `kind` attribute. Files and folders use the same representation, but some attributes may
be null for one kind but not the other. `size` will be null for folders. A list of storage provider keys can be
found [here](/v2/#storage-providers).
name type description
===================================================================================================
guid string OSF GUID for this file (if one has been assigned)
name string name of the file or folder; used for display
kind string "file" or "folder"
path string same as for corresponding WaterButler entity
materialized_path string the unix-style path to the file relative to the provider root
size integer size of file in bytes, null for folders
provider string storage provider for this file. "osfstorage" if stored on the
OSF. other examples include "s3" for Amazon S3, "googledrive"
for Google Drive, "box" for Box.com.
last_touched iso8601 timestamp last time the metadata for the file was retrieved. only
applies to non-OSF storage providers.
date_modified iso8601 timestamp timestamp of when this file was last updated*
date_created iso8601 timestamp timestamp of when this file was created*
extra object may contain additional data beyond what's described here,
depending on the provider
hashes object
md5 string md5 hash of file, null for folders
sha256 string SHA-256 hash of file, null for folders
downloads integer number of times the file has been downloaded (for osfstorage files)
* A note on timestamps: for files stored in osfstorage, `date_created` refers to the time the file was
first uploaded to osfstorage, and `date_modified` is the time the file was last updated while in osfstorage.
Other providers may or may not provide this information, but if they do it will correspond to the provider's
semantics for created/modified times. These timestamps may also be stale; metadata retrieved via the File Detail
endpoint is cached. The `last_touched` field describes the last time the metadata was retrieved from the external
provider. To force a metadata update, access the parent folder via its Node Files List endpoint.
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
<!--- Copied from FileDetail.Actions -->
The `links` property of the response provides endpoints for common file operations. The currently-supported actions
are:
###Get Info (*files, folders*)
Method: GET
URL: /links/info
Params: <none>
Success: 200 OK + file representation
The contents of a folder or details of a particular file can be retrieved by performing a GET request against the
`info` link. The response will be a standard OSF response format with the [OSF File attributes](#attributes).
###Download (*files*)
Method: GET
URL: /links/download
Params: <none>
Success: 200 OK + file body
To download a file, issue a GET request against the `download` link. The response will have the Content-Disposition
header set, which will trigger a download in a browser.
###Create Subfolder (*folders*)
Method: PUT
URL: /links/new_folder
Query Params: ?kind=folder&name={new_folder_name}
Body: <empty>
Success: 201 Created + new folder representation
You can create a subfolder of an existing folder by issuing a PUT request against the `new_folder` link. The
`?kind=folder` portion of the query parameter is already included in the `new_folder` link. The name of the new
subfolder should be provided in the `name` query parameter. The response will contain a [WaterButler folder
entity](#folder-entity). If a folder with that name already exists in the parent directory, the server will return
a 409 Conflict error response.
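As a sketch, assuming `new_folder_link` holds the `/links/new_folder` value from a folder entity
(the link and token are placeholders):

    import requests

    new_folder_link = '<value of /links/new_folder>'  # placeholder
    resp = requests.put(new_folder_link, params={'name': 'data'},
                        headers={'Authorization': 'Bearer <token>'})
    assert resp.status_code == 201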
###Upload New File (*folders*)
Method: PUT
URL: /links/upload
Query Params: ?kind=file&name={new_file_name}
Body (Raw): <file data (not form-encoded)>
Success: 201 Created + new file representation
To upload a file to a folder, issue a PUT request to the folder's `upload` link with the raw file data in the
request body, and the `kind` and `name` query parameters set to `'file'` and the desired name of the file. The
response will contain a [WaterButler file entity](#file-entity) that describes the new file. If a file with the
same name already exists in the folder, the server will return a 409 Conflict error response.
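A hedged upload sketch, assuming `upload_link` holds the folder's `/links/upload` value:

    import requests

    upload_link = '<value of /links/upload>'  # placeholder
    with open('results.csv', 'rb') as fp:
        resp = requests.put(upload_link, params={'kind': 'file', 'name': 'results.csv'},
                            data=fp, headers={'Authorization': 'Bearer <token>'})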
###Update Existing File (*file*)
Method: PUT
URL: /links/upload
Query Params: ?kind=file
Body (Raw): <file data (not form-encoded)>
Success: 200 OK + updated file representation
To update an existing file, issue a PUT request to the file's `upload` link with the raw file data in the request
body and the `kind` query parameter set to `"file"`. The update action will create a new version of the file.
The response will contain a [WaterButler file entity](#file-entity) that describes the updated file.
###Rename (*files, folders*)
Method: POST
URL: /links/move
Query Params: <none>
Body (JSON): {
"action": "rename",
"rename": {new_file_name}
}
Success: 200 OK + new entity representation
To rename a file or folder, issue a POST request to the `move` link with the `action` body parameter set to
`"rename"` and the `rename` body parameter set to the desired name. The response will contain either a folder
entity or file entity with the new name.
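A sketch of the rename action, assuming `move_link` holds the entity's `/links/move` value:

    import requests

    move_link = '<value of /links/move>'  # placeholder
    resp = requests.post(move_link,
                         json={'action': 'rename', 'rename': 'final.csv'},
                         headers={'Authorization': 'Bearer <token>'})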
###Move & Copy (*files, folders*)
Method: POST
URL: /links/move
Query Params: <none>
Body (JSON): {
// mandatory
"action": "move"|"copy",
"path": {path_attribute_of_target_folder},
// optional
"rename": {new_name},
"conflict": "replace"|"keep", // defaults to 'replace'
"resource": {node_id}, // defaults to current {node_id}
"provider": {provider} // defaults to current {provider}
}
Success: 200 OK or 201 Created + new entity representation
Move and copy actions both use the same request structure, a POST to the `move` url, but with different values for
the `action` body parameters. The `path` parameter is also required and should be the OSF `path` attribute of the
folder being written to. The `rename` and `conflict` parameters are optional. If you wish to change the name of
the file or folder at its destination, set the `rename` parameter to the new name. The `conflict` param governs how
name clashes are resolved. Possible values are `replace` and `keep`. `replace` is the default and will overwrite
the file that already exists in the target folder. `keep` will attempt to keep both by adding a suffix to the new
file's name until it no longer conflicts. The suffix will be ' (**x**)' where **x** is an increasing integer
starting from 1. This behavior is intended to mimic that of the OS X Finder. The response will contain either a
folder entity or file entity with the new name.
Files and folders can also be moved between nodes and providers. The `resource` parameter is the id of the node
under which the file/folder should be moved. It *must* agree with the `path` parameter, that is the `path` must
identify a valid folder under the node identified by `resource`. Likewise, the `provider` parameter may be used to
move the file/folder to another storage provider, but both the `resource` and `path` parameters must belong to a
node and folder already extant on that provider. Both `resource` and `provider` default to the current node and
provider.
If a moved/copied file is overwriting an existing file, a 200 OK response will be returned. Otherwise, a 201
Created will be returned.
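A sketch of a copy into another folder (all values are placeholders):

    import requests

    move_link = '<value of /links/move>'  # placeholder
    payload = {
        'action': 'copy',
        'path': '/destination-folder/',  # `path` attribute of the target folder
        'conflict': 'keep',              # keep both files on a name clash
    }
    resp = requests.post(move_link, json=payload,
                         headers={'Authorization': 'Bearer <token>'})
    # 200 means an existing file was overwritten; 201 means a new file was created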
###Delete (*file, folders*)
Method: DELETE
URL: /links/delete
Query Params: <none>
Success: 204 No Content
To delete a file or folder send a DELETE request to the `delete` link. Nothing will be returned in the response
body.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Node files may be filtered by `id`, `name`, `node`, `kind`, `path`, `provider`, `size`, and `last_touched`.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.PermissionWithGetter(ContributorOrPublic, 'node'),
base_permissions.PermissionWithGetter(ReadOnlyIfRegistration, 'node'),
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
ordering = ('materialized_path',) # default ordering
serializer_class = FileSerializer
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
view_category = 'nodes'
view_name = 'node-files'
def get_default_queryset(self):
# Don't bother going to waterbutler for osfstorage
files_list = self.fetch_from_waterbutler()
if isinstance(files_list, list):
return [self.get_file_item(file) for file in files_list]
if isinstance(files_list, dict) or getattr(files_list, 'is_file', False):
# We should not have gotten a file here
raise NotFound
return list(files_list.children)
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request()
class NodeFileDetail(JSONAPIBaseView, generics.RetrieveAPIView, WaterButlerMixin, NodeMixin):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.PermissionWithGetter(ContributorOrPublic, 'node'),
base_permissions.PermissionWithGetter(ReadOnlyIfRegistration, 'node'),
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
serializer_class = FileSerializer
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
view_category = 'nodes'
view_name = 'node-file-detail'
def get_object(self):
fobj = self.fetch_from_waterbutler()
if isinstance(fobj, dict):
return self.get_file_item(fobj)
if isinstance(fobj, list) or not getattr(fobj, 'is_file', True):
# We should not have gotten a folder here
raise NotFound
return fobj
class NodeAddonList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin, NodeMixin, AddonSettingsMixin):
"""List of addons connected to this node *Read-only*
Paginated list of node addons ordered by their `id` or `addon_short_name`. Attributes other than
`enabled` will be `null` if the addon is not enabled for this node.
## <Addon\>NodeSettings Attributes
OSF <Addon\>NodeSettings entities have the "node_addons" `type`, and their `id` indicates the addon
service provider (eg. `box`, `googledrive`, etc).
name type description
======================================================================================================
external_account_id string _id of the associated ExternalAccount, if any
configured boolean has this node been configured with a folder?
enabled boolean has a node settings object been associated with this node?
folder_id string folder id of linked folder, from third-party service
node_has_auth boolean is this node fully authorized to use an ExternalAccount?
folder_path             string     folder path of linked folder, from third-party service
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
self: the canonical api endpoint of this node_addon
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ExcludeWithdrawals,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_ADDON_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeAddonSettingsSerializer
view_category = 'nodes'
view_name = 'node-addons'
def get_default_queryset(self):
qs = []
for addon in ADDONS_OAUTH:
obj = self.get_addon_settings(provider=addon, fail_if_absent=False)
if obj:
qs.append(obj)
qs.sort()
return qs
get_queryset = get_default_queryset
class NodeAddonDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView, NodeMixin, AddonSettingsMixin):
"""
Detail of individual addon connected to this node *Writeable*.
Attributes other than `enabled` will be null if the addon is not enabled for this node.
##Permissions
<Addon>NodeSettings that are attached to public Nodes will give read-only access to everyone. Private nodes require explicit read
permission. Write and admin access are the same for public and private nodes. Administrators on a parent node have
implicit read permissions for all child nodes.
Any users with write or admin access to the node are able to deauthorize an enabled addon, but only the addon authorizer is able
to change the configuration (i.e. selected folder) of an already-configured <Addon>NodeSettings entity.
## <Addon>NodeSettings Attributes
OSF <Addon>NodeSettings entities have the "node_addons" `type`, and their `id` indicates the addon
service provider (eg. `box`, `googledrive`, etc).
name type description
======================================================================================================
external_account_id string _id of the associated ExternalAccount, if any
configured boolean has this node been configured with a folder?
enabled boolean has a node settings object been associated with this node?
folder_id string folder id of linked folder, from third-party service
node_has_auth boolean is this node fully authorized to use an ExternalAccount?
folder_path             string     folder path of linked folder, from third-party service
url string Specific to the `forward` addon
label string Specific to the `forward` addon
##Links
self: the canonical api endpoint of this node_addon
##Actions
###Update
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {"data": {
"type": "node_addons", # required
"id": {provider}, # required
"attributes": {
"external_account_id": {account_id}, # optional
"folder_id": {folder_id}, # optional
"folder_path": {folder_path}, # optional - Google Drive specific
"url": {url}, # optional - External Link specific
"label": {label} # optional - External Link specific
}
}
}
Success: 200 OK + node_addon representation
To update a node addon, issue either a PUT or a PATCH request against the `/links/self` URL. The `external_account_id`,
`enabled`, and `folder_id` fields are mandatory if you PUT and optional if you PATCH. However, at least one is always mandatory.
Non-string values will be accepted and stringified, but we make no promises about the stringification output. So
don't do that.
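For example, a PATCH that connects a folder might be sketched as follows (node id, provider,
folder id, and token are placeholders):

    import requests

    payload = {
        'data': {
            'type': 'node_addons',
            'id': 'box',
            'attributes': {'folder_id': '1234567890'},
        }
    }
    resp = requests.patch('https://api.osf.io/v2/nodes/abcde/addons/box/',
                          json=payload, headers={'Authorization': 'Bearer <token>'})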
To delete or deauthorize a node_addon, issue a PUT with all fields set to `null` / `False`, or a PATCH with `enabled` set to `False`.
####Note
Not all addons are currently configurable via the API. The current list of addons that accept PUT/PATCH is [`box`, `dropbox`, `s3`, `googledrive`]
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ExcludeWithdrawals,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_ADDON_READ]
required_write_scopes = [CoreScopes.NODE_ADDON_WRITE]
serializer_class = NodeAddonSettingsSerializer
view_category = 'nodes'
view_name = 'node-addon-detail'
def get_object(self):
return self.get_addon_settings()
def perform_create(self, serializer):
addon = self.kwargs['provider']
if addon not in ADDONS_OAUTH:
raise NotFound('Requested addon unavailable')
node = self.get_node()
if node.has_addon(addon):
raise InvalidModelValueError(
detail='Add-on {} already enabled for node {}'.format(addon, node._id)
)
return super(NodeAddonDetail, self).perform_create(serializer)
def perform_destroy(self, instance):
addon = instance.config.short_name
node = self.get_node()
if not node.has_addon(instance.config.short_name):
raise NotFound('Node {} does not have add-on {}'.format(node._id, addon))
node.delete_addon(addon, auth=get_user_auth(self.request))
def get_serializer_class(self):
"""
Use NodeDetailSerializer which requires 'id'
"""
if 'provider' in self.kwargs and self.kwargs['provider'] == 'forward':
return ForwardNodeAddonSettingsSerializer
else:
return NodeAddonSettingsSerializer
class NodeAddonFolderList(JSONAPIBaseView, generics.ListAPIView, NodeMixin, AddonSettingsMixin):
"""List of folders that this node can connect to *Read-only*.
Paginated list of folders retrieved from the associated third-party service
##Permissions
<Addon> Folders are visible only to the addon authorizer.
## <Addon> Folder Attributes
OSF <Addon\> Folder entities have the "node_addon_folders" `type`, and their `id` indicates the folder_id
according to the associated service provider (eg. `box`, `googledrive`, etc).
name type description
======================================================================================================
path string path of this folder, according to third-party service
kind string `"folder"`, typically.
provider string `short_name` of third-party service provider
name string name of this folder
folder_id string id of this folder, according to third-party service
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
root: the canonical api endpoint of the root folder for this account
children: the canonical api endpoint of this folder's children
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ExcludeWithdrawals,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_ADDON_READ, CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NULL]
pagination_class = MaxSizePagination
serializer_class = NodeAddonFolderSerializer
view_category = 'nodes'
view_name = 'node-addon-folders'
def get_queryset(self):
# TODO: [OSF-6120] refactor this/NS models to be generalizable
node_addon = self.get_addon_settings()
if not node_addon.has_auth:
raise JSONAPIException(detail='This addon is enabled but an account has not been imported from your user settings',
meta={'link': '{}users/me/addons/{}/accounts/'.format(API_BASE, node_addon.config.short_name)})
path = self.request.query_params.get('path')
folder_id = self.request.query_params.get('id')
if not hasattr(node_addon, 'get_folders'):
raise EndpointNotImplementedError('Endpoint not yet implemented for this addon')
return node_addon.get_folders(path=path, folder_id=folder_id)
class NodeProvider(object):
def __init__(self, provider, node):
self.path = '/'
self.node = node
self.kind = 'folder'
self.name = provider
self.provider = provider
self.node_id = node._id
self.pk = node._id
class NodeProvidersList(JSONAPIBaseView, generics.ListAPIView, NodeMixin):
"""List of storage providers enabled for this node. *Read-only*.
Users of the OSF may access their data on a [number of cloud-storage](/v2/#storage-providers) services that have
integrations with the OSF. We call these "providers". By default every node has access to the OSF-provided
storage but may use as many of the supported providers as desired. This endpoint lists all of the providers that are
configured for this node. If you want to add more, you will need to do that in the Open Science Framework front end
for now.
In the OSF filesystem model, providers are treated as folders, but with special properties that distinguish them
from regular folders. Every provider folder is considered a root folder, and may not be deleted through the regular
file API. To see the contents of the provider, issue a GET request to the `/relationships/files/links/related/href`
attribute of the provider resource. The `new_folder` and `upload` actions are handled by another service called
WaterButler, whose response format differs slightly from the OSF's.
<!--- Copied from FileDetail.Spiel -->
###Waterbutler Entities
When an action is performed against a WaterButler endpoint, it will generally respond with a file entity, a folder
entity, or no content.
####File Entity
name type description
=========================================================================
name string name of the file
path string unique identifier for this file entity for this
project and storage provider. may not end with '/'
materialized string the full path of the file relative to the storage
root. may not end with '/'
kind string "file"
etag string etag - http caching identifier w/o wrapping quotes
modified timestamp last modified timestamp - format depends on provider
contentType string MIME-type when available
provider string id of provider e.g. "osfstorage", "s3", "googledrive".
equivalent to addon_short_name on the OSF
size integer size of file in bytes
extra object may contain additional data beyond what's described here,
depending on the provider
version integer version number of file. will be 1 on initial upload
downloads         integer            count of the number of times the file has been downloaded
hashes object
md5 string md5 hash of file
sha256 string SHA-256 hash of file
####Folder Entity
name type description
======================================================================
name string name of the folder
path string unique identifier for this folder entity for this
project and storage provider. must end with '/'
materialized string the full path of the folder relative to the storage
root. must end with '/'
kind string "folder"
etag string etag - http caching identifier w/o wrapping quotes
extra object varies depending on provider
##Provider Attributes
`type` is "files"
name type description
=================================================================================
name string name of the provider
kind string type of this file/folder. always "folder"
path          string   relative path of this folder within the provider filesystem. always "/"
node string node this provider belongs to
provider string provider id, same as "name"
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
<!--- Copied from FileDetail.Actions -->
###Create Subfolder (*folders*)
Method: PUT
URL: /links/new_folder
Query Params: ?kind=folder&name={new_folder_name}
Body: <empty>
Success: 201 Created + new folder representation
You can create a subfolder of an existing folder by issuing a PUT request against the `new_folder` link. The
`?kind=folder` portion of the query parameter is already included in the `new_folder` link. The name of the new
subfolder should be provided in the `name` query parameter. The response will contain a [WaterButler folder
entity](#folder-entity). If a folder with that name already exists in the parent directory, the server will return
a 409 Conflict error response.
###Upload New File (*folders*)
Method: PUT
URL: /links/upload
Query Params: ?kind=file&name={new_file_name}
Body (Raw): <file data (not form-encoded)>
Success: 201 Created + new file representation
To upload a file to a folder, issue a PUT request to the folder's `upload` link with the raw file data in the
request body, and the `kind` and `name` query parameters set to `'file'` and the desired name of the file. The
response will contain a [WaterButler file entity](#file-entity) that describes the new file. If a file with the
same name already exists in the folder, the server will return a 409 Conflict error response.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ExcludeWithdrawals,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = NodeProviderSerializer
view_category = 'nodes'
view_name = 'node-providers'
def get_provider_item(self, provider):
return NodeProvider(provider, self.get_node())
def get_queryset(self):
return [
self.get_provider_item(addon.config.short_name)
for addon
in self.get_node().get_addons()
if addon.config.has_hgrid_files
and addon.configured
]
class NodeProviderDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeMixin):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ExcludeWithdrawals,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = NodeProviderSerializer
view_category = 'nodes'
view_name = 'node-provider-detail'
def get_object(self):
return NodeProvider(self.kwargs['provider'], Node.load(self.kwargs['node_id']))
class NodeAlternativeCitationsList(JSONAPIBaseView, generics.ListCreateAPIView, NodeMixin):
"""List of alternative citations for a project.
##Actions
###Create Alternative Citation
Method: POST
Body (JSON): {
"data": {
"type": "citations", # required
"attributes": {
"name": {name}, # mandatory
"text": {text} # mandatory
}
}
}
Success: 201 Created + new citation representation
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
AdminOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NODE_CITATIONS_WRITE]
serializer_class = NodeAlternativeCitationSerializer
view_category = 'nodes'
view_name = 'alternative-citations'
def get_queryset(self):
return self.get_node().alternative_citations
class NodeAlternativeCitationDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, NodeMixin):
"""Details about an alternative citations for a project.
##Actions
###Update Alternative Citation
Method: PUT
Body (JSON): {
"data": {
"type": "citations", # required
"id": {{id}} # required
"attributes": {
"name": {name}, # mandatory
"text": {text} # mandatory
}
}
}
Success: 200 Ok + updated citation representation
###Delete Alternative Citation
Method: DELETE
Success: 204 No content
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
AdminOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NODE_CITATIONS_WRITE]
serializer_class = NodeAlternativeCitationSerializer
view_category = 'nodes'
view_name = 'alternative-citation-detail'
def get_object(self):
try:
return self.get_node().alternative_citations.find(Q('_id', 'eq', str(self.kwargs['citation_id'])))[0]
except IndexError:
raise NotFound
def perform_destroy(self, instance):
self.get_node().remove_citation(get_user_auth(self.request), instance, save=True)
class NodeLogList(JSONAPIBaseView, generics.ListAPIView, NodeMixin, ODMFilterMixin):
"""List of Logs associated with a given Node. *Read-only*.
<!--- Copied Description from NodeLogDetail -->
Paginated list of Logs ordered by their `date`. This includes the Logs of the specified Node as well as the logs of that Node's children that the current user has access to.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed.
On the front end, logs record and display actions done on the OSF. The complete list of loggable actions (in the format {identifier}: {description}) is as follows:
* 'project_created': A Node is created
* 'project_registered': A Node is registered
* 'project_deleted': A Node is deleted
* 'created_from': A Node is created using an existing Node as a template
* 'pointer_created': A Pointer is created
* 'pointer_forked': A Pointer is forked
* 'pointer_removed': A Pointer is removed
* 'node_removed': A component is deleted
* 'node_forked': A Node is forked
===
* 'made_public': A Node is made public
* 'made_private': A Node is made private
* 'tag_added': A tag is added to a Node
* 'tag_removed': A tag is removed from a Node
* 'edit_title': A Node's title is changed
* 'edit_description': A Node's description is changed
* 'updated_fields': One or more of a Node's fields are changed
* 'external_ids_added': An external identifier is added to a Node (e.g. DOI, ARK)
===
* 'contributor_added': A Contributor is added to a Node
* 'contributor_removed': A Contributor is removed from a Node
* 'contributors_reordered': A Contributor's position in a Node's bibliography is changed
* 'permissions_updated': A Contributor's permissions on a Node are changed
* 'made_contributor_visible': A Contributor is made bibliographically visible on a Node
* 'made_contributor_invisible': A Contributor is made bibliographically invisible on a Node
===
* 'wiki_updated': A Node's wiki is updated
* 'wiki_deleted': A Node's wiki is deleted
* 'wiki_renamed': A Node's wiki is renamed
* 'made_wiki_public': A Node's wiki is made public
* 'made_wiki_private': A Node's wiki is made private
===
* 'addon_added': An add-on is linked to a Node
* 'addon_removed': An add-on is unlinked from a Node
* 'addon_file_moved': A File in a Node's linked add-on is moved
* 'addon_file_copied': A File in a Node's linked add-on is copied
* 'addon_file_renamed': A File in a Node's linked add-on is renamed
* 'node_authorized': An addon is authorized for a project
* 'node_deauthorized': An addon is deauthorized for a project
* 'folder_created': A Folder is created in a Node's linked add-on
* 'file_added': A File is added to a Node's linked add-on
* 'file_updated': A File is updated on a Node's linked add-on
* 'file_removed': A File is removed from a Node's linked add-on
* 'file_restored': A File is restored in a Node's linked add-on
===
* 'comment_added': A Comment is added to some item
* 'comment_removed': A Comment is removed from some item
* 'comment_updated': A Comment is updated on some item
===
* 'embargo_initiated': An embargoed Registration is proposed on a Node
* 'embargo_approved': A proposed Embargo of a Node is approved
* 'embargo_cancelled': A proposed Embargo of a Node is cancelled
* 'embargo_completed': A proposed Embargo of a Node is completed
* 'retraction_initiated': A Withdrawal of a Registration is proposed
* 'retraction_approved': A Withdrawal of a Registration is approved
* 'retraction_cancelled': A Withdrawal of a Registration is cancelled
* 'registration_initiated': A Registration of a Node is proposed
* 'registration_approved': A proposed Registration is approved
* 'registration_cancelled': A proposed Registration is cancelled
===
* 'node_created': A Node is created (_deprecated_)
##Log Attributes
<!--- Copied Attributes from LogList -->
OSF Log entities have the "logs" `type`.
name type description
============================================================================
date iso8601 timestamp timestamp of Log creation
action string Log action (see list above)
##Relationships
###Node
The node this log belongs to.
###User
The user who performed the logged action.
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
##Query Params
<!--- Copied Query Params from LogList -->
Logs may be filtered by their `action` and `date`.
#This Request/Response
"""
serializer_class = NodeLogSerializer
view_category = 'nodes'
view_name = 'node-logs'
required_read_scopes = [CoreScopes.NODE_LOG_READ]
required_write_scopes = [CoreScopes.NULL]
log_lookup_url_kwarg = 'node_id'
ordering = ('-date', )
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
def get_default_odm_query(self):
auth = get_user_auth(self.request)
query = self.get_node().get_aggregate_logs_query(auth)
return query
def get_queryset(self):
queryset = NodeLog.find(self.get_query_from_request())
return queryset
class NodeCommentsList(JSONAPIBaseView, generics.ListCreateAPIView, ODMFilterMixin, NodeMixin):
"""List of comments on a node. *Writeable*.
Paginated list of comments ordered by their `date_created`. Each resource contains the full representation of the
comment, meaning additional requests to an individual comment's detail view are not necessary.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed.
###Permissions
Comments on public nodes are given read-only access to everyone. If the node comment-level is "private",
only contributors have permission to comment. If the comment-level is "public" any logged-in OSF user can comment.
Comments on private nodes are only visible to contributors and administrators on the parent node.
##Attributes
OSF comment entities have the "comments" `type`.
name type description
=================================================================================
content string content of the comment
date_created iso8601 timestamp timestamp that the comment was created
date_modified iso8601 timestamp timestamp when the comment was last updated
modified boolean has this comment been edited?
deleted boolean is this comment deleted?
is_abuse boolean has this comment been reported by the current user?
has_children boolean does this comment have replies?
can_edit boolean can the current user edit this comment?
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "comments", # required
"attributes": {
"content": {content}, # mandatory
},
"relationships": {
"target": {
"data": {
"type": {target type} # mandatory
"id": {target._id} # mandatory
}
}
}
}
}
Success: 201 CREATED + comment representation
To create a comment on this node, issue a POST request against this endpoint. The comment target id and target type
must be specified. To create a comment on the node overview page, the target `type` would be "nodes" and the `id`
would be the node id. To reply to a comment on this node, the target `type` would be "comments" and the `id` would
be the id of the comment to reply to. The `content` field is mandatory.
If the comment creation is successful the API will return
a 201 response with the representation of the new comment in the body. For the new comment's canonical URL, see the
`/links/self` field of the response.
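A sketch of posting a top-level comment on the node (node id, content, and token are placeholders):

    import requests

    payload = {
        'data': {
            'type': 'comments',
            'attributes': {'content': 'Looks good to me.'},
            'relationships': {
                'target': {'data': {'type': 'nodes', 'id': 'abcde'}}
            }
        }
    }
    resp = requests.post('https://api.osf.io/v2/nodes/abcde/comments/',
                         json=payload, headers={'Authorization': 'Bearer <token>'})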
##Query Params
+ `filter[deleted]=True|False` -- filter comments based on whether or not they are deleted.
The list of node comments includes deleted comments by default. The `deleted` field is a boolean and can be
filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true` or `false` in
the query will cause the match to fail regardless.
+ `filter[date_created][comparison_operator]=YYYY-MM-DDTH:M:S` -- filter comments based on date created.
Comments can also be filtered based on their `date_created` and `date_modified` fields. Possible comparison
operators include 'gt' (greater than), 'gte'(greater than or equal to), 'lt' (less than) and 'lte'
(less than or equal to). The date must be in the format YYYY-MM-DD and the time is optional.
+ `filter[target]=target_id` -- filter comments based on their target id.
The list of comments can be filtered by target id. For example, to get all comments with target = project,
the target_id would be the project_id.
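A sketch combining the filters above (node id is a placeholder):

    import requests

    params = {'filter[deleted]': 'false', 'filter[date_created][gt]': '2016-01-01'}
    resp = requests.get('https://api.osf.io/v2/nodes/abcde/comments/', params=params)
    recent = resp.json()['data']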
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
CanCommentOrPublic,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.NODE_COMMENTS_READ]
required_write_scopes = [CoreScopes.NODE_COMMENTS_WRITE]
pagination_class = CommentPagination
serializer_class = NodeCommentSerializer
view_category = 'nodes'
view_name = 'node-comments'
ordering = ('-date_created', ) # default ordering
# overrides ODMFilterMixin
def get_default_odm_query(self):
return Q('node', 'eq', self.get_node()) & Q('root_target', 'ne', None)
def get_queryset(self):
comments = Comment.find(self.get_query_from_request())
for comment in comments:
# Deleted root targets still appear as tuples in the database,
# but need to be None in order for the query to be correct.
if comment.root_target.referent.is_deleted:
comment.root_target = None
comment.save()
return Comment.find(self.get_query_from_request())
def get_serializer_class(self):
if self.request.method == 'POST':
return CommentCreateSerializer
else:
return NodeCommentSerializer
# overrides ListCreateAPIView
def get_parser_context(self, http_request):
"""
Tells parser that we are creating a relationship
"""
res = super(NodeCommentsList, self).get_parser_context(http_request)
res['is_relationship'] = True
return res
def perform_create(self, serializer):
node = self.get_node()
serializer.validated_data['user'] = self.request.user
serializer.validated_data['node'] = node
serializer.save()
class NodeInstitutionsList(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin, NodeMixin):
""" Detail of the affiliated institutions a node has, if any. Returns [] if the node has no
affiliated institution.
##Attributes
OSF Institutions have the "institutions" `type`.
name type description
=========================================================================
name string title of the institution
id string unique identifier in the OSF
logo_path string a path to the institution's static logo
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
AdminOrPublic
)
required_read_scopes = [CoreScopes.NODE_BASE_READ, CoreScopes.INSTITUTION_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = InstitutionSerializer
model = Institution
view_category = 'nodes'
view_name = 'node-institutions'
def get_queryset(self):
node = self.get_node()
return node.affiliated_institutions or []
class NodeInstitutionsRelationship(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView, NodeMixin):
""" Relationship Endpoint for Node -> Institutions Relationship
Used to set, remove, update and retrieve the affiliated_institutions of a node to an institution
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "institutions", # required
"id": <institution_id> # required
}]
}
Success: 201
This requires write permissions on the node and for the user making the request to
have the institutions in the payload as affiliated in their account.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "institutions", # required
"id": <institution_id> # required
}]
}
Success: 200
This requires write permissions on the node and for the user making the request to
have the institutions in the payload as affiliated in their account. This will delete
all institutions not listed, meaning a data: [] payload does the same as a DELETE with all
the institutions.
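A sketch of the update described above (institution and node ids are placeholders):

    import requests

    payload = {'data': [{'type': 'institutions', 'id': 'cos'}]}
    resp = requests.put('https://api.osf.io/v2/nodes/abcde/relationships/institutions/',
                        json=payload, headers={'Authorization': 'Bearer <token>'})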
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "institutions", # required
"id": <institution_id> # required
}]
}
Success: 204
This requires write permissions on the node. If the user has admin permissions, the institution in the payload does
not need to be affiliated in their account.
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
WriteOrPublicForRelationshipInstitutions
)
required_read_scopes = [CoreScopes.NODE_BASE_READ]
required_write_scopes = [CoreScopes.NODE_BASE_WRITE]
serializer_class = NodeInstitutionsRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
view_category = 'nodes'
view_name = 'node-relationships-institutions'
def get_object(self):
node = self.get_node(check_object_permissions=False)
obj = {
'data': node.affiliated_institutions,
'self': node
}
self.check_object_permissions(self.request, obj)
return obj
def perform_destroy(self, instance):
data = self.request.data['data']
user = self.request.user
current_insts = {inst._id: inst for inst in instance['data']}
node = instance['self']
for val in data:
if val['id'] in current_insts:
if current_insts[val['id']] not in user.affiliated_institutions and not node.has_permission(user, 'admin'):
raise PermissionDenied
node.remove_affiliated_institution(inst=current_insts[val['id']], user=user)
node.save()
def create(self, *args, **kwargs):
try:
ret = super(NodeInstitutionsRelationship, self).create(*args, **kwargs)
except RelationshipPostMakesNoChanges:
return Response(status=HTTP_204_NO_CONTENT)
return ret
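# Illustrative only: a minimal client-side sketch of exercising this
# relationship endpoint with the `requests` library. The base URL, node id,
# and token below are hypothetical placeholders, not part of this module.
#
#     import requests
#
#     api = 'https://api.osf.io/v2'
#     headers = {'Authorization': 'Bearer <personal-access-token>'}
#     payload = {'data': [{'type': 'institutions', 'id': '<institution_id>'}]}
#     # POST affiliates the listed institutions with the node; expect 201,
#     # or 204 when the request makes no changes (see create() above).
#     r = requests.post(api + '/nodes/<node_id>/relationships/institutions/',
#                       json=payload, headers=headers)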
class NodeWikiList(JSONAPIBaseView, generics.ListAPIView, NodeMixin, ODMFilterMixin):
"""List of wiki pages on a node. *Read only*.
Paginated list of the node's current wiki page versions ordered by their `date_modified`. Each resource contains the
full representation of the wiki, meaning additional requests to an individual wiki's detail view are not necessary.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed.
###Permissions
Wiki pages on public nodes are given read-only access to everyone. Wiki pages on private nodes are only visible to
contributors and administrators on the parent node.
##Attributes
OSF wiki entities have the "wikis" `type`.
name type description
======================================================================================================
name                        string             name of the wiki page
path string the path of the wiki page
materialized_path string the path of the wiki page
date_modified iso8601 timestamp timestamp when the wiki was last updated
content_type string MIME-type
current_user_can_comment boolean Whether the current user is allowed to post comments
extra object
version integer version number of the wiki
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `filter[name]=<Str>` -- filter wiki pages by name
+ `filter[date_modified][comparison_operator]=YYYY-MM-DDTH:M:S` -- filter wiki pages based on date modified.
Wiki pages can be filtered based on their `date_modified` fields. Possible comparison
operators include 'gt' (greater than), 'gte'(greater than or equal to), 'lt' (less than) and 'lte'
(less than or equal to). The date must be in the format YYYY-MM-DD and the time is optional.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ContributorOrPublic,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.WIKI_BASE_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeWikiSerializer
view_category = 'nodes'
view_name = 'node-wikis'
ordering = ('-date', ) # default ordering
# overrides ODMFilterMixin
def get_default_odm_query(self):
node = self.get_node()
node_wiki_pages = node.wiki_pages_current.values() if node.wiki_pages_current else []
return Q('_id', 'in', node_wiki_pages)
def get_queryset(self):
return NodeWikiPage.find(self.get_query_from_request())
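# Illustrative only: the documented date_modified filter in use, via the
# `requests` library; the URL and values are assumed placeholders.
#
#     import requests
#
#     # Wiki pages modified after 2016-01-01, using the documented
#     # filter[date_modified][gt] comparison operator.
#     r = requests.get('https://api.osf.io/v2/nodes/<node_id>/wikis/',
#                      params={'filter[date_modified][gt]': '2016-01-01'})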
class NodeLinkedNodesRelationship(LinkedNodesRelationship, NodeMixin):
""" Relationship Endpoint for Nodes -> Linked Node relationships
Used to set, remove, update and retrieve the ids of the linked nodes attached to this collection. For each id, there
exists a node link that contains that node.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the collection, and for the user that is
making the request to be able to read the nodes requested. Data can contain any number of
node identifiers. This will create a node_link for all node_ids in the request that
do not currently have a corresponding node_link in this collection.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the collection, and for the user that is
making the request to be able to read the nodes requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this collection with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have a node id, and do nothing
for node_ids that already have a corresponding node_link. This means an update request with
{"data": []} will remove all node_links in this collection.
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
view_category = 'nodes'
view_name = 'node-pointer-relationship'
class LinkedNodesList(BaseLinkedList, NodeMixin):
"""List of nodes linked to this node. *Read-only*.
Linked nodes are the nodes pointed to by node links. This view will probably replace node_links in the near future.
<!--- Copied Spiel from NodeDetail -->
On the front end, nodes are considered 'projects' or 'components'. The difference between a project and a component
is that a project is the top-level node, and components are children of the project. There is also a [category
field](/v2/#osf-node-categories) that includes 'project' as an option. The categorization essentially determines
which icon is displayed by the node in the front-end UI and helps with search organization. Top-level nodes may have
a category other than project, and children nodes may have a category of project.
##Linked Node Attributes
<!--- Copied Attributes from NodeDetail -->
OSF Node entities have the "nodes" `type`.
name type description
=================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
registration         boolean            is this node a registration?
collection boolean is this node a collection of other nodes?
public boolean has this node been made publicly-visible?
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
`description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
`registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
serializer_class = NodeSerializer
view_category = 'nodes'
view_name = 'linked-nodes'
def get_queryset(self):
return [node for node in
super(LinkedNodesList, self).get_queryset()
if not node.is_registration]
# overrides APIView
def get_parser_context(self, http_request):
"""
Tells parser that we are creating a relationship
"""
res = super(LinkedNodesList, self).get_parser_context(http_request)
res['is_relationship'] = True
return res
class NodeViewOnlyLinksList(JSONAPIBaseView, generics.ListCreateAPIView, ListFilterMixin, NodeMixin):
"""
List of view only links on a node. *Writeable*.
###Permissions
View only links on a node, public or private, are readable and writeable only by users that are
administrators on the node.
##Attributes
name type description
=================================================================================
name string name of the view only link
anonymous boolean whether the view only link has anonymized contributors
date_created iso8601 timestamp timestamp when the view only link was created
key string the view only link key
##Relationships
###Creator
The user who created the view only link.
###Nodes
The nodes which this view only link key gives read-only access to.
##Actions
###Create
Method: POST
Body (JSON): {
"data": {
"attributes": {
"name": {string}, #optional
"anonymous": true|false, #optional
}
}
}
Success: 201 CREATED + VOL representation
##Query Params
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
View only links may be filtered by their `name`, `anonymous`, and `date_created` attributes.
#This Request/Response
"""
permission_classes = (
IsAdmin,
base_permissions.TokenHasScope,
drf_permissions.IsAuthenticatedOrReadOnly
)
required_read_scopes = [CoreScopes.NODE_VIEW_ONLY_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_VIEW_ONLY_LINKS_WRITE]
serializer_class = NodeViewOnlyLinkSerializer
view_category = 'nodes'
view_name = 'node-view-only-links'
def get_default_queryset(self):
return [
link for link in
self.get_node().private_links
if not link.is_deleted
]
def get_queryset(self):
return self.get_queryset_from_request()
class NodeViewOnlyLinkDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, NodeMixin):
"""
Detail of a specific view only link on a node. *Writeable*.
###Permissions
View only links on a node, public or private, are only readable and writeable by users that are
administrators on the node.
##Attributes
name type description
=================================================================================
name string name of the view only link
anonymous boolean whether the view only link has anonymized contributors
date_created iso8601 timestamp timestamp when the view only link was created
key string the view only key
##Relationships
###Creator
The user who created the view only link.
###Nodes
The nodes which this view only link key gives read-only access to.
##Actions
###Update
Method: PUT
Body (JSON): {
"data": {
"attributes": {
"name": {string}, #optional
"anonymous": true|false, #optional
},
}
}
Success: 200 OK + VOL representation
###Delete
Method: DELETE
Body (JSON): <none>
Success: 204 NO CONTENT
#This Request/Response
"""
permission_classes = (
IsAdmin,
base_permissions.TokenHasScope,
drf_permissions.IsAuthenticatedOrReadOnly
)
required_read_scopes = [CoreScopes.NODE_VIEW_ONLY_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_VIEW_ONLY_LINKS_WRITE]
serializer_class = NodeViewOnlyLinkSerializer
view_category = 'nodes'
view_name = 'node-view-only-link-detail'
def get_serializer_class(self):
if self.request.method == 'PUT':
return NodeViewOnlyLinkUpdateSerializer
return NodeViewOnlyLinkSerializer
def get_object(self):
for link in self.get_node().private_links:
if link._id == self.kwargs['link_id']:
return link
raise NotFound
def perform_destroy(self, link):
assert isinstance(link, PrivateLink), 'link must be a PrivateLink'
link.is_deleted = True
link.save()
enqueue_postcommit_task(ban_url, (self.get_node(),), {}, celery=True, once_per_request=True)
class NodeIdentifierList(NodeMixin, IdentifierList):
"""List of identifiers for a specified node. *Read-only*.
##Identifier Attributes
OSF Identifier entities have the "identifiers" `type`.
name type description
----------------------------------------------------------------------------
category string e.g. 'ark', 'doi'
value string the identifier value itself
##Links
self: this identifier's detail page
##Relationships
###Referent
The node that this identifier refers to.
##Actions
*None*.
##Query Params
Identifiers may be filtered by their category.
#This Request/Response
"""
serializer_class = NodeIdentifierSerializer
class NodePreprintsList(JSONAPIBaseView, generics.ListAPIView, NodeMixin, NodePreprintsFilterMixin):
"""List of preprints for a node. *Read-only*.
##Note
**This API endpoint is under active development, and is subject to change in the future.**
Paginated list of preprints ordered by their `date_created`. Each resource contains a representation of the
preprint.
##Preprint Attributes
OSF Preprint entities have the "preprints" `type`.
name type description
====================================================================================
date_created iso8601 timestamp timestamp that the preprint was created
date_modified iso8601 timestamp timestamp that the preprint was last modified
date_published iso8601 timestamp timestamp when the preprint was published
is_published boolean whether or not this preprint is published
is_preprint_orphan boolean whether or not this preprint is orphaned
subjects                        list of lists of dictionaries     ids of Subjects in the PLOS taxonomy. Each dictionary contains the subject text and subject ID
provider string original source of the preprint
doi string bare DOI for the manuscript, as entered by the user
##Relationships
###Node
The node that this preprint was created for
###Primary File
The file that is designated as the preprint's primary file, or the manuscript of the preprint.
###Provider
Link to preprint_provider detail for this preprint
##Links
- `self` -- Preprint detail page for the current preprint
- `html` -- Project on the OSF corresponding to the current preprint
- `doi` -- URL representation of the DOI entered by the user for the preprint manuscript
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ContributorOrPublic,
)
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
required_write_scopes = [CoreScopes.NODE_PREPRINTS_WRITE]
serializer_class = PreprintSerializer
view_category = 'nodes'
view_name = 'node-preprints'
# overrides ODMFilterMixin
def get_default_odm_query(self):
return (
Q('node', 'eq', self.get_node())
)
# overrides ListAPIView
def get_queryset(self):
return PreprintService.find(self.get_query_from_request())
|
rdhyee/osf.io
|
api/nodes/views.py
|
Python
|
apache-2.0
| 141,472 | 0.004298 |
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import smilesparser
from rdkit import Chem
serial = 0
element_number = {'C': 6,
'N': 7,
'O': 8,
'H': 1,
'S': 16
}
class SMILES:
def __init__(self, smiles):
self.mol = Chem.RWMol()
self.parsed = smilesparser.SMILES.parseString(smiles)[0]
self.prevAtomIdx = None
self.prevBond = None
self.atomStack = []
self.ringClosures = {}
self.iterate_smiles(self.parsed.smiles)
def AddAtom(self, s):
a = Chem.Atom(element_number[s.upper()])
if a.GetSymbol() == 'S':
a.SetHybridization(Chem.rdchem.HybridizationType.SP2)
a.SetNumRadicalElectrons(1)
a.SetNoImplicit(True)
        else:
            # The bond order itself is applied later, in AddBond(); here we
            # only choose the hybridization implied by the pending bond token.
            if not self.prevBond:
                a.SetHybridization(Chem.rdchem.HybridizationType.SP3)
            elif self.prevBond in (':', '='):
                a.SetHybridization(Chem.rdchem.HybridizationType.SP2)
            else:
                raise RuntimeError('unsupported bond token: %r' % self.prevBond)
idx = self.mol.AddAtom(a)
if self.prevAtomIdx is not None:
self.AddBond(idx)
self.prevAtomIdx = idx
return a
def AddBond(self, idx):
bt = Chem.rdchem.BondType.SINGLE
if self.prevBond:
if self.prevBond == '=':
bt = Chem.rdchem.BondType.DOUBLE
if self.prevBond == '#':
bt = Chem.rdchem.BondType.TRIPLE
if self.prevBond == ':':
bt = Chem.rdchem.BondType.AROMATIC
self.mol.AddBond(self.prevAtomIdx, idx, bt)
self.prevBond = None
def inspect_organic_symbol(self, organic_symbol, indent=0):
s = ''.join(organic_symbol)
self.AddAtom(s)
def inspect_aromatic_symbol(self, aromatic_symbol, indent=0):
s = ''.join(aromatic_symbol)
a = self.AddAtom(s)
a.SetIsAromatic(True)
self.prevBond = ":"
def inspect_element_symbol(self, element_symbol, indent=0):
s = ''.join(element_symbol)
self.AddAtom(s)
def inspect_chiral_class(self, chiral_class, indent=0):
pass
def inspect_hcount(self, hcount, indent=0):
pass
def inspect_charge(self, charge, indent=0):
pass
def inspect_atomspec(self, atomspec, indent=0):
self.atomStack.append(self.prevAtomIdx)
for item in atomspec:
if isinstance(item, smilesparser.AST.AromaticSymbol):
self.inspect_aromatic_symbol(item.aromatic_symbol, indent+1)
elif isinstance(item, smilesparser.AST.ElementSymbol):
self.inspect_element_symbol(item.element_symbol, indent+1)
elif isinstance(item, smilesparser.AST.ChiralClass):
self.inspect_chiral_class(item.chiral_class, indent+1)
elif isinstance(item, smilesparser.AST.HCount):
self.inspect_hcount(item.hcount, indent+1)
elif isinstance(item, smilesparser.AST.Charge):
self.inspect_charge(item.charge, indent+1)
else:
print " " * indent + str(item), dir(item)
self.prevAtomIdx = self.atomStack.pop()
def inspect_atom(self, atom, indent=0):
if isinstance(atom, smilesparser.AST.OrganicSymbol):
self.inspect_organic_symbol(atom.organic_symbol, indent)
elif isinstance(atom, smilesparser.AST.AromaticSymbol):
self.inspect_aromatic_symbol(atom.aromatic_symbol, indent)
elif isinstance(atom, smilesparser.AST.AtomSpec):
self.inspect_atomspec(atom.atom_spec, indent)
else:
print " " * indent + atom, dir(atom)
def inspect_bond(self, bond, indent=0):
self.prevBond = bond
def inspect_ring_closure(self, ring_closure, indent=0):
if ring_closure not in self.ringClosures:
self.ringClosures[ring_closure] = self.prevAtomIdx
else:
idx = self.ringClosures[ring_closure]
self.AddBond(idx)
def inspect_chain(self, chain, indent=0):
for item in chain:
if isinstance(item, smilesparser.AST.Bond):
self.inspect_bond(item.bond, indent)
elif isinstance(item, smilesparser.AST.Atom):
self.inspect_atom(item.atom, indent)
elif isinstance(item, smilesparser.AST.RingClosure):
self.inspect_ring_closure(item.ring_closure, indent)
else:
print " " * indent + item, dir(item)
def iterate_branch(self, branch, indent=0):
self.atomStack.append(self.prevAtomIdx)
for item in branch[0]:
if isinstance(item, smilesparser.AST.Bond):
self.inspect_bond(item.bond, indent+1)
elif isinstance(item, smilesparser.AST.SMILES):
self.iterate_smiles(item.smiles, indent+1)
else:
print " " * indent + item, dir(item)
self.prevAtomIdx = self.atomStack.pop()
def iterate_smiles(self, smiles, indent=0):
for item in smiles:
if isinstance(item, smilesparser.AST.Atom):
self.inspect_atom(item.atom, indent)
elif isinstance(item, smilesparser.AST.Chain):
self.inspect_chain(item.chain, indent)
elif isinstance(item, smilesparser.AST.Branch):
self.iterate_branch(item, indent+1)
else:
print " " * indent + item, dir(item)
def print_mol(mol):
for atom in mol.GetAtoms():
atom.UpdatePropertyCache(strict=False)
print (atom.GetIdx(),
atom.GetAtomicNum(),
atom.GetDegree(),
atom.GetTotalDegree(),
atom.GetTotalValence(),
atom.GetImplicitValence(),
atom.GetExplicitValence(),
atom.GetFormalCharge(),
atom.GetNumRadicalElectrons(),
atom.GetHybridization(),
atom.GetNoImplicit())
for bond in mol.GetBonds():
print (bond.GetBeginAtomIdx(),
bond.GetEndAtomIdx(),
bond.GetBondType())
if __name__ == '__main__':
smiles=[
# 'C',
# 'CC',
# 'CCCCC(CCC)CCC',
# 'C1CCC(C1C)CCCC',
# 'c1ccccc1',
# 'Cc1ccccc1',
# 'CCC[S]=O',
# 'CC[S@](=O)c1ccc2c(c1)[nH]/c(=N/C(=O)OC)/[nH]2',
'C=CCc1cc(OC)c2c(c1OC)OCO2'
# 'CCC(=O)O[C@]1(CC[NH+](C[C@@H]1CC=C)C)c2ccccc2'
]
for s in smiles:
print s
m = Chem.MolFromSmiles(s)
s1 = Chem.MolToSmiles(m)
print s1
print_mol(m)
print
sm = SMILES(s1)
print_mol(sm.mol)
print Chem.MolToSmiles(sm.mol)
print
|
google/smilesparser
|
test_smilesparser_rdkit.py
|
Python
|
apache-2.0
| 6,797 | 0.010152 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class DummyKeyResponse(object):
def __init__(self, gen=1):
self.generation = gen
self.name = ""
def request(self, path, method, **kwargs):
self.name = path.split('/')[-1]
return self
def json(self):
return {"generation": self.generation,
"name": self.name}
class DummyTicketResponse(object):
def __init__(self, signature, metadata, ticket):
self.signature = signature
self.metadata = metadata
self.ticket = ticket
def request(self, path, method, **kwargs):
return self
def json(self):
return {"signature": self.signature,
"metadata": self.metadata,
"ticket": self.ticket}
class DummyGroupResponse(object):
def __init__(self, name):
self.name = name
def request(self, path, method, **kwargs):
return self
def json(self):
return {"name": self.name}
class DummyGroupKeyResponse(object):
def __init__(self, signature, metadata, group_key):
self.signature = signature
self.metadata = metadata
self.group_key = group_key
def request(self, path, method, **kwargs):
return self
def json(self):
return {"signature": self.signature,
"metadata": self.metadata,
"group_key": self.group_key}
|
jamielennox/python-kiteclient
|
kiteclient/tests/v1/utils.py
|
Python
|
apache-2.0
| 1,915 | 0 |
#! /usr/bin/env python
# Positional proximity search: for every document, report each pair of
# positions at which q1 and q2 occur within k tokens of each other.
from sys import argv

script, q1, q2, k = argv
k = int(k)
fw = open('c.txt', 'w+')
for docid in range(1, 192):
    filename = 'data/' + str(docid) + '.txt'
    fr = open(filename)
    string = fr.read()
    fr.close()
    # Positional postings of q1 and q2 in this document.
    pp1 = []
    pp2 = []
    window = []
    position = 0
    for token in string.split():
        if token == q1:
            pp1.append(position)
        if token == q2:
            pp2.append(position)
        position += 1
    # Positional intersection: for each occurrence i of q1, collect the
    # q2 positions at most k away, evict those that have drifted out of
    # range, then emit the surviving (i, ps) pairs, skipping duplicates.
    for i in pp1:
        for j in pp2:
            if abs(i - j) <= k:
                window.append(j)
            elif j > i:
                break
        while window and abs(window[0] - i) > k:
            window.pop(0)
        prev_ps = -1
        for ps in window:
            if ps != prev_ps:
                fw.write(str(docid) + ' ' + str(i) + ' ' + str(ps) + '\n')
            prev_ps = ps
fw.close()
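# Illustrative check (not part of the original script): the same window
# logic applied to an in-memory token list.
#
#     tokens = "the new york new haven railroad".split()
#     pp1 = [p for p, t in enumerate(tokens) if t == 'new']    # [1, 3]
#     pp2 = [p for p, t in enumerate(tokens) if t == 'york']   # [2]
#     print [(i, j) for i in pp1 for j in pp2 if abs(i - j) <= 1]
#     # -> [(1, 2), (3, 2)], matching what the loops above emit per docid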
|
fortesit/search-engine
|
posting-list-search-k-distanced-words.py
|
Python
|
mit
| 663 | 0.045249 |
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
''' Post installation script for linux '''
import sys, os, cPickle, textwrap, stat, errno
from subprocess import check_call, check_output
from functools import partial
from calibre import __appname__, prints, guess_type
from calibre.constants import islinux, isbsd
from calibre.customize.ui import all_input_formats
from calibre.ptempfile import TemporaryDirectory
from calibre import CurrentDir
entry_points = {
'console_scripts': [
'ebook-device = calibre.devices.cli:main',
'ebook-meta = calibre.ebooks.metadata.cli:main',
'ebook-convert = calibre.ebooks.conversion.cli:main',
'ebook-polish = calibre.ebooks.oeb.polish.main:main',
'markdown-calibre = calibre.ebooks.markdown.__main__:run',
'web2disk = calibre.web.fetch.simple:main',
'calibre-server = calibre.srv.standalone:main',
'lrf2lrs = calibre.ebooks.lrf.lrfparser:main',
'lrs2lrf = calibre.ebooks.lrf.lrs.convert_from:main',
'calibre-debug = calibre.debug:main',
'calibredb = calibre.db.cli.main:main',
'calibre-parallel = calibre.utils.ipc.worker:main',
'calibre-customize = calibre.customize.ui:main',
'calibre-complete = calibre.utils.complete:main',
'fetch-ebook-metadata = calibre.ebooks.metadata.sources.cli:main',
'calibre-smtp = calibre.utils.smtp:main',
],
'gui_scripts' : [
__appname__+' = calibre.gui_launch:calibre',
'lrfviewer = calibre.gui2.lrf_renderer.main:main',
'ebook-viewer = calibre.gui_launch:ebook_viewer',
'ebook-edit = calibre.gui_launch:ebook_edit',
],
}
class PreserveMIMEDefaults(object):
def __init__(self):
self.initial_values = {}
def __enter__(self):
def_data_dirs = '/usr/local/share:/usr/share'
paths = os.environ.get('XDG_DATA_DIRS', def_data_dirs)
paths = paths.split(':')
paths.append(os.environ.get('XDG_DATA_HOME', os.path.expanduser(
'~/.local/share')))
paths = list(filter(os.path.isdir, paths))
if not paths:
# Env var had garbage in it, ignore it
paths = def_data_dirs.split(':')
paths = list(filter(os.path.isdir, paths))
self.paths = {os.path.join(x, 'applications/defaults.list') for x in
paths}
self.initial_values = {}
for x in self.paths:
try:
with open(x, 'rb') as f:
self.initial_values[x] = f.read()
except:
self.initial_values[x] = None
def __exit__(self, *args):
for path, val in self.initial_values.iteritems():
if val is None:
try:
os.remove(path)
except:
pass
elif os.path.exists(path):
try:
with open(path, 'r+b') as f:
if f.read() != val:
f.seek(0)
f.truncate()
f.write(val)
except EnvironmentError as e:
if e.errno != errno.EACCES:
raise
# Uninstall script {{{
UNINSTALL = '''\
#!{python}
from __future__ import print_function, unicode_literals
euid = {euid}
import os, subprocess, shutil
try:
raw_input
except NameError:
raw_input = input
if os.geteuid() != euid:
print ('The installer was last run as user id:', euid, 'To remove all files you must run the uninstaller as the same user')
if raw_input('Proceed anyway? [y/n]:').lower() != 'y':
raise SystemExit(1)
frozen_path = {frozen_path!r}
if not frozen_path or not os.path.exists(os.path.join(frozen_path, 'resources', 'calibre-mimetypes.xml')):
frozen_path = None
for f in {mime_resources!r}:
cmd = ['xdg-mime', 'uninstall', f]
print ('Removing mime resource:', os.path.basename(f))
ret = subprocess.call(cmd, shell=False)
if ret != 0:
print ('WARNING: Failed to remove mime resource', f)
for x in tuple({manifest!r}) + tuple({appdata_resources!r}) + (os.path.abspath(__file__), __file__, frozen_path):
if not x or not os.path.exists(x):
continue
print ('Removing', x)
try:
if os.path.isdir(x):
shutil.rmtree(x)
else:
os.unlink(x)
except Exception as e:
print ('Failed to delete', x)
print ('\t', e)
icr = {icon_resources!r}
mimetype_icons = []
def remove_icon(context, name, size, update=False):
cmd = ['xdg-icon-resource', 'uninstall', '--context', context, '--size', size, name]
if not update:
cmd.insert(2, '--noupdate')
print ('Removing icon:', name, 'from context:', context, 'at size:', size)
ret = subprocess.call(cmd, shell=False)
if ret != 0:
print ('WARNING: Failed to remove icon', name)
for i, (context, name, size) in enumerate(icr):
if context == 'mimetypes':
mimetype_icons.append((name, size))
continue
remove_icon(context, name, size, update=i == len(icr) - 1)
mr = {menu_resources!r}
for f in mr:
cmd = ['xdg-desktop-menu', 'uninstall', f]
print ('Removing desktop file:', f)
ret = subprocess.call(cmd, shell=False)
if ret != 0:
print ('WARNING: Failed to remove menu item', f)
print ()
if mimetype_icons and raw_input('Remove the e-book format icons? [y/n]:').lower() in ['', 'y']:
for i, (name, size) in enumerate(mimetype_icons):
remove_icon('mimetypes', name, size, update=i == len(mimetype_icons) - 1)
'''
# }}}
# Completion {{{
class ZshCompleter(object): # {{{
def __init__(self, opts):
self.opts = opts
self.dest = None
base = os.path.dirname(self.opts.staging_sharedir)
self.detect_zsh(base)
if not self.dest and base == '/usr/share':
# Ubuntu puts site-functions in /usr/local/share
self.detect_zsh('/usr/local/share')
self.commands = {}
def detect_zsh(self, base):
for x in ('vendor-completions', 'vendor-functions', 'site-functions'):
c = os.path.join(base, 'zsh', x)
if os.path.isdir(c) and os.access(c, os.W_OK):
self.dest = os.path.join(c, '_calibre')
break
def get_options(self, parser, cover_opts=('--cover',), opf_opts=('--opf',),
file_map={}):
if hasattr(parser, 'option_list'):
options = parser.option_list
for group in parser.option_groups:
options += group.option_list
else:
options = parser
for opt in options:
lo, so = opt._long_opts, opt._short_opts
if opt.takes_value():
lo = [x+'=' for x in lo]
so = [x+'+' for x in so]
ostrings = lo + so
ostrings = u'{%s}'%','.join(ostrings) if len(ostrings) > 1 else ostrings[0]
exclude = u''
if opt.dest is None:
exclude = u"'(- *)'"
h = opt.help or ''
h = h.replace('"', "'").replace('[', '(').replace(
']', ')').replace('\n', ' ').replace(':', '\\:').replace('`', "'")
h = h.replace('%default', type(u'')(opt.default))
arg = ''
if opt.takes_value():
arg = ':"%s":'%h
if opt.dest in {'extract_to', 'debug_pipeline', 'to_dir', 'outbox', 'with_library', 'library_path'}:
arg += "'_path_files -/'"
elif opt.choices:
arg += "(%s)"%'|'.join(opt.choices)
elif set(file_map).intersection(set(opt._long_opts)):
k = set(file_map).intersection(set(opt._long_opts))
exts = file_map[tuple(k)[0]]
if exts:
arg += "'_files -g \"%s\"'"%(' '.join('*.%s'%x for x in
tuple(exts) + tuple(x.upper() for x in exts)))
else:
arg += "_files"
elif (opt.dest in {'pidfile', 'attachment'}):
arg += "_files"
elif set(opf_opts).intersection(set(opt._long_opts)):
arg += "'_files -g \"*.opf\"'"
elif set(cover_opts).intersection(set(opt._long_opts)):
arg += "'_files -g \"%s\"'"%(' '.join('*.%s'%x for x in
tuple(pics) + tuple(x.upper() for x in pics)))
help_txt = u'"[%s]"'%h
yield u'%s%s%s%s '%(exclude, ostrings, help_txt, arg)
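    # Illustrative only (hand-written, not generated output): for an optparse
    # option -o/--output that takes a value with help text "Output file", the
    # spec yielded above looks roughly like
    #     {--output=,-o+}"[Output file]":"Output file":_files
    # i.e. the option strings, a bracketed description, and a zsh action.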
def opts_and_exts(self, name, op, exts, cover_opts=('--cover',),
opf_opts=('--opf',), file_map={}):
if not self.dest:
return
exts = sorted({x.lower() for x in exts})
extra = ('''"*:filename:_files -g '(#i)*.(%s)'" ''' % '|'.join(exts),)
opts = '\\\n '.join(tuple(self.get_options(
op(), cover_opts=cover_opts, opf_opts=opf_opts, file_map=file_map)) + extra)
txt = '_arguments -s \\\n ' + opts
self.commands[name] = txt
def opts_and_words(self, name, op, words, takes_files=False):
if not self.dest:
return
extra = ("'*:filename:_files' ",) if takes_files else ()
opts = '\\\n '.join(tuple(self.get_options(op())) + extra)
txt = '_arguments -s \\\n ' + opts
self.commands[name] = txt
def do_ebook_convert(self, f):
from calibre.ebooks.conversion.plumber import supported_input_formats
from calibre.web.feeds.recipes.collection import get_builtin_recipe_titles
from calibre.customize.ui import available_output_formats
from calibre.ebooks.conversion.cli import create_option_parser, group_titles
from calibre.utils.logging import DevNull
input_fmts = set(supported_input_formats())
output_fmts = set(available_output_formats())
iexts = {x.upper() for x in input_fmts}.union(input_fmts)
oexts = {x.upper() for x in output_fmts}.union(output_fmts)
w = lambda x: f.write(x if isinstance(x, bytes) else x.encode('utf-8'))
# Arg 1
w('\n_ebc_input_args() {')
w('\n local extras; extras=(')
w('\n {-h,--help}":Show Help"')
w('\n "--version:Show program version"')
w('\n "--list-recipes:List builtin recipe names"')
for recipe in sorted(set(get_builtin_recipe_titles())):
recipe = recipe.replace(':', '\\:').replace('"', '\\"')
w(u'\n "%s.recipe"'%(recipe))
w('\n ); _describe -t recipes "ebook-convert builtin recipes" extras')
w('\n _files -g "%s"'%' '.join(('*.%s'%x for x in iexts)))
w('\n}\n')
# Arg 2
w('\n_ebc_output_args() {')
w('\n local extras; extras=(')
for x in output_fmts:
w('\n ".{0}:Convert to a .{0} file with the same name as the input file"'.format(x))
w('\n ); _describe -t output "ebook-convert output" extras')
w('\n _files -g "%s"'%' '.join(('*.%s'%x for x in oexts)))
w('\n _path_files -/')
w('\n}\n')
log = DevNull()
def get_parser(input_fmt='epub', output_fmt=None):
of = ('dummy2.'+output_fmt) if output_fmt else 'dummy'
return create_option_parser(('ec', 'dummy1.'+input_fmt, of, '-h'), log)[0]
# Common options
input_group, output_group = group_titles()
p = get_parser()
opts = p.option_list
for group in p.option_groups:
if group.title not in {input_group, output_group}:
opts += group.option_list
opts.append(p.get_option('--pretty-print'))
opts.append(p.get_option('--input-encoding'))
opts = '\\\n '.join(tuple(
self.get_options(opts, file_map={'--search-replace':()})))
w('\n_ebc_common_opts() {')
w('\n _arguments -s \\\n ' + opts)
w('\n}\n')
# Input/Output format options
for fmts, group_title, func in (
(input_fmts, input_group, '_ebc_input_opts_%s'),
(output_fmts, output_group, '_ebc_output_opts_%s'),
):
for fmt in fmts:
is_input = group_title == input_group
if is_input and fmt in {'rar', 'zip', 'oebzip'}:
continue
p = (get_parser(input_fmt=fmt) if is_input
else get_parser(output_fmt=fmt))
opts = None
for group in p.option_groups:
if group.title == group_title:
opts = [o for o in group.option_list if
'--pretty-print' not in o._long_opts and
'--input-encoding' not in o._long_opts]
if not opts:
continue
opts = '\\\n '.join(tuple(self.get_options(opts)))
w('\n%s() {'%(func%fmt))
w('\n _arguments -s \\\n ' + opts)
w('\n}\n')
w('\n_ebook_convert() {')
w('\n local iarg oarg context state_descr state line\n typeset -A opt_args\n local ret=1')
w("\n _arguments '1: :_ebc_input_args' '*::ebook-convert output:->args' && ret=0")
w("\n case $state in \n (args)")
w('\n iarg=${line[1]##*.}; ')
w("\n _arguments '1: :_ebc_output_args' '*::ebook-convert options:->args' && ret=0")
w("\n case $state in \n (args)")
w('\n oarg=${line[1]##*.}')
w('\n iarg="_ebc_input_opts_${(L)iarg}"; oarg="_ebc_output_opts_${(L)oarg}"')
w('\n _call_function - $iarg; _call_function - $oarg; _ebc_common_opts; ret=0')
w('\n ;;\n esac')
w("\n ;;\n esac\n return ret")
w('\n}\n')
def do_ebook_edit(self, f):
from calibre.ebooks.oeb.polish.main import SUPPORTED
from calibre.ebooks.oeb.polish.import_book import IMPORTABLE
from calibre.gui2.tweak_book.main import option_parser
tweakable_fmts = SUPPORTED | IMPORTABLE
parser = option_parser()
opt_lines = []
for opt in parser.option_list:
lo, so = opt._long_opts, opt._short_opts
if opt.takes_value():
lo = [x+'=' for x in lo]
so = [x+'+' for x in so]
ostrings = lo + so
ostrings = u'{%s}'%','.join(ostrings) if len(ostrings) > 1 else '"%s"'%ostrings[0]
h = opt.help or ''
h = h.replace('"', "'").replace('[', '(').replace(
']', ')').replace('\n', ' ').replace(':', '\\:').replace('`', "'")
h = h.replace('%default', type(u'')(opt.default))
help_txt = u'"[%s]"'%h
opt_lines.append(ostrings + help_txt + ' \\')
opt_lines = ('\n' + (' ' * 8)).join(opt_lines)
f.write((ur'''
_ebook_edit() {
local curcontext="$curcontext" state line ebookfile expl
typeset -A opt_args
_arguments -C -s \
%s
"1:ebook file:_files -g '(#i)*.(%s)'" \
'*:file in ebook:->files' && return 0
case $state in
files)
ebookfile=${~${(Q)line[1]}}
if [[ -f "$ebookfile" && "$ebookfile" =~ '\.[eE][pP][uU][bB]$' ]]; then
_zip_cache_name="$ebookfile"
_zip_cache_list=( ${(f)"$(zipinfo -1 $_zip_cache_name 2>/dev/null)"} )
else
return 1
fi
_wanted files expl 'file from ebook' \
_multi_parts / _zip_cache_list && return 0
;;
esac
return 1
}
''' % (opt_lines, '|'.join(tweakable_fmts)) + '\n\n').encode('utf-8'))
def do_calibredb(self, f):
from calibre.db.cli.main import COMMANDS, option_parser_for
from calibre.customize.ui import available_catalog_formats
parsers, descs = {}, {}
for command in COMMANDS:
p = option_parser_for(command)()
parsers[command] = p
lines = [x.strip().partition('.')[0] for x in p.usage.splitlines() if x.strip() and
not x.strip().startswith('%prog')]
descs[command] = lines[0]
f.write('\n_calibredb_cmds() {\n local commands; commands=(\n')
f.write(' {-h,--help}":Show help"\n')
f.write(' "--version:Show version"\n')
for command, desc in descs.iteritems():
f.write(' "%s:%s"\n'%(
command, desc.replace(':', '\\:').replace('"', '\'')))
f.write(' )\n _describe -t commands "calibredb command" commands \n}\n')
subcommands = []
for command, parser in parsers.iteritems():
exts = []
if command == 'catalog':
exts = [x.lower() for x in available_catalog_formats()]
elif command == 'set_metadata':
exts = ['opf']
exts = set(exts).union(x.upper() for x in exts)
pats = ('*.%s'%x for x in exts)
extra = ("'*:filename:_files -g \"%s\"' "%' '.join(pats),) if exts else ()
if command in {'add', 'add_format'}:
extra = ("'*:filename:_files' ",)
opts = '\\\n '.join(tuple(self.get_options(
parser)) + extra)
txt = ' _arguments -s \\\n ' + opts
subcommands.append('(%s)'%command)
subcommands.append(txt)
subcommands.append(';;')
f.write('\n_calibredb() {')
f.write((
r'''
local state line state_descr context
typeset -A opt_args
local ret=1
_arguments \
'1: :_calibredb_cmds' \
'*::calibredb subcommand options:->args' \
&& ret=0
case $state in
(args)
case $line[1] in
(-h|--help|--version)
_message 'no more arguments' && ret=0
;;
%s
esac
;;
esac
return ret
'''%'\n '.join(subcommands)).encode('utf-8'))
f.write('\n}\n\n')
def write(self):
if self.dest:
for c in ('calibredb', 'ebook-convert', 'ebook-edit'):
self.commands[c] = ' _%s "$@"' % c.replace('-', '_')
with open(self.dest, 'wb') as f:
f.write('#compdef ' + ' '.join(self.commands)+'\n')
self.do_ebook_convert(f)
self.do_calibredb(f)
self.do_ebook_edit(f)
f.write('case $service in\n')
for c, txt in self.commands.iteritems():
if isinstance(txt, type(u'')):
txt = txt.encode('utf-8')
if isinstance(c, type(u'')):
c = c.encode('utf-8')
f.write(b'%s)\n%s\n;;\n'%(c, txt))
f.write('esac\n')
# }}}
def get_bash_completion_path(root, share, info):
if root == '/usr':
# Try to get the system bash completion dir since we are installing to
# /usr
try:
path = check_output('pkg-config --variable=completionsdir bash-completion'.split()).strip().partition(os.pathsep)[0]
except Exception:
info('Failed to find directory to install bash completions, using default.')
path = '/usr/share/bash-completion/completions'
if path and os.path.exists(path) and os.path.isdir(path):
return os.path.join(path, 'calibre')
else:
# Use the default bash-completion dir under staging_share
return os.path.join(share, 'bash-completion', 'completions', 'calibre')
def write_completion(bash_comp_dest, zsh):
from calibre.ebooks.metadata.cli import option_parser as metaop, filetypes as meta_filetypes
from calibre.ebooks.lrf.lrfparser import option_parser as lrf2lrsop
from calibre.gui2.lrf_renderer.main import option_parser as lrfviewerop
from calibre.gui2.viewer.main import option_parser as viewer_op
from calibre.gui2.tweak_book.main import option_parser as tweak_op
from calibre.ebooks.metadata.sources.cli import option_parser as fem_op
from calibre.gui2.main import option_parser as guiop
from calibre.utils.smtp import option_parser as smtp_op
from calibre.srv.standalone import create_option_parser as serv_op
from calibre.ebooks.oeb.polish.main import option_parser as polish_op, SUPPORTED
from calibre.ebooks.oeb.polish.import_book import IMPORTABLE
from calibre.debug import option_parser as debug_op
from calibre.ebooks import BOOK_EXTENSIONS
from calibre.customize.ui import available_input_formats
input_formats = sorted(all_input_formats())
tweak_formats = sorted(x.lower() for x in SUPPORTED|IMPORTABLE)
if bash_comp_dest and not os.path.exists(os.path.dirname(bash_comp_dest)):
os.makedirs(os.path.dirname(bash_comp_dest))
complete = 'calibre-complete'
if getattr(sys, 'frozen_path', None):
complete = os.path.join(getattr(sys, 'frozen_path'), complete)
with open(bash_comp_dest or os.devnull, 'wb') as f:
def o_and_e(*args, **kwargs):
f.write(opts_and_exts(*args, **kwargs))
zsh.opts_and_exts(*args, **kwargs)
def o_and_w(*args, **kwargs):
f.write(opts_and_words(*args, **kwargs))
zsh.opts_and_words(*args, **kwargs)
f.write('# calibre Bash Shell Completion\n')
o_and_e('calibre', guiop, BOOK_EXTENSIONS)
o_and_e('lrf2lrs', lrf2lrsop, ['lrf'], file_map={'--output':['lrs']})
o_and_e('ebook-meta', metaop,
list(meta_filetypes()), cover_opts=['--cover', '-c'],
opf_opts=['--to-opf', '--from-opf'])
o_and_e('ebook-polish', polish_op,
[x.lower() for x in SUPPORTED], cover_opts=['--cover', '-c'],
opf_opts=['--opf', '-o'])
o_and_e('lrfviewer', lrfviewerop, ['lrf'])
o_and_e('ebook-viewer', viewer_op, input_formats)
o_and_e('ebook-edit', tweak_op, tweak_formats)
o_and_w('fetch-ebook-metadata', fem_op, [])
o_and_w('calibre-smtp', smtp_op, [])
o_and_w('calibre-server', serv_op, [])
o_and_e('calibre-debug', debug_op, ['py', 'recipe', 'mobi', 'azw', 'azw3', 'docx'], file_map={
'--tweak-book':['epub', 'azw3', 'mobi'],
'--subset-font':['ttf', 'otf'],
'--exec-file':['py', 'recipe'],
'--add-simple-plugin':['py'],
'--inspect-mobi':['mobi', 'azw', 'azw3'],
'--viewer':sorted(available_input_formats()),
})
f.write(textwrap.dedent('''
_ebook_device_ls()
{
local pattern search listing prefix
pattern="$1"
search="$1"
if [[ -n "{$pattern}" ]]; then
if [[ "${pattern:(-1)}" == "/" ]]; then
pattern=""
else
pattern="$(basename ${pattern} 2> /dev/null)"
search="$(dirname ${search} 2> /dev/null)"
fi
fi
if [[ "x${search}" == "x" || "x${search}" == "x." ]]; then
search="/"
fi
listing="$(ebook-device ls ${search} 2>/dev/null)"
prefix="${search}"
if [[ "x${prefix:(-1)}" != "x/" ]]; then
prefix="${prefix}/"
fi
echo $(compgen -P "${prefix}" -W "${listing}" "${pattern}")
}
_ebook_device()
{
local cur prev
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
COMPREPLY=()
case "${prev}" in
ls|rm|mkdir|touch|cat )
COMPREPLY=( $(_ebook_device_ls "${cur}") )
return 0
;;
cp )
if [[ ${cur} == dev:* ]]; then
COMPREPLY=( $(_ebook_device_ls "${cur:7}") )
return 0
else
_filedir
return 0
fi
;;
dev )
COMPREPLY=( $(compgen -W "cp ls rm mkdir touch cat info books df" "${cur}") )
return 0
;;
* )
if [[ ${cur} == dev:* ]]; then
COMPREPLY=( $(_ebook_device_ls "${cur:7}") )
return 0
else
if [[ ${prev} == dev:* ]]; then
_filedir
return 0
else
COMPREPLY=( $(compgen -W "dev:" "${cur}") )
return 0
fi
return 0
fi
;;
esac
}
complete -o nospace -F _ebook_device ebook-device
complete -o nospace -C %s ebook-convert
''')%complete)
zsh.write()
# }}}
class PostInstall:
def task_failed(self, msg):
self.warn(msg, 'with error:')
import traceback
tb = '\n\t'.join(traceback.format_exc().splitlines())
self.info('\t'+tb)
print
def warning(self, *args, **kwargs):
print '\n'+'_'*20, 'WARNING','_'*20
prints(*args, **kwargs)
print '_'*50
print ('\n')
self.warnings.append((args, kwargs))
sys.stdout.flush()
def __init__(self, opts, info=prints, warn=None, manifest=None):
self.opts = opts
self.info = info
self.warn = warn
self.warnings = []
if self.warn is None:
self.warn = self.warning
if not self.opts.staging_bindir:
self.opts.staging_bindir = os.path.join(self.opts.staging_root,
'bin')
if not self.opts.staging_sharedir:
self.opts.staging_sharedir = os.path.join(self.opts.staging_root,
'share', 'calibre')
self.opts.staging_etc = '/etc' if self.opts.staging_root == '/usr' else \
os.path.join(self.opts.staging_root, 'etc')
scripts = cPickle.loads(P('scripts.pickle', data=True))
self.manifest = manifest or []
if getattr(sys, 'frozen_path', False):
if os.access(self.opts.staging_bindir, os.W_OK):
self.info('Creating symlinks...')
for exe in scripts.keys():
dest = os.path.join(self.opts.staging_bindir, exe)
if os.path.lexists(dest):
os.unlink(dest)
tgt = os.path.join(getattr(sys, 'frozen_path'), exe)
self.info('\tSymlinking %s to %s'%(tgt, dest))
os.symlink(tgt, dest)
self.manifest.append(dest)
else:
self.warning(textwrap.fill(
'No permission to write to %s, not creating program launch symlinks,'
' you should ensure that %s is in your PATH or create the symlinks yourself' % (
self.opts.staging_bindir, getattr(sys, 'frozen_path', 'the calibre installation directory'))))
self.icon_resources = []
self.menu_resources = []
self.mime_resources = []
self.appdata_resources = []
if islinux or isbsd:
self.setup_completion()
if islinux or isbsd:
self.setup_desktop_integration()
self.create_uninstaller()
from calibre.utils.config import config_dir
if os.path.exists(config_dir):
os.chdir(config_dir)
if islinux or isbsd:
for f in os.listdir('.'):
if os.stat(f).st_uid == 0:
import shutil
shutil.rmtree(f) if os.path.isdir(f) else os.unlink(f)
if os.stat(config_dir).st_uid == 0:
os.rmdir(config_dir)
if warn is None and self.warnings:
self.info('\n\nThere were %d warnings\n'%len(self.warnings))
for args, kwargs in self.warnings:
self.info('*', *args, **kwargs)
print
def create_uninstaller(self):
base = self.opts.staging_bindir
if not os.access(base, os.W_OK) and getattr(sys, 'frozen_path', False):
base = sys.frozen_path
dest = os.path.join(base, 'calibre-uninstall')
self.info('Creating un-installer:', dest)
raw = UNINSTALL.format(
python='/usr/bin/python', euid=os.geteuid(),
manifest=self.manifest, icon_resources=self.icon_resources,
mime_resources=self.mime_resources, menu_resources=self.menu_resources,
appdata_resources=self.appdata_resources, frozen_path=getattr(sys, 'frozen_path', None))
try:
with open(dest, 'wb') as f:
f.write(raw)
os.chmod(dest, stat.S_IRWXU|stat.S_IRGRP|stat.S_IROTH)
if os.geteuid() == 0:
os.chown(dest, 0, 0)
except:
if self.opts.fatal_errors:
raise
self.task_failed('Creating uninstaller failed')
def setup_completion(self): # {{{
try:
self.info('Setting up command-line completion...')
zsh = ZshCompleter(self.opts)
if zsh.dest:
self.info('Installing zsh completion to:', zsh.dest)
self.manifest.append(zsh.dest)
bash_comp_dest = get_bash_completion_path(self.opts.staging_root, os.path.dirname(self.opts.staging_sharedir), self.info)
if bash_comp_dest is not None:
self.info('Installing bash completion to:', bash_comp_dest)
self.manifest.append(bash_comp_dest)
write_completion(bash_comp_dest, zsh)
except TypeError as err:
if 'resolve_entities' in str(err):
print 'You need python-lxml >= 2.0.5 for calibre'
sys.exit(1)
raise
except EnvironmentError as e:
if e.errno == errno.EACCES:
self.warning('Failed to setup completion, permission denied')
if self.opts.fatal_errors:
raise
self.task_failed('Setting up completion failed')
except:
if self.opts.fatal_errors:
raise
self.task_failed('Setting up completion failed')
# }}}
def setup_desktop_integration(self): # {{{
try:
self.info('Setting up desktop integration...')
env = os.environ.copy()
cc = check_call
if getattr(sys, 'frozen_path', False) and 'LD_LIBRARY_PATH' in env:
paths = env.get('LD_LIBRARY_PATH', '').split(os.pathsep)
paths = [x for x in paths if x]
npaths = [x for x in paths if x != sys.frozen_path+'/lib']
env['LD_LIBRARY_PATH'] = os.pathsep.join(npaths)
cc = partial(check_call, env=env)
with TemporaryDirectory() as tdir, CurrentDir(tdir), PreserveMIMEDefaults():
def install_single_icon(iconsrc, basename, size, context, is_last_icon=False):
filename = '%s-%s.png' % (basename, size)
render_img(iconsrc, filename, width=int(size), height=int(size))
cmd = ['xdg-icon-resource', 'install', '--noupdate', '--context', context, '--size', str(size), filename, basename]
if is_last_icon:
del cmd[2]
cc(cmd)
self.icon_resources.append((context, basename, str(size)))
def install_icons(iconsrc, basename, context, is_last_icon=False):
sizes = (16, 32, 48, 64, 128, 256)
for size in sizes:
install_single_icon(iconsrc, basename, size, context, is_last_icon and size is sizes[-1])
icons = filter(None, [x.strip() for x in '''\
mimetypes/lrf.png application-lrf mimetypes
mimetypes/lrf.png text-lrs mimetypes
mimetypes/mobi.png application-x-mobipocket-ebook mimetypes
mimetypes/tpz.png application-x-topaz-ebook mimetypes
mimetypes/azw2.png application-x-kindle-application mimetypes
mimetypes/azw3.png application-x-mobi8-ebook mimetypes
lt.png calibre-gui apps
viewer.png calibre-viewer apps
tweak.png calibre-ebook-edit apps
'''.splitlines()])
for line in icons:
iconsrc, basename, context = line.split()
install_icons(iconsrc, basename, context, is_last_icon=line is icons[-1])
mimetypes = set()
for x in all_input_formats():
mt = guess_type('dummy.'+x)[0]
if mt and 'chemical' not in mt and 'ctc-posml' not in mt:
mimetypes.add(mt)
mimetypes.discard('application/octet-stream')
def write_mimetypes(f):
f.write('MimeType=%s;\n'%';'.join(mimetypes))
from calibre.ebooks.oeb.polish.main import SUPPORTED
from calibre.ebooks.oeb.polish.import_book import IMPORTABLE
f = open('calibre-lrfviewer.desktop', 'wb')
f.write(VIEWER)
f.close()
f = open('calibre-ebook-viewer.desktop', 'wb')
f.write(EVIEWER)
write_mimetypes(f)
f = open('calibre-ebook-edit.desktop', 'wb')
f.write(ETWEAK)
mt = {guess_type('a.' + x.lower())[0] for x in (SUPPORTED|IMPORTABLE)} - {None, 'application/octet-stream'}
f.write('MimeType=%s;\n'%';'.join(mt))
f.close()
f = open('calibre-gui.desktop', 'wb')
f.write(GUI)
write_mimetypes(f)
f.close()
des = ('calibre-gui.desktop', 'calibre-lrfviewer.desktop',
'calibre-ebook-viewer.desktop', 'calibre-ebook-edit.desktop')
appdata = os.path.join(os.path.dirname(self.opts.staging_sharedir), 'metainfo')
if not os.path.exists(appdata):
try:
os.mkdir(appdata)
except:
self.warning('Failed to create %s not installing appdata files' % appdata)
if os.path.exists(appdata) and not os.access(appdata, os.W_OK):
self.warning('Do not have write permissions for %s not installing appdata files' % appdata)
else:
from calibre.utils.localization import get_all_translators
translators = dict(get_all_translators())
APPDATA = get_appdata()
for x in des:
cmd = ['xdg-desktop-menu', 'install', '--noupdate', './'+x]
cc(' '.join(cmd), shell=True)
self.menu_resources.append(x)
ak = x.partition('.')[0]
if ak in APPDATA and os.access(appdata, os.W_OK):
self.appdata_resources.append(write_appdata(ak, APPDATA[ak], appdata, translators))
cc(['xdg-desktop-menu', 'forceupdate'])
MIME = P('calibre-mimetypes.xml')
self.mime_resources.append(MIME)
cc(['xdg-mime', 'install', MIME])
except Exception:
if self.opts.fatal_errors:
raise
self.task_failed('Setting up desktop integration failed')
# }}}
def option_parser():
from calibre.utils.config import OptionParser
parser = OptionParser()
parser.add_option('--make-errors-fatal', action='store_true', default=False,
dest='fatal_errors', help='If set die on errors.')
parser.add_option('--root', dest='staging_root', default='/usr',
help='Prefix under which to install files')
parser.add_option('--bindir', default=None, dest='staging_bindir',
help='Location where calibre launcher scripts were installed. Typically /usr/bin')
parser.add_option('--sharedir', default=None, dest='staging_sharedir',
help='Location where calibre resources were installed, typically /usr/share/calibre')
return parser
def options(option_parser):
parser = option_parser()
options = parser.option_list
for group in parser.option_groups:
options += group.option_list
opts = []
for opt in options:
opts.extend(opt._short_opts)
opts.extend(opt._long_opts)
return opts
def opts_and_words(name, op, words, takes_files=False):
opts = '|'.join(options(op))
words = '|'.join([w.replace("'", "\\'") for w in words])
fname = name.replace('-', '_')
return ('_'+fname+'()'+
'''
{
local cur opts
local IFS=$'|\\t'
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
opts="%s"
words="%s"
case "${cur}" in
-* )
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
COMPREPLY=( $( echo ${COMPREPLY[@]} | sed 's/ /\\\\ /g' | tr '\\n' '\\t' ) )
return 0
;;
* )
COMPREPLY=( $(compgen -W "${words}" -- ${cur}) )
COMPREPLY=( $( echo ${COMPREPLY[@]} | sed 's/ /\\\\ /g' | tr '\\n' '\\t' ) )
return 0
;;
esac
}
complete -F _'''%(opts, words) + fname + ' ' + name +"\n\n").encode('utf-8')
pics = {'jpg', 'jpeg', 'gif', 'png', 'bmp'}
pics = list(sorted(pics))  # for reproducibility
def opts_and_exts(name, op, exts, cover_opts=('--cover',), opf_opts=(),
file_map={}):
opts = ' '.join(options(op))
exts.extend([i.upper() for i in exts])
exts='|'.join(sorted(exts))
fname = name.replace('-', '_')
spics = pics + [i.upper() for i in pics]
spics = '|'.join(sorted(spics))
special_exts_template = '''\
%s )
_filedir %s
return 0
;;
'''
extras = []
for eopts, eexts in ((cover_opts, "${pics}"), (opf_opts, "'@(opf)'")):
for opt in eopts:
extras.append(special_exts_template%(opt, eexts))
extras = '\n'.join(extras)
return '_'+fname+'()'+\
'''
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
opts="%(opts)s"
pics="@(%(pics)s)"
case "${prev}" in
%(extras)s
esac
case "${cur}" in
%(extras)s
-* )
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
;;
* )
_filedir '@(%(exts)s)'
return 0
;;
esac
}
complete -o filenames -F _'''%dict(pics=spics,
opts=opts, extras=extras, exts=exts) + fname + ' ' + name +"\n\n"
VIEWER = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=LRF Viewer
GenericName=Viewer for LRF files
Comment=Viewer for LRF files (SONY ebook format files)
TryExec=lrfviewer
Exec=lrfviewer %f
Icon=calibre-viewer
MimeType=application/x-sony-bbeb;
Categories=Graphics;Viewer;
'''
EVIEWER = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=E-book Viewer
GenericName=Viewer for E-books
Comment=Viewer for E-books in all the major formats
TryExec=ebook-viewer
Exec=ebook-viewer --detach %f
Icon=calibre-viewer
Categories=Graphics;Viewer;
'''
ETWEAK = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=E-book Editor
GenericName=Editor for E-books
Comment=Edit E-books in various formats
TryExec=ebook-edit
Exec=ebook-edit --detach %f
Icon=calibre-ebook-edit
Categories=Office;
'''
GUI = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=calibre
GenericName=E-book library management
Comment=E-book library management: Convert, view, share, catalogue all your e-books
TryExec=calibre
Exec=calibre --detach %F
Icon=calibre-gui
Categories=Office;
'''
def get_appdata():
_ = lambda x: x # Make sure the text below is not translated, but is marked for translation
return {
'calibre-gui': {
'name':'calibre',
'summary':_('The one stop solution to all your e-book needs'),
'description':(
_('calibre is the one stop solution to all your e-book needs.'),
_('You can use calibre to catalog your books, fetch metadata for them automatically, convert them from and to all the various e-book formats, send them to your e-book reader devices, read the books on your computer, edit the books in a dedicated e-book editor and even make them available over the network with the built-in Content server. You can also download news and periodicals in e-book format from over a thousand different news and magazine websites.') # noqa
),
'screenshots':(
(1408, 792, 'https://lh4.googleusercontent.com/-bNE1hc_3pIc/UvHLwKPGBPI/AAAAAAAAASA/8oavs_c6xoU/w1408-h792-no/main-default.png',),
(1408, 792, 'https://lh4.googleusercontent.com/-Zu2httSKABE/UvHMYK30JJI/AAAAAAAAATg/dQTQUjBvV5s/w1408-h792-no/main-grid.png'),
(1408, 792, 'https://lh3.googleusercontent.com/-_trYUjU_BaY/UvHMYSdKhlI/AAAAAAAAATc/auPA3gyXc6o/w1408-h792-no/main-flow.png'),
),
},
'calibre-ebook-edit': {
'name':'calibre - E-book Editor',
'summary':_('Edit the text and styles inside e-books'),
'description':(
_('The calibre e-book editor allows you to edit the text and styles inside the book with a live preview of your changes.'),
_('It can edit books in both the EPUB and AZW3 (kindle) formats. It includes various useful tools for checking the book for errors, editing the Table of Contents, performing automated cleanups, etc.'), # noqa
),
'screenshots':(
(1408, 792, 'https://lh5.googleusercontent.com/-M2MAVc3A8e4/UvHMWqGRa8I/AAAAAAAAATA/cecQeWUYBVs/w1408-h792-no/edit-default.png',),
(1408, 792, 'https://lh4.googleusercontent.com/-WhoMxuRb34c/UvHMWqN8aGI/AAAAAAAAATI/8SDBYWXb7-8/w1408-h792-no/edit-check.png'),
(887, 575, 'https://lh6.googleusercontent.com/-KwaOwHabnBs/UvHMWidjyXI/AAAAAAAAAS8/H6xmCeLnSpk/w887-h575-no/edit-toc.png'),
),
},
'calibre-ebook-viewer': {
'name':'calibre - E-book Viewer',
'summary':_('Read e-books in over a dozen different formats'),
'description': (
_('The calibre E-book viewer allows you to read e-books in over a dozen different formats.'),
_('It has a full screen mode for distraction free reading and can display the text with multiple columns per screen.'),
),
'screenshots':(
(1408, 792, 'https://lh5.googleusercontent.com/-dzSO82BPpaE/UvHMYY5SpNI/AAAAAAAAATk/I_kF9fYWrZM/w1408-h792-no/viewer-default.png',),
(1920, 1080, 'https://lh6.googleusercontent.com/-n32Ae5RytAk/UvHMY0QD94I/AAAAAAAAATs/Zw8Yz08HIKk/w1920-h1080-no/viewer-fs.png'),
),
},
}
def write_appdata(key, entry, base, translators):
from lxml.etree import tostring
from lxml.builder import E
fpath = os.path.join(base, '%s.appdata.xml' % key)
screenshots = E.screenshots()
for w, h, url in entry['screenshots']:
s = E.screenshot(E.image(url, width=str(w), height=str(h)))
screenshots.append(s)
screenshots[0].set('type', 'default')
description = E.description()
for para in entry['description']:
description.append(E.p(para))
for lang, t in translators.iteritems():
tp = t.ugettext(para)
if tp != para:
description.append(E.p(tp))
description[-1].set('{http://www.w3.org/XML/1998/namespace}lang', lang)
root = E.component(
E.id(key + '.desktop'),
E.name(entry['name']),
E.metadata_license('CC0-1.0'),
E.project_license('GPL-3.0'),
E.summary(entry['summary']),
description,
E.url('https://calibre-ebook.com', type='homepage'),
screenshots,
type='desktop'
)
for lang, t in translators.iteritems():
tp = t.ugettext(entry['summary'])
if tp != entry['summary']:
root.append(E.summary(tp))
root[-1].set('{http://www.w3.org/XML/1998/namespace}lang', lang)
with open(fpath, 'wb') as f:
f.write(tostring(root, encoding='utf-8', xml_declaration=True, pretty_print=True))
return fpath
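# A hypothetical invocation sketch (not part of this file; the target
# directory and the translators mapping are assumed) showing how
# write_appdata() pairs with get_appdata() above:
#
#     translators = {}  # e.g. a mapping of language code -> gettext translations
#     for key, entry in get_appdata().iteritems():
#         write_appdata(key, entry, '/usr/share/appdata', translators)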
def render_img(image, dest, width=128, height=128):
from PyQt5.Qt import QImage, Qt
img = QImage(I(image)).scaled(width, height, Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
img.save(dest)
def main():
p = option_parser()
opts, args = p.parse_args()
PostInstall(opts)
return 0
def cli_index_strings():
return _('Command Line Interface'), _(
'On macOS, the command line tools are inside the calibre bundle, for example,'
' if you installed calibre in :file:`/Applications` the command line tools'
' are in :file:`/Applications/calibre.app/Contents/console.app/Contents/MacOS/`.'), _(
'Documented commands'), _('Undocumented commands'), _(
'You can see usage for undocumented commands by executing them without arguments in a terminal.'), _(
'Change language'), _('Search')
if __name__ == '__main__':
sys.exit(main())
|
jelly/calibre
|
src/calibre/linux.py
|
Python
|
gpl-3.0
| 45,645 | 0.003856 |
# © 2016 Tecnativa - Vicent Cubells
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl-3.0).
from odoo.exceptions import UserError
from odoo.tests import common
class TestRecursion(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestRecursion, cls).setUpClass()
cls.department_obj = cls.env["res.partner.department"]
# Instances
cls.dpt1 = cls.department_obj.create({"name": "Dpt. 1"})
cls.dpt2 = cls.department_obj.create(
{"name": "Dep. 2", "parent_id": cls.dpt1.id}
)
def test_recursion(self):
""" Testing recursion """
self.dpt3 = self.department_obj.create(
{"name": "Dep. 3", "parent_id": self.dpt2.id}
)
# Creating a parent's child department using dpt1.
with self.assertRaises(UserError):
self.dpt1.write(vals={"parent_id": self.dpt3.id})
|
OCA/partner-contact
|
partner_contact_department/tests/test_recursion.py
|
Python
|
agpl-3.0
| 915 | 0 |
#
# The MIT License
#
# Copyright (c) 2009 Ben Morris
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Runkeeper Python API
The Runkeeper Python API is used to interact with
Runkeeper (http://runkeeper.com). Runkeeper does not provide an official API
so BeautifulSoup is used to scrape pages.
Here is a basic example of getting the total distance for a user
from runkeeper import User
user = User("bnmrrs")
activities = user.get_all_activities()
total_distance = 0
for activity in activities:
total_distance += activity.get_distance()
print total_distance
"""
import urllib
def get(url):
"""Used to make very basic HTTP requests. Currently no error handling.
    Takes a URL as its only argument and returns the resulting page
"""
f = urllib.urlopen(url)
s = f.read()
f.close()
return s
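# A minimal usage sketch (not part of the original module; the URL is
# hypothetical). As noted above, get() performs no error handling, so a
# failed request surfaces as whatever urllib returns:
#
#     page = get("http://runkeeper.com/user/bnmrrs/profile")
#     print page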
|
bnmrrs/runkeeper-api
|
runkeeper/httpclient.py
|
Python
|
mit
| 1,887 | 0.00159 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
# Utility classes that can be used to generate parse tree patterns. These
# utilities take a sample expression or statement, and return a parse tree that
# uses symbolic names for the nodes. You'll need to then do additional editing on
# the parse tree as needed (for example, replacing a specific value with a pattern).
import parser
from symbol import sym_name
from token import tok_name
from pprint import pprint
# pretty-prints a symbolic parse tree for expr (as for use with 'eval')
# the symbolic names will be strings, so to use this as a constant
# in some code you'll need to replace the quotes with nothing
# (except for the actual string constants ...)
def print_eval_tree(expr):
t = parser.ast2tuple(parser.expr(expr))
# t = parser.ast2tuple(parser.suite(expr))
pprint(integer2symbolic(t))
# same as print_eval_tree, except as for use with 'exec' (for definitions, statements, etc)
def print_exec_tree(expr):
t = parser.ast2tuple(parser.suite(expr))
pprint(integer2symbolic(t))
# take a parse tree represented as a tuple, and return a new tuple
# where the integers representing internal nodes and terminal nodes are
# replaced with symbolic names
def integer2symbolic(fragment):
head = fragment[0]
if head in sym_name:
rest = tuple(map(integer2symbolic, fragment[1:]))
return ('symbol.' + sym_name[head], ) + rest
if head in tok_name:
return ('token.' + tok_name[head], ) + fragment[1:]
raise ValueError("bad value in parsetree")
# examples of use:
# print_eval_tree("urbansim.gridcell.population**2")
# print_exec_tree("x = urbansim.gridcell.population**2")
s = """def foo(x=5):
y = x+3
return y*2
"""
print_exec_tree(s)
|
apdjustino/DRCOG_Urbansim
|
src/opus_core/variables/utils/parse_tree_pattern_generator.py
|
Python
|
agpl-3.0
| 1,882 | 0.005845 |
#!/usr/bin/env python
from collections import namedtuple
Payload = namedtuple('Payload', ['iden', 'body', 'send_date', 'sender'])
class Handler(object):
@staticmethod
def config():
return
def __init__(self, logger):
self.logger = logger
    def create_translator(self):
        return
    def create_listener(self, task):
        return
    def configure_modules(self, modules, push_config):
        return
class Translator(object):
    def get_recent(self):
        return
    def is_valid(self, message):
        return
    def get_module(self, message, modules):
        return
    def cleanup(self, message):
        return
    def to_payload(self, message):
        return
    def respond(self, message, response):
        return
|
JimboMonkey1234/pushserver
|
handlers/Handler.py
|
Python
|
mit
| 725 | 0.002759 |
class StreamCache:
def __init__(self):
self.initiated = False
self.new_cache = []
self.old_cache = []
def push(self, streams):
assert isinstance(streams, list)
self.old_cache = self.new_cache
self.new_cache = streams
if not self.initiated:
self.initiated = True
def get_all(self):
return set(self.new_cache + self.old_cache)
def __contains__(self, stream):
return stream in self.new_cache or stream in self.old_cache
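# A minimal usage sketch (stream names assumed) illustrating the
# two-generation cache: push() shifts the current list into old_cache,
# so an item survives exactly one extra push before being evicted.
#
#     cache = StreamCache()
#     cache.push(['twitch.tv/foo', 'twitch.tv/bar'])
#     cache.push(['twitch.tv/baz'])
#     'twitch.tv/foo' in cache   # True - still held in old_cache
#     cache.get_all()            # {'twitch.tv/foo', 'twitch.tv/bar', 'twitch.tv/baz'}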
|
moopie/botologist
|
plugins/streams/cache.py
|
Python
|
mit
| 445 | 0.031461 |
#!/usr/bin/env python
""" Assignment 1, Exercise 3, INF1340, Fall, 2015. Troubleshooting Car Issues.
This module contains one function diagnose_car(). It is an expert system to
interactively diagnose car issues.
"""
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
"""
"""
# Interactively queries the user with yes/no questions to identify a possible issue with a car.
# Inputs: As is but not nested - same indentation all the way through
# Expected Outputs: To follow the decision logic of the question tree
# Errors: Did not proceed according to logic. fixed by nesting properly
"""
"""
def diagnose_car():
silent = raw_input("Is the car silent when you turn the key? ")
#this begins the line of questions on the left side of the question tree
if silent == 'Y':
corroded = raw_input("Are the battery terminals corroded?")
if corroded == 'Y':
print "Clean terminals and try starting again."
elif corroded == 'N':
print "Replace cables and try again."
elif silent == 'N':
#this begins the line of questions on the right side of the question tree
clicking = raw_input("Does the car make a clicking noise?")
if clicking == 'Y':
print "Replace the battery."
elif clicking == 'N':
crank = raw_input("Does the car crank up but fails to start?")
if crank == 'Y':
print "Check spark plug connections."
elif crank == 'N':
start_and_die = raw_input("Does the engine start and then die?")
if start_and_die == 'Y':
fuel_injection = raw_input("Does your car have fuel injection?")
if fuel_injection == 'N':
print "Check to ensure the choke is opening and closing."
elif fuel_injection == 'Y':
print "Get it in for service."
elif start_and_die == 'N':
print "Engine is not getting enough fuel. Clean fuel pump."
diagnose_car()
|
SLiana/inf1340_2015_asst1
|
exercise3.py
|
Python
|
mit
| 2,130 | 0.004695 |
import bpy
import os
# join them together ctrl+j
bpy.ops.object.join()
def get_override(area_type, region_type):
for area in bpy.context.screen.areas:
if area.type == area_type:
for region in area.regions:
if region.type == region_type:
override = {'area': area, 'region': region}
return override
#error message if the area or region wasn't found
raise RuntimeError("Wasn't able to find", region_type," in area ", area_type,
"\n Make sure it's open while executing script.")
#we need to override the context of our operator
override = get_override( 'VIEW_3D', 'WINDOW' )
#rotate about the Z-axis (the axis argument below is (0,0,1))
bpy.ops.transform.rotate(override, axis=(0,0,1))
bpy.ops.transform.rotate(override, axis=(0,0,1))
blend_file_path = bpy.data.filepath
directory = os.path.dirname(blend_file_path)
#target_file = os.path.join(directory, 'agent.obj')
#target_file = os.path.join(directory, 'exported/agent.b3d')
target_file = os.path.join(directory, 'iron_golem.b3d')
#bpy.ops.export_scene.obj(filepath=target_file)
bpy.ops.screen.b3d_export(filepath=target_file)
#bpy.ops.export_scene.obj()
#bpy.ops.screen.b3d_export()
# exits blender
bpy.ops.wm.quit_blender()
|
22i/minecraft-voxel-blender-models
|
models/extra/blender-scripting/lib/iron_golem.py
|
Python
|
gpl-3.0
| 1,325 | 0.018113 |
import time
import appium
import selenium
from common.helper import *
from common.processes import *
capabilities = {
"androidDeviceSocket": "com.peerio_devtools_remote",
"chromeOptions": {
'androidPackage': 'com.peerio',
'androidActivity': '.MainActivity',
"androidDeviceSocket": "com.peerio_devtools_remote"
}
}
restartAppium()
restartChromedriver()
test_connect_android()
|
PeerioTechnologies/peerio-client-mobile
|
tests/test.py
|
Python
|
gpl-3.0
| 414 | 0.002415 |
#
# Utility stackables
#
from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stackable import Stackable, StackableError
import json, pickle
from time import sleep
from threading import Thread, Event
from datetime import datetime, timedelta
class StackablePickler(Stackable):
'Pickle codec'
def process_input(self, data):
return pickle.loads(data)
def process_output(self, data):
return pickle.dumps(data, protocol=2)
class StackableJSON(Stackable):
'JSON codec'
def process_input(self, data):
try:
return json.loads(data)
except ValueError:
return None
def process_output(self, data):
return json.dumps(data)
class StackableWriter(Stackable):
'Reads and writes from/to a file'
def __init__(self, filename):
super(StackableWriter, self).__init__()
self.fd = open(filename, "w")
def process_input(self, data):
self.fd.write(data)
self.fd.flush()
def process_output(self, data):
return data
# def poll(self):
# return self.fd.read()
class StackablePrinter(Stackable):
'''Prints all input and output, and returns it unmodified.
Useful for quick debugging of Stackables.'''
def __init__(self, printer=print):
'Takes a printing function as argument - defaults to print'
self.printer = printer
super(StackablePrinter, self).__init__()
def process_input(self, data):
self.printer(data)
return data
def process_output(self, data):
self.printer(data)
return data
import sys
class StackableStdout(Stackable):
def process_input(self, data):
sys.stdout.write(data)
return data
def process_output(self, data):
return data
from collections import deque
class StackableInjector(Stackable):
def __init__(self):
super(StackableInjector, self).__init__()
self.in_buf = deque()
self.out_buf = deque()
def push(self, data):
self.in_buf.append(data)
def poll(self):
if len(self.in_buf):
return self.in_buf.popleft()
return None
def process_output(self, data):
self.out_buf.append(data)
return data
class StackablePoker(Stackable):
def __init__(self, interval=20, send=True, ping_string='__stack_ping', pong_string='__stack_pong'):
super(StackablePoker, self).__init__()
self.ping_string = ping_string.encode('utf-8')
self.pong_string = pong_string.encode('utf-8')
self.w = Event()
self.interval = interval
self.send = send
if self.send:
self.reset()
def _detach(self):
super(StackablePoker, self)._detach()
self.w.set()
def reset(self):
self.timestamp = datetime.now()
def ping():
self.w.wait(self.interval)
try:
self._feed(self.ping_string)
except:
pass
x = Thread(target=ping)
x.daemon = True
x.start()
def process_output(self, data):
if self.send and (datetime.now() - self.timestamp) > timedelta(seconds=30):
raise StackableError('Pong not received')
return data
def process_input(self, data):
if data == self.pong_string:
self.reset()
return None
elif data == self.ping_string:
self._feed(self.pong_string)
return None
elif self.send and (datetime.now() - self.timestamp) > timedelta(seconds=30):
raise StackableError('Pong not received')
return data
|
joushou/stackable
|
utils.py
|
Python
|
mit
| 3,175 | 0.033701 |
from django.urls import path, include
from .routers import router
from . import views
app_name = "milage"
urlpatterns = [
path("api/", include(router.urls), name="api_router"),
path("class-based/", views.ClassBasedView.as_view(), name="class_based_drf"),
path(
"class-based-detail/<int:pk>",
views.ClassBasedDetailView.as_view(),
name="class_detail",
),
path("", views.BaseView.as_view(), name="index"),
]
|
GarrettArm/TheDjangoBook
|
mysite_project/milage/urls.py
|
Python
|
gpl-3.0
| 452 | 0.002212 |
import os, extended_messages
from setuptools import setup, find_packages
if extended_messages.VERSION[-1] == 'final':
    CLASSIFIERS = ['Development Status :: 5 - Production/Stable']
elif 'beta' in extended_messages.VERSION[-1]:
CLASSIFIERS = ['Development Status :: 4 - Beta']
else:
CLASSIFIERS = ['Development Status :: 3 - Alpha']
CLASSIFIERS += [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
setup(
author = extended_messages.__maintainer__,
author_email = extended_messages.__email__,
name = 'django-extended-messages',
version = extended_messages.__version__,
description = 'Extended version of django.contrib.messages',
long_description = open(os.path.join(os.path.dirname(__file__), 'README.md')).read(),
url = 'http://github.com/Anber/django-extended-messages/tree/master',
license = 'BSD License',
platforms=['OS Independent'],
classifiers = CLASSIFIERS,
requires=[
'django (>1.2.0)',
'simplejson',
],
packages=find_packages(),
zip_safe=False
)
|
Anber/django-extended-messages
|
setup.py
|
Python
|
bsd-3-clause
| 1,190 | 0.017647 |
[
{
"name": "syoukuloader",
"status": "0"
},
{
"name": "syoukuplayer",
"status": "0"
},
{
"name": "sku6",
"status": "0"
},
{
"name": "studou",
"status": "0"
},
{
"name": "sletv",
"status": "0"
},
{
"name": "siqiyi",
"status": "0"
},
{
"name": "spps",
"status": "0"
},
{
"name": "ssohu",
"status": "0"
},
{
"name": "ssohu_live",
"status": "0"
}
]
|
kafan15536900/ADfree-Player-Offline
|
onServer/ruletool/oconfiglist.py
|
Python
|
gpl-3.0
| 562 | 0 |
from .item_status import * # noqa
|
vyos-legacy/vyconfd
|
vyconf/utils/__init__.py
|
Python
|
lgpl-2.1
| 35 | 0 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from wtforms.fields import StringField, BooleanField, SelectField
from wtforms.validators import DataRequired, ValidationError
from indico.core.db import db
from indico.modules.groups.models.groups import LocalGroup
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import PrincipalListField
class SearchForm(IndicoForm):
provider = SelectField(_('Provider'))
name = StringField(_('Group name'), [DataRequired()])
exact = BooleanField(_('Exact match'))
class EditGroupForm(IndicoForm):
name = StringField(_('Group name'), [DataRequired()])
members = PrincipalListField(_('Group members'))
def __init__(self, *args, **kwargs):
self.group = kwargs.pop('group', None)
super(EditGroupForm, self).__init__(*args, **kwargs)
def validate_name(self, field):
query = LocalGroup.find(db.func.lower(LocalGroup.name) == field.data.lower())
if self.group:
query = query.filter(LocalGroup.id != self.group.id)
if query.count():
raise ValidationError(_('A group with this name already exists.'))
|
belokop/indico_bare
|
indico/modules/groups/forms.py
|
Python
|
gpl-3.0
| 1,904 | 0.000525 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import datetime
import time
import logging
import traceback
from django.conf import settings
from django import forms
from django.forms.widgets import Textarea
from django.forms.widgets import HiddenInput
from django.core.urlresolvers import reverse
from omeroweb.custom_forms import NonASCIIForm
from custom_forms import UrlField, MetadataModelChoiceField, \
AnnotationModelMultipleChoiceField, \
ObjectModelMultipleChoiceField
from omeroweb.webadmin.custom_forms import ExperimenterModelChoiceField, \
ExperimenterModelMultipleChoiceField, \
GroupModelMultipleChoiceField, GroupModelChoiceField
logger = logging.getLogger(__name__)
##################################################################
# Static values
# TODO: change to reverse
help_button = "%swebgateway/img/help16.png" % settings.STATIC_URL
help_wiki = '<span id="markup" title="Markups - <small>If you\'d like to include URL please type:<br/><b>http://www.openmicroscopy.org.uk/</b></small>"><img src="%s" /></span>' % help_button
help_wiki_c = '<span id="markup_c" title="Markups - <small>If you\'d like to include URL please type:<br/><b>http://www.openmicroscopy.org.uk/</b></small>"><img src="%s" /></span>' % help_button
help_enable = '<span id="enable" title="Enable/Disable - <small>This option allows the owner to keep the access control of the share.</small>"><img src="%s" /></span>' % help_button
help_expire = '<span id="expire" title="Expire date - <small>This date defines when share will stop being available. Date format: YY-MM-DD.</small>"><img src="%s" /></span>' % help_button
#################################################################
# Non-model Form
class GlobalSearchForm(NonASCIIForm):
search_query = forms.CharField(widget=forms.TextInput(attrs={'size':25}))
class ShareForm(NonASCIIForm):
def __init__(self, *args, **kwargs):
super(ShareForm, self).__init__(*args, **kwargs)
try:
if kwargs['initial']['shareMembers']: pass
self.fields['members'] = ExperimenterModelMultipleChoiceField(queryset=kwargs['initial']['experimenters'], initial=kwargs['initial']['shareMembers'], widget=forms.SelectMultiple(attrs={'size':5}))
except:
self.fields['members'] = ExperimenterModelMultipleChoiceField(queryset=kwargs['initial']['experimenters'], widget=forms.SelectMultiple(attrs={'size':5}))
self.fields.keyOrder = ['message', 'expiration', 'enable', 'members']#, 'guests']
message = forms.CharField(widget=forms.Textarea(attrs={'rows': 7, 'cols': 39}), help_text=help_wiki_c)
expiration = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':20}), label="Expire date", help_text=help_expire, required=False)
enable = forms.CharField(widget=forms.CheckboxInput(attrs={'size':1}), required=False, help_text=help_enable)
#guests = MultiEmailField(required=False, widget=forms.TextInput(attrs={'size':75}))
def clean_expiration(self):
if self.cleaned_data['expiration'] is not None and len(self.cleaned_data['expiration']) < 1:
return None
if self.cleaned_data['expiration'] is not None:
d = str(self.cleaned_data['expiration']).rsplit("-")
try:
date = datetime.datetime.strptime(("%s-%s-%s" % (d[0],d[1],d[2])), "%Y-%m-%d")
except:
raise forms.ValidationError('Date is in the wrong format. YY-MM-DD')
if time.mktime(date.timetuple()) <= time.time():
raise forms.ValidationError('Expire date must be in the future.')
return self.cleaned_data['expiration']
class BasketShareForm(ShareForm):
def __init__(self, *args, **kwargs):
super(BasketShareForm, self).__init__(*args, **kwargs)
try:
self.fields['image'] = GroupModelMultipleChoiceField(queryset=kwargs['initial']['images'], initial=kwargs['initial']['selected'], widget=forms.SelectMultiple(attrs={'size':10}))
except:
self.fields['image'] = GroupModelMultipleChoiceField(queryset=kwargs['initial']['images'], widget=forms.SelectMultiple(attrs={'size':10}))
class ContainerForm(NonASCIIForm):
name = forms.CharField(max_length=250, widget=forms.TextInput(attrs={'size':45}))
description = forms.CharField(widget=forms.Textarea(attrs={'rows': 2, 'cols': 49}), required=False, help_text=help_wiki)
class ContainerNameForm(NonASCIIForm):
name = forms.CharField(max_length=250, widget=forms.TextInput(attrs={'size':45}))
class ContainerDescriptionForm(NonASCIIForm):
description = forms.CharField(widget=forms.Textarea(attrs={'rows': 3, 'cols': 39}), required=False)
class BaseAnnotationForm(NonASCIIForm):
"""
This is the superclass of the various forms used for annotating single or multiple objects.
All these forms use hidden fields to specify the object(s) currently being annotated.
"""
def __init__(self, *args, **kwargs):
super(BaseAnnotationForm, self).__init__(*args, **kwargs)
images = 'images' in kwargs['initial'] and kwargs['initial']['images'] or list()
if len(images) > 0:
try:
self.fields['image'] = ObjectModelMultipleChoiceField(queryset=images, initial=kwargs['initial']['selected']['images'], widget=forms.SelectMultiple(attrs={'size':10}), required=False)
except:
self.fields['image'] = ObjectModelMultipleChoiceField(queryset=images, widget=forms.SelectMultiple(attrs={'size':10}), required=False)
datasets = 'datasets' in kwargs['initial'] and kwargs['initial']['datasets'] or list()
if len(datasets) > 0:
try:
self.fields['dataset'] = ObjectModelMultipleChoiceField(queryset=datasets, initial=kwargs['initial']['selected']['datasets'], widget=forms.SelectMultiple(attrs={'size':10}), required=False)
except:
self.fields['dataset'] = ObjectModelMultipleChoiceField(queryset=datasets, widget=forms.SelectMultiple(attrs={'size':10}), required=False)
projects = 'projects' in kwargs['initial'] and kwargs['initial']['projects'] or list()
if len(projects) > 0:
try:
self.fields['project'] = ObjectModelMultipleChoiceField(queryset=projects, initial=kwargs['initial']['selected']['projects'], widget=forms.SelectMultiple(attrs={'size':10}), required=False)
except:
self.fields['project'] = ObjectModelMultipleChoiceField(queryset=projects, widget=forms.SelectMultiple(attrs={'size':10}), required=False)
screens = 'screens' in kwargs['initial'] and kwargs['initial']['screens'] or list()
if len(screens) > 0:
try:
self.fields['screen'] = ObjectModelMultipleChoiceField(queryset=screens, initial=kwargs['initial']['selected']['screens'], widget=forms.SelectMultiple(attrs={'size':10}), required=False)
except:
self.fields['screen'] = ObjectModelMultipleChoiceField(queryset=screens, widget=forms.SelectMultiple(attrs={'size':10}), required=False)
plates = 'plates' in kwargs['initial'] and kwargs['initial']['plates'] or list()
if len(plates) > 0:
try:
self.fields['plate'] = ObjectModelMultipleChoiceField(queryset=plates, initial=kwargs['initial']['selected']['plates'], widget=forms.SelectMultiple(attrs={'size':10}), required=False)
except:
self.fields['plate'] = ObjectModelMultipleChoiceField(queryset=plates, widget=forms.SelectMultiple(attrs={'size':10}), required=False)
acquisitions = 'acquisitions' in kwargs['initial'] and kwargs['initial']['acquisitions'] or list()
if len(acquisitions) > 0:
try:
self.fields['acquisition'] = ObjectModelMultipleChoiceField(queryset=acquisitions, initial=kwargs['initial']['selected']['acquisitions'], widget=forms.SelectMultiple(attrs={'size':10}), required=False)
except:
self.fields['acquisition'] = ObjectModelMultipleChoiceField(queryset=acquisitions, widget=forms.SelectMultiple(attrs={'size':10}), required=False)
wells = 'wells' in kwargs['initial'] and kwargs['initial']['wells'] or list()
if len(wells) > 0:
try:
self.fields['well'] = ObjectModelMultipleChoiceField(queryset=wells, initial=kwargs['initial']['selected']['wells'], widget=forms.SelectMultiple(attrs={'size':10}), required=False)
except:
self.fields['well'] = ObjectModelMultipleChoiceField(queryset=wells, widget=forms.SelectMultiple(attrs={'size':10}), required=False)
shares = 'shares' in kwargs['initial'] and kwargs['initial']['shares'] or list()
if len(shares) > 0:
try:
self.fields['share'] = ObjectModelMultipleChoiceField(queryset=shares, initial=kwargs['initial']['selected']['shares'], widget=forms.SelectMultiple(attrs={'size':10}), required=False)
except:
self.fields['share'] = ObjectModelMultipleChoiceField(queryset=shares, widget=forms.SelectMultiple(attrs={'size':10}), required=False)
class TagsAnnotationForm(BaseAnnotationForm):
""" Form for annotating one or more objects with existing Tags or New tags """
def __init__(self, *args, **kwargs):
super(TagsAnnotationForm, self).__init__(*args, **kwargs)
self.fields['tags'] = AnnotationModelMultipleChoiceField(queryset=kwargs['initial']['tags'],
widget=forms.SelectMultiple(attrs={'size':6, 'class':'existing'}), required=False)
tag = forms.CharField(widget=forms.TextInput(attrs={'size':36}), required=False)
description = forms.CharField(widget=forms.Textarea(attrs={'rows': 3, 'cols': 31}), required=False, label="Desc")
class FilesAnnotationForm(BaseAnnotationForm):
def __init__(self, *args, **kwargs):
super(FilesAnnotationForm, self).__init__(*args, **kwargs)
self.fields['files'] = AnnotationModelMultipleChoiceField(queryset=kwargs['initial']['files'], widget=forms.SelectMultiple(attrs={'size':8, 'class':'existing'}), required=False)
annotation_file = forms.FileField(required=False)
class CommentAnnotationForm(BaseAnnotationForm):
comment = forms.CharField(widget=forms.Textarea(attrs={'rows': 2, 'cols': 39}))
class UsersForm(forms.Form):
def __init__(self, *args, **kwargs):
super(UsersForm, self).__init__(*args, **kwargs)
try:
empty_label = kwargs['initial']['empty_label']
except:
empty_label='---------'
try:
menu = kwargs['initial']['menu']
except:
menu = '----------'
try:
user = kwargs['initial']['user']
except:
user = None
users = kwargs['initial']['users']
self.fields['experimenter'] = ExperimenterModelChoiceField(queryset=users, initial=user, widget=forms.Select(attrs={'onchange':'window.location.href=\''+reverse(viewname="load_template", args=[menu])+'?experimenter=\'+this.options[this.selectedIndex].value'}), required=False, empty_label=empty_label)
if users is None or len(users)<2:
self.fields['experimenter'].widget.attrs['disabled'] = True
self.fields['experimenter'].widget.attrs['class'] = 'disabled'
self.fields.keyOrder = ['experimenter']
class ActiveGroupForm(forms.Form):
def __init__(self, *args, **kwargs):
super(ActiveGroupForm, self).__init__(*args, **kwargs)
try:
self.fields['active_group'] = GroupModelChoiceField(queryset=kwargs['initial']['mygroups'], initial=kwargs['initial']['activeGroup'], empty_label=None, widget=forms.Select(attrs={'onchange':'window.location.href=\''+reverse(viewname="change_active_group")+'?url='+kwargs['initial']['url']+'&active_group=\'+this.options[this.selectedIndex].value'}))
except:
self.fields['active_group'] = GroupModelChoiceField(queryset=kwargs['initial']['mygroups'], initial=kwargs['initial']['activeGroup'], empty_label=None, widget=forms.Select(attrs={'onchange':'window.location.href=\''+reverse(viewname="change_active_group")+'?active_group=\'+this.options[this.selectedIndex].value'}))
self.fields.keyOrder = ['active_group']
class WellIndexForm(forms.Form):
def __init__(self, *args, **kwargs):
super(WellIndexForm, self).__init__(*args, **kwargs)
rmin, rmax = kwargs['initial']['range']
choices = [(str(i), "Field#%i" % (i-rmin+1)) for i in range(rmin, rmax+1)]
self.fields['index'] = forms.ChoiceField(choices=tuple(choices), widget=forms.Select(attrs={'onchange':'changeFiled(this.options[this.selectedIndex].value);'}))
self.fields.keyOrder = ['index']
###############################
# METADATA FORMS
class MetadataChannelForm(forms.Form):
def __init__(self, *args, **kwargs):
super(MetadataChannelForm, self).__init__(*args, **kwargs)
# Logical channel
# Name
try:
if kwargs['initial']['logicalChannel'] is not None:
self.fields['name'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'name\', this.value);'}), initial=kwargs['initial']['logicalChannel'].name, required=False)
else:
self.fields['name'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'name\', this.value);'}), required=False)
self.fields['name'].widget.attrs['disabled'] = True
self.fields['name'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['name'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['name'].widget.attrs['disabled'] = True
self.fields['name'].widget.attrs['class'] = 'disabled-metadata'
# excitationWave
try:
if kwargs['initial']['logicalChannel'] is not None:
                self.fields['excitationWave'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'excitationWave\', this.value);'}), initial=kwargs['initial']['logicalChannel'].excitationWave, label="Excitation", required=False)
            else:
                self.fields['excitationWave'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'excitationWave\', this.value);'}), label="Excitation", required=False)
self.fields['excitationWave'].widget.attrs['disabled'] = True
self.fields['excitationWave'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['excitationWave'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Excitation", required=False)
self.fields['excitationWave'].widget.attrs['disabled'] = True
self.fields['excitationWave'].widget.attrs['class'] = 'disabled-metadata'
# emissionWave
try:
if kwargs['initial']['logicalChannel'] is not None:
                self.fields['emissionWave'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'emissionWave\', this.value);'}), initial=kwargs['initial']['logicalChannel'].emissionWave, label="Emission", required=False)
            else:
                self.fields['emissionWave'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'emissionWave\', this.value);'}), label="Emission", required=False)
self.fields['emissionWave'].widget.attrs['disabled'] = True
self.fields['emissionWave'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['emissionWave'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Emission", required=False)
self.fields['emissionWave'].widget.attrs['disabled'] = True
self.fields['emissionWave'].widget.attrs['class'] = 'disabled-metadata'
# ndFilter
try:
if kwargs['initial']['logicalChannel'] is not None:
                self.fields['ndFilter'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'ndFilter\', this.value);'}), initial=kwargs['initial']['logicalChannel'].ndFilter, label="ND filter [%]", required=False)
            else:
                self.fields['ndFilter'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'ndFilter\', this.value);'}), label="ND filter [%]", required=False)
self.fields['ndFilter'].widget.attrs['disabled'] = True
except:
self.fields['ndFilter'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="ND filter [%]", required=False)
self.fields['ndFilter'].widget.attrs['disabled'] = True
# pinHoleSize
try:
if kwargs['initial']['logicalChannel'] is not None:
                self.fields['pinHoleSize'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'pinHoleSize\', this.value);'}), initial=kwargs['initial']['logicalChannel'].pinHoleSize, label="Pin hole size", required=False)
            else:
                self.fields['pinHoleSize'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'pinHoleSize\', this.value);'}), label="Pin hole size", required=False)
self.fields['pinHoleSize'].widget.attrs['disabled'] = True
except:
self.fields['pinHoleSize'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Pin hole size", required=False)
self.fields['pinHoleSize'].widget.attrs['disabled'] = True
# fluor
try:
if kwargs['initial']['logicalChannel'] is not None:
                self.fields['fluor'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'fluor\', this.value);'}), initial=kwargs['initial']['logicalChannel'].fluor, required=False)
            else:
                self.fields['fluor'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'fluor\', this.value);'}), required=False)
self.fields['fluor'].widget.attrs['disabled'] = True
except:
self.fields['fluor'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['fluor'].widget.attrs['disabled'] = True
# Illumination
try:
if kwargs['initial']['logicalChannel'].getIllumination() is not None:
self.fields['illumination'] = MetadataModelChoiceField(queryset=kwargs['initial']['illuminations'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'illumination\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['logicalChannel'].getIllumination(), required=False)
else:
self.fields['illumination'] = MetadataModelChoiceField(queryset=kwargs['initial']['illuminations'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'illumination\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['illumination'].widget.attrs['disabled'] = True
except:
self.fields['illumination'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['illumination'].widget.attrs['disabled'] = True
# contrastMethods
try:
if kwargs['initial']['logicalChannel'].contrastMethod is not None:
self.fields['contrastMethod'] = MetadataModelChoiceField(queryset=kwargs['initial']['contrastMethods'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'contrastMethod\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['logicalChannel'].getContrastMethod(), label="Contrast method", required=False)
else:
self.fields['contrastMethod'] = MetadataModelChoiceField(queryset=kwargs['initial']['contrastMethods'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'contrastMethod\', this.options[this.selectedIndex].value);'}), label="Contrast method", required=False)
self.fields['contrastMethod'].widget.attrs['disabled'] = True
self.fields['contrastMethod'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['contrastMethod'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Contrast method", required=False)
self.fields['contrastMethod'].widget.attrs['disabled'] = True
self.fields['contrastMethod'].widget.attrs['class'] = 'disabled-metadata'
# Mode
try:
if kwargs['initial']['logicalChannel'].getMode() is not None:
self.fields['mode'] = MetadataModelChoiceField(queryset=kwargs['initial']['modes'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'mode\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['logicalChannel'].getMode().value, required=False)
else:
self.fields['mode'] = MetadataModelChoiceField(queryset=kwargs['initial']['modes'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'mode\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['mode'].widget.attrs['disabled'] = True
self.fields['mode'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['mode'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['mode'].widget.attrs['disabled'] = True
self.fields['mode'].widget.attrs['class'] = 'disabled-metadata'
# pockelCellSetting
try:
if kwargs['initial']['logicalChannel'].pockelCellSetting is not None:
                self.fields['pockelCellSetting'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'pockelCellSetting\', this.value);'}), initial=kwargs['initial']['logicalChannel'].pockelCellSetting, label="Pockel cell", required=False)
            else:
                self.fields['pockelCellSetting'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['logicalChannel'].id)+', \'pockelCellSetting\', this.value);'}), label="Pockel cell", required=False)
self.fields['pockelCellSetting'].widget.attrs['disabled'] = True
self.fields['pockelCellSetting'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['pockelCellSetting'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Pockel cell" ,required=False)
self.fields['pockelCellSetting'].widget.attrs['disabled'] = True
self.fields['pockelCellSetting'].widget.attrs['class'] = 'disabled-metadata'
self.fields.keyOrder = ['name', 'excitationWave', 'emissionWave', 'ndFilter', 'pinHoleSize', 'fluor', 'illumination', 'contrastMethod', 'mode', 'pockelCellSetting']
class MetadataDichroicForm(forms.Form):
def __init__(self, *args, **kwargs):
super(MetadataDichroicForm, self).__init__(*args, **kwargs)
# Manufacturer
try:
if kwargs['initial']['dichroic'].manufacturer is not None:
self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['dichroic'].id)+', \'manufacturer\', this.value);'}), initial=kwargs['initial']['dichroic'].manufacturer, required=False)
else:
self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['dichroic'].id)+', \'manufacturer\', this.value);'}), required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['manufacturer'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
# Model
try:
if kwargs['initial']['dichroic'].model is not None:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['dichroic'].id)+', \'model\', this.value);'}), initial=kwargs['initial']['dichroic'].model, required=False)
else:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['dichroic'].id)+', \'model\', this.value);'}), required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['model'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
# Serial number
try:
if kwargs['initial']['dichroic'].serialNumber is not None:
                self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['dichroic'].id)+', \'serialNumber\', this.value);'}), initial=kwargs['initial']['dichroic'].serialNumber, label="Serial number", required=False)
            else:
                self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['dichroic'].id)+', \'serialNumber\', this.value);'}), label="Serial number", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['serialNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Serial number", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
# Lot number
try:
if kwargs['initial']['dichroic'].lotNumber is not None:
                self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['dichroic'].id)+', \'lotNumber\', this.value);'}), initial=kwargs['initial']['dichroic'].lotNumber, label="Lot number", required=False)
            else:
                self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['dichroic'].id)+', \'lotNumber\', this.value);'}), label="Lot number", required=False)
self.fields['lotNumber'].widget.attrs['disabled'] = True
self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['lotNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Lot number", required=False)
self.fields['lotNumber'].widget.attrs['disabled'] = True
self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
self.fields.keyOrder = ['manufacturer', 'model', 'serialNumber', 'lotNumber']
class MetadataMicroscopeForm(forms.Form):
def __init__(self, *args, **kwargs):
super(MetadataMicroscopeForm, self).__init__(*args, **kwargs)
# Model
try:
if kwargs['initial']['microscope'].model is not None:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['microscope'].id)+', \'model\', this.value);'}), initial=kwargs['initial']['microscope'].model, required=False)
else:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['microscope'].id)+', \'model\', this.value);'}), required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['model'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
# Manufacturer
try:
if kwargs['initial']['microscope'].manufacturer is not None:
self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['microscope'].id)+', \'manufacturer\', this.value);'}), initial=kwargs['initial']['microscope'].manufacturer, required=False)
else:
self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['microscope'].id)+', \'manufacturer\', this.value);'}), required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['manufacturer'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
# Serial number
try:
if kwargs['initial']['microscope'].serialNumber is not None:
                self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['microscope'].id)+', \'serialNumber\', this.value);'}), initial=kwargs['initial']['microscope'].serialNumber, label="Serial number", required=False)
            else:
                self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['microscope'].id)+', \'serialNumber\', this.value);'}), label="Serial number", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['serialNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Serial number", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
# Lot number
try:
if kwargs['initial']['microscope'].lotNumber is not None:
                self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['microscope'].id)+', \'lotNumber\', this.value);'}), initial=kwargs['initial']['microscope'].lotNumber, label="Lot number", required=False)
            else:
                self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['microscope'].id)+', \'lotNumber\', this.value);'}), label="Lot number", required=False)
            self.fields['lotNumber'].widget.attrs['disabled'] = True
            self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
        except:
            self.fields['lotNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Lot number", required=False)
self.fields['lotNumber'].widget.attrs['disabled'] = True
self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
# Type
try:
if kwargs['initial']['microscope'].getMicroscopeType() is not None:
self.fields['type'] = MetadataModelChoiceField(queryset=kwargs['initial']['microscopeTypes'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['microscope'].id)+', \'type\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['microscope'].getMicroscopeType().value, required=False)
else:
self.fields['type'] = MetadataModelChoiceField(queryset=kwargs['initial']['microscopeTypes'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['microscope'].id)+', \'type\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['type'].widget.attrs['disabled'] = True
self.fields['type'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['type'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['type'].widget.attrs['disabled'] = True
self.fields['type'].widget.attrs['class'] = 'disabled-metadata'
self.fields.keyOrder = ['model', 'manufacturer', 'serialNumber', 'lotNumber', 'type']
class MetadataObjectiveForm(forms.Form):
def __init__(self, *args, **kwargs):
super(MetadataObjectiveForm, self).__init__(*args, **kwargs)
# Model
try:
if kwargs['initial']['objective'].model is not None:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'model\', this.value);'}), initial=kwargs['initial']['objective'].model, required=False)
else:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'model\', this.value);'}), required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['model'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
# Manufacturer
try:
if kwargs['initial']['objective'].manufacturer is not None:
self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'manufacturer\', this.value);'}), initial=kwargs['initial']['objective'].manufacturer, required=False)
else:
self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'manufacturer\', this.value);'}), required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['manufacturer'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
# Serial Number
try:
if kwargs['initial']['objective'].serialNumber is not None:
self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'serialNumber\', this.value);'}), initial=kwargs['initial']['objective'].serialNumber, label="Serial number", required=False)
else:
self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'serialNumber\', this.value);'}), label="Serial number", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['serialNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Serial number", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
# Lot number
try:
if kwargs['initial']['objective'].lotNumber is not None:
                self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'lotNumber\', this.value);'}), initial=kwargs['initial']['objective'].lotNumber, label="Lot number", required=False)
            else:
                self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'lotNumber\', this.value);'}), label="Lot number", required=False)
            self.fields['lotNumber'].widget.attrs['disabled'] = True
            self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
        except:
            self.fields['lotNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Lot number", required=False)
self.fields['lotNumber'].widget.attrs['disabled'] = True
self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
# Nominal Magnification
try:
if kwargs['initial']['objective'].nominalMagnification is not None:
self.fields['nominalMagnification'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'nominalMagnification\', this.value);'}), initial=kwargs['initial']['objective'].nominalMagnification, label="Nominal magnification", required=False)
else:
self.fields['nominalMagnification'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'nominalMagnification\', this.value);'}), label="Nominal magnification", required=False)
self.fields['nominalMagnification'].widget.attrs['disabled'] = True
self.fields['nominalMagnification'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['nominalMagnification'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Nominal magnification", required=False)
self.fields['nominalMagnification'].widget.attrs['disabled'] = True
self.fields['nominalMagnification'].widget.attrs['class'] = 'disabled-metadata'
# Calibrated Magnification
try:
if kwargs['initial']['objective'].calibratedMagnification is not None:
self.fields['calibratedMagnification'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'calibratedMagnification\', this.value);'}), initial=kwargs['initial']['objective'].calibratedMagnification, label="Calibrated magnification", required=False)
else:
self.fields['calibratedMagnification'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'calibratedMagnification\', this.value);'}), label="Calibrated magnification", required=False)
self.fields['calibratedMagnification'].widget.attrs['disabled'] = True
self.fields['calibratedMagnification'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['calibratedMagnification'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Calibrated magnification", required=False)
self.fields['calibratedMagnification'].widget.attrs['disabled'] = True
self.fields['calibratedMagnification'].widget.attrs['class'] = 'disabled-metadata'
# Lens NA
try:
if kwargs['initial']['objective'].lensNA is not None:
self.fields['lensNA'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'lensNA\', this.value);'}), initial=kwargs['initial']['objective'].lensNA, label="Lens NA", required=False)
else:
                self.fields['lensNA'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'lensNA\', this.value);'}), label="Lens NA", required=False)
self.fields['lensNA'].widget.attrs['disabled'] = True
self.fields['lensNA'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['lensNA'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Lens NA", required=False)
self.fields['lensNA'].widget.attrs['disabled'] = True
self.fields['lensNA'].widget.attrs['class'] = 'disabled-metadata'
# Immersion
try:
if kwargs['initial']['objective'].getImmersion() is not None:
self.fields['immersion'] = MetadataModelChoiceField(queryset=kwargs['initial']['immersions'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['objective'].id)+', \'immersion\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['objective'].getImmersion().value, required=False)
else:
self.fields['immersion'] = MetadataModelChoiceField(queryset=kwargs['initial']['immersions'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['objective'].id)+', \'immersion\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['immersion'].widget.attrs['disabled'] = True
self.fields['immersion'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['immersion'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['immersion'].widget.attrs['disabled'] = True
self.fields['immersion'].widget.attrs['class'] = 'disabled-metadata'
# Correction
try:
if kwargs['initial']['objective'].getCorrection() is not None:
self.fields['correction'] = MetadataModelChoiceField(queryset=kwargs['initial']['corrections'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['objective'].id)+', \'correction\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['objective'].getCorrection().value, required=False)
else:
self.fields['correction'] = MetadataModelChoiceField(queryset=kwargs['initial']['corrections'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['objective'].id)+', \'correction\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['correction'].widget.attrs['disabled'] = True
self.fields['correction'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['correction'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['correction'].widget.attrs['disabled'] = True
self.fields['correction'].widget.attrs['class'] = 'disabled-metadata'
# Working Distance
try:
if kwargs['initial']['objective'].workingDistance is not None:
self.fields['workingDistance'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'workingDistance\', this.value);'}), initial=kwargs['initial']['objective'].workingDistance, label="Working distance", required=False)
else:
self.fields['workingDistance'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'workingDistance\', this.value);'}), label="Working distance", required=False)
self.fields['workingDistance'].widget.attrs['disabled'] = True
self.fields['workingDistance'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['workingDistance'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Working distance", required=False)
self.fields['workingDistance'].widget.attrs['disabled'] = True
self.fields['workingDistance'].widget.attrs['class'] = 'disabled-metadata'
# Iris
try:
if kwargs['initial']['objective'].getIris() is not None:
self.fields['iris'] = forms.ChoiceField(choices=self.BOOLEAN_CHOICES, widget=forms.Select(attrs={'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'iris\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['objective'].getIris().value, required=False)
else:
self.fields['iris'] = forms.ChoiceField(choices=self.BOOLEAN_CHOICES, widget=forms.Select(attrs={'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objective'].id)+', \'iris\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['iris'].widget.attrs['disabled'] = True
self.fields['iris'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['iris'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['iris'].widget.attrs['disabled'] = True
self.fields['iris'].widget.attrs['class'] = 'disabled-metadata'
self.fields.keyOrder = ['model', 'manufacturer', 'serialNumber', 'lotNumber', 'nominalMagnification', 'calibratedMagnification', 'lensNA', 'immersion', 'correction', 'workingDistance', 'iris']
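# Editorial sketch (not from the original OMERO code): every field above is
# built with the same try/except shape -- wire a CharField to saveMetadata()
# via its onchange handler, disable it, and fall back to an "N/A" stub when
# the metadata object is missing. A hypothetical helper along these lines
# could collapse each block to a single call; the name build_metadata_field
# and its exact signature are assumptions, not OMERO API.
def build_metadata_field(obj, attr, label=None):
    """Return a disabled CharField bound to obj.<attr>, or an 'N/A' stub."""
    try:
        onchange = 'javascript:saveMetadata(' + str(obj.id) + ', \'' + attr + '\', this.value);'
        field = forms.CharField(max_length=100,
                                widget=forms.TextInput(attrs={'size': 25, 'onchange': onchange}),
                                initial=getattr(obj, attr, None), label=label, required=False)
    except Exception:
        # Mirror the forms above: missing metadata renders as a stub value.
        field = forms.CharField(max_length=10,
                                widget=forms.TextInput(attrs={'size': 25}),
                                initial="N/A", label=label, required=False)
    field.widget.attrs['disabled'] = True
    field.widget.attrs['class'] = 'disabled-metadata'
    return field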
class MetadataObjectiveSettingsForm(MetadataObjectiveForm):
BOOLEAN_CHOICES = (
('', '---------'),
('True', 'True'),
('False', 'False'),
)
def __init__(self, *args, **kwargs):
super(MetadataObjectiveSettingsForm, self).__init__(*args, **kwargs)
# Objective Settings
# Correction Collar
try:
if kwargs['initial']['objectiveSettings'].correctionCollar is not None:
self.fields['correctionCollar'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objectiveSettings'].id)+', \'correctionCollar\', this.value);'}), initial=kwargs['initial']['objectiveSettings'].correctionCollar, label="Correction collar", required=False)
else:
self.fields['correctionCollar'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objectiveSettings'].id)+', \'correctionCollar\', this.value);'}), label="Correction collar", required=False)
self.fields['correctionCollar'].widget.attrs['disabled'] = True
self.fields['correctionCollar'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['correctionCollar'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Correction collar", required=False)
self.fields['correctionCollar'].widget.attrs['disabled'] = True
self.fields['correctionCollar'].widget.attrs['class'] = 'disabled-metadata'
# Medium
try:
if kwargs['initial']['objectiveSettings'].getMedium() is not None:
self.fields['medium'] = MetadataModelChoiceField(queryset=kwargs['initial']['mediums'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['objectiveSettings'].id)+', \'medium\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['objectiveSettings'].getMedium().value, required=False)
else:
self.fields['medium'] = MetadataModelChoiceField(queryset=kwargs['initial']['mediums'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['objectiveSettings'].id)+', \'medium\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['medium'].widget.attrs['disabled'] = True
self.fields['medium'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['medium'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['medium'].widget.attrs['disabled'] = True
self.fields['medium'].widget.attrs['class'] = 'disabled-metadata'
# Refractive Index
try:
if kwargs['initial']['objectiveSettings'].refractiveIndex is not None:
self.fields['refractiveIndex'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objectiveSettings'].id)+', \'refractiveIndex\', this.value);'}), initial=kwargs['initial']['objectiveSettings'].refractiveIndex, label="Refractive index", required=False)
else:
self.fields['refractiveIndex'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['objectiveSettings'].id)+', \'refractiveIndex\', this.value);'}), label="Refractive index", required=False)
self.fields['refractiveIndex'].widget.attrs['disabled'] = True
self.fields['refractiveIndex'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['refractiveIndex'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Refractive index", required=False)
self.fields['refractiveIndex'].widget.attrs['disabled'] = True
self.fields['refractiveIndex'].widget.attrs['class'] = 'disabled-metadata'
self.fields.keyOrder = ['model', 'manufacturer', 'serialNumber', 'lotNumber', 'nominalMagnification', 'calibratedMagnification', 'lensNA', 'immersion', 'correction', 'workingDistance', 'iris', 'correctionCollar', 'medium', 'refractiveIndex']
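# Read-only presentation of filter metadata: manufacturer/model/serial data
# plus the transmittance range (cut in/out and their tolerances).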
class MetadataFilterForm(forms.Form):
def __init__(self, *args, **kwargs):
super(MetadataFilterForm, self).__init__(*args, **kwargs)
# Filter
# Manufacturer
try:
if kwargs['initial']['filter'].manufacturer is not None:
self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'manufacturer\', this.value);'}), initial=kwargs['initial']['filter'].manufacturer, required=False)
else:
self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'manufacturer\', this.value);'}), required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['manufacturer'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
# Model
try:
if kwargs['initial']['filter'].model is not None:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'model\', this.value);'}), initial=kwargs['initial']['filter'].model, required=False)
else:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'model\', this.value);'}), required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['model'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
# Serial Number
try:
if kwargs['initial']['filter'].serialNumber is not None:
self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'serialNumber\', this.value);'}), initial=kwargs['initial']['filter'].serialNumber, label="Serial number", required=False)
else:
self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'serialNumber\', this.value);'}), label="Serial number", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['serialNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Serial number", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
# Lot number
try:
if kwargs['initial']['filter'].lotNumber is not None:
self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'lotNumber\', this.value);'}), initial=kwargs['initial']['filter'].lotNumber, label="Lot number", required=False)
else:
self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'lotNumber\', this.value);'}), label="Lot number", required=False)
self.fields['lotNumber'].widget.attrs['disabled'] = True
self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['lotNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Lot number", required=False)
self.fields['lotNumber'].widget.attrs['disabled'] = True
self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
# Filter wheel
try:
if kwargs['initial']['filter'].filterWheel is not None:
self.fields['filterWheel'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'filterWheel\', this.value);'}), initial=kwargs['initial']['filter'].filterWheel, label="Filter wheel", required=False)
else:
self.fields['filterWheel'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'filterWheel\', this.value);'}), label="Filter wheel", required=False)
self.fields['filterWheel'].widget.attrs['disabled'] = True
self.fields['filterWheel'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['filterWheel'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Filter wheel", required=False)
self.fields['filterWheel'].widget.attrs['disabled'] = True
self.fields['filterWheel'].widget.attrs['class'] = 'disabled-metadata'
# Type
try:
if kwargs['initial']['filter'].getFilterType() is not None:
self.fields['type'] = MetadataModelChoiceField(queryset=kwargs['initial']['types'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['filter'].id)+', \'type\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['filter'].getFilterType().value, required=False)
else:
self.fields['type'] = MetadataModelChoiceField(queryset=kwargs['initial']['types'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['filter'].id)+', \'type\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['type'].widget.attrs['disabled'] = True
self.fields['type'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['type'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['type'].widget.attrs['disabled'] = True
self.fields['type'].widget.attrs['class'] = 'disabled-metadata'
# Cut in
try:
if kwargs['initial']['filter'].transmittanceRange is not None:
self.fields['cutIn'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'cutIn\', this.value);'}), initial=kwargs['initial']['filter'].getTransmittanceRange().cutIn, label="Cut in", required=False)
else:
self.fields['cutIn'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'cutIn\', this.value);'}), label="Cut in", required=False)
self.fields['cutIn'].widget.attrs['disabled'] = True
self.fields['cutIn'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['cutIn'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Cut in", required=False)
self.fields['cutIn'].widget.attrs['disabled'] = True
self.fields['cutIn'].widget.attrs['class'] = 'disabled-metadata'
# Cut out
try:
if kwargs['initial']['filter'].transmittanceRange is not None:
self.fields['cutOut'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'cutOut\', this.value);'}), initial=kwargs['initial']['filter'].getTransmittanceRange().cutOut, label="Cut out", required=False)
else:
self.fields['cutOut'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'cutOut\', this.value);'}), label="Cut out", required=False)
self.fields['cutOut'].widget.attrs['disabled'] = True
self.fields['cutOut'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['cutOut'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Cut out", required=False)
self.fields['cutOut'].widget.attrs['disabled'] = True
self.fields['cutOut'].widget.attrs['class'] = 'disabled-metadata'
# Cut in tolerance
try:
if kwargs['initial']['filter'].transmittanceRange is not None:
self.fields['cutInTolerance'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'cutInTolerance\', this.value);'}), initial=kwargs['initial']['filter'].getTransmittanceRange().cutInTolerance, label="Cut in tolerance", required=False)
else:
self.fields['cutInTolerance'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'cutInTolerance\', this.value);'}), label="Cut in tolerance", required=False)
self.fields['cutInTolerance'].widget.attrs['disabled'] = True
self.fields['cutInTolerance'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['cutInTolerance'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Cut in tolerance", required=False)
self.fields['cutInTolerance'].widget.attrs['disabled'] = True
self.fields['cutInTolerance'].widget.attrs['class'] = 'disabled-metadata'
        # Cut out tolerance
        try:
            if kwargs['initial']['filter'].transmittanceRange is not None:
                self.fields['cutOutTolerance'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'cutOutTolerance\', this.value);'}), initial=kwargs['initial']['filter'].getTransmittanceRange().cutOutTolerance, label="Cut out tolerance", required=False)
            else:
                self.fields['cutOutTolerance'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'cutOutTolerance\', this.value);'}), label="Cut out tolerance", required=False)
self.fields['cutOutTolerance'].widget.attrs['disabled'] = True
self.fields['cutOutTolerance'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['cutOutTolerance'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Cut out tolerance", required=False)
self.fields['cutOutTolerance'].widget.attrs['disabled'] = True
self.fields['cutOutTolerance'].widget.attrs['class'] = 'disabled-metadata'
# Transmittance
try:
if kwargs['initial']['filter'].transmittanceRange is not None:
self.fields['transmittance'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'transmittance\', this.value);'}), initial=kwargs['initial']['filter'].getTransmittanceRange().transmittance, required=False)
else:
self.fields['transmittance'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['filter'].id)+', \'transmittance\', this.value);'}), required=False)
self.fields['transmittance'].widget.attrs['disabled'] = True
self.fields['transmittance'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['transmittance'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['transmittance'].widget.attrs['disabled'] = True
self.fields['transmittance'].widget.attrs['class'] = 'disabled-metadata'
self.fields.keyOrder = ['manufacturer', 'model', 'serialNumber', 'lotNumber', 'type', 'filterWheel', 'cutIn', 'cutOut', 'cutInTolerance', 'cutOutTolerance', 'transmittance']
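# Read-only presentation of detector metadata; settings-level values (gain,
# voltage, offset, read-out rate, binning) prefer detectorSettings and fall
# back to the detector itself where applicable.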
class MetadataDetectorForm(forms.Form):
def __init__(self, *args, **kwargs):
super(MetadataDetectorForm, self).__init__(*args, **kwargs)
        # Detector
# Manufacturer
try:
if kwargs['initial']['detector'] is not None:
self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'manufacturer\', this.value);'}), initial=kwargs['initial']['detector'].manufacturer, required=False)
else:
self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'manufacturer\', this.value);'}), required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['manufacturer'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
# Model
try:
if kwargs['initial']['detector'] is not None:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'model\', this.value);'}), initial=kwargs['initial']['detector'].model, required=False)
else:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'model\', this.value);'}), required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['model'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
        # Serial number
try:
if kwargs['initial']['detector'] is not None:
self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'serialNumber\', this.value);'}), initial=kwargs['initial']['detector'].serialNumber, required=False)
else:
self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'serialNumber\', this.value);'}), required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['serialNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
        # Lot number (NB. Until the OMERO model is updated in 4.3, this will throw since lotNumber is not yet supported)
try:
if kwargs['initial']['detector'] is not None:
self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'lotNumber\', this.value);'}), initial=kwargs['initial']['detector'].lotNumber, required=False)
else:
self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'lotNumber\', this.value);'}), required=False)
self.fields['lotNumber'].widget.attrs['disabled'] = True
self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['lotNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['lotNumber'].widget.attrs['disabled'] = True
self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
# Type
try:
if kwargs['initial']['detector'].getDetectorType() is not None:
self.fields['type'] = MetadataModelChoiceField(queryset=kwargs['initial']['types'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['detector'].id)+', \'type\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['detector'].getDetectorType().value, required=False)
else:
self.fields['type'] = MetadataModelChoiceField(queryset=kwargs['initial']['types'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['detector'].id)+', \'type\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['type'].widget.attrs['disabled'] = True
self.fields['type'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['type'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['type'].widget.attrs['disabled'] = True
self.fields['type'].widget.attrs['class'] = 'disabled-metadata'
# Gain
try:
if kwargs['initial']['detectorSettings'] is not None:
self.fields['gain'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detectorSettings'].id)+', \'gain\', this.value);'}), initial=kwargs['initial']['detectorSettings'].gain, required=False)
elif kwargs['initial']['detector'] is not None:
self.fields['gain'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'gain\', this.value);'}), initial=kwargs['initial']['detector'].gain, required=False)
else:
self.fields['gain'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detectorSettings'].id)+', \'gain\', this.value);'}), required=False)
self.fields['gain'].widget.attrs['disabled'] = True
self.fields['gain'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['gain'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['gain'].widget.attrs['disabled'] = True
self.fields['gain'].widget.attrs['class'] = 'disabled-metadata'
# Voltage
try:
if kwargs['initial']['detectorSettings'] is not None:
self.fields['voltage'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detectorSettings'].id)+', \'voltage\', this.value);'}), initial=kwargs['initial']['detectorSettings'].voltage, required=False)
elif kwargs['initial']['detector'] is not None:
self.fields['voltage'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'voltage\', this.value);'}), initial=kwargs['initial']['detector'].voltage, required=False)
else:
self.fields['voltage'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detectorSettings'].id)+', \'voltage\', this.value);'}), required=False)
self.fields['voltage'].widget.attrs['disabled'] = True
self.fields['voltage'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['voltage'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['voltage'].widget.attrs['disabled'] = True
self.fields['voltage'].widget.attrs['class'] = 'disabled-metadata'
# Offset
try:
if kwargs['initial']['detectorSettings'] is not None:
self.fields['offsetValue'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detectorSettings'].id)+', \'offsetValue\', this.value);'}), initial=kwargs['initial']['detectorSettings'].offsetValue, label="Offset", required=False)
elif kwargs['initial']['detector'] is not None:
self.fields['offsetValue'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'offsetValue\', this.value);'}), initial=kwargs['initial']['detector'].offsetValue, label="Offset", required=False)
else:
self.fields['offsetValue'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detectorSettings'].id)+', \'offsetValue\', this.value);'}), label="Offset", required=False)
self.fields['offsetValue'].widget.attrs['disabled'] = True
self.fields['offsetValue'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['offsetValue'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Offset", required=False)
self.fields['offsetValue'].widget.attrs['disabled'] = True
self.fields['offsetValue'].widget.attrs['class'] = 'disabled-metadata'
# Zoom
try:
if kwargs['initial']['detector'] is not None:
self.fields['zoom'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'zoom\', this.value);'}), initial=kwargs['initial']['detector'].zoom, required=False)
else:
                self.fields['zoom'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'zoom\', this.value);'}), required=False)
self.fields['zoom'].widget.attrs['disabled'] = True
self.fields['zoom'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['zoom'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['zoom'].widget.attrs['disabled'] = True
self.fields['zoom'].widget.attrs['class'] = 'disabled-metadata'
# Amplification gain
try:
if kwargs['initial']['detector'] is not None:
self.fields['amplificationGain'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'amplificationGain\', this.value);'}), initial=kwargs['initial']['detector'].amplificationGain, label="Amplification gain", required=False)
else:
self.fields['amplificationGain'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detector'].id)+', \'amplificationGain\', this.value);'}), label="Amplification gain", required=False)
self.fields['amplificationGain'].widget.attrs['disabled'] = True
self.fields['amplificationGain'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['amplificationGain'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Amplification gain", required=False)
self.fields['amplificationGain'].widget.attrs['disabled'] = True
self.fields['amplificationGain'].widget.attrs['class'] = 'disabled-metadata'
# Read out rate
try:
if kwargs['initial']['detectorSettings'] is not None:
self.fields['readOutRate'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detectorSettings'].id)+', \'readOutRate\', this.value);'}), initial=kwargs['initial']['detectorSettings'].readOutRate, label="Read out rate", required=False)
else:
self.fields['readOutRate'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['detectorSettings'].id)+', \'readOutRate\', this.value);'}), label="Read out rate", required=False)
self.fields['readOutRate'].widget.attrs['disabled'] = True
self.fields['readOutRate'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['readOutRate'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Read out rate", required=False)
self.fields['readOutRate'].widget.attrs['disabled'] = True
self.fields['readOutRate'].widget.attrs['class'] = 'disabled-metadata'
# Binning
try:
if kwargs['initial']['detectorSettings'] is not None:
                self.fields['binning'] = MetadataModelChoiceField(queryset=kwargs['initial']['binnings'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['detectorSettings'].id)+', \'binning\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['detectorSettings'].getBinning().value, required=False)
            else:
                self.fields['binning'] = MetadataModelChoiceField(queryset=kwargs['initial']['binnings'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['detectorSettings'].id)+', \'binning\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['binning'].widget.attrs['disabled'] = True
self.fields['binning'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['binning'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['binning'].widget.attrs['disabled'] = True
self.fields['binning'].widget.attrs['class'] = 'disabled-metadata'
self.fields.keyOrder = ['manufacturer', 'model', 'serialNumber', 'lotNumber', 'type', 'gain', 'voltage', 'offsetValue', 'zoom', 'amplificationGain', 'readOutRate', 'binning']
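# Read-only presentation of light-source metadata (type, laser medium,
# wavelength, pulse characteristics).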
class MetadataLightSourceForm(forms.Form):
BOOLEAN_CHOICES = (
('', '---------'),
('True', 'True'),
('False', 'False'),
)
def __init__(self, *args, **kwargs):
super(MetadataLightSourceForm, self).__init__(*args, **kwargs)
        # Light source
# Manufacturer
try:
if kwargs['initial']['lightSource'].manufacturer is not None:
                self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'manufacturer\', this.value);'}), initial=kwargs['initial']['lightSource'].manufacturer, required=False)
            else:
                self.fields['manufacturer'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'manufacturer\', this.value);'}), required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['manufacturer'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['manufacturer'].widget.attrs['disabled'] = True
self.fields['manufacturer'].widget.attrs['class'] = 'disabled-metadata'
# Model
try:
if kwargs['initial']['lightSource'].model is not None:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'model\', this.value);'}), initial=kwargs['initial']['lightSource'].model, required=False)
else:
self.fields['model'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'model\', this.value);'}), required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['model'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['model'].widget.attrs['disabled'] = True
self.fields['model'].widget.attrs['class'] = 'disabled-metadata'
# Serial Number
try:
if kwargs['initial']['lightSource'].serialNumber is not None:
self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'serialNumber\', this.value);'}), initial=kwargs['initial']['lightSource'].serialNumber, label="Serial number", required=False)
else:
self.fields['serialNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'serialNumber\', this.value);'}), label="Serial number", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['serialNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Serial number", required=False)
self.fields['serialNumber'].widget.attrs['disabled'] = True
self.fields['serialNumber'].widget.attrs['class'] = 'disabled-metadata'
# Lot Number
try:
if kwargs['initial']['lightSource'].lotNumber is not None:
self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'lotNumber\', this.value);'}), initial=kwargs['initial']['lightSource'].lotNumber, label="Lot number", required=False)
else:
self.fields['lotNumber'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'lotNumber\', this.value);'}), label="Lot number", required=False)
self.fields['lotNumber'].widget.attrs['disabled'] = True
self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['lotNumber'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Lot number", required=False)
self.fields['lotNumber'].widget.attrs['disabled'] = True
self.fields['lotNumber'].widget.attrs['class'] = 'disabled-metadata'
# Power
try:
if kwargs['initial']['lightSource'].power is not None:
self.fields['power'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'power\', this.value);'}), initial=kwargs['initial']['lightSource'].power, required=False)
else:
self.fields['power'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'power\', this.value);'}), required=False)
self.fields['power'].widget.attrs['disabled'] = True
self.fields['power'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['power'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['power'].widget.attrs['disabled'] = True
self.fields['power'].widget.attrs['class'] = 'disabled-metadata'
# Type
try:
if kwargs['initial']['lightSource'].getLightSourceType() is not None:
self.fields['lstype'] = MetadataModelChoiceField(queryset=kwargs['initial']['lstypes'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'type\', this.options[this.selectedIndex].value);'}), label="Type", initial=kwargs['initial']['lightSource'].getLightSourceType().value, required=False)
else:
self.fields['lstype'] = MetadataModelChoiceField(queryset=kwargs['initial']['lstypes'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'type\', this.options[this.selectedIndex].value);'}), label="Type", required=False)
self.fields['lstype'].widget.attrs['disabled'] = True
self.fields['lstype'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['lstype'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Type", required=False)
self.fields['lstype'].widget.attrs['disabled'] = True
self.fields['lstype'].widget.attrs['class'] = 'disabled-metadata'
# Medium
try:
if kwargs['initial']['lightSource'].getLaserMedium() is not None:
self.fields['lmedium'] = MetadataModelChoiceField(queryset=kwargs['initial']['mediums'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'medium\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['lightSource'].getLaserMedium().value, label="Medium", required=False)
else:
self.fields['lmedium'] = MetadataModelChoiceField(queryset=kwargs['initial']['mediums'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'medium\', this.options[this.selectedIndex].value);'}), label="Medium", required=False)
self.fields['lmedium'].widget.attrs['disabled'] = True
self.fields['lmedium'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['lmedium'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Medium", required=False)
self.fields['lmedium'].widget.attrs['disabled'] = True
self.fields['lmedium'].widget.attrs['class'] = 'disabled-metadata'
# Wavelength
try:
if kwargs['initial']['lightSource'].wavelength is not None:
self.fields['wavelength'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'wavelength\', this.value);'}), initial=kwargs['initial']['lightSource'].wavelength, required=False)
else:
self.fields['wavelength'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'wavelength\', this.value);'}), required=False)
self.fields['wavelength'].widget.attrs['disabled'] = True
self.fields['wavelength'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['wavelength'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['wavelength'].widget.attrs['disabled'] = True
self.fields['wavelength'].widget.attrs['class'] = 'disabled-metadata'
# FrequencyMultiplication
try:
if kwargs['initial']['lightSource'].frequencyMultiplication is not None:
self.fields['frequencyMultiplication'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'frequencyMultiplication\', this.value);'}), initial=kwargs['initial']['lightSource'].frequencyMultiplication, label="Frequency Multiplication", required=False)
else:
self.fields['frequencyMultiplication'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'frequencyMultiplication\', this.value);'}), label="Frequency Multiplication", required=False)
self.fields['frequencyMultiplication'].widget.attrs['disabled'] = True
self.fields['frequencyMultiplication'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['frequencyMultiplication'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Frequency Multiplication", required=False)
self.fields['frequencyMultiplication'].widget.attrs['disabled'] = True
self.fields['frequencyMultiplication'].widget.attrs['class'] = 'disabled-metadata'
# Tuneable
try:
if kwargs['initial']['lightSource'].tuneable is not None:
self.fields['tuneable'] = forms.ChoiceField(choices=self.BOOLEAN_CHOICES, widget=forms.Select(attrs={'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'tuneable\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['lightSource'].tuneable, required=False)
else:
self.fields['tuneable'] = forms.ChoiceField(choices=self.BOOLEAN_CHOICES, widget=forms.Select(attrs={'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'tuneable\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['tuneable'].widget.attrs['disabled'] = True
self.fields['tuneable'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['tuneable'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['tuneable'].widget.attrs['disabled'] = True
self.fields['tuneable'].widget.attrs['class'] = 'disabled-metadata'
# Pulse
try:
if kwargs['initial']['lightSource'].pulse is not None:
self.fields['pulse'] = MetadataModelChoiceField(queryset=kwargs['initial']['pulses'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'pulse\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['lightSource'].pulse, required=False)
else:
self.fields['pulse'] = MetadataModelChoiceField(queryset=kwargs['initial']['pulses'], empty_label=u"Not set", widget=forms.Select(attrs={'onchange':'saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'pulse\', this.options[this.selectedIndex].value);'}), required=False)
self.fields['pulse'].widget.attrs['disabled'] = True
self.fields['pulse'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['pulse'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['pulse'].widget.attrs['disabled'] = True
self.fields['pulse'].widget.attrs['class'] = 'disabled-metadata'
# Repetition Rate
try:
if kwargs['initial']['lightSource'].repetitionRate is not None:
self.fields['repetitionRate'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'repetitionRate\', this.value);'}), initial=kwargs['initial']['lightSource'].repetitionRate, label="Repetition rate", required=False)
else:
self.fields['repetitionRate'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'repetitionRate\', this.value);'}), label="Repetition rate", required=False)
self.fields['repetitionRate'].widget.attrs['disabled'] = True
self.fields['repetitionRate'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['repetitionRate'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Repetition rate", required=False)
self.fields['repetitionRate'].widget.attrs['disabled'] = True
self.fields['repetitionRate'].widget.attrs['class'] = 'disabled-metadata'
# Pockel Cell
try:
if kwargs['initial']['lightSource'].pockelCell is not None:
self.fields['pockelCell'] = forms.ChoiceField(choices=self.BOOLEAN_CHOICES, widget=forms.Select(attrs={'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'pockelCell\', this.options[this.selectedIndex].value);'}), initial=kwargs['initial']['lightSource'].pockelCell, label="Pockel Cell", required=False)
else:
self.fields['pockelCell'] = forms.ChoiceField(choices=self.BOOLEAN_CHOICES, widget=forms.Select(attrs={'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'pockelCell\', this.options[this.selectedIndex].value);'}), label="Pockel Cell", required=False)
self.fields['pockelCell'].widget.attrs['disabled'] = True
self.fields['pockelCell'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['pockelCell'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Pockel Cell", required=False)
self.fields['pockelCell'].widget.attrs['disabled'] = True
self.fields['pockelCell'].widget.attrs['class'] = 'disabled-metadata'
# Attenuation
#try:
# if kwargs['initial']['lightSource'].attenuation is not None:
# self.fields['attenuation'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'attenuation\', this.value);'}), initial=kwargs['initial']['lightSource'].attenuation, required=False)
# else:
# self.fields['attenuation'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['lightSource'].id)+', \'attenuation\', this.value);'}), required=False)
# self.fields['attenuation'].widget.attrs['disabled'] = True
# self.fields['attenuation'].widget.attrs['class'] = 'disabled-metadata'
#except:
# self.fields['attenuation'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
# self.fields['attenuation'].widget.attrs['disabled'] = True
# self.fields['attenuation'].widget.attrs['class'] = 'disabled-metadata'
self.fields.keyOrder = ['manufacturer', 'model', 'serialNumber', 'lotNumber', 'power', 'lstype', 'lmedium', 'wavelength', 'frequencyMultiplication', 'tuneable', 'pulse' , 'repetitionRate', 'pockelCell']
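# Read-only presentation of the imaging environment (air pressure, CO2,
# humidity, temperature) read from image.getImagingEnvironment().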
class MetadataEnvironmentForm(forms.Form):
def __init__(self, *args, **kwargs):
super(MetadataEnvironmentForm, self).__init__(*args, **kwargs)
# Imaging environment
# Temperature
try:
if kwargs['initial']['image'].getImagingEnvironment().temperature is not None:
self.fields['temperature'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'temperature\', this.value);'}), initial=kwargs['initial']['image'].getImagingEnvironment().temperature, required=False)
else:
self.fields['temperature'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'temperature\', this.value);'}), required=False)
self.fields['temperature'].widget.attrs['disabled'] = True
self.fields['temperature'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['temperature'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['temperature'].widget.attrs['disabled'] = True
self.fields['temperature'].widget.attrs['class'] = 'disabled-metadata'
# Air Pressure
try:
if kwargs['initial']['image'].getImagingEnvironment().airPressure is not None:
self.fields['airPressure'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'airPressure\', this.value);'}), initial=kwargs['initial']['image'].getImagingEnvironment().airPressure, label="Air Pressure", required=False)
else:
self.fields['airPressure'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'airPressure\', this.value);'}), label="Air Pressure", required=False)
self.fields['airPressure'].widget.attrs['disabled'] = True
self.fields['airPressure'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['airPressure'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), label="Air Pressure", initial="N/A", required=False)
self.fields['airPressure'].widget.attrs['disabled'] = True
self.fields['airPressure'].widget.attrs['class'] = 'disabled-metadata'
# Humidity
try:
if kwargs['initial']['image'].getImagingEnvironment().humidity is not None:
self.fields['humidity'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'humidity\', this.value);'}), initial=kwargs['initial']['image'].getImagingEnvironment().humidity, required=False)
else:
self.fields['humidity'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'humidity\', this.value);'}), required=False)
self.fields['humidity'].widget.attrs['disabled'] = True
self.fields['humidity'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['humidity'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", required=False)
self.fields['humidity'].widget.attrs['disabled'] = True
self.fields['humidity'].widget.attrs['class'] = 'disabled-metadata'
# CO2 percent
try:
if kwargs['initial']['image'].getImagingEnvironment().co2percent is not None:
self.fields['co2percent'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'co2percent\', this.value);'}), initial=kwargs['initial']['image'].getImagingEnvironment().co2percent, label="CO2 [%]", required=False)
else:
self.fields['co2percent'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'co2percent\', this.value);'}), label="CO2 [%]", required=False)
self.fields['co2percent'].widget.attrs['disabled'] = True
self.fields['co2percent'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['co2percent'] = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="CO2 [%]", required=False)
self.fields['co2percent'].widget.attrs['disabled'] = True
self.fields['co2percent'].widget.attrs['class'] = 'disabled-metadata'
self.fields.keyOrder = ['airPressure', 'co2percent', 'humidity', 'temperature']
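# Read-only presentation of the stage label position (X/Y/Z) read from
# image.getStageLabel().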
class MetadataStageLabelForm(forms.Form):
def __init__(self, *args, **kwargs):
super(MetadataStageLabelForm, self).__init__(*args, **kwargs)
# Stage label
# Position x
try:
if kwargs['initial']['image'].getStageLabel() is not None:
self.fields['positionx'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'positionx\', this.value);'}), initial=kwargs['initial']['image'].getStageLabel().positionx, label="Position X", required=False)
else:
self.fields['positionx'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'positionx\', this.value);'}), label="Position X", required=False)
self.fields['positionx'].widget.attrs['disabled'] = True
self.fields['positionx'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['positionx'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Position X", required=False)
self.fields['positionx'].widget.attrs['disabled'] = True
self.fields['positionx'].widget.attrs['class'] = 'disabled-metadata'
# Position y
try:
if kwargs['initial']['image'].getStageLabel() is not None:
self.fields['positiony'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'positiony\', this.value);'}), initial=kwargs['initial']['image'].getStageLabel().positiony, label="Position Y", required=False)
else:
self.fields['positiony'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'positiony\', this.value);'}), label="Position Y", required=False)
self.fields['positiony'].widget.attrs['disabled'] = True
self.fields['positiony'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['positiony'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Position Y", required=False)
self.fields['positiony'].widget.attrs['disabled'] = True
self.fields['positiony'].widget.attrs['class'] = 'disabled-metadata'
# Position z
try:
if kwargs['initial']['image'].getStageLabel() is not None:
self.fields['positionz'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'positionz\', this.value);'}), initial=kwargs['initial']['image'].getStageLabel().positionz, label="Position Z", required=False)
else:
self.fields['positionz'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25, 'onchange':'javascript:saveMetadata('+str(kwargs['initial']['image'].id)+', \'positionz\', this.value);'}), label="Position Z", required=False)
self.fields['positionz'].widget.attrs['disabled'] = True
self.fields['positionz'].widget.attrs['class'] = 'disabled-metadata'
except:
self.fields['positionz'] = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'size':25}), initial="N/A", label="Position Z", required=False)
self.fields['positionz'].widget.attrs['disabled'] = True
self.fields['positionz'].widget.attrs['class'] = 'disabled-metadata'
self.fields.keyOrder = ['positionx', 'positiony', 'positionz']
|
jballanc/openmicroscopy
|
components/tools/OmeroWeb/omeroweb/webclient/forms.py
|
Python
|
gpl-2.0
| 111,021 | 0.010998 |
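The try/except blocks above all build the same kind of disabled metadata field; as a rough refactoring sketch (illustrative only — _readonly_metadata_field is not part of the OMERO codebase), the repeated pattern could be factored out like this, assuming the same Django forms API used above:
from django import forms
def _readonly_metadata_field(image, attr, getter, label=None, size=25):
    # Hypothetical helper: build a read-only metadata field for `attr`,
    # pulling the initial value from getter(image) and falling back to "N/A".
    try:
        obj = getter(image)
        widget = forms.TextInput(attrs={
            'size': size,
            'onchange': "javascript:saveMetadata(%s, '%s', this.value);" % (image.id, attr)})
        field = forms.CharField(max_length=100, widget=widget, label=label, required=False,
                                initial=getattr(obj, attr) if obj is not None else None)
    except Exception:
        field = forms.CharField(max_length=10, widget=forms.TextInput(attrs={'size': size}),
                                initial="N/A", label=label, required=False)
    field.widget.attrs['disabled'] = True
    field.widget.attrs['class'] = 'disabled-metadata'
    return field
Each assignment then collapses to one call, e.g. self.fields['positionx'] = _readonly_metadata_field(image, 'positionx', lambda im: im.getStageLabel(), label='Position X').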
import glob
import socket
def read_paths():
    """Collect the Sep 17 RPD_MPLS_LSP_CHANGE syslog lines from *96*messages files."""
    fulllist = []
    for fname in glob.glob("*96*messages"):
        print 'reading ' + fname
        for line in open(fname).read().splitlines():
            if 'RPD_MPLS_LSP_CHANGE' in line and 'Sep 17' in line and 'flag' in line:
                fulllist.append(line.split())
    print 'done reading'
    return fulllist
newpaths = read_paths()
dnsdict = {}
def convert_paths(newpaths):
    """Translate the addresses in each re-signalled path to names, caching lookups."""
    convertedpaths = []
    dnsfile = open("/home/mkasten/configs/addresses.txt").read().splitlines()
    for x in dnsfile:
        if '96c' in x or 'ibr' in x or '96l' in x or '20lsr' in x:
            dnsdict[x.split(":")[0]] = x.split(":")[1] + " " + x.split(":")[2]
    for x in newpaths:
        z = [x[8], x[12]]
        for y in x:
            if 'flag=0x2' in y:
                rest = y.split('(', 1)[0]
                if rest not in dnsdict:
                    # Unknown address: fall back to reverse DNS and cache the answer,
                    # so each address is resolved at most once.
                    try:
                        a = socket.gethostbyaddr(rest)[0]
                    except socket.error:
                        print "Unknown : " + rest
                        a = rest
                    dnsdict[rest] = a
                z.append(dnsdict[rest])
        convertedpaths.append(z)
    print 'done converting'
    return convertedpaths
listofresignals = convert_paths(newpaths)
filename = 'resignallists'
outputfile = open(filename, 'w')
print 'starting write'
for resig in listofresignals:
    outputfile.write(' '.join(resig) + '\n')
outputfile.close()
|
shashankjagannath/shashankfoo
|
genresigpath.py
|
Python
|
cc0-1.0
| 1,485 | 0.032997 |
# _V!(3, 3)
# This file was automatically generated by `dump_dis.py`.
# This file is designed for Python (3, 3).
import sys
# Check Python version
if sys.version_info[0:2] != (3, 3):
raise SystemError("Inappropriate Python version for these bytecode symbols.")
# Begin tokens. These are ordered.
POP_TOP = 1
ROT_TWO = 2
ROT_THREE = 3
DUP_TOP = 4
DUP_TOP_TWO = 5
NOP = 9
UNARY_POSITIVE = 10
UNARY_NEGATIVE = 11
UNARY_NOT = 12
UNARY_INVERT = 15
BINARY_POWER = 19
BINARY_MULTIPLY = 20
BINARY_MODULO = 22
BINARY_ADD = 23
BINARY_SUBTRACT = 24
BINARY_SUBSCR = 25
BINARY_FLOOR_DIVIDE = 26
BINARY_TRUE_DIVIDE = 27
INPLACE_FLOOR_DIVIDE = 28
INPLACE_TRUE_DIVIDE = 29
STORE_MAP = 54
INPLACE_ADD = 55
INPLACE_SUBTRACT = 56
INPLACE_MULTIPLY = 57
INPLACE_MODULO = 59
STORE_SUBSCR = 60
DELETE_SUBSCR = 61
BINARY_LSHIFT = 62
BINARY_RSHIFT = 63
BINARY_AND = 64
BINARY_XOR = 65
BINARY_OR = 66
INPLACE_POWER = 67
GET_ITER = 68
STORE_LOCALS = 69
PRINT_EXPR = 70
LOAD_BUILD_CLASS = 71
YIELD_FROM = 72
INPLACE_LSHIFT = 75
INPLACE_RSHIFT = 76
INPLACE_AND = 77
INPLACE_XOR = 78
INPLACE_OR = 79
BREAK_LOOP = 80
WITH_CLEANUP = 81
RETURN_VALUE = 83
IMPORT_STAR = 84
YIELD_VALUE = 86
POP_BLOCK = 87
END_FINALLY = 88
POP_EXCEPT = 89
STORE_NAME = 90
DELETE_NAME = 91
UNPACK_SEQUENCE = 92
FOR_ITER = 93
UNPACK_EX = 94
STORE_ATTR = 95
DELETE_ATTR = 96
STORE_GLOBAL = 97
DELETE_GLOBAL = 98
LOAD_CONST = 100
LOAD_NAME = 101
BUILD_TUPLE = 102
BUILD_LIST = 103
BUILD_SET = 104
BUILD_MAP = 105
LOAD_ATTR = 106
COMPARE_OP = 107
IMPORT_NAME = 108
IMPORT_FROM = 109
JUMP_FORWARD = 110
JUMP_IF_FALSE_OR_POP = 111
JUMP_IF_TRUE_OR_POP = 112
JUMP_ABSOLUTE = 113
POP_JUMP_IF_FALSE = 114
POP_JUMP_IF_TRUE = 115
LOAD_GLOBAL = 116
CONTINUE_LOOP = 119
SETUP_LOOP = 120
SETUP_EXCEPT = 121
SETUP_FINALLY = 122
LOAD_FAST = 124
STORE_FAST = 125
DELETE_FAST = 126
RAISE_VARARGS = 130
CALL_FUNCTION = 131
MAKE_FUNCTION = 132
BUILD_SLICE = 133
MAKE_CLOSURE = 134
LOAD_CLOSURE = 135
LOAD_DEREF = 136
STORE_DEREF = 137
DELETE_DEREF = 138
CALL_FUNCTION_VAR = 140
CALL_FUNCTION_KW = 141
CALL_FUNCTION_VAR_KW = 142
SETUP_WITH = 143
EXTENDED_ARG = 144
LIST_APPEND = 145
SET_ADD = 146
MAP_ADD = 147
|
SunDwarf/Pyte
|
pyte/tokens_33.py
|
Python
|
mit
| 2,143 | 0.000467 |
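Since the table above is machine-generated, one sanity check is to compare a few entries against the stdlib dis module on a matching interpreter; a minimal sketch (the sampled constants are copied from the table):
import dis
sampled = {'POP_TOP': 1, 'ROT_TWO': 2, 'LOAD_CONST': 100, 'CALL_FUNCTION': 131}
for name, value in sampled.items():
    # dis.opmap maps opcode names to numbers for the running interpreter.
    assert dis.opmap.get(name) == value, (name, value, dis.opmap.get(name))
print("sampled tokens match dis.opmap")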
"""SCons.Tool.applelink
Tool-specific initialization for the Apple gnu-like linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/applelink.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import SCons.Util
# Even though the Mac is based on the GNU toolchain, it doesn't understand
# the -rpath option, so we use the "link" tool instead of "gnulink".
import link
def generate(env):
"""Add Builders and construction variables for applelink to an
Environment."""
link.generate(env)
env['FRAMEWORKPATHPREFIX'] = '-F'
env['_FRAMEWORKPATH'] = '${_concat(FRAMEWORKPATHPREFIX, FRAMEWORKPATH, "", __env__)}'
env['_FRAMEWORKS'] = '${_concat("-framework ", FRAMEWORKS, "", __env__)}'
env['LINKCOM'] = env['LINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -dynamiclib')
env['SHLINKCOM'] = env['SHLINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
# override the default for loadable modules, which are different
# on OS X than dynamic shared libs. echoing what XCode does for
# pre/suffixes:
env['LDMODULEPREFIX'] = ''
env['LDMODULESUFFIX'] = ''
env['LDMODULEFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -bundle')
env['LDMODULECOM'] = '$LDMODULE -o ${TARGET} $LDMODULEFLAGS $SOURCES $_LIBDIRFLAGS $_LIBFLAGS $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
def exists(env):
return env['PLATFORM'] == 'darwin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
EmanueleCannizzaro/scons
|
src/engine/SCons/Tool/applelink.py
|
Python
|
mit
| 2,828 | 0.003182 |
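As a usage sketch (a hypothetical SConstruct, not part of the SCons sources), the framework variables defined above feed into builds like this:
# SConstruct sketch: link against a framework and build a -bundle module.
env = Environment(tools=['default', 'applelink'])
env.AppendUnique(FRAMEWORKPATH=['/Library/Frameworks'], FRAMEWORKS=['Cocoa'])
env.Program('hello', ['hello.c'])           # LINKCOM picks up $_FRAMEWORKS
env.LoadableModule('plugin', ['plugin.c'])  # uses the -bundle LDMODULE settings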
"""
Profiling hooks
This module contains a couple of decorators (`profile` and `coverage`) that
can be used to wrap functions and/or methods to produce profiles and line
coverage reports. There's a third convenient decorator (`timecall`) that
measures the duration of function execution without the extra profiling
overhead.
Usage example (Python 2.4 or newer)::
from profilehooks import profile, coverage
@profile # or @coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
print(fn(42))
Or without imports, with some hack
$ python -m profilehooks yourmodule
@profile # or @coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
print(fn(42))
Usage example (Python 2.3 or older)::
from profilehooks import profile, coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
# Now wrap that function in a decorator
fn = profile(fn) # or coverage(fn)
print fn(42)
Reports for all thusly decorated functions will be printed to sys.stdout
on program termination. You can alternatively request for immediate
reports for each call by passing immediate=True to the profile decorator.
There's also a @timecall decorator for printing the time to sys.stderr
every time a function is called, when you just want to get a rough measure
instead of a detailed (but costly) profile.
Caveats
A thread on python-dev convinced me that hotshot produces bogus numbers.
See http://mail.python.org/pipermail/python-dev/2005-November/058264.html
I don't know what will happen if a decorated function will try to call
another decorated function. All decorators probably need to explicitly
support nested profiling (currently TraceFuncCoverage is the only one
that supports this, while HotShotFuncProfile has support for recursive
functions.)
Profiling with hotshot creates temporary files (*.prof for profiling,
*.cprof for coverage) in the current directory. These files are not
cleaned up. Exception: when you specify a filename to the profile
decorator (to store the pstats.Stats object for later inspection),
the temporary file will be the filename you specified with '.raw'
appended at the end.
Coverage analysis with hotshot seems to miss some executions resulting
in lower line counts and some lines errorneously marked as never
executed. For this reason coverage analysis now uses trace.py which is
slower, but more accurate.
Copyright (c) 2004--2014 Marius Gedminas <marius@pov.lt>
Copyright (c) 2007 Hanno Schlichting
Copyright (c) 2008 Florian Schulze
Released under the MIT licence since December 2006:
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
(Previously it was distributed under the GNU General Public Licence.)
"""
__author__ = "Marius Gedminas <marius@gedmin.as>"
__copyright__ = "Copyright 2004-2015 Marius Gedminas and contributors"
__license__ = "MIT"
__version__ = "1.8.0"
__date__ = "2015-03-25"
import atexit
import inspect
import sys
import re
# For profiling
from profile import Profile
import pstats
# For hotshot profiling (inaccurate!)
try:
import hotshot
import hotshot.stats
except ImportError:
hotshot = None
# For trace.py coverage
import trace
# For hotshot coverage (inaccurate!; uses undocumented APIs; might break)
if hotshot is not None:
import _hotshot
import hotshot.log
# For cProfile profiling (best)
try:
import cProfile
except ImportError:
cProfile = None
# For timecall
import time
# registry of available profilers
AVAILABLE_PROFILERS = {}
__all__ = ['coverage', 'coverage_with_hotshot', 'profile', 'timecall']
def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False,
sort=None, entries=40,
profiler=('cProfile', 'profile', 'hotshot'),
stdout=True):
"""Mark `fn` for profiling.
If `skip` is > 0, first `skip` calls to `fn` will not be profiled.
If `immediate` is False, profiling results will be printed to
sys.stdout on program termination. Otherwise results will be printed
after each call. (If you don't want this, set stdout=False and specify a
`filename` to store profile data.)
If `dirs` is False only the name of the file will be printed.
Otherwise the full path is used.
`sort` can be a list of sort keys (defaulting to ['cumulative',
'time', 'calls']). The following ones are recognized::
'calls' -- call count
'cumulative' -- cumulative time
'file' -- file name
'line' -- line number
'module' -- file name
'name' -- function name
'nfl' -- name/file/line
'pcalls' -- call count
'stdname' -- standard name
'time' -- internal time
`entries` limits the output to the first N entries.
`profiler` can be used to select the preferred profiler, or specify a
sequence of them, in order of preference. The default is ('cProfile'.
'profile', 'hotshot').
If `filename` is specified, the profile stats will be stored in the
named file. You can load them with pstats.Stats(filename) or use a
visualization tool like RunSnakeRun.
Usage::
def fn(...):
...
fn = profile(fn, skip=1)
If you are using Python 2.4, you should be able to use the decorator
syntax::
@profile(skip=3)
def fn(...):
...
or just ::
@profile
def fn(...):
...
"""
if fn is None: # @profile() syntax -- we are a decorator maker
def decorator(fn):
return profile(fn, skip=skip, filename=filename,
immediate=immediate, dirs=dirs,
sort=sort, entries=entries,
profiler=profiler, stdout=stdout)
return decorator
# @profile syntax -- we are a decorator.
if isinstance(profiler, str):
profiler = [profiler]
for p in profiler:
if p in AVAILABLE_PROFILERS:
profiler_class = AVAILABLE_PROFILERS[p]
break
else:
raise ValueError('only these profilers are available: %s'
% ', '.join(sorted(AVAILABLE_PROFILERS)))
fp = profiler_class(fn, skip=skip, filename=filename,
immediate=immediate, dirs=dirs,
sort=sort, entries=entries, stdout=stdout)
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
def coverage(fn):
"""Mark `fn` for line coverage analysis.
Results will be printed to sys.stdout on program termination.
Usage::
def fn(...):
...
fn = coverage(fn)
If you are using Python 2.4, you should be able to use the decorator
syntax::
@coverage
def fn(...):
...
"""
fp = TraceFuncCoverage(fn) # or HotShotFuncCoverage
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
def coverage_with_hotshot(fn):
"""Mark `fn` for line coverage analysis.
Uses the 'hotshot' module for fast coverage analysis.
BUG: Produces inaccurate results.
See the docstring of `coverage` for usage examples.
"""
fp = HotShotFuncCoverage(fn)
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
class FuncProfile(object):
"""Profiler for a function (uses profile)."""
# This flag is shared between all instances
in_profiler = False
Profile = Profile
def __init__(self, fn, skip=0, filename=None, immediate=False, dirs=False,
sort=None, entries=40, stdout=True):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
FuncProfile registers an atexit handler that prints profiling
information to sys.stderr when the program terminates.
"""
self.fn = fn
self.skip = skip
self.filename = filename
self.immediate = immediate
self.stdout = stdout
self.dirs = dirs
self.sort = sort or ('cumulative', 'time', 'calls')
if isinstance(self.sort, str):
self.sort = (self.sort, )
self.entries = entries
self.reset_stats()
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
if self.skip > 0:
self.skip -= 1
self.skipped += 1
return self.fn(*args, **kw)
if FuncProfile.in_profiler:
# handle recursive calls
return self.fn(*args, **kw)
# You cannot reuse the same profiler for many calls and accumulate
# stats that way. :-/
profiler = self.Profile()
try:
FuncProfile.in_profiler = True
return profiler.runcall(self.fn, *args, **kw)
finally:
FuncProfile.in_profiler = False
self.stats.add(profiler)
if self.immediate:
self.print_stats()
self.reset_stats()
def print_stats(self):
"""Print profile information to sys.stdout."""
stats = self.stats
if self.filename:
stats.dump_stats(self.filename)
if self.stdout:
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** PROFILER RESULTS ***")
print("%s (%s:%s)" % (funcname, filename, lineno))
if self.skipped:
skipped = " (%d calls not profiled)" % self.skipped
else:
skipped = ""
print("function called %d times%s" % (self.ncalls, skipped))
print("")
if not self.dirs:
stats.strip_dirs()
stats.sort_stats(*self.sort)
stats.print_stats(self.entries)
def reset_stats(self):
"""Reset accumulated profiler statistics."""
# Note: not using self.Profile, since pstats.Stats() fails then
self.stats = pstats.Stats(Profile())
self.ncalls = 0
self.skipped = 0
def atexit(self):
"""Stop profiling and print profile information to sys.stdout.
This function is registered as an atexit hook.
"""
# XXX: uh, why even register this as an atexit hook if immediate is True?
if not self.immediate:
self.print_stats()
AVAILABLE_PROFILERS['profile'] = FuncProfile
if cProfile is not None:
class CProfileFuncProfile(FuncProfile):
"""Profiler for a function (uses cProfile)."""
Profile = cProfile.Profile
AVAILABLE_PROFILERS['cProfile'] = CProfileFuncProfile
if hotshot is not None:
class HotShotFuncProfile(FuncProfile):
"""Profiler for a function (uses hotshot)."""
# This flag is shared between all instances
in_profiler = False
def __init__(self, fn, skip=0, filename=None, immediate=False,
dirs=False, sort=None, entries=40, stdout=True):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
HotShotFuncProfile registers an atexit handler that prints
profiling information to sys.stderr when the program terminates.
The log file is not removed and remains there to clutter the
current working directory.
"""
if filename:
self.logfilename = filename + ".raw"
else:
self.logfilename = fn.__name__ + ".prof"
super(HotShotFuncProfile, self).__init__(
fn, skip=skip, filename=filename, immediate=immediate,
dirs=dirs, sort=sort, entries=entries, stdout=stdout)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
if self.skip > 0:
self.skip -= 1
self.skipped += 1
return self.fn(*args, **kw)
if HotShotFuncProfile.in_profiler:
# handle recursive calls
return self.fn(*args, **kw)
if self.profiler is None:
self.profiler = hotshot.Profile(self.logfilename)
try:
HotShotFuncProfile.in_profiler = True
return self.profiler.runcall(self.fn, *args, **kw)
finally:
HotShotFuncProfile.in_profiler = False
if self.immediate:
self.print_stats()
self.reset_stats()
def print_stats(self):
if self.profiler is None:
self.stats = pstats.Stats(Profile())
else:
self.profiler.close()
self.stats = hotshot.stats.load(self.logfilename)
super(HotShotFuncProfile, self).print_stats()
def reset_stats(self):
self.profiler = None
self.ncalls = 0
self.skipped = 0
AVAILABLE_PROFILERS['hotshot'] = HotShotFuncProfile
class HotShotFuncCoverage:
"""Coverage analysis for a function (uses _hotshot).
HotShot coverage is reportedly faster than trace.py, but it appears to
have problems with exceptions; also line counts in coverage reports
    are generally lower than those produced by TraceFuncCoverage.
Is this my bug, or is it a problem with _hotshot?
"""
def __init__(self, fn):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
HotShotFuncCoverage registers an atexit handler that prints
profiling information to sys.stderr when the program terminates.
The log file is not removed and remains there to clutter the
current working directory.
"""
self.fn = fn
self.logfilename = fn.__name__ + ".cprof"
self.profiler = _hotshot.coverage(self.logfilename)
self.ncalls = 0
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
old_trace = sys.gettrace()
try:
return self.profiler.runcall(self.fn, args, kw)
finally: # pragma: nocover
sys.settrace(old_trace)
def atexit(self):
"""Stop profiling and print profile information to sys.stderr.
This function is registered as an atexit hook.
"""
self.profiler.close()
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** COVERAGE RESULTS ***")
print("%s (%s:%s)" % (funcname, filename, lineno))
print("function called %d times" % self.ncalls)
print("")
fs = FuncSource(self.fn)
reader = hotshot.log.LogReader(self.logfilename)
for what, (filename, lineno, funcname), tdelta in reader:
if filename != fs.filename:
continue
if what == hotshot.log.LINE:
fs.mark(lineno)
if what == hotshot.log.ENTER:
# hotshot gives us the line number of the function definition
# and never gives us a LINE event for the first statement in
# a function, so if we didn't perform this mapping, the first
# statement would be marked as never executed
if lineno == fs.firstlineno:
lineno = fs.firstcodelineno
fs.mark(lineno)
reader.close()
print(fs)
never_executed = fs.count_never_executed()
if never_executed:
print("%d lines were not executed." % never_executed)
class TraceFuncCoverage:
"""Coverage analysis for a function (uses trace module).
HotShot coverage analysis is reportedly faster, but it appears to have
problems with exceptions.
"""
# Shared between all instances so that nested calls work
tracer = trace.Trace(count=True, trace=False,
ignoredirs=[sys.prefix, sys.exec_prefix])
# This flag is also shared between all instances
tracing = False
def __init__(self, fn):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
TraceFuncCoverage registers an atexit handler that prints
profiling information to sys.stderr when the program terminates.
The log file is not removed and remains there to clutter the
current working directory.
"""
self.fn = fn
self.logfilename = fn.__name__ + ".cprof"
self.ncalls = 0
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
if TraceFuncCoverage.tracing: # pragma: nocover
return self.fn(*args, **kw)
old_trace = sys.gettrace()
try:
TraceFuncCoverage.tracing = True
return self.tracer.runfunc(self.fn, *args, **kw)
finally: # pragma: nocover
sys.settrace(old_trace)
TraceFuncCoverage.tracing = False
def atexit(self):
"""Stop profiling and print profile information to sys.stderr.
This function is registered as an atexit hook.
"""
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** COVERAGE RESULTS ***")
print("%s (%s:%s)" % (funcname, filename, lineno))
print("function called %d times" % self.ncalls)
print("")
fs = FuncSource(self.fn)
for (filename, lineno), count in self.tracer.counts.items():
if filename != fs.filename:
continue
fs.mark(lineno, count)
print(fs)
never_executed = fs.count_never_executed()
if never_executed:
print("%d lines were not executed." % never_executed)
class FuncSource:
"""Source code annotator for a function."""
blank_rx = re.compile(r"^\s*finally:\s*(#.*)?$")
def __init__(self, fn):
self.fn = fn
self.filename = inspect.getsourcefile(fn)
self.sourcelines = {}
self.source = []
self.firstlineno = self.firstcodelineno = 0
try:
self.source, self.firstlineno = inspect.getsourcelines(fn)
self.firstcodelineno = self.firstlineno
self.find_source_lines()
except IOError:
self.filename = None
def find_source_lines(self):
"""Mark all executable source lines in fn as executed 0 times."""
if self.filename is None:
return
strs = trace.find_strings(self.filename)
lines = trace.find_lines_from_code(self.fn.__code__, strs)
for lineno in lines:
self.sourcelines.setdefault(lineno, 0)
if lines:
self.firstcodelineno = min(lines)
else: # pragma: nocover
# This branch cannot be reached, I'm just being paranoid.
self.firstcodelineno = self.firstlineno
def mark(self, lineno, count=1):
"""Mark a given source line as executed count times.
Multiple calls to mark for the same lineno add up.
"""
self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count
def count_never_executed(self):
"""Count statements that were never executed."""
lineno = self.firstlineno
counter = 0
for line in self.source:
if self.sourcelines.get(lineno) == 0:
if not self.blank_rx.match(line):
counter += 1
lineno += 1
return counter
def __str__(self):
"""Return annotated source code for the function."""
if self.filename is None:
return "cannot show coverage data since co_filename is None"
lines = []
lineno = self.firstlineno
for line in self.source:
counter = self.sourcelines.get(lineno)
if counter is None:
prefix = ' ' * 7
elif counter == 0:
if self.blank_rx.match(line): # pragma: nocover
                    # This is a workaround for an ancient bug I can't
# reproduce, perhaps because it was fixed, or perhaps
# because I can't remember all the details.
prefix = ' ' * 7
else:
prefix = '>' * 6 + ' '
else:
prefix = '%5d: ' % counter
lines.append(prefix + line)
lineno += 1
return ''.join(lines)
def timecall(fn=None, immediate=True, timer=None):
"""Wrap `fn` and print its execution time.
Example::
@timecall
def somefunc(x, y):
time.sleep(x * y)
somefunc(2, 3)
will print the time taken by somefunc on every call. If you want just
a summary at program termination, use
@timecall(immediate=False)
You can also choose a timing method other than the default ``time.time()``,
e.g.:
@timecall(timer=time.clock)
"""
if fn is None: # @timecall() syntax -- we are a decorator maker
def decorator(fn):
return timecall(fn, immediate=immediate, timer=timer)
return decorator
# @timecall syntax -- we are a decorator.
if timer is None:
timer = time.time
fp = FuncTimer(fn, immediate=immediate, timer=timer)
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
class FuncTimer(object):
def __init__(self, fn, immediate, timer):
self.fn = fn
self.ncalls = 0
self.totaltime = 0
self.immediate = immediate
self.timer = timer
if not immediate:
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
fn = self.fn
timer = self.timer
self.ncalls += 1
try:
start = timer()
return fn(*args, **kw)
finally:
duration = timer() - start
self.totaltime += duration
if self.immediate:
funcname = fn.__name__
filename = fn.__code__.co_filename
lineno = fn.__code__.co_firstlineno
sys.stderr.write("\n %s (%s:%s):\n %.3f seconds\n\n" % (
funcname, filename, lineno, duration
))
sys.stderr.flush()
def atexit(self):
if not self.ncalls:
return
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("\n %s (%s:%s):\n"
" %d calls, %.3f seconds (%.3f seconds per call)\n" % (
funcname, filename, lineno, self.ncalls,
self.totaltime, self.totaltime / self.ncalls)
)
if __name__ == '__main__':
local = dict((name, globals()[name]) for name in __all__)
message = """********
Injected `profilehooks`
--------
{}
********
""".format("\n".join(local.keys()))
def interact_():
from code import interact
interact(message, local=local)
def run_():
from runpy import run_module
print(message)
run_module(sys.argv[1], init_globals=local)
if len(sys.argv) == 1:
interact_()
else:
run_()
|
kzlin129/practice-typing
|
lib/python2.7/site-packages/profilehooks.py
|
Python
|
apache-2.0
| 26,416 | 0.000341 |
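A minimal end-to-end sketch of the decorators documented above (output details vary with the Python version and the selected profiler):
from profilehooks import profile, timecall
@profile(immediate=True, entries=10)  # print profiler stats after every call
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)
@timecall  # print the wall-clock duration of each call to sys.stderr
def busy():
    return sum(i * i for i in range(100000))
fib(15)
busy()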
# I ran into this problem in an interview. The matrix has a special
# structure: starting the search from the bottom-left corner makes the
# pattern obvious (everything above is smaller, everything to the right
# is larger).
class Solution:
    def searchMatrix(self, matrix, target):
        """
        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        if not matrix or not matrix[0]:
            return False
        # Staircase walk: move right when too small, up when too large.
        i = len(matrix) - 1
        j = 0
        while i >= 0 and j < len(matrix[0]):
            if matrix[i][j] == target:
                return True
            elif matrix[i][j] < target:
                j += 1
            else:
                i -= 1
        return False
|
MingfeiPan/leetcode
|
array/74.py
|
Python
|
apache-2.0
| 655 | 0.003396 |
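A quick sanity check of the staircase search (a minimal driver, not part of the original submission):
m = [[1, 4, 7, 11],
     [2, 5, 8, 12],
     [3, 6, 9, 16]]
s = Solution()
print(s.searchMatrix(m, 5))   # True: 3 -> 6 -> up to row 1, hits 5
print(s.searchMatrix(m, 10))  # False: walks off the top of the matrix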
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-28 00:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('clog', '0010_auto_20160410_2149'),
]
operations = [
migrations.AlterUniqueTogether(
name='variable',
unique_together=set([('user', 'name')]),
),
]
|
jaufrec/whatnext
|
clog/migrations/0011_auto_20160528_0055.py
|
Python
|
gpl-3.0
| 419 | 0 |
# -*- coding: utf-8 -*-
#
# This file is part of kwalitee
# Copyright (C) 2014, 2015 CERN.
#
# kwalitee is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# kwalitee is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kwalitee; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Prepare release news from git log.
Prepares release news from git log messages, breaking release news
into (1) sections (e.g. Security fixes, detected from commit labels)
and (2) modules (e.g. search, detected from commit log headlines).
"""
from __future__ import absolute_import, print_function, unicode_literals
import itertools
import re
import sys
import textwrap
from collections import OrderedDict
from flask import current_app
from flask_script import Manager
from .check import _git_commits, _pygit2_commits
manager = Manager(usage=__doc__)
def analyse_body_paragraph(body_paragraph, labels=None):
"""Analyse commit body paragraph and return (label, message).
    >>> analyse_body_paragraph('* BETTER Foo and bar.',
    ...                        [('BETTER', 'Improvements')])
('BETTER', 'Foo and bar.')
>>> analyse_body_paragraph('* Foo and bar.')
(None, 'Foo and bar.')
>>> analyse_body_paragraph('Foo and bar.')
(None, None)
"""
# try to find leading label first:
for label, dummy in labels:
if body_paragraph.startswith('* ' + label):
return (label, body_paragraph[len(label) + 3:].replace('\n ',
' '))
# no conformed leading label found; do we have leading asterisk?
if body_paragraph.startswith('* '):
return (None, body_paragraph[2:].replace('\n ', ' '))
# no leading asterisk found; ignore this paragraph silently:
return (None, None)
def remove_ticket_directives(message):
"""Remove ticket directives like "(closes #123).
>>> remove_ticket_directives('(closes #123)')
'(#123)'
>>> remove_ticket_directives('(foo #123)')
'(foo #123)'
"""
if message:
message = re.sub(r'closes #', '#', message)
message = re.sub(r'addresses #', '#', message)
message = re.sub(r'references #', '#', message)
return message
def amended_commits(commits):
"""Return those git commit sha1s that have been amended later."""
# which SHA1 are declared as amended later?
amended_sha1s = []
for message in commits.values():
amended_sha1s.extend(re.findall(r'AMENDS\s([0-f]+)', message))
return amended_sha1s
def enrich_git_log_dict(messages, labels):
"""Enrich git log with related information on tickets."""
for commit_sha1, message in messages.items():
# detect module and ticket numbers for each commit:
component = None
title = message.split('\n')[0]
try:
component, title = title.split(":", 1)
component = component.strip()
except ValueError:
pass # noqa
paragraphs = [analyse_body_paragraph(p, labels)
for p in message.split('\n\n')]
yield {
'sha1': commit_sha1,
'component': component,
'title': title.strip(),
'tickets': re.findall(r'\s(#\d+)', message),
'paragraphs': [
(label, remove_ticket_directives(message))
for label, message in paragraphs
],
}
@manager.option('repository', default='.', nargs='?', help='repository path')
@manager.option('commit', metavar='<sha or branch>', nargs='?',
default='HEAD', help='an integer for the accumulator')
@manager.option('-c', '--components', default=False, action="store_true",
help='group components', dest='group_components')
def release(commit='HEAD', repository='.', group_components=False):
"""Generate release notes."""
from ..kwalitee import get_options
from ..hooks import _read_local_kwalitee_configuration
options = get_options(current_app.config)
options.update(_read_local_kwalitee_configuration(directory=repository))
try:
sha = 'oid'
commits = _pygit2_commits(commit, repository)
except ImportError:
try:
sha = 'hexsha'
commits = _git_commits(commit, repository)
except ImportError:
print('To use this feature, please install pygit2. GitPython will '
'also work but is not recommended (python <= 2.7 only).',
file=sys.stderr)
return 2
messages = OrderedDict([(getattr(c, sha), c.message) for c in commits])
for commit_sha1 in amended_commits(messages):
if commit_sha1 in messages:
del messages[commit_sha1]
full_messages = list(
enrich_git_log_dict(messages, options.get('commit_msg_labels'))
)
indent = ' ' if group_components else ''
wrapper = textwrap.TextWrapper(
width=70,
initial_indent=indent + '- ',
subsequent_indent=indent + ' ',
)
for label, section in options.get('commit_msg_labels'):
if section is None:
continue
bullets = []
for commit in full_messages:
bullets += [
{'text': bullet, 'component': commit['component']}
for lbl, bullet in commit['paragraphs']
if lbl == label and bullet is not None
]
if len(bullets) > 0:
print(section)
print('-' * len(section))
print()
if group_components:
def key(cmt):
return cmt['component']
for component, bullets in itertools.groupby(
sorted(bullets, key=key), key):
bullets = list(bullets)
if len(bullets) > 0:
print('+ {}'.format(component))
print()
for bullet in bullets:
print(wrapper.fill(bullet['text']))
print()
else:
for bullet in bullets:
print(wrapper.fill(bullet['text']))
print()
return 0
|
greut/invenio-kwalitee
|
kwalitee/cli/prepare.py
|
Python
|
gpl-2.0
| 6,883 | 0 |
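A small sketch of how the helpers above classify commit-message bullets (import path assumed from this module's location; labels are (label, section) pairs as returned by get_options):
from kwalitee.cli.prepare import analyse_body_paragraph, remove_ticket_directives
labels = [('BETTER', 'Improvements'), ('FIX', 'Bug fixes')]
print(analyse_body_paragraph('* FIX Handle empty input. (closes #42)', labels))
# ('FIX', 'Handle empty input. (closes #42)')
print(analyse_body_paragraph('* Unlabelled bullet.', labels))
# (None, 'Unlabelled bullet.')
print(remove_ticket_directives('(closes #42)'))
# (#42)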
from .cal_var_byPCA import *
|
stephenliu1989/HK_DataMiner
|
hkdataminer/template_matching/Select_angle/__init__.py
|
Python
|
apache-2.0
| 23 | 0.043478 |
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapiclient.GenericClient import GenericClient
from networkapiclient.utils import is_valid_int_param
from networkapiclient.exception import InvalidParameterError
class EquipamentoAmbiente(GenericClient):
def __init__(self, networkapi_url, user, password, user_ldap=None):
"""Class constructor receives parameters to connect to the networkAPI.
:param networkapi_url: URL to access the network API.
:param user: User for authentication.
:param password: Password for authentication.
"""
super(
EquipamentoAmbiente,
self).__init__(
networkapi_url,
user,
password,
user_ldap)
def inserir(self, id_equipment, id_environment, is_router=0):
"""Inserts a new Related Equipment with Environment and returns its identifier
:param id_equipment: Identifier of the Equipment. Integer value and greater than zero.
:param id_environment: Identifier of the Environment. Integer value and greater than zero.
:param is_router: Identifier of the Environment. Boolean value.
:return: Dictionary with the following structure:
::
{'equipamento_ambiente': {'id': < id_equipment_environment >}}
:raise InvalidParameterError: The identifier of Equipment or Environment is null and invalid.
:raise AmbienteNaoExisteError: Environment not registered.
:raise EquipamentoNaoExisteError: Equipment not registered.
:raise EquipamentoAmbienteError: Equipment is already associated with the Environment.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
equipment_environment_map = dict()
equipment_environment_map['id_equipamento'] = id_equipment
equipment_environment_map['id_ambiente'] = id_environment
equipment_environment_map['is_router'] = is_router
code, xml = self.submit(
{'equipamento_ambiente': equipment_environment_map}, 'POST', 'equipamentoambiente/')
return self.response(code, xml)
def remover(self, id_equipment, id_environment):
"""Remove Related Equipment with Environment from by the identifier.
:param id_equipment: Identifier of the Equipment. Integer value and greater than zero.
:param id_environment: Identifier of the Environment. Integer value and greater than zero.
:return: None
:raise InvalidParameterError: The identifier of Environment, Equipament is null and invalid.
:raise EquipamentoNotFoundError: Equipment not registered.
:raise EquipamentoAmbienteNaoExisteError: Environment not registered.
:raise VipIpError: IP-related equipment is being used for a request VIP.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database.
"""
if not is_valid_int_param(id_equipment):
raise InvalidParameterError(
u'The identifier of Equipment is invalid or was not informed.')
if not is_valid_int_param(id_environment):
raise InvalidParameterError(
u'The identifier of Environment is invalid or was not informed.')
url = 'equipment/' + \
str(id_equipment) + '/environment/' + str(id_environment) + '/'
code, xml = self.submit(None, 'DELETE', url)
return self.response(code, xml)
def update(self, id_equipment, id_environment, is_router):
"""Remove Related Equipment with Environment from by the identifier.
:param id_equipment: Identifier of the Equipment. Integer value and greater than zero.
:param id_environment: Identifier of the Environment. Integer value and greater than zero.
:param is_router: Identifier of the Environment. Boolean value.
:return: None
:raise InvalidParameterError: The identifier of Environment, Equipament is null and invalid.
:raise EquipamentoNotFoundError: Equipment not registered.
:raise EquipamentoAmbienteNaoExisteError: Environment not registered.
:raise VipIpError: IP-related equipment is being used for a request VIP.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database.
"""
if not is_valid_int_param(id_equipment):
raise InvalidParameterError(
u'The identifier of Equipment is invalid or was not informed.')
if not is_valid_int_param(id_environment):
raise InvalidParameterError(
u'The identifier of Environment is invalid or was not informed.')
equipment_environment_map = dict()
equipment_environment_map['id_equipamento'] = id_equipment
equipment_environment_map['id_ambiente'] = id_environment
equipment_environment_map['is_router'] = is_router
code, xml = self.submit(
{'equipamento_ambiente': equipment_environment_map}, 'PUT', 'equipamentoambiente/update/')
return self.response(code, xml)
|
globocom/GloboNetworkAPI-client-python
|
networkapiclient/EquipamentoAmbiente.py
|
Python
|
apache-2.0
| 6,040 | 0.002815 |
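A hedged usage sketch of the client above (URL and credentials are placeholders):
client = EquipamentoAmbiente('http://networkapi.example.com/', 'api_user', 'api_password')
# Associate equipment 10 with environment 5, marking it as a router;
# per the docstring, the response nests the new association id.
resp = client.inserir(10, 5, is_router=1)
print(resp['equipamento_ambiente']['id'])
client.remover(10, 5)  # undo the association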
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import grp, os, pwd
from conary_test import recipes
from conary.local import database
from conary.cmds import verify
from conary.repository import changeset
from conary_test import rephelp
class VerifyTest(rephelp.RepositoryHelper):
def testDisplay(self):
userDict = {}
userDict['user'], userDict['group'] = self._getUserGroup()
self.resetRepository()
self.resetRoot()
(built, d) = self.buildRecipe(recipes.testRecipe1, "TestRecipe1")
pkgname, version = built[0][:2]
self.updatePkg(self.rootDir, 'testcase', version)
self.writeFile(self.rootDir + '/usr/bin/hello', 'newtext')
sb = os.stat(self.rootDir + '/usr/bin/hello')
# we need the time to change; conary ignores size changes on
# executables to allow it to handle prelink sanely
os.utime(self.rootDir + '/usr/bin/hello', (sb.st_mtime + 1,
sb.st_mtime + 1))
db = database.Database(self.rootDir, self.cfg.dbPath)
rc, str = self.captureOutput(verify.verify, ['testcase'], db, self.cfg)
# XXX verify that output is correct here...will have to ignore
# uid/gid information, as localcs expects everything to be owned
# by root. Can share parsing code with showchangesettest
rc, str2 = self.captureOutput(verify.verify, [], db, self.cfg, all=True)
assert(str == str2)
assert('testcase:runtime' in str)
assert('/usr/bin/hello' in str)
assert(' 7 ' in str)
assert(' 20 ' in str) # make sure original size of file is displayed
assert(' -rwxr-xr-x ' in str) # make sure original mode of file is
# display (Even though that wasn't changed)
rc, str = self.captureOutput(verify.verify, ['testcase:runtime'], db,
self.cfg, diffBinaries=True)
self.assertEquals(str,
'diff --git a/etc/changedconfig b/etc/changedconfig\n'
'old user root\n'
'new user %(user)s\n'
'old group root\n'
'new group %(group)s\n'
'diff --git a/etc/unchangedconfig b/etc/unchangedconfig\n'
'old user root\n'
'new user %(user)s\n'
'old group root\n'
'new group %(group)s\n'
'diff --git a/usr/share/changed b/usr/share/changed\n'
'old user root\n'
'new user %(user)s\n'
'old group root\n'
'new group %(group)s\n'
'diff --git a/usr/share/unchanged b/usr/share/unchanged\n'
'old user root\n'
'new user %(user)s\n'
'old group root\n'
'new group %(group)s\n'
'diff --git a/usr/bin/hello b/usr/bin/hello\n'
'old user root\n'
'new user %(user)s\n'
'old group root\n'
'new group %(group)s\n'
'GIT binary patch\n'
'literal 7\n'
'Oc$~{iEiXx}C;<Qr9Rm;m\n'
'\n' % userDict)
self.logFilter.add()
verify.verify(['unknownpkg'], db, self.cfg)
verify.verify(['unknownpkg=@rpl:linux'], db, self.cfg)
self.logFilter.remove()
self.logFilter.compare(('error: trove unknownpkg is not installed',
'error: version @rpl:linux of trove unknownpkg is not installed'))
def testVerifyWithSignatures(self):
# Make sure that verify works with troves that have
# missing components, which means that the collection's signature
# is no good...
self.addComponent('foo:runtime', '1.0', '',
['/foo'])
self.addComponent('foo:data', '1.0')
self.addCollection('foo', '1.0', [':runtime', ':data'])
self.updatePkg(['foo', 'foo:runtime'], recurse=False)
self.writeFile(self.rootDir + '/foo', 'newtext')
db = database.Database(self.rootDir, self.cfg.dbPath)
self.captureOutput(verify.verify, ['foo'], db, self.cfg)
def testVerifyRemovedFiles(self):
# CNY-950
self.addComponent('foo:runtime', '1.0', fileContents = ['/foo'])
self.updatePkg('foo:runtime')
self.removeFile(self.rootDir, '/foo')
db = database.Database(self.rootDir, self.cfg.dbPath)
s = self.captureOutput(verify.verify, ['foo:runtime'], db, self.cfg)
assert(not s[1])
@staticmethod
def _getUserGroup():
user = pwd.getpwuid(os.getuid()).pw_name
group = grp.getgrgid(os.getgid()).gr_name
return user, group
def testVerifyToFile(self):
db = database.Database(self.rootDir, self.cfg.dbPath)
os.chdir(self.workDir)
user, group = self._getUserGroup()
self.addComponent('foo:runtime', '1.0',
fileContents = [('/foo',
rephelp.RegularFile(owner = user,
group = group))])
self.updatePkg('foo:runtime')
s = verify.verify(['foo:runtime'], db, self.cfg,
changesetPath = 'foo.ccs')
cs = changeset.ChangeSetFromFile('foo.ccs')
assert(list(cs.iterNewTroveList()) == [])
f = open(self.rootDir + '/foo', "a")
f.write("mod")
f.close()
s = self.captureOutput(verify.verify, ['foo:runtime'], db, self.cfg,
changesetPath = 'foo.ccs')
assert(not s[1])
cs = changeset.ChangeSetFromFile('foo.ccs')
assert(list(cs.iterNewTroveList())[0].getName() == 'foo:runtime')
def testVerifyAll(self):
os.chdir(self.workDir)
self.addComponent('foo:runtime', '1.0', fileContents = ['/bin/b'])
self.addComponent('bar:lib', '1.0', fileContents = ['/lib/l'])
self.addCollection('foo', [ ':runtime' ])
self.addCollection('bar', [ ':lib' ])
db = database.Database(self.rootDir, self.cfg.dbPath)
self.updatePkg('foo')
self.updatePkg('bar')
verify.verify([], db, self.cfg, all = True, changesetPath = 'foo.ccs')
cs = changeset.ChangeSetFromFile('foo.ccs')
assert(sorted([ x.getName() for x in cs.iterNewTroveList() ]) ==
[ 'bar:lib', 'foo:runtime' ] )
def testHashCheck(self):
# by default, we trust the size/date timestamps
repos = self.openRepository()
db = database.Database(self.rootDir, self.cfg.dbPath)
os.chdir(self.workDir)
user, group = self._getUserGroup()
trv = self.addComponent('foo:runtime',
fileContents = [ ( '/a', rephelp.RegularFile(contents = '1234',
owner = user,
group = group)) ] )
fileInfo = trv.iterFileList().next()
self.updatePkg('foo:runtime')
f = open(self.rootDir + '/a', "w")
f.write('abcd')
f.close()
f = repos.getFileVersions([(fileInfo[0], fileInfo[2], fileInfo[3])])[0]
st = os.stat(self.rootDir + '/a')
os.utime(self.rootDir + '/a', (f.inode.mtime(), f.inode.mtime()))
s = self.captureOutput(verify.verify, ['foo:runtime'], db, self.cfg,
changesetPath = 'foo.ccs')
assert(not s[1])
verify.verify(['foo:runtime'], db, self.cfg, forceHashCheck = True,
changesetPath = 'foo.ccs')
cs = changeset.ChangeSetFromFile('foo.ccs')
assert(cs.files)
def testNewFiles(self):
userDict = {}
userDict['user'], userDict['group'] = self._getUserGroup()
self.addComponent('foo:run=1',
fileContents = [ ('/bin/ls',
rephelp.RegularFile(owner = userDict['user'],
group = userDict['group'],
contents = 'content\n')) ])
self.updatePkg('foo:run=1')
db = self.openDatabase()
s = self.captureOutput(verify.verify, ['foo:run'], db, self.cfg,
asDiff = True)[1]
self.assertEquals(s, '')
# we don't notice the new file unless all is given because
# nothing owns /bin
self.writeFile(self.rootDir + '/bin/new-file', 'newtext\n')
s = self.captureOutput(verify.verify, ['foo:run'], db, self.cfg,
asDiff = True, newFiles = True)[1]
self.assertEquals(s, '')
s = self.captureOutput(verify.verify, [], db, self.cfg,
asDiff = True, newFiles = True,
all = True)[1]
self.assertEquals(s,
'diff --git a/bin/new-file b/bin/new-file\n'
'new user %(user)s\n'
'new group %(group)s\n'
'new mode 100644\n'
'--- a/dev/null\n'
'+++ b/bin/new-file\n'
'@@ -1,0 +1,1 @@\n'
'+newtext\n' % userDict)
# check the normal output format as well
s = self.captureOutput(verify.verify, [], db, self.cfg,
newFiles = True, all = True)[1]
# filter out the timestamp
s = ' '.join(s.split()[0:8] + s.split()[10:])
self.assertEquals(s,
'Install @new:files=1.0-1-1 New -rw-r--r-- 1 %(user)s %(group)s '
'8 UTC /bin/new-file' % userDict)
# if we add don't check /bin to the exclude list the diff should
# go away
oldCfg = self.cfg.verifyDirsNoNewFiles[:]
try:
self.cfg.verifyDirsNoNewFiles.append('/bin')
s = self.captureOutput(verify.verify, [], db, self.cfg,
asDiff = True, newFiles = True,
all = True)[1]
self.assertEquals(s, '')
finally:
self.cfg.verifyDirsNoNewFiles = oldCfg
# make a package own /bin, and then verifying that package w/
# --new-files should make it show up
self.addComponent('foo:dir=1',
fileContents = [ ('/bin',
rephelp.Directory(owner = userDict['user'],
group = userDict['group'],))])
self.updatePkg('foo:dir=1')
s = self.captureOutput(verify.verify, ['foo:dir'], db, self.cfg,
asDiff = True, newFiles = True)[1]
self.assertEquals(s,
'diff --git a/bin/new-file b/bin/new-file\n'
'new user %(user)s\n'
'new group %(group)s\n'
'new mode 100644\n'
'--- a/dev/null\n'
'+++ b/bin/new-file\n'
'@@ -1,0 +1,1 @@\n'
'+newtext\n' % userDict)
def testNewFileOwnership(self):
# make sure files found with --new-files get assigned to the right
# troves
user, group = self._getUserGroup()
self.addComponent('foo:bin=0',
fileContents = [ ('/bin',
rephelp.Directory(owner = user,
group = group,)) ])
self.addComponent('foo:lib=1',
fileContents = [ ('/lib',
rephelp.Directory(owner = user,
group = group,)) ])
self.updatePkg([ 'foo:bin', 'foo:lib' ])
db = self.openDatabase()
self.writeFile(self.rootDir + '/bin/new', 'newtext\n')
self.writeFile(self.rootDir + '/lib/new', 'newtext\n')
self.writeFile(self.rootDir + '/rootfile', 'newtext\n')
os.chdir(self.workDir)
verify.verify([], db, self.cfg, all = True, newFiles = True,
changesetPath = 'foo.ccs')
cs = changeset.ChangeSetFromFile('foo.ccs')
trvCsByName = dict((x.getName(), x) for x in cs.iterNewTroveList())
self.assertEquals(
[ x[1] for x in trvCsByName['foo:bin'].getNewFileList() ],
[ '/bin/new'] )
self.assertEquals(
[ x[1] for x in trvCsByName['foo:lib'].getNewFileList() ],
[ '/lib/new'] )
self.assertEquals(
[ x[1] for x in trvCsByName['@new:files'].getNewFileList() ],
[ '/rootfile'] )
|
fedora-conary/conary
|
conary_test/verifytest.py
|
Python
|
apache-2.0
| 12,956 | 0.015282 |
import abc
import logging
from time import sleep, time
from subprocess import call, check_output
from config import sim_dump_location, safe_location, devel
import infoli_diagnostics
import sys
class countermeasure(object):
''' Countermeasure class '''
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def perform(self):
return
''' defines an ordering among the different countermeasures, based on MTTR '''
countermeasure_enum = {
'restartSimulation':0
}
def wait_for_cores(core_names, timeout):
''' Utility function that blocks until a set of cores is available
or until the timeout is reached
'''
if devel:
return True
t0 = time()
available_cores = 0
while available_cores < len(core_names):
status = check_output(['sccBoot', '-s'])
if status[-11:-8] == "All":
available_cores = 48
elif status[-10:-8] == "No":
available_cores = 0
else:
available_cores = int(status[-10:-8])
if time() - t0 > timeout:
logging.error("Timeout exceeded for %s cores", expected)
return False
sleep(10)
status = check_output(['sccBoot', '-s'])
print status
return True
class restartSimulation(countermeasure):
""" Restarts the simulation """
__name__ = 'restartSimulation'
def __init__(self, manager):
self.manager = manager
def perform(self):
logging.info("performing the Restart Simulation countermeasure")
print self.manager.checkpoints
if any(isinstance(x, infoli_diagnostics.infoliOutputDivergence) for x in self.manager.failed_diagnostics()): #infoli-specific
# check if the SDC detection diagnostic has failed, and use the SDC checkpoint
print sorted(self.manager.checkpoints)
checkpoint = max(self.manager.checkpoints)
else:
checkpoint = max(self.manager.checkpoints)
print("The mttr_values are:",self.manager.mttr_values)
print("Calling dvfs: ")
self.manager.dvfs.dvfsOperation(checkpoint)
print "Restarting from step" + str(checkpoint)
logging.info("Restarting from step " + str(checkpoint))
with self.manager.lock:
# Copy safe checkpoints
#for i in range(self.manager.num_cores):
# call( ['cp', '-f', '-u', safe_location + str(checkpoint) + '/ckptFile%d.bin' %i, sim_dump_location])
# call( ['cp', '-f', '-u', safe_location + str(checkpoint) + '/InferiorOlive_Output%d.txt' %i, sim_dump_location])
self.manager.rccerun([self.manager.restart_exec] + self.manager.exec_list[1:]) # use False as extra last argument to avoid piping stdout for diagnostics - useful for measurements
logging.info("Restart Simulation countermeasure completed")
return True
|
A-Kokolis/thesis-ntua
|
scc_implementation/scc_countermeasures.py
|
Python
|
gpl-3.0
| 2,957 | 0.00372 |
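The fixed-offset slicing in wait_for_cores encodes the assumed `sccBoot -s` output formats; pulled out as a standalone helper for clarity (the formats themselves are an assumption carried over from the code above):
def parse_scc_status(status):
    # "... All cores booted" -> 48, "... No cores booted" -> 0,
    # "... NN cores booted"  -> NN (two-digit count).
    if status[-11:-8] == "All":
        return 48
    if status[-10:-8] == "No":
        return 0
    return int(status[-10:-8])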
import MySQLdb as mysql
class EveDb(object):
"""
This class is responsible for loading up an instance of the eve static
dump information. Without this, most functionality of this library will
not work. """
def __init__(self, database, user, passwd, host="localhost"):
self.db = mysql.connect(host=host, user=user, passwd=passwd, db=database)
def get_item_row(self, id):
cur=self.db.cursor()
cols = ("typeID", "typeName", "description", "volume")
cur.execute("select "+ ",".join(cols) + " from invTypes where typeID = %s", (id,))
row = cur.fetchone()
row = dict(zip(cols, row))
return row
def get_location_row(self, id):
return
def get_location_by_string(self, id):
return
    def get_item_by_string(self, txt):
        c = self.db.cursor()
        # Use LIKE (GLOB is SQLite-only) and let the driver quote the value.
        c.execute("select typeName from invTypes where typeName LIKE %s", (txt,))
        row = c.fetchone()
        return row[0] if row else None
game_db=EveDb("eve", "eve", "eve")
|
pkovac/evedustrial
|
eve/db.py
|
Python
|
mit
| 1,136 | 0.011444 |
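A short usage sketch (typeID 34 is assumed to be Tritanium in the static dump):
db = EveDb("eve", "eve", "eve")
row = db.get_item_row(34)
print(row['typeName'], row['volume'])
print(db.get_item_by_string('Trit%'))  # LIKE pattern match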
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import test_l10n_es_aeat_mod347
|
OCA/l10n-spain
|
l10n_es_aeat_mod347/tests/__init__.py
|
Python
|
agpl-3.0
| 104 | 0 |
# -*- coding: utf-8 -*-
# The Hazard Library
# Copyright (C) 2013-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports
:class:`EdwardsFah2013Alpine10Bars`,
:class:`EdwardsFah2013Alpine20Bars`,
:class:`EdwardsFah2013Alpine30Bars`,
:class:`EdwardsFah2013Alpine50Bars`,
:class:`EdwardsFah2013Alpine60Bars`,
:class:`EdwardsFah2013Alpine75Bars`,
:class:`EdwardsFah2013Alpine90Bars`,
:class:`EdwardsFah2013Alpine120Bars`.
"""
from __future__ import division
import numpy as np
from scipy.constants import g
from openquake.hazardlib.gsim.base import GMPE
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGV, PGA, SA
from openquake.hazardlib.gsim.edwards_fah_2013a_coeffs import (
COEFFS_ALPINE_60Bars,
COEFFS_ALPINE_10Bars,
COEFFS_ALPINE_20Bars,
COEFFS_ALPINE_30Bars,
COEFFS_ALPINE_50Bars,
COEFFS_ALPINE_75Bars,
COEFFS_ALPINE_90Bars,
COEFFS_ALPINE_120Bars
)
from openquake.hazardlib.gsim.utils_swiss_gmpe import (
_compute_phi_ss,
_compute_C1_term
)
class EdwardsFah2013Alpine10Bars(GMPE):
"""
    This class implements the GMPE developed by Ben Edwards and Donat Fäh
    and published as "A Stochastic Ground-Motion Model for Switzerland",
    Bulletin of the Seismological Society of America,
    Vol. 103, No. 1, pp. 78–98, February 2013.
    The GMPE was parametrized by Carlo Cauzzi to be implemented in OpenQuake.
    This class implements the equations for 'Alpine' and 'Foreland' - two
    tectonic regionalizations defined for Switzerland - therefore this
    GMPE is region specific.
@ implemented by laurentiu.danciu@sed.ethz.zh
"""
#: Supported tectonic region type is ALPINE which
#: is a sub-region of Active Shallow Crust.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Supported intensity measure types are spectral acceleration,
#: and peak ground acceleration, see tables 3 and 4, pages 227 and 228.
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
PGV,
PGA,
SA
])
#: Supported intensity measure component is the geometric mean of two
#: horizontal components
#: :attr:`~openquake.hazardlib.const.IMC.AVERAGE_HORIZONTAL`
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
#: Supported standard deviation type is total,
#: Carlo Cauzzi - Personal Communication
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL
])
#: Required site parameter is only Vs30 (used to distinguish rock
#: and deep soil).
REQUIRES_SITES_PARAMETERS = set(('vs30', ))
#: Required rupture parameters: magnitude
REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'rake'))
#: Required distance measure is Rrup
REQUIRES_DISTANCES = set(('rrup', ))
#: Vs30 value representing typical rock conditions in Switzerland.
#: confirmed by the Swiss GMPE group
ROCK_VS30 = 1105
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
COEFFS = self.COEFFS[imt]
R = self._compute_term_r(COEFFS, rup.mag, dists.rrup)
mean = 10 ** (self._compute_mean(COEFFS, rup.mag, R))
# Convert units to g,
# but only for PGA and SA (not PGV):
if isinstance(imt, (PGA, SA)):
mean = np.log(mean / (g*100.))
else:
# PGV:
mean = np.log(mean)
c1_rrup = _compute_C1_term(COEFFS, dists.rrup)
log_phi_ss = 1.00
stddevs = self._get_stddevs(
COEFFS, stddev_types, sites.vs30.shape[0], rup.mag, c1_rrup,
log_phi_ss, COEFFS['mean_phi_ss']
)
return mean, stddevs
def _get_stddevs(self, C, stddev_types, num_sites, mag, c1_rrup,
log_phi_ss, mean_phi_ss):
"""
Return standard deviations
"""
phi_ss = _compute_phi_ss(C, mag, c1_rrup, log_phi_ss, mean_phi_ss)
stddevs = []
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
if stddev_type == const.StdDev.TOTAL:
stddevs.append(np.sqrt(
C['tau'] * C['tau'] +
phi_ss * phi_ss) +
np.zeros(num_sites))
elif stddev_type == const.StdDev.INTRA_EVENT:
stddevs.append(phi_ss + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTER_EVENT:
stddevs.append(C['tau'] + np.zeros(num_sites))
return stddevs
def _compute_term_r(self, C, mag, rrup):
"""
Compute distance term
d = log10(max(R,rmin));
"""
if mag > self.M1:
rrup_min = 0.55
elif mag > self.M2:
rrup_min = -2.80 * mag + 14.55
else:
rrup_min = -0.295 * mag + 2.65
R = np.maximum(rrup, rrup_min)
return np.log10(R)
def _compute_term_1(self, C, mag):
"""
Compute term 1
a1 + a2.*M + a3.*M.^2 + a4.*M.^3 + a5.*M.^4 + a6.*M.^5 + a7.*M.^6
"""
return (
C['a1'] + C['a2'] * mag + C['a3'] *
np.power(mag, 2) + C['a4'] * np.power(mag, 3)
+ C['a5'] * np.power(mag, 4) + C['a6'] *
np.power(mag, 5) + C['a7'] * np.power(mag, 6)
)
def _compute_term_2(self, C, mag, R):
"""
(a8 + a9.*M + a10.*M.*M + a11.*M.*M.*M).*d(r)
"""
return (
(C['a8'] + C['a9'] * mag + C['a10'] * np.power(mag, 2) +
C['a11'] * np.power(mag, 3)) * R
)
def _compute_term_3(self, C, mag, R):
"""
(a12 + a13.*M + a14.*M.*M + a15.*M.*M.*M).*(d(r).^2)
"""
return (
(C['a12'] + C['a13'] * mag + C['a14'] * np.power(mag, 2) +
C['a15'] * np.power(mag, 3)) * np.power(R, 2)
)
def _compute_term_4(self, C, mag, R):
"""
(a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3)
"""
return (
(C['a16'] + C['a17'] * mag + C['a18'] * np.power(mag, 2) +
C['a19'] * np.power(mag, 3)) * np.power(R, 3)
)
def _compute_term_5(self, C, mag, R):
"""
(a20 + a21.*M + a22.*M.*M + a23.*M.*M.*M).*(d(r).^4)
"""
return (
(C['a20'] + C['a21'] * mag + C['a22'] * np.power(mag, 2) +
C['a23'] * np.power(mag, 3)) * np.power(R, 4)
)
def _compute_mean(self, C, mag, term_dist_r):
"""
compute mean
"""
return (self._compute_term_1(C, mag) +
self._compute_term_2(C, mag, term_dist_r) +
self._compute_term_3(C, mag, term_dist_r) +
self._compute_term_4(C, mag, term_dist_r) +
self._compute_term_5(C, mag, term_dist_r))
#: Fixed magnitude terms
M1 = 5.00
M2 = 4.70
COEFFS = COEFFS_ALPINE_10Bars
class EdwardsFah2013Alpine20Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 20Bars Model :class:`EdwardsFah2013Alpine20Bars`
"""
COEFFS = COEFFS_ALPINE_20Bars
class EdwardsFah2013Alpine30Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 30Bars Model :class:`EdwardsFah2013Alpine30Bars`
"""
COEFFS = COEFFS_ALPINE_30Bars
class EdwardsFah2013Alpine50Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 50Bars Model :class:`EdwardsFah2013Alpine50Bars`
"""
COEFFS = COEFFS_ALPINE_50Bars
class EdwardsFah2013Alpine60Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 60Bars Model :class:`EdwardsFah2013Alpine60Bars`
"""
COEFFS = COEFFS_ALPINE_60Bars
class EdwardsFah2013Alpine75Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 75Bars Model :class:`EdwardsFah2013Alpine75Bars`
"""
COEFFS = COEFFS_ALPINE_75Bars
class EdwardsFah2013Alpine90Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 90Bars Model :class:`EdwardsFah2013Alpine90Bars`
"""
COEFFS = COEFFS_ALPINE_90Bars
class EdwardsFah2013Alpine120Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 120Bars Model :class:`EdwardsFah2013Alpine120Bars`
"""
COEFFS = COEFFS_ALPINE_120Bars
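# Illustrative usage sketch (not part of the original module; the sites, rup
# and dists contexts are assumptions, normally built by hazardlib calculators):
# gsim = EdwardsFah2013Alpine60Bars()
# mean, stddevs = gsim.get_mean_and_stddevs(sites, rup, dists, PGA(),
#                                           [const.StdDev.TOTAL])
# The stress-drop variants differ only in their COEFFS table, so choosing a
# model is just a matter of instantiating the matching subclass.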
|
mmpagani/oq-hazardlib
|
openquake/hazardlib/gsim/edwards_fah_2013a.py
|
Python
|
agpl-3.0
| 9,481 | 0 |
"""
WSGI config for thumbor_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "thumbor_project.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thumbor_project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
lCharlie123l/django-thumborstorage
|
tests/thumbor_project/thumbor_project/wsgi.py
|
Python
|
mit
| 1,446 | 0.000692 |
#!/usr/bin/env python
import luigi
import dynamic_range_simulation
import darknoise_simulation
import pde_simulation
import relative_pde_simulation
import n_pe_simulation
import crosstalk_neighbour_simulation
class All(luigi.WrapperTask):
def requires(self):
yield crosstalk_neighbour_simulation.All()
yield darknoise_simulation.All()
yield dynamic_range_simulation.All()
yield n_pe_simulation.All()
yield pde_simulation.All()
yield relative_pde_simulation.All()
if __name__ == "__main__":
luigi.run(main_task_cls=All)
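# Illustrative invocation (a sketch; assumes the simulation modules above are
# importable from the working directory):
# python all.py --local-scheduler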
|
ntim/g4sipm
|
sample/run/luigi/all.py
|
Python
|
gpl-3.0
| 561 | 0.023173 |
from __future__ import absolute_import
from sentry.testutils import TestCase
class EventTest(TestCase):
def test_legacy_tags(self):
event = self.create_event(data={
'tags': [
('logger', 'foobar'),
('site', 'foo'),
('server_name', 'bar'),
]
})
assert event.logger == 'foobar'
assert event.level == event.group.level
assert event.site == 'foo'
assert event.server_name == 'bar'
assert event.culprit == event.group.culprit
def test_email_subject(self):
event1 = self.create_event(
event_id='a' * 32, group=self.group, tags={'level': 'info'},
message='Foo bar')
event2 = self.create_event(
event_id='b' * 32, group=self.group, tags={'level': 'error'},
message='Foo bar')
self.group.level = 30
assert event1.get_email_subject() == '[foo Bar] INFO: Foo bar'
assert event2.get_email_subject() == '[foo Bar] ERROR: Foo bar'
class EventGetLegacyMessageTest(TestCase):
def test_message(self):
event = self.create_event(message='foo bar')
assert event.get_legacy_message() == 'foo bar'
def test_message_interface(self):
event = self.create_event(
message='biz baz',
data={
'sentry.interfaces.Message': {'message': 'foo bar'}
},
)
assert event.get_legacy_message() == 'foo bar'
def test_message_interface_with_formatting(self):
event = self.create_event(
message='biz baz',
data={
'sentry.interfaces.Message': {
'message': 'foo %s',
'formatted': 'foo bar',
'params': ['bar'],
}
},
)
assert event.get_legacy_message() == 'foo bar'
|
alexm92/sentry
|
tests/sentry/models/test_event.py
|
Python
|
bsd-3-clause
| 1,907 | 0 |
from . import foo
|
asedunov/intellij-community
|
python/testData/completion/relativeFromImportInNamespacePackage2/nspkg1/a.after.py
|
Python
|
apache-2.0
| 17 | 0.058824 |
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''return all pods '''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''return all pods '''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''return all pods '''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname, selector=None):
'''return all pods '''
cmd = ['delete', resource, rname, '-n', self.namespace]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None):
'''return all pods '''
cmd = ['process', '-n', self.namespace]
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
if self.all_namespaces:
cmd.extend(['--all-namespaces'])
elif self.namespace:
cmd.extend(['-n', self.namespace])
cmd.extend(['-o', 'json'])
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
#pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
#pylint: disable=too-many-arguments
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['/usr/bin/oadm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
stdout, stderr = proc.communicate(input_data)
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_file(item['path'], item['data'], ftype=content_type)
files.append({'name': os.path.basename(path), 'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if result.has_key('metadata') and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.load(contents, yaml.RoundTripLoader)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
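        # Example (illustrative): comparing a user definition
        # {'spec': {'replicas': 1}} against a query result
        # {'spec': {'replicas': 1}, 'status': {...}} returns True, because
        # autogenerated keys such as 'status' and 'metadata' are skipped below.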
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not user_def.has_key(key):
if debug:
print 'User data does not have key [%s]' % key
print 'User data: %s' % user_def
return False
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
return False
if len(user_def[key]) != len(value):
if debug:
print "List lengths are not equal."
print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
print "user_def: %s" % user_def[key]
print "value: %s" % value
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
print type(values[0])
print type(values[1])
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print 'list compare returned false'
return False
elif value != user_def[key]:
if debug:
print 'value should be identical'
print value
print user_def[key]
return False
# recurse on a dictionary
elif isinstance(value, dict):
if not user_def.has_key(key):
if debug:
print "user_def does not have key [%s]" % key
return False
if not isinstance(user_def[key], dict):
if debug:
print "dict returned false: not instance of dict"
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print "keys are not equal in dict"
print api_values
print user_values
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
print result
return False
# Verify each key, value pair is the same
else:
if not user_def.has_key(key) or value != user_def[key]:
if debug:
print "value not equal; user_def does not have key"
print key
print value
if user_def.has_key(key):
print user_def[key]
return False
if debug:
print 'returning true'
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator
    @separator.setter
    def separator(self, value):
        ''' setter method for separator '''
        self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            add_entry(d, 'a#b', 'x', sep='#') sets d['a']['b'] = 'x'
        '''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
return None
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
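    # Example (illustrative): Yedit.get_entry({'a': {'b': ['x', 'y']}}, 'a.b[1]')
    # descends dict key 'a', then dict key 'b', then list index 1 -> 'y'.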
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
class CertificateAuthorityConfig(OpenShiftCLIConfig):
''' CertificateAuthorityConfig is a DTO for the oadm ca command '''
def __init__(self, cmd, kubeconfig, verbose, ca_options):
super(CertificateAuthorityConfig, self).__init__('ca', 'default', kubeconfig, ca_options)
self.cmd = cmd
self.kubeconfig = kubeconfig
self.verbose = verbose
self._ca = ca_options
class CertificateAuthority(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
def __init__(self,
config,
verbose=False):
''' Constructor for oadm ca '''
super(CertificateAuthority, self).__init__('default', config.kubeconfig, verbose)
self.config = config
self.verbose = verbose
def get(self):
'''get the current cert file
If a file exists by the same name in the specified location then the cert exists
'''
cert = self.config.config_options['cert']['value']
if cert and os.path.exists(cert):
            with open(cert) as cert_fd:
                return cert_fd.read()
return None
def create(self):
'''Create a deploymentconfig '''
options = self.config.to_option_list()
cmd = ['ca']
cmd.append(self.config.cmd)
cmd.extend(options)
return self.openshift_cmd(cmd, oadm=True)
def exists(self):
''' check whether the certificate exists and has the clusterIP '''
cert_path = self.config.config_options['cert']['value']
if not os.path.exists(cert_path):
return False
proc = subprocess.Popen(['openssl', 'x509', '-noout', '-subject', '-in', cert_path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
if proc.returncode == 0:
cn_results = proc.stdout.read()
for var in self.config.config_options['hostnames']['value'].split(','):
if var in cn_results:
return True
return False
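# Illustrative playbook usage (a sketch; the file paths and hostname are
# assumptions):
# - oadm_ca:
#     cmd: create-server-cert
#     cert: /etc/origin/master/named.server.crt
#     key: /etc/origin/master/named.server.key
#     signer_cert: /etc/origin/master/ca.crt
#     signer_key: /etc/origin/master/ca.key
#     signer_serial: /etc/origin/master/ca.serial.txt
#     hostnames:
#     - master.example.com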
def main():
'''
ansible oadm module for ca
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present']),
debug=dict(default=False, type='bool'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            cmd=dict(default=None, required=True, type='str'),
# oadm ca create-master-certs [options]
cert_dir=dict(default=None, type='str'),
hostnames=dict(default=[], type='list'),
master=dict(default=None, type='str'),
public_master=dict(default=None, type='str'),
overwrite=dict(default=False, type='bool'),
signer_name=dict(default=None, type='str'),
# oadm ca create-key-pair [options]
private_key=dict(default=None, type='str'),
public_key=dict(default=None, type='str'),
# oadm ca create-server-cert [options]
cert=dict(default=None, type='str'),
key=dict(default=None, type='str'),
signer_cert=dict(default=None, type='str'),
signer_key=dict(default=None, type='str'),
signer_serial=dict(default=None, type='str'),
# name
# oadm ca create-signer-cert [options]
),
supports_check_mode=True,
)
# pylint: disable=line-too-long
config = CertificateAuthorityConfig(module.params['cmd'],
module.params['kubeconfig'],
module.params['debug'],
{'cert_dir': {'value': module.params['cert_dir'], 'include': True},
'cert': {'value': module.params['cert'], 'include': True},
'hostnames': {'value': ','.join(module.params['hostnames']), 'include': True},
'master': {'value': module.params['master'], 'include': True},
'public_master': {'value': module.params['public_master'], 'include': True},
'overwrite': {'value': module.params['overwrite'], 'include': True},
'signer_name': {'value': module.params['signer_name'], 'include': True},
'private_key': {'value': module.params['private_key'], 'include': True},
'public_key': {'value': module.params['public_key'], 'include': True},
'key': {'value': module.params['key'], 'include': True},
'signer_cert': {'value': module.params['signer_cert'], 'include': True},
'signer_key': {'value': module.params['signer_key'], 'include': True},
'signer_serial': {'value': module.params['signer_serial'], 'include': True},
})
oadm_ca = CertificateAuthority(config)
state = module.params['state']
if state == 'present':
########
# Create
########
if not oadm_ca.exists() or module.params['overwrite']:
if module.check_mode:
module.exit_json(changed=False, msg="Would have created the certificate.", state="present")
api_rval = oadm_ca.create()
module.exit_json(changed=True, results=api_rval, state="present")
########
# Exists
########
api_rval = oadm_ca.get()
module.exit_json(changed=False, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required.
from ansible.module_utils.basic import *
main()
|
joelsmith/openshift-tools
|
ansible/roles/lib_openshift_3.2/library/oadm_ca.py
|
Python
|
apache-2.0
| 35,621 | 0.002892 |
# -*- coding: utf-8 -*-
import sys, os
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'ssp'
copyright = u'2013, Yury Konovalov'
version = '0.0.1'
release = '0.0.1'
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'sspdoc'
latex_elements = {
}
latex_documents = [
('index', 'ssp.tex', u'ssp Documentation',
u'Yury Konovalov', 'manual'),
]
man_pages = [
('index', 'ssp', u'ssp Documentation',
[u'Yury Konovalov'], 1)
]
texinfo_documents = [
('index', 'ssp', u'ssp Documentation',
u'Yury Konovalov', 'ssp', 'One line description of project.',
'Miscellaneous'),
]
|
mdcic/ssp
|
docs/source/conf.py
|
Python
|
gpl-3.0
| 752 | 0.00133 |
#!/usr/bin/python
# email_url.py
# emails the video URL to the presenters
from email_ab import email_ab
class email_url(email_ab):
ready_state = 7
subject_template = "[{{ep.show.name}}] Video up: {{ep.name}}"
body_body = """
The video of your talk is posted:
{{url}}
{% if ep.state == 7 %}
Look at it, make sure the title is spelled right and the audio sounds reasonable.
If you are satisfied, tweet it, blog it, whatever it. No point in making videos if no one watches them.
To approve it click the Approve button at
http://veyepar.nextdayvideo.com/main/approve/{{ep.id}}/{{ep.slug}}/{{ep.edit_key}}/
As soon as you or someone approves your video, it will be tweeted on @NextDayVideo{% if ep.show.client.tweet_prefix %} tagged {{ep.show.client.tweet_prefix}}{% endif %}. It will also be sent to the event organizers in hopes that they add it to the event website.
{% endif %}
{% if ep.twitter_url %}
It has been tweeted: {{ ep.twitter_url }}
Re-tweet it, blog it, whatever it. No point in making videos if no one watches them.
{% endif %}
"""
py_name = "email_url.py"
def more_context(self, ep):
# If there is a Richard (pyvideo) url, use that;
# else use the youtube url.
url = ep.public_url or ep.host_url
return {'url':url}
if __name__ == '__main__':
    p = email_url()
p.main()
|
yoe/veyepar
|
dj/scripts/email_url.py
|
Python
|
mit
| 1,390 | 0.009353 |
"""
Django settings for votainteligente project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '18_bfrslfj^(m1+k+ks3q@f08rsod46lr0k0=p7+=3z5&cl7gj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'votainteligente.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'votainteligente.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'it-it'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/cache/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'cache')
from votainteligente.votainteligente_settings import *
if THEME:
INSTALLED_APPS += (THEME, )
INSTALLED_APPS += ('votai_general_theme', )
|
opencorato/votainteligente-portal-electoral
|
votainteligente/settings.py
|
Python
|
gpl-3.0
| 2,935 | 0.000681 |
# -*- coding: utf-8 -*-
#
# Flask-RQ documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 12 15:35:21 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes'))
#from setup import __version__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-RQ'
copyright = u'2012, Matt Wright'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'flask_small'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_fork': 'mattupstate/flask-rq',
'index_logo': False
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-RQdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flask-RQ.tex', u'Flask-RQ Documentation',
u'Matt Wright', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-rq', u'Flask-RQ Documentation',
[u'Matt Wright'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Flask-RQ', u'Flask-RQ Documentation',
u'Matt Wright', 'Flask-RQ', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Flask-RQ'
epub_author = u'Matt Wright'
epub_publisher = u'Matt Wright'
epub_copyright = u'2012, Matt Wright'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
pygments_style = 'flask_theme_support.FlaskyStyle'
# fall back if theme is not there
try:
__import__('flask_theme_support')
except ImportError, e:
print '-' * 74
print 'Warning: Flask themes unavailable. Building with default theme'
print 'If you want the Flask themes, run this command and build again:'
print
print ' git submodule update --init'
print '-' * 74
pygments_style = 'tango'
html_theme = 'default'
html_theme_options = {}
|
xen/flask-rq
|
docs/conf.py
|
Python
|
mit
| 9,809 | 0.006932 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake RPC implementation which calls proxy methods directly with no
queues. Casts will block, but this is very useful for tests.
"""
import inspect
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
# jsonutils has some extra logic to automatically convert objects to primitive
# types so that they can be serialized. We want to catch all cases where
# non-primitive types make it into this code and treat it as an error.
import json
import time
import eventlet
from openstack_dashboard.openstack.common.rpc import common as rpc_common
CONSUMERS = {}
class RpcContext(rpc_common.CommonRpcContext):
def __init__(self, **kwargs):
super(RpcContext, self).__init__(**kwargs)
self._response = []
self._done = False
def deepcopy(self):
values = self.to_dict()
new_inst = self.__class__(**values)
new_inst._response = self._response
new_inst._done = self._done
return new_inst
def reply(self, reply=None, failure=None, ending=False):
if ending:
self._done = True
if not self._done:
self._response.append((reply, failure))
class Consumer(object):
def __init__(self, topic, proxy):
self.topic = topic
self.proxy = proxy
def call(self, context, version, method, namespace, args, timeout):
done = eventlet.event.Event()
def _inner():
ctxt = RpcContext.from_dict(context.to_dict())
try:
rval = self.proxy.dispatch(context, version, method,
namespace, **args)
res = []
# Caller might have called ctxt.reply() manually
for (reply, failure) in ctxt._response:
if failure:
raise failure[0], failure[1], failure[2]
res.append(reply)
# if ending not 'sent'...we might have more data to
# return from the function itself
if not ctxt._done:
if inspect.isgenerator(rval):
for val in rval:
res.append(val)
else:
res.append(rval)
done.send(res)
except rpc_common.ClientException as e:
done.send_exception(e._exc_info[1])
except Exception as e:
done.send_exception(e)
thread = eventlet.greenthread.spawn(_inner)
if timeout:
start_time = time.time()
while not done.ready():
eventlet.greenthread.sleep(1)
cur_time = time.time()
if (cur_time - start_time) > timeout:
thread.kill()
raise rpc_common.Timeout()
return done.wait()
class Connection(object):
"""Connection object."""
def __init__(self):
self.consumers = []
def create_consumer(self, topic, proxy, fanout=False):
consumer = Consumer(topic, proxy)
self.consumers.append(consumer)
if topic not in CONSUMERS:
CONSUMERS[topic] = []
CONSUMERS[topic].append(consumer)
def close(self):
for consumer in self.consumers:
CONSUMERS[consumer.topic].remove(consumer)
self.consumers = []
def consume_in_thread(self):
pass
def create_connection(conf, new=True):
"""Create a connection."""
return Connection()
def check_serialize(msg):
"""Make sure a message intended for rpc can be serialized."""
json.dumps(msg)
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
check_serialize(msg)
method = msg.get('method')
if not method:
return
args = msg.get('args', {})
version = msg.get('version', None)
namespace = msg.get('namespace', None)
try:
consumer = CONSUMERS[topic][0]
except (KeyError, IndexError):
return iter([None])
else:
return consumer.call(context, version, method, namespace, args,
timeout)
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg):
check_serialize(msg)
try:
call(conf, context, topic, msg)
except Exception:
pass
def notify(conf, context, topic, msg, envelope):
check_serialize(msg)
def cleanup():
pass
def fanout_cast(conf, context, topic, msg):
"""Cast to all consumers of a topic."""
check_serialize(msg)
method = msg.get('method')
if not method:
return
args = msg.get('args', {})
version = msg.get('version', None)
namespace = msg.get('namespace', None)
for consumer in CONSUMERS.get(topic, []):
try:
consumer.call(context, version, method, namespace, args, None)
except Exception:
pass
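# Illustrative sketch (conf, context and proxy are assumptions): register a
# consumer, then issue a blocking call through the fake in-process transport.
# conn = create_connection(conf)
# conn.create_consumer('compute', proxy)
# result = call(conf, context, 'compute', {'method': 'ping', 'args': {}})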
|
Havate/havate-openstack
|
proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/openstack/common/rpc/impl_fake.py
|
Python
|
apache-2.0
| 5,854 | 0 |
"""
Room Typeclasses for the TutorialWorld.
This defines special types of Rooms available in the tutorial. To keep
everything in one place we define them together with the custom
commands needed to control them. Those commands could also have been
in a separate module (e.g. if they could have been re-used elsewhere.)
"""
from __future__ import print_function
import random
from evennia import TICKER_HANDLER
from evennia import CmdSet, Command, DefaultRoom
from evennia import utils, create_object, search_object
from evennia import syscmdkeys, default_cmds
from evennia.contrib.tutorial_world.objects import LightSource
# the system error-handling module is defined in the settings. We load the
# given setting here using utils.object_from_module. This way we can use
# it regardless of if we change settings later.
from django.conf import settings
_SEARCH_AT_RESULT = utils.object_from_module(settings.SEARCH_AT_RESULT)
# -------------------------------------------------------------
#
# Tutorial room - parent room class
#
# This room is the parent of all rooms in the tutorial.
# It defines a tutorial command on itself (available to
# all those who are in a tutorial room).
#
# -------------------------------------------------------------
#
# Special command available in all tutorial rooms
class CmdTutorial(Command):
"""
Get help during the tutorial
Usage:
tutorial [obj]
This command allows you to get behind-the-scenes info
about an object or the current location.
"""
key = "tutorial"
aliases = ["tut"]
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""
All we do is to scan the current location for an Attribute
called `tutorial_info` and display that.
"""
caller = self.caller
if not self.args:
target = self.obj # this is the room the command is defined on
else:
target = caller.search(self.args.strip())
if not target:
return
helptext = target.db.tutorial_info
if helptext:
caller.msg("|G%s|n" % helptext)
else:
caller.msg("|RSorry, there is no tutorial help available here.|n")
# for the @detail command we inherit from MuxCommand, since
# we want to make use of MuxCommand's pre-parsing of '=' in the
# argument.
class CmdTutorialSetDetail(default_cmds.MuxCommand):
"""
sets a detail on a room
Usage:
@detail <key> = <description>
@detail <key>;<alias>;... = description
Example:
@detail walls = The walls are covered in ...
@detail castle;ruin;tower = The distant ruin ...
This sets a "detail" on the object this command is defined on
(TutorialRoom for this tutorial). This detail can be accessed with
    the CmdTutorialLook command sitting on TutorialRoom objects (details
are set as a simple dictionary on the room). This is a Builder command.
We custom parse the key for the ;-separator in order to create
multiple aliases to the detail all at once.
"""
key = "@detail"
locks = "cmd:perm(Builder)"
help_category = "TutorialWorld"
def func(self):
"""
All this does is to check if the object has
the set_detail method and uses it.
"""
if not self.args or not self.rhs:
self.caller.msg("Usage: @detail key = description")
return
if not hasattr(self.obj, "set_detail"):
self.caller.msg("Details cannot be set on %s." % self.obj)
return
for key in self.lhs.split(";"):
# loop over all aliases, if any (if not, this will just be
# the one key to loop over)
self.obj.set_detail(key, self.rhs)
self.caller.msg("Detail set: '%s': '%s'" % (self.lhs, self.rhs))
class CmdTutorialLook(default_cmds.CmdLook):
"""
looks at the room and on details
Usage:
look <obj>
look <room detail>
look *<account>
Observes your location, details at your location or objects
in your vicinity.
Tutorial: This is a child of the default Look command, that also
allows us to look at "details" in the room. These details are
things to examine and offers some extra description without
actually having to be actual database objects. It uses the
return_detail() hook on TutorialRooms for this.
"""
# we don't need to specify key/locks etc, this is already
# set by the parent.
help_category = "TutorialWorld"
def func(self):
"""
Handle the looking. This is a copy of the default look
code except for adding in the details.
"""
caller = self.caller
args = self.args
if args:
# we use quiet=True to turn off automatic error reporting.
# This tells search that we want to handle error messages
# ourself. This also means the search function will always
# return a list (with 0, 1 or more elements) rather than
# result/None.
looking_at_obj = caller.search(args,
# note: excludes room/room aliases
candidates=caller.location.contents + caller.contents,
use_nicks=True, quiet=True)
if len(looking_at_obj) != 1:
# no target found or more than one target found (multimatch)
# look for a detail that may match
detail = self.obj.return_detail(args)
if detail:
self.caller.msg(detail)
return
else:
# no detail found, delegate our result to the normal
# error message handler.
_SEARCH_AT_RESULT(None, caller, args, looking_at_obj)
return
else:
# we found a match, extract it from the list and carry on
# normally with the look handling.
looking_at_obj = looking_at_obj[0]
else:
looking_at_obj = caller.location
if not looking_at_obj:
caller.msg("You have no location to look at!")
return
if not hasattr(looking_at_obj, 'return_appearance'):
# this is likely due to us having an account instead
looking_at_obj = looking_at_obj.character
if not looking_at_obj.access(caller, "view"):
caller.msg("Could not find '%s'." % args)
return
# get object's appearance
caller.msg(looking_at_obj.return_appearance(caller))
# the object's at_desc() method.
looking_at_obj.at_desc(looker=caller)
return
class TutorialRoomCmdSet(CmdSet):
"""
Implements the simple tutorial cmdset. This will overload the look
command in the default CharacterCmdSet since it has a higher
    priority (CharacterCmdSet has prio 0)
"""
key = "tutorial_cmdset"
priority = 1
def at_cmdset_creation(self):
"""add the tutorial-room commands"""
self.add(CmdTutorial())
self.add(CmdTutorialSetDetail())
self.add(CmdTutorialLook())
class TutorialRoom(DefaultRoom):
"""
This is the base room type for all rooms in the tutorial world.
It defines a cmdset on itself for reading tutorial info about the location.
"""
def at_object_creation(self):
"""Called when room is first created"""
self.db.tutorial_info = "This is a tutorial room. It allows you to use the 'tutorial' command."
self.cmdset.add_default(TutorialRoomCmdSet)
def at_object_receive(self, new_arrival, source_location):
"""
        When an object enters a tutorial room we tell other objects in
the room about it by trying to call a hook on them. The Mob object
uses this to cheaply get notified of enemies without having
to constantly scan for them.
Args:
new_arrival (Object): the object that just entered this room.
source_location (Object): the previous location of new_arrival.
"""
if new_arrival.has_account and not new_arrival.is_superuser:
# this is a character
for obj in self.contents_get(exclude=new_arrival):
if hasattr(obj, "at_new_arrival"):
obj.at_new_arrival(new_arrival)
def return_detail(self, detailkey):
"""
This looks for an Attribute "obj_details" and possibly
returns the value of it.
Args:
detailkey (str): The detail being looked at. This is
case-insensitive.
"""
details = self.db.details
if details:
return details.get(detailkey.lower(), None)
def set_detail(self, detailkey, description):
"""
This sets a new detail, using an Attribute "details".
Args:
detailkey (str): The detail identifier to add (for
aliases you need to add multiple keys to the
same description). Case-insensitive.
description (str): The text to return when looking
at the given detailkey.
"""
if self.db.details:
self.db.details[detailkey.lower()] = description
else:
self.db.details = {detailkey.lower(): description}
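# Illustrative sketch (hypothetical helper, not used by the tutorial):
# how a detail stored with set_detail() is later retrieved through
# return_detail() by the custom look command above.
def _example_detail_usage(room):
    # "window" is a made-up detail key for illustration
    room.set_detail("window", "Through the window you see rolling hills.")
    # CmdTutorialLook falls back to this when no object matches "window"
    return room.return_detail("window")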
# -------------------------------------------------------------
#
# Weather room - room with a ticker
#
# -------------------------------------------------------------
# These are rainy weather strings
WEATHER_STRINGS = (
"The rain coming down from the iron-grey sky intensifies.",
"A gust of wind throws the rain right in your face. Despite your cloak you shiver.",
"The rainfall eases a bit and the sky momentarily brightens.",
"For a moment it looks like the rain is slowing, then it begins anew with renewed force.",
"The rain pummels you with large, heavy drops. You hear the rumble of thunder in the distance.",
"The wind is picking up, howling around you, throwing water droplets in your face. It's cold.",
"Bright fingers of lightning flash over the sky, moments later followed by a deafening rumble.",
"It rains so hard you can hardly see your hand in front of you. You'll soon be drenched to the bone.",
"Lightning strikes in several thundering bolts, striking the trees in the forest to your west.",
"You hear the distant howl of what sounds like some sort of dog or wolf.",
"Large clouds rush across the sky, throwing their load of rain over the world.")
class WeatherRoom(TutorialRoom):
"""
This should probably better be called a rainy room...
This sets up an outdoor room typeclass. At irregular intervals,
the effects of weather will show in the room. Outdoor rooms should
inherit from this.
"""
def at_object_creation(self):
"""
Called when object is first created.
We set up a ticker to update this room regularly.
Note that we could in principle also use a Script to manage
the ticking of the room; the TickerHandler works fine for
simple things like this though.
"""
super(WeatherRoom, self).at_object_creation()
# subscribe ourselves to a ticker to repeatedly call the hook
# "update_weather" on this object. The interval is randomized
# so as to not have all weather rooms update at the same time.
self.db.interval = random.randint(50, 70)
TICKER_HANDLER.add(interval=self.db.interval, callback=self.update_weather, idstring="tutorial")
# this is parsed by the 'tutorial' command on TutorialRooms.
self.db.tutorial_info = \
"This room has a Script running that has it echo a weather-related message at irregular intervals."
def update_weather(self, *args, **kwargs):
"""
Called by the tickerhandler at regular intervals. Even so, we
only update 20% of the time, picking a random weather message
when we do. The tickerhandler requires that this hook accepts
any arguments and keyword arguments (hence the *args, **kwargs
even though we don't actually use them in this example)
"""
if random.random() < 0.2:
# only update 20 % of the time
self.msg_contents("|w%s|n" % random.choice(WEATHER_STRINGS))
SUPERUSER_WARNING = "\nWARNING: You are playing as a superuser ({name}). Use the {quell} command to\n" \
"play without superuser privileges (many functions and puzzles ignore the \n" \
"presence of a superuser, making this mode useful for exploring things behind \n" \
"the scenes later).\n" \
# ------------------------------------------------------------
#
# Intro Room - unique room
#
# This room marks the start of the tutorial. It sets up properties on
# the player char that is needed for the tutorial.
#
# -------------------------------------------------------------
class IntroRoom(TutorialRoom):
"""
Intro room
properties to customize:
char_health - integer > 0 (default 20)
"""
def at_object_creation(self):
"""
Called when the room is first created.
"""
super(IntroRoom, self).at_object_creation()
self.db.tutorial_info = "The first room of the tutorial. " \
"This assigns the health Attribute to "\
"the account."
def at_object_receive(self, character, source_location):
"""
Assign properties on characters
"""
# setup character for the tutorial
health = self.db.char_health or 20
if character.has_account:
character.db.health = health
character.db.health_max = health
if character.is_superuser:
string = "-" * 78 + SUPERUSER_WARNING + "-" * 78
character.msg("|r%s|n" % string.format(name=character.key, quell="|w@quell|r"))
# -------------------------------------------------------------
#
# Bridge - unique room
#
# Defines a special west-eastward "bridge"-room, a large room that takes
# several steps to cross. It is complete with custom commands and a
# chance of falling off the bridge. This room has no regular exits,
# instead the exitings are handled by custom commands set on the account
# upon first entering the room.
#
# Since one can enter the bridge room from both ends, it is
# divided into five steps:
# westroom <- 0 1 2 3 4 -> eastroom
#
# -------------------------------------------------------------
class CmdEast(Command):
"""
Go eastwards across the bridge.
Tutorial info:
This command relies on the caller having two Attributes
(assigned by the room when entering):
- east_exit: a unique name or dbref to the room to go to
when exiting east.
- west_exit: a unique name or dbref to the room to go to
when exiting west.
    The room must also have the following Attribute:
        - tutorial_bridge_position: the current position on
          the bridge, 0 - 4.
"""
key = "east"
aliases = ["e"]
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""move one step eastwards"""
caller = self.caller
bridge_step = min(5, caller.db.tutorial_bridge_position + 1)
if bridge_step > 4:
# we have reached the far east end of the bridge.
# Move to the east room.
eexit = search_object(self.obj.db.east_exit)
if eexit:
caller.move_to(eexit[0])
else:
caller.msg("No east exit was found for this room. Contact an admin.")
return
caller.db.tutorial_bridge_position = bridge_step
# since we are really in one room, we have to notify others
# in the room when we move.
caller.location.msg_contents("%s steps eastwards across the bridge." % caller.name, exclude=caller)
caller.execute_cmd("look")
# go back across the bridge
class CmdWest(Command):
"""
Go westwards across the bridge.
Tutorial info:
This command relies on the caller having two Attributes
(assigned by the room when entering):
- east_exit: a unique name or dbref to the room to go to
when exiting east.
- west_exit: a unique name or dbref to the room to go to
when exiting west.
The room must also have the following property:
        - tutorial_bridge_position: the current position on
          the bridge, 0 - 4.
"""
key = "west"
aliases = ["w"]
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""move one step westwards"""
caller = self.caller
bridge_step = max(-1, caller.db.tutorial_bridge_position - 1)
if bridge_step < 0:
# we have reached the far west end of the bridge.
# Move to the west room.
wexit = search_object(self.obj.db.west_exit)
if wexit:
caller.move_to(wexit[0])
else:
caller.msg("No west exit was found for this room. Contact an admin.")
return
caller.db.tutorial_bridge_position = bridge_step
# since we are really in one room, we have to notify others
# in the room when we move.
caller.location.msg_contents("%s steps westwards across the bridge." % caller.name, exclude=caller)
caller.execute_cmd("look")
BRIDGE_POS_MESSAGES = ("You are standing |wvery close to the bridge's western foundation|n."
" If you go west you will be back on solid ground ...",
"The bridge slopes precariously where it extends eastwards"
" towards the lowest point - the center point of the hang bridge.",
"You are |whalfways|n out on the unstable bridge.",
"The bridge slopes precariously where it extends westwards"
" towards the lowest point - the center point of the hang bridge.",
"You are standing |wvery close to the bridge's eastern foundation|n."
" If you go east you will be back on solid ground ...")
BRIDGE_MOODS = ("The bridge sways in the wind.", "The hanging bridge creaks dangerously.",
"You clasp the ropes firmly as the bridge sways and creaks under you.",
"From the castle you hear a distant howling sound, like that of a large dog or other beast.",
"The bridge creaks under your feet. Those planks does not seem very sturdy.",
"Far below you the ocean roars and throws its waves against the cliff,"
" as if trying its best to reach you.",
"Parts of the bridge come loose behind you, falling into the chasm far below!",
"A gust of wind causes the bridge to sway precariously.",
"Under your feet a plank comes loose, tumbling down. For a moment you dangle over the abyss ...",
"The section of rope you hold onto crumble in your hands,"
" parts of it breaking apart. You sway trying to regain balance.")
FALL_MESSAGE = "Suddenly the plank you stand on gives way under your feet! You fall!" \
"\nYou try to grab hold of an adjoining plank, but all you manage to do is to " \
"divert your fall westwards, towards the cliff face. This is going to hurt ... " \
"\n ... The world goes dark ...\n\n"
class CmdLookBridge(Command):
"""
looks around at the bridge.
Tutorial info:
This command assumes that the room has an Attribute
"fall_exit", a unique name or dbref to the place they end upp
if they fall off the bridge.
"""
key = 'look'
aliases = ["l"]
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""Looking around, including a chance to fall."""
caller = self.caller
bridge_position = self.caller.db.tutorial_bridge_position
# this command is defined on the room, so we get it through self.obj
location = self.obj
# randomize the look-echo
message = "|c%s|n\n%s\n%s" % (location.key,
BRIDGE_POS_MESSAGES[bridge_position],
random.choice(BRIDGE_MOODS))
chars = [obj for obj in self.obj.contents_get(exclude=caller) if obj.has_account]
if chars:
# we create the You see: message manually here
message += "\n You see: %s" % ", ".join("|c%s|n" % char.key for char in chars)
self.caller.msg(message)
# there is a chance that we fall if we are on the western or central
# part of the bridge.
if bridge_position < 3 and random.random() < 0.05 and not self.caller.is_superuser:
            # we fall 5% of the time.
fall_exit = search_object(self.obj.db.fall_exit)
if fall_exit:
self.caller.msg("|r%s|n" % FALL_MESSAGE)
self.caller.move_to(fall_exit[0], quiet=True)
# inform others on the bridge
self.obj.msg_contents("A plank gives way under %s's feet and "
"they fall from the bridge!" % self.caller.key)
# custom help command
class CmdBridgeHelp(Command):
"""
Overwritten help command while on the bridge.
"""
key = "help"
aliases = ["h", "?"]
locks = "cmd:all()"
help_category = "Tutorial world"
def func(self):
"""Implements the command."""
string = "You are trying hard not to fall off the bridge ..." \
"\n\nWhat you can do is trying to cross the bridge |weast|n" \
" or try to get back to the mainland |wwest|n)."
self.caller.msg(string)
class BridgeCmdSet(CmdSet):
"""This groups the bridge commands. We will store it on the room."""
key = "Bridge commands"
priority = 1 # this gives it precedence over the normal look/help commands.
def at_cmdset_creation(self):
"""Called at first cmdset creation"""
self.add(CmdTutorial())
self.add(CmdEast())
self.add(CmdWest())
self.add(CmdLookBridge())
self.add(CmdBridgeHelp())
BRIDGE_WEATHER = (
"The rain intensifies, making the planks of the bridge even more slippery.",
"A gust of wind throws the rain right in your face.",
"The rainfall eases a bit and the sky momentarily brightens.",
"The bridge shakes under the thunder of a closeby thunder strike.",
"The rain pummels you with large, heavy drops. You hear the distinct howl of a large hound in the distance.",
"The wind is picking up, howling around you and causing the bridge to sway from side to side.",
"Some sort of large bird sweeps by overhead, giving off an eery screech. Soon it has disappeared in the gloom.",
"The bridge sways from side to side in the wind.",
"Below you a particularly large wave crashes into the rocks.",
"From the ruin you hear a distant, otherwordly howl. Or maybe it was just the wind.")
class BridgeRoom(WeatherRoom):
"""
The bridge room implements an unsafe bridge. It also enters the player into
a state where they get new commands so as to try to cross the bridge.
We want this to result in the account getting a special set of
commands related to crossing the bridge. The result is that it
will take several steps to cross it, despite it being represented
by only a single room.
We divide the bridge into steps:
self.db.west_exit - - | - - self.db.east_exit
0 1 2 3 4
The position is handled by a variable stored on the character
when entering and giving special move commands will
increase/decrease the counter until the bridge is crossed.
    We also have self.db.fall_exit, which points to a gathering
location to end up if we happen to fall off the bridge (used by
the CmdLookBridge command).
"""
def at_object_creation(self):
"""Setups the room"""
# this will start the weather room's ticker and tell
# it to call update_weather regularly.
super(BridgeRoom, self).at_object_creation()
# this identifies the exits from the room (should be the command
# needed to leave through that exit). These are defaults, but you
# could of course also change them after the room has been created.
self.db.west_exit = "cliff"
self.db.east_exit = "gate"
self.db.fall_exit = "cliffledge"
# add the cmdset on the room.
self.cmdset.add_default(BridgeCmdSet)
# since the default Character's at_look() will access the room's
# return_description (this skips the cmdset) when
# first entering it, we need to explicitly turn off the room
# as a normal view target - once inside, our own look will
# handle all return messages.
self.locks.add("view:false()")
def update_weather(self, *args, **kwargs):
"""
This is called at irregular intervals and makes the passage
over the bridge a little more interesting.
"""
        if random.random() < 0.8:
# send a message most of the time
self.msg_contents("|w%s|n" % random.choice(BRIDGE_WEATHER))
def at_object_receive(self, character, source_location):
"""
This hook is called by the engine whenever the player is moved
into this room.
"""
if character.has_account:
# we only run this if the entered object is indeed a player object.
# check so our east/west exits are correctly defined.
wexit = search_object(self.db.west_exit)
eexit = search_object(self.db.east_exit)
fexit = search_object(self.db.fall_exit)
if not (wexit and eexit and fexit):
character.msg("The bridge's exits are not properly configured. "
"Contact an admin. Forcing west-end placement.")
character.db.tutorial_bridge_position = 0
return
if source_location == eexit[0]:
# we assume we enter from the same room we will exit to
character.db.tutorial_bridge_position = 4
else:
# if not from the east, then from the west!
character.db.tutorial_bridge_position = 0
character.execute_cmd("look")
def at_object_leave(self, character, target_location):
"""
This is triggered when the player leaves the bridge room.
"""
if character.has_account:
# clean up the position attribute
del character.db.tutorial_bridge_position
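# Hedged build sketch (all names are placeholders): wiring up a BridgeRoom
# from code. The three exit Attributes must resolve via search_object() to
# rooms that actually exist in the database.
def _example_build_bridge():
    bridge = create_object(BridgeRoom, key="old hang bridge")
    bridge.db.west_exit = "cliff"       # room reached by stepping off west
    bridge.db.east_exit = "gate"        # room reached by stepping off east
    bridge.db.fall_exit = "cliffledge"  # room reached by falling off
    return bridge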
# -------------------------------------------------------------------------------
#
# Dark Room - a room with states
#
# This room limits the movements of its denizens unless they carry an active
# LightSource object (LightSource is defined in
# tutorial_world.objects.LightSource)
#
# -------------------------------------------------------------------------------
DARK_MESSAGES = ("It is pitch black. You are likely to be eaten by a grue.",
"It's pitch black. You fumble around but cannot find anything.",
"You don't see a thing. You feel around, managing to bump your fingers hard against something. Ouch!",
"You don't see a thing! Blindly grasping the air around you, you find nothing.",
"It's totally dark here. You almost stumble over some un-evenness in the ground.",
"You are completely blind. For a moment you think you hear someone breathing nearby ... "
"\n ... surely you must be mistaken.",
"Blind, you think you find some sort of object on the ground, but it turns out to be just a stone.",
"Blind, you bump into a wall. The wall seems to be covered with some sort of vegetation,"
" but its too damp to burn.",
"You can't see anything, but the air is damp. It feels like you are far underground.")
ALREADY_LIGHTSOURCE = "You don't want to stumble around in blindness anymore. You already " \
"found what you need. Let's get light already!"
FOUND_LIGHTSOURCE = "Your fingers bump against a splinter of wood in a corner." \
" It smells of resin and seems dry enough to burn! " \
"You pick it up, holding it firmly. Now you just need to" \
" |wlight|n it using the flint and steel you carry with you."
class CmdLookDark(Command):
"""
Look around in darkness
Usage:
look
Look around in the darkness, trying
to find something.
"""
key = "look"
aliases = ["l", 'feel', 'search', 'feel around', 'fiddle']
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""
Implement the command.
This works both as a look and a search command; there is a
random chance of eventually finding a light source.
"""
caller = self.caller
if random.random() < 0.8:
# we don't find anything
caller.msg(random.choice(DARK_MESSAGES))
else:
# we could have found something!
if any(obj for obj in caller.contents if utils.inherits_from(obj, LightSource)):
# we already carry a LightSource object.
caller.msg(ALREADY_LIGHTSOURCE)
else:
# don't have a light source, create a new one.
create_object(LightSource, key="splinter", location=caller)
caller.msg(FOUND_LIGHTSOURCE)
class CmdDarkHelp(Command):
"""
Help command for the dark state.
"""
key = "help"
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""
        Replace the help command with a not-so-useful one
"""
string = "Can't help you until you find some light! Try looking/feeling around for something to burn. " \
"You shouldn't give up even if you don't find anything right away."
self.caller.msg(string)
class CmdDarkNoMatch(Command):
"""
This is a system command. Commands with special keys are used to
    override special situations in the game. The CMD_NOMATCH is used
    when the given command is not found in the current command set (it
    replaces Evennia's default behavior of offering command
    suggestions).
"""
key = syscmdkeys.CMD_NOMATCH
locks = "cmd:all()"
def func(self):
"""Implements the command."""
self.caller.msg("Until you find some light, there's not much you can do. Try feeling around.")
class DarkCmdSet(CmdSet):
"""
Groups the commands of the dark room together. We also import the
default say command here so that players can still talk in the
darkness.
We give the cmdset the mergetype "Replace" to make sure it
completely replaces whichever command set it is merged onto
(usually the default cmdset)
"""
key = "darkroom_cmdset"
mergetype = "Replace"
priority = 2
def at_cmdset_creation(self):
"""populate the cmdset."""
self.add(CmdTutorial())
self.add(CmdLookDark())
self.add(CmdDarkHelp())
self.add(CmdDarkNoMatch())
self.add(default_cmds.CmdSay)
class DarkRoom(TutorialRoom):
"""
A dark room. This tries to start the DarkState script on all
objects entering. The script is responsible for making sure it is
valid (that is, that there is no light source shining in the room).
The is_lit Attribute is used to define if the room is currently lit
or not, so as to properly echo state changes.
Since this room (in the tutorial) is meant as a sort of catch-all,
we also make sure to heal characters ending up here, since they
may have been beaten up by the ghostly apparition at this point.
"""
def at_object_creation(self):
"""
Called when object is first created.
"""
super(DarkRoom, self).at_object_creation()
self.db.tutorial_info = "This is a room with custom command sets on itself."
# the room starts dark.
self.db.is_lit = False
self.cmdset.add(DarkCmdSet, permanent=True)
def at_init(self):
"""
Called when room is first recached (such as after a reload)
"""
self.check_light_state()
def _carries_light(self, obj):
"""
Checks if the given object carries anything that gives light.
Note that we do NOT look for a specific LightSource typeclass,
but for the Attribute is_giving_light - this makes it easy to
later add other types of light-giving items. We also accept
if there is a light-giving object in the room overall (like if
a splinter was dropped in the room)
"""
return obj.is_superuser or obj.db.is_giving_light or any(o for o in obj.contents if o.db.is_giving_light)
def _heal(self, character):
"""
Heal a character.
"""
health = character.db.health_max or 20
character.db.health = health
def check_light_state(self, exclude=None):
"""
This method checks if there are any light sources in the room.
If there isn't it makes sure to add the dark cmdset to all
characters in the room. It is called whenever characters enter
the room and also by the Light sources when they turn on.
Args:
exclude (Object): An object to not include in the light check.
"""
if any(self._carries_light(obj) for obj in self.contents if obj != exclude):
self.locks.add("view:all()")
self.cmdset.remove(DarkCmdSet)
self.db.is_lit = True
for char in (obj for obj in self.contents if obj.has_account):
# this won't do anything if it is already removed
char.msg("The room is lit up.")
else:
            # no one is carrying light - darken the room
self.db.is_lit = False
self.locks.add("view:false()")
self.cmdset.add(DarkCmdSet, permanent=True)
for char in (obj for obj in self.contents if obj.has_account):
if char.is_superuser:
char.msg("You are Superuser, so you are not affected by the dark state.")
else:
# put players in darkness
char.msg("The room is completely dark.")
def at_object_receive(self, obj, source_location):
"""
Called when an object enters the room.
"""
if obj.has_account:
# a puppeted object, that is, a Character
self._heal(obj)
# in case the new guy carries light with them
self.check_light_state()
def at_object_leave(self, obj, target_location):
"""
In case people leave with the light, we make sure to clear the
DarkCmdSet if necessary. This also works if they are
teleported away.
"""
# since this hook is called while the object is still in the room,
# we exclude it from the light check, to ignore any light sources
# it may be carrying.
self.check_light_state(exclude=obj)
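# Hedged sketch: how a light source interacts with DarkRoom. The room only
# inspects the is_giving_light Attribute, so any object can serve as a
# light source; _example_light_room is a hypothetical helper and assumes
# LightSource does not set is_giving_light until actually lit.
def _example_light_room(room, character):
    torch = create_object(LightSource, key="torch", location=character)
    torch.db.is_giving_light = True
    # ask the room to re-evaluate; it will drop the DarkCmdSet if lit
    room.check_light_state()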
# -------------------------------------------------------------
#
# Teleport room - puzzles solution
#
# This is a sort of puzzle room that requires a certain
# attribute on the entering character to be the same as
# an attribute of the room. If not, the character will
# be teleported away to a target location. This is used
# by the Obelisk - grave chamber puzzle, where one must
# have looked at the obelisk to get an attribute set on
# oneself, and then pick the grave chamber with the
# matching imagery for this attribute.
#
# -------------------------------------------------------------
class TeleportRoom(TutorialRoom):
"""
Teleporter - puzzle room.
Important attributes (set at creation):
     puzzle_value - what character.db.puzzle_clue must be set to
     success_teleport_to - where to teleport in case of success
success_teleport_msg - message to echo while teleporting to success
failure_teleport_to - where to teleport to in case of failure
failure_teleport_msg - message to echo while teleporting to failure
"""
def at_object_creation(self):
"""Called at first creation"""
super(TeleportRoom, self).at_object_creation()
# what character.db.puzzle_clue must be set to, to avoid teleportation.
self.db.puzzle_value = 1
# target of successful teleportation. Can be a dbref or a
# unique room name.
self.db.success_teleport_msg = "You are successful!"
self.db.success_teleport_to = "treasure room"
# the target of the failure teleportation.
self.db.failure_teleport_msg = "You fail!"
self.db.failure_teleport_to = "dark cell"
def at_object_receive(self, character, source_location):
"""
This hook is called by the engine whenever the player is moved into
this room.
"""
if not character.has_account:
# only act on player characters.
return
# determine if the puzzle is a success or not
is_success = str(character.db.puzzle_clue) == str(self.db.puzzle_value)
teleport_to = self.db.success_teleport_to if is_success else self.db.failure_teleport_to
# note that this returns a list
results = search_object(teleport_to)
if not results or len(results) > 1:
# we cannot move anywhere since no valid target was found.
character.msg("no valid teleport target for %s was found." % teleport_to)
return
if character.is_superuser:
# superusers don't get teleported
character.msg("Superuser block: You would have been teleported to %s." % results[0])
return
# perform the teleport
if is_success:
character.msg(self.db.success_teleport_msg)
else:
character.msg(self.db.failure_teleport_msg)
# teleport quietly to the new place
character.move_to(results[0], quiet=True, move_hooks=False)
# we have to call this manually since we turn off move_hooks
# - this is necessary to make the target dark room aware of an
# already carried light.
results[0].at_object_receive(character, self)
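# Hedged configuration sketch: the obelisk puzzle sets
# character.db.puzzle_clue elsewhere in the tutorial; a TeleportRoom then
# compares that clue with its own puzzle_value. Room names below are
# placeholders for illustration only.
def _example_configure_teleporter(room):
    room.db.puzzle_value = 3                      # the "correct" clue
    room.db.success_teleport_to = "antechamber"
    room.db.success_teleport_msg = "The wall slides aside!"
    room.db.failure_teleport_to = "dark cell"
    room.db.failure_teleport_msg = "A trapdoor opens under you!"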
# -------------------------------------------------------------
#
# Outro room - unique exit room
#
# Cleans up the character from all tutorial-related properties.
#
# -------------------------------------------------------------
class OutroRoom(TutorialRoom):
"""
Outro room.
Called when exiting the tutorial, cleans the
character of tutorial-related attributes.
"""
def at_object_creation(self):
"""
Called when the room is first created.
"""
super(OutroRoom, self).at_object_creation()
self.db.tutorial_info = "The last room of the tutorial. " \
"This cleans up all temporary Attributes " \
"the tutorial may have assigned to the "\
"character."
def at_object_receive(self, character, source_location):
"""
Do cleanup.
"""
if character.has_account:
del character.db.health_max
del character.db.health
del character.db.last_climbed
del character.db.puzzle_clue
del character.db.combat_parry_mode
del character.db.tutorial_bridge_position
for obj in character.contents:
if obj.typeclass_path.startswith("evennia.contrib.tutorial_world"):
obj.delete()
character.tags.clear(category="tutorial_world")
|
feend78/evennia
|
evennia/contrib/tutorial_world/rooms.py
|
Python
|
bsd-3-clause
| 40,655 | 0.001746 |
#!/usr/bin/env python
import os, Queue
import sys
from time import sleep
from threading import Thread
from libs.qemu import QemuInstance, UARTLineParser
# External
if len(sys.argv) > 1:
print "ARGS:", str(sys.argv)
sys.path.append(os.path.dirname( sys.argv[1] ))
########################################################################
print("=== Starting RPiEmu v0.5 ===")
# Qemu python wrapper that connects to the TCP server
rpi = QemuInstance()
rpi.start()
#####################################################
from models.totumduino import TotumDuino
from models.fabtotum import FABTotum
# FABTotum model
ft = FABTotum()
# Totumduino model
td = TotumDuino(ft)
# Start a TD thread
td.run()
print("* Totumduino thread started")
# UART line parser
parser = UARTLineParser(qemu=rpi, line_handler=td.uart0_transfer)
parser.start()
parser.loop()
# Finish the TD thread
td.finish()
|
Colibri-Embedded/FABEmu
|
examples/rpiemu.py
|
Python
|
gpl-2.0
| 909 | 0.006601 |
# -*- coding: utf-8 -*-
import classes.level_controller as lc
import classes.game_driver as gd
import classes.extras as ex
import classes.board
import random
import pygame
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self,mainloop,5,10)
gd.BoardGame.__init__(self,mainloop,speaker,config,screen_w,screen_h,13,11)
def create_game_objects(self, level = 1):
self.board.decolorable = False
self.board.draw_grid = False
color = (234,218,225)
self.color = color
self.grey = (200,200,200)
self.font_hl = (100,0,250)
self.task_str_color = ex.hsv_to_rgb(200,200,230)
self.activated_col = self.font_hl
white = (255,255,255)
self.bg_col = white
        self.top_line = 3  # self.board.scale//2
if self.mainloop.scheme is not None:
if self.mainloop.scheme.dark:
self.bg_col = (0,0,0)
self.level.games_per_lvl = 5
if self.level.lvl == 1:
rngs = [20,50,10,19]
self.level.games_per_lvl = 3
elif self.level.lvl == 2:
rngs = [50,100,20,49]
self.level.games_per_lvl = 3
elif self.level.lvl == 3:
rngs = [100,250,50,99]
self.level.games_per_lvl = 3
elif self.level.lvl == 4:
rngs = [250,500,100,249]
elif self.level.lvl == 5:
rngs = [500,1000,100,499]
elif self.level.lvl == 6:
rngs = [700,1500,250,699]
elif self.level.lvl == 7:
rngs = [1500,2500,500,1499]
elif self.level.lvl == 8:
rngs = [2500,5000,1500,2499]
elif self.level.lvl == 9:
rngs = [5000,10000,2500,4999]
elif self.level.lvl == 10:
rngs = [10000,84999,5000,9999]
data = [39,18]
self.points = self.level.lvl
#stretch width to fit the screen size
x_count = self.get_x_count(data[1],even=None)
if x_count > 39:
data[0] = x_count
self.data = data
self.vis_buttons = [1,1,1,1,1,1,1,0,0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.layout.update_layout(data[0],data[1])
scale = self.layout.scale
self.board.level_start(data[0],data[1],scale)
self.n1 = random.randrange(rngs[0],rngs[1])
self.n2 = random.randrange(rngs[2],rngs[3])
self.sumn1n2 = self.n1-self.n2
self.n1s = str(self.n1)
self.n2s = str(self.n2)
self.sumn1n2s = str(self.sumn1n2)
self.n1sl = len(self.n1s)
self.n2sl = len(self.n2s)
self.sumn1n2sl =len(self.sumn1n2s)
self.cursor_pos = 0
self.correct = False
self.carry1l = []
self.carry10l = []
self.resultl = []
self.nums1l = []
self.nums2l = []
self.ship_id = 0
self.digits = ["0","1","2","3","4","5","6","7","8","9"]
if self.lang.lang == 'el':
qm = ";"
else:
qm = "?"
question = self.n1s + " - " + self.n2s + " = " + qm
self.board.add_unit(1,0,data[0]-3-(max(self.n1sl,self.n2sl))*3 ,3,classes.board.Label,question,self.bg_col,"",21)
self.board.units[-1].align = 1
        # "borrow 1" row: marker boxes above each column for the -1 borrow
for i in range(self.n1sl - 1):
self.board.add_unit(data[0]-6-i*3,0,1,1,classes.board.Label,"-",self.bg_col,"",0)
self.board.add_unit(data[0]-5-i*3,0,1,1,classes.board.Letter,"",self.bg_col,"",1)
self.carry1l.append(self.board.ships[-1])
self.carry1l[-1].set_outline(self.grey, 2)
self.carry1l[-1].pos_id = i
self.board.units[-1].align = 2
        # "add 10" row: marker boxes for the +10 added to the borrowed-into column
for i in range(self.n1sl - 1):
self.board.add_unit(data[0]-3-i*3,1,1,1,classes.board.Label,"+",self.bg_col,"",0)
self.board.add_unit(data[0]-2-i*3,1,1,1,classes.board.Letter,"",self.bg_col,"",1)
self.carry10l.append(self.board.ships[-1])
self.carry10l[-1].set_outline(self.grey, 2)
self.carry10l[-1].pos_id = i
self.board.units[-1].align = 2
self.board.add_unit(data[0]-2-self.n1sl*3,0,2,1,classes.board.Label,"-1",self.bg_col,"",0)
self.board.add_unit(data[0]-2-self.n1sl*3,1,2,1,classes.board.Label,"+10",self.bg_col,"",0)
#first number
for i in range(self.n1sl):
self.board.add_unit(data[0]-3-i*3,2,3,3,classes.board.Label,self.n1s[-(i+1)],self.bg_col,"",21)
self.nums1l.append(self.board.units[-1])
self.nums1l[-1].font_color = self.grey
self.nums1l[-1].pos_id = i
#second number
i = 0
for i in range(self.n2sl):
self.board.add_unit(data[0]-3-i*3,5,3,3,classes.board.Label,self.n2s[-(i+1)],self.bg_col,"",21)
self.nums2l.append(self.board.units[-1])
self.nums2l[-1].pos_id = i
i += 1
self.board.add_unit(data[0]-3-i*3,5,3,3,classes.board.Label,"-",self.bg_col,"",21)
self.plus_label = self.board.units[-1]
#line
#line = "―" * (self.sumn1n2sl*2)
self.board.add_unit(data[0]-self.sumn1n2sl*3,8,self.sumn1n2sl*3,1,classes.board.Label,"",self.bg_col,"",21)
self.draw_hori_line(self.board.units[-1])
#self.board.units[-1].text_wrap = False
#result
for i in range(self.sumn1n2sl):
self.board.add_unit(data[0]-3-i*3,9,3,3,classes.board.Letter,"",self.bg_col,"",21)
self.resultl.append(self.board.ships[-1])
self.resultl[-1].set_outline(self.grey, 2)
self.resultl[-1].pos_id = i
self.resultl[0].set_outline(self.activated_col, 3)
self.home_square = self.resultl[0]
self.board.active_ship = self.home_square.unit_id
self.activable_count = len(self.board.ships)
for each in self.board.ships:
each.immobilize()
self.deactivate_colors()
self.reactivate_colors()
def draw_hori_line(self,unit):
w = unit.grid_w*self.board.scale
h = unit.grid_h*self.board.scale
center = [w//2,h//2]
canv = pygame.Surface([w, h-1])
canv.fill(self.bg_col)
pygame.draw.line(canv,self.grey,(0,self.top_line),(w,self.top_line),3)
unit.painting = canv.copy()
unit.update_me = True
def handle(self,event):
gd.BoardGame.handle(self, event) #send event handling up
if self.show_msg == False:
            if event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:
                self.home_square_switch(self.board.active_ship+1)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:
                self.home_square_switch(self.board.active_ship-1)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
                if self.home_square in self.resultl:
                    self.home_square_switch(self.board.active_ship-self.n1sl+1)
                elif self.home_square in self.carry10l:
                    self.home_square_switch(self.board.active_ship-self.n1sl+1)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
                self.home_square_switch(self.board.active_ship+self.n1sl-1)
elif event.type == pygame.KEYDOWN and event.key != pygame.K_RETURN and not self.correct:
lhv = len(self.home_square.value)
self.changed_since_check = True
if event.key == pygame.K_BACKSPACE:
if lhv > 0:
self.home_square.value = self.home_square.value[0:lhv-1]
else:
char = event.unicode
if (len(char)>0 and lhv < 3 and char in self.digits):
if self.home_square in self.resultl:
if lhv == 1:
s = self.home_square.value + char
if s[0] == "0":
self.home_square.value = char
else:
n = int(s)
if n < 20:
self.home_square.value = str(n % 10)
else:
self.home_square.value = char
else:
self.home_square.value = char
elif self.home_square in self.carry1l:
if char == "1":
self.home_square.value = "1"
self.carry10l[self.home_square.pos_id].value = "10"
else:
self.home_square.value = ""
self.carry10l[self.home_square.pos_id].value = ""
self.carry10l[self.home_square.pos_id].update_me = True
elif self.home_square in self.carry10l:
if lhv == 0:
if char == "1":
self.home_square.value = "10"
elif lhv == 1:
if char == "0":
self.home_square.value = "10"
else:
self.home_square.value = ""
else:
if char == "1":
self.home_square.value = "10"
else:
self.home_square.value = ""
if self.home_square.value == "10":
self.carry1l[self.home_square.pos_id].value = "1"
else:
self.carry1l[self.home_square.pos_id].value = ""
self.carry1l[self.home_square.pos_id].update_me = True
self.home_square.update_me = True
self.mainloop.redraw_needed[0] = True
elif event.type == pygame.MOUSEBUTTONUP:
                self.home_square_switch(self.board.active_ship)
    def home_square_switch(self, activate):
if activate < 0 or activate > self.activable_count:
activate = self.activable_count - self.sumn1n2sl
if activate >= 0 and activate < self.activable_count:
self.board.active_ship = activate
self.home_square.update_me = True
if self.board.active_ship >= 0:
self.home_square.set_outline(self.grey, 2)
self.deactivate_colors()
self.home_square = self.board.ships[self.board.active_ship]
self.home_square.set_outline(self.activated_col, 3)
self.reactivate_colors()
self.home_square.font_color = self.font_hl
self.home_square.update_me = True
self.mainloop.redraw_needed[0] = True
def deactivate_colors(self):
for each in self.board.ships:
each.font_color = self.grey
each.update_me = True
for each in self.board.units:
each.font_color = self.grey
each.update_me = True
def reactivate_colors(self):
self.plus_label.font_color = self.font_hl
self.board.units[0].font_color = self.task_str_color
if self.home_square in self.carry1l:
self.carry10l[self.home_square.pos_id].font_color = self.font_hl
elif self.home_square in self.carry10l:
self.carry1l[self.home_square.pos_id].font_color = self.font_hl
elif self.home_square in self.resultl:
if self.home_square.pos_id > 0:
self.carry1l[self.home_square.pos_id-1].font_color = self.font_hl
if self.home_square.pos_id >= 0 and self.home_square.pos_id < self.n1sl-1:
self.carry10l[self.home_square.pos_id].font_color = self.font_hl
if (self.n1sl > self.home_square.pos_id):
self.nums1l[self.home_square.pos_id].font_color = self.font_hl
if (self.n2sl > self.home_square.pos_id):
self.nums2l[self.home_square.pos_id].font_color = self.font_hl
self.resultl[self.home_square.pos_id].font_color = self.font_hl
def update(self,game):
game.fill(self.color)
gd.BoardGame.update(self, game) #rest of painting done by parent
def check_result(self):
s = ""
for each in reversed(self.resultl):
s += each.value
if s == self.sumn1n2s:
self.update_score(self.points)
self.level.next_board()
else:
if self.points > 0:
self.points -= 1
self.level.try_again()
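# Hedged reference sketch (not used by the game): the column-subtraction-
# with-borrow procedure that the board above visualizes. Each column that
# goes negative borrows 1 from the next column (the "-1" box) and adds 10
# to itself (the "+10" box). Assumes n1 >= n2, as the level ranges ensure.
def _column_subtract(n1, n2):
    digits1 = [int(d) for d in reversed(str(n1))]
    digits2 = [int(d) for d in reversed(str(n2))]
    result = []
    borrow = 0
    for i, d1 in enumerate(digits1):
        d2 = digits2[i] if i < len(digits2) else 0
        d = d1 - borrow - d2
        borrow = 0
        if d < 0:
            d += 10     # the "+10" row
            borrow = 1  # the "-1" carried into the next column
        result.append(d)
    while len(result) > 1 and result[-1] == 0:
        result.pop()  # strip leading zeros
    return int(''.join(str(d) for d in reversed(result)))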
|
OriHoch/pysiogame
|
game_boards/game070.py
|
Python
|
gpl-3.0
| 12,968 | 0.018124 |
import json
import random
import time
import re
try:
from StringIO import StringIO # Python 2
except ImportError:
from io import BytesIO as StringIO # Python 3
from datetime import date, datetime, timedelta
import requests
from requests.adapters import HTTPAdapter
try:
from requests.packages.urllib3.poolmanager import PoolManager
except ImportError:
from urllib3.poolmanager import PoolManager
import xmltodict
try:
import pandas as pd
except ImportError:
pd = None
def assert_pd():
# Common function to check if pd is installed
if not pd:
raise ImportError(
'transactions data requires pandas; '
'please pip install pandas'
)
DATE_FIELDS = [
'addAccountDate',
'closeDate',
'fiLastUpdated',
'lastUpdated',
]
class MintHTTPSAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, **kwargs):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize, **kwargs)
class Mint(requests.Session):
json_headers = {'accept': 'application/json'}
request_id = 42 # magic number? random number?
token = None
def __init__(self, email=None, password=None):
requests.Session.__init__(self)
self.mount('https://', MintHTTPSAdapter())
if email and password:
self.login_and_get_token(email, password)
@classmethod
def create(cls, email, password): # {{{
mint = Mint()
mint.login_and_get_token(email, password)
return mint
@classmethod
def get_rnd(cls): # {{{
return (str(int(time.mktime(datetime.now().timetuple())))
+ str(random.randrange(999)).zfill(3))
@classmethod
def parse_float(cls, string): # {{{
for bad_char in ['$', ',', '%']:
string = string.replace(bad_char, '')
try:
return float(string)
except ValueError:
return None
def request_and_check(self, url, method='get',
expected_content_type=None, **kwargs):
"""Performs a request, and checks that the status is OK, and that the
content-type matches expectations.
Args:
url: URL to request
method: either 'get' or 'post'
expected_content_type: prefix to match response content-type against
**kwargs: passed to the request method directly.
Raises:
RuntimeError if status_code does not match.
"""
assert (method == 'get' or method == 'post')
result = getattr(self, method)(url, **kwargs)
if result.status_code != requests.codes.ok:
raise RuntimeError('Error requesting %r, status = %d' %
(url, result.status_code))
if expected_content_type is not None:
content_type = result.headers.get('content-type', '')
if not re.match(expected_content_type, content_type):
raise RuntimeError(
'Error requesting %r, content type %r does not match %r' %
(url, content_type, expected_content_type))
return result
def login_and_get_token(self, email, password): # {{{
# 0: Check to see if we're already logged in.
if self.token is not None:
return
# 1: Login.
login_url = 'https://wwws.mint.com/login.event?task=L'
try:
self.request_and_check(login_url)
except RuntimeError:
raise Exception('Failed to load Mint login page')
data = {'username': email}
response = self.post('https://wwws.mint.com/getUserPod.xevent',
data=data, headers=self.json_headers).text
data = {'username': email, 'password': password, 'task': 'L',
'browser': 'firefox', 'browserVersion': '27', 'os': 'linux'}
response = self.post('https://wwws.mint.com/loginUserSubmit.xevent',
data=data, headers=self.json_headers).text
if 'token' not in response:
raise Exception('Mint.com login failed[1]')
response = json.loads(response)
if not response['sUser']['token']:
raise Exception('Mint.com login failed[2]')
# 2: Grab token.
self.token = response['sUser']['token']
def get_accounts(self, get_detail=False): # {{{
# Issue service request.
req_id = str(self.request_id)
input = {
'args': {
'types': [
'BANK',
'CREDIT',
'INVESTMENT',
'LOAN',
'MORTGAGE',
'OTHER_PROPERTY',
'REAL_ESTATE',
'VEHICLE',
'UNCLASSIFIED'
]
},
'id': req_id,
'service': 'MintAccountService',
'task': 'getAccountsSorted'
# 'task': 'getAccountsSortedByBalanceDescending'
}
data = {'input': json.dumps([input])}
account_data_url = ('https://wwws.mint.com/bundledServiceController.'
'xevent?legacy=false&token=' + self.token)
response = self.post(account_data_url, data=data,
headers=self.json_headers).text
self.request_id = self.request_id + 1
if req_id not in response:
raise Exception('Could not parse account data: ' + response)
# Parse the request
response = json.loads(response)
accounts = response['response'][req_id]['response']
# Return datetime objects for dates
for account in accounts:
for df in DATE_FIELDS:
if df in account:
# Convert from javascript timestamp to unix timestamp
# http://stackoverflow.com/a/9744811/5026
try:
ts = account[df] / 1e3
except TypeError:
# returned data is not a number, don't parse
continue
account[df + 'InDate'] = datetime.fromtimestamp(ts)
if get_detail:
accounts = self.populate_extended_account_detail(accounts)
return accounts
def set_user_property(self, name, value):
url = ('https://wwws.mint.com/bundledServiceController.xevent?' +
'legacy=false&token=' + self.token)
req_id = str(self.request_id)
self.request_id += 1
result = self.post(
url,
data={'input': json.dumps([{'args': {'propertyName': name,
'propertyValue': value},
'service': 'MintUserService',
'task': 'setUserProperty',
'id': req_id}])},
headers=self.json_headers)
if result.status_code != 200:
raise Exception('Received HTTP error %d' % result.status_code)
response = result.text
if req_id not in response:
raise Exception("Could not parse response to set_user_property")
def _dateconvert(self, dateraw):
# Converts dates from json data
cy = datetime.isocalendar(date.today())[0]
try:
newdate = datetime.strptime(dateraw + str(cy), '%b %d%Y')
        except ValueError:
newdate = datetime.strptime(dateraw, '%m/%d/%y')
return newdate
def _debit_credit(self, row):
# Reverses credit balances
dic = {False: -1, True: 1}
return float(row['amount'][1:].replace(',', '')) * dic[row['isDebit']]
def get_transactions_json(self, include_investment=False,
skip_duplicates=False, start_date=None):
"""Returns the raw JSON transaction data as downloaded from Mint. The JSON
transaction data includes some additional information missing from the
CSV data, such as whether the transaction is pending or completed, but
leaves off the year for current year transactions.
Warning: In order to reliably include or exclude duplicates, it is
necessary to change the user account property 'hide_duplicates' to the
appropriate value. This affects what is displayed in the web
interface. Note that the CSV transactions never exclude duplicates.
"""
# Warning: This is a global property for the user that we are changing.
self.set_user_property('hide_duplicates',
'T' if skip_duplicates else 'F')
# Converts the start date into datetime format - must be mm/dd/yy
try:
start_date = datetime.strptime(start_date, '%m/%d/%y')
        except (TypeError, ValueError):
start_date = None
all_txns = []
offset = 0
# Mint only returns some of the transactions at once. To get all of
# them, we have to keep asking for more until we reach the end.
while 1:
# Specifying accountId=0 causes Mint to return investment
# transactions as well. Otherwise they are skipped by
# default.
url = (
'https://wwws.mint.com/getJsonData.xevent?' +
'queryNew=&offset={offset}&comparableType=8&' +
'rnd={rnd}&{query_options}').format(
offset=offset,
rnd=Mint.get_rnd(),
query_options=(
'accountId=0&task=transactions' if include_investment
else 'task=transactions,txnfilters&filterType=cash'))
result = self.request_and_check(
url, headers=self.json_headers,
expected_content_type='text/json|application/json')
data = json.loads(result.text)
            txns = data['set'][0].get('data', [])
            if not txns:
                # no more transactions to page through
                break
            if start_date:
                # txns is non-empty here, so indexing txns[-1] is safe
                last_dt = self._dateconvert(txns[-1]['odate'])
                if last_dt < start_date:
                    keep_txns = [
                        item for item in txns
                        if self._dateconvert(item['odate']) >= start_date]
                    all_txns.extend(keep_txns)
                    break
            all_txns.extend(txns)
            offset += len(txns)
return all_txns
def get_detailed_transactions(self, include_investment=False,
skip_duplicates=False,
remove_pending=True,
start_date=None):
"""Returns the JSON transaction data as a DataFrame, and converts
current year dates and prior year dates into consistent datetime
format, and reverses credit activity.
Note: start_date must be in format mm/dd/yy. If pulls take too long,
use a more recent start date. See json explanations of
include_investment and skip_duplicates.
Also note: Mint includes pending transactions, however these sometimes
change dates/amounts after the transactions post. They have been
removed by default in this pull, but can be included by changing
remove_pending to False
"""
assert_pd()
result = self.get_transactions_json(include_investment,
skip_duplicates, start_date)
df = pd.DataFrame(result)
df['odate'] = df['odate'].apply(self._dateconvert)
if remove_pending:
df = df[~df.isPending]
df.reset_index(drop=True, inplace=True)
df.amount = df.apply(self._debit_credit, axis=1)
return df
def get_transactions_csv(self, include_investment=False):
"""Returns the raw CSV transaction data as downloaded from Mint.
If include_investment == True, also includes transactions that Mint
classifies as investment-related. You may find that the investment
transaction data is not sufficiently detailed to actually be useful,
however.
"""
# Specifying accountId=0 causes Mint to return investment
# transactions as well. Otherwise they are skipped by
# default.
result = self.request_and_check(
'https://wwws.mint.com/transactionDownload.event' +
('?accountId=0' if include_investment else ''),
headers=self.headers,
expected_content_type='text/csv'
)
return result.content
def get_net_worth(self, account_data=None):
if account_data is None:
account_data = self.get_accounts()
# account types in this list will be subtracted
negative_accounts = ['loan', 'loans', 'credit']
try:
net_worth = long()
except NameError:
net_worth = 0
# iterate over accounts and add or subtract account balances
for account in [a for a in account_data if a['isActive']]:
current_balance = account['currentBalance']
if account['accountType'] in negative_accounts:
net_worth -= current_balance
else:
net_worth += current_balance
return net_worth
def get_transactions(self):
"""Returns the transaction data as a Pandas DataFrame.
"""
assert_pd()
s = StringIO(self.get_transactions_csv())
s.seek(0)
df = pd.read_csv(s, parse_dates=['Date'])
df.columns = [c.lower().replace(' ', '_') for c in df.columns]
df.category = (df.category.str.lower()
.replace('uncategorized', pd.np.nan))
return df
def populate_extended_account_detail(self, accounts): # {{{
# I can't find any way to retrieve this information other than by
# doing this stupid one-call-per-account to listTransactions.xevent
# and parsing the HTML snippet :(
for account in accounts:
headers = self.json_headers
headers['Referer'] = ('https://wwws.mint.com/transaction.event?'
'accountId=' + str(account['id']))
list_txn_url = ('https://wwws.mint.com/listTransaction.xevent?'
'accountId=' + str(account['id']) + '&queryNew=&'
'offset=0&comparableType=8&acctChanged=T&rnd=' +
Mint.get_rnd())
response = json.loads(self.get(list_txn_url, headers=headers).text)
xml = '<div>' + response['accountHeader'] + '</div>'
xml = xml.replace('–', '-')
xml = xmltodict.parse(xml)
account['availableMoney'] = None
account['totalFees'] = None
account['totalCredit'] = None
account['nextPaymentAmount'] = None
account['nextPaymentDate'] = None
xml = xml['div']['div'][1]['table']
if 'tbody' not in xml:
continue
xml = xml['tbody']
table_type = xml['@id']
xml = xml['tr'][1]['td']
if table_type == 'account-table-bank':
account['availableMoney'] = Mint.parse_float(xml[1]['#text'])
account['totalFees'] = Mint.parse_float(xml[3]['a']['#text'])
if (account['interestRate'] is None):
account['interestRate'] = (
Mint.parse_float(xml[2]['#text']) / 100.0
)
elif table_type == 'account-table-credit':
account['availableMoney'] = Mint.parse_float(xml[1]['#text'])
account['totalCredit'] = Mint.parse_float(xml[2]['#text'])
account['totalFees'] = Mint.parse_float(xml[4]['a']['#text'])
if account['interestRate'] is None:
account['interestRate'] = (
Mint.parse_float(xml[3]['#text']) / 100.0
)
elif table_type == 'account-table-loan':
account['nextPaymentAmount'] = (
Mint.parse_float(xml[1]['#text'])
)
account['nextPaymentDate'] = xml[2].get('#text', None)
elif table_type == 'account-type-investment':
account['totalFees'] = Mint.parse_float(xml[2]['a']['#text'])
return accounts
def get_categories(self): # {{{
# Get category metadata.
req_id = str(self.request_id)
data = {
'input': json.dumps([{
'args': {
'excludedCategories': [],
'sortByPrecedence': False,
'categoryTypeFilter': 'FREE'
},
'id': req_id,
'service': 'MintCategoryService',
'task': 'getCategoryTreeDto2'
}])
}
cat_url = ('https://wwws.mint.com/bundledServiceController.xevent'
'?legacy=false&token=' + self.token)
response = self.post(cat_url, data=data,
headers=self.json_headers).text
self.request_id = self.request_id + 1
if req_id not in response:
raise Exception('Could not parse category data: "'
+ response + '"')
response = json.loads(response)
response = response['response'][req_id]['response']
# Build category list
categories = {}
for category in response['allCategories']:
if category['parentId'] == 0:
continue
categories[category['id']] = category
return categories
def get_budgets(self): # {{{
# Get categories
categories = self.get_categories()
# Issue request for budget utilization
today = date.today()
this_month = date(today.year, today.month, 1)
last_year = this_month - timedelta(days=330)
this_month = (str(this_month.month).zfill(2) +
'/01/' + str(this_month.year))
last_year = (str(last_year.month).zfill(2) +
'/01/' + str(last_year.year))
response = json.loads(self.get(
'https://wwws.mint.com/getBudget.xevent?startDate=' + last_year +
'&endDate=' + this_month + '&rnd=' + Mint.get_rnd(),
headers=self.json_headers
).text)
# Make the skeleton return structure
budgets = {
'income': response['data']['income'][
str(max(map(int, response['data']['income'].keys())))
]['bu'],
'spend': response['data']['spending'][
                str(max(map(int, response['data']['spending'].keys())))
]['bu']
}
# Fill in the return structure
for direction in budgets.keys():
for budget in budgets[direction]:
budget['cat'] = self.get_category_from_id(
budget['cat'],
categories
)
return budgets
    def get_category_from_id(self, cid, categories):
        if cid == 0:
            return 'Uncategorized'
        for i in categories:
            if categories[i]['id'] == cid:
                return categories[i]['name']
            # Categories nest one level deep; 'children' holds the
            # sub-category dicts.
            for child in categories[i].get('children', []):
                if child['id'] == cid:
                    return child['name']
        return 'Unknown'
def initiate_account_refresh(self):
# Submit refresh request.
data = {
'token': self.token
}
self.post('https://wwws.mint.com/refreshFILogins.xevent',
data=data, headers=self.json_headers)
def get_accounts(email, password, get_detail=False):
mint = Mint.create(email, password)
return mint.get_accounts(get_detail=get_detail)
def get_net_worth(email, password):
mint = Mint.create(email, password)
account_data = mint.get_accounts()
return mint.get_net_worth(account_data)
def make_accounts_presentable(accounts):
for account in accounts:
for k, v in account.items():
if isinstance(v, datetime):
account[k] = repr(v)
return accounts
def print_accounts(accounts):
print(json.dumps(make_accounts_presentable(accounts), indent=2))
def get_budgets(email, password):
mint = Mint.create(email, password)
return mint.get_budgets()
def initiate_account_refresh(email, password):
mint = Mint.create(email, password)
return mint.initiate_account_refresh()
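# A minimal usage sketch (the credentials below are placeholders; real
# Mint.com credentials are required):
#
#     accounts = get_accounts('user@example.com', 'password')
#     print_accounts(accounts)
#     budgets = get_budgets('user@example.com', 'password')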
def main():
import getpass
import argparse
try:
import keyring
except ImportError:
keyring = None
# Parse command-line arguments {{{
cmdline = argparse.ArgumentParser()
cmdline.add_argument('email', nargs='?', default=None,
help='The e-mail address for your Mint.com account')
cmdline.add_argument('password', nargs='?', default=None,
help='The password for your Mint.com account')
cmdline.add_argument('--accounts', action='store_true', dest='accounts',
default=False, help='Retrieve account information'
' (default if nothing else is specified)')
cmdline.add_argument('--budgets', action='store_true', dest='budgets',
default=False, help='Retrieve budget information')
cmdline.add_argument('--net-worth', action='store_true', dest='net_worth',
default=False, help='Retrieve net worth information')
cmdline.add_argument('--extended-accounts', action='store_true',
dest='accounts_ext', default=False,
help='Retrieve extended account information (slower, '
'implies --accounts)')
cmdline.add_argument('--transactions', '-t', action='store_true',
default=False, help='Retrieve transactions')
    cmdline.add_argument('--filename', '-f', help='Write results to a file '
                         'in {csv,json} format, determined by the file '
                         'extension. Default is to write to stdout.')
cmdline.add_argument('--keyring', action='store_true',
help='Use OS keyring for storing password '
'information')
options = cmdline.parse_args()
if options.keyring and not keyring:
cmdline.error('--keyring can only be used if the `keyring` '
'library is installed.')
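    # On Python 2, interactive prompts need raw_input; alias it to input.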
try:
from __builtin__ import raw_input as input
except NameError:
pass
# Try to get the e-mail and password from the arguments
email = options.email
password = options.password
if not email:
# If the user did not provide an e-mail, prompt for it
email = input("Mint e-mail: ")
if keyring and not password:
# If the keyring module is installed and we don't yet have
# a password, try prompting for it
password = keyring.get_password('mintapi', email)
if not password:
# If we still don't have a password, prompt for it
password = getpass.getpass("Mint password: ")
if options.keyring:
# If keyring option is specified, save the password in the keyring
keyring.set_password('mintapi', email, password)
if options.accounts_ext:
options.accounts = True
if not any([options.accounts, options.budgets, options.transactions,
options.net_worth]):
options.accounts = True
mint = Mint.create(email, password)
data = None
if options.accounts and options.budgets:
try:
accounts = make_accounts_presentable(
mint.get_accounts(get_detail=options.accounts_ext)
)
        except Exception:
accounts = None
try:
budgets = mint.get_budgets()
        except Exception:
budgets = None
data = {'accounts': accounts, 'budgets': budgets}
elif options.budgets:
try:
data = mint.get_budgets()
        except Exception:
data = None
elif options.accounts:
try:
data = make_accounts_presentable(mint.get_accounts(
get_detail=options.accounts_ext)
)
        except Exception:
data = None
elif options.transactions:
data = mint.get_transactions()
elif options.net_worth:
data = mint.get_net_worth()
# output the data
if options.transactions:
if options.filename is None:
print(data.to_json(orient='records'))
elif options.filename.endswith('.csv'):
data.to_csv(options.filename, index=False)
elif options.filename.endswith('.json'):
data.to_json(options.filename, orient='records')
else:
raise ValueError('file extension must be either .csv or .json')
else:
if options.filename is None:
print(json.dumps(data, indent=2))
elif options.filename.endswith('.json'):
with open(options.filename, 'w+') as f:
json.dump(data, f, indent=2)
else:
raise ValueError('file type must be json for non-transaction data')
if __name__ == '__main__':
main()
|
jbms/mintapi
|
mintapi/api.py
|
Python
|
mit
| 25,488 | 0.000471 |
# -*- coding: UTF-8 -*-
# Copyright 2012-2015 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""See :doc:`/specs/isip`.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from lino.api import ad
class Plugin(ad.Plugin):
"See :class:`lino.core.plugin.Plugin`."
verbose_name = _("ISIP")
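    # Plugins listed here are hard dependencies and are loaded together
    # with this one.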
needs_plugins = ['lino_welfare.modlib.integ']
|
lsaffre/lino-welfare
|
lino_welfare/modlib/isip/__init__.py
|
Python
|
agpl-3.0
| 413 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-05 03:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0001_initial'),
]
operations = [
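        # default=1 backfills the new column on existing rows, so a user
        # with pk=1 must exist when this migration is applied.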
migrations.AddField(
model_name='post',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
tyagow/AdvancingTheBlog
|
src/posts/migrations/0002_post_user.py
|
Python
|
mit
| 633 | 0.00158 |
from outsourcer import Code
from . import utils
from .base import Expression
from .constants import BREAK, POS, RESULT, STATUS
class Sep(Expression):
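    """A separated list: `expr` delimited by `separator`.

    `discard_separators` keeps only the `expr` results, `allow_trailer`
    permits a trailing separator, and `allow_empty` lets the whole list
    match zero items.
    """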
num_blocks = 2
def __init__(
self,
expr,
separator,
discard_separators=True,
allow_trailer=False,
allow_empty=True,
):
self.expr = expr
self.separator = separator
self.discard_separators = discard_separators
self.allow_trailer = allow_trailer
self.allow_empty = allow_empty
def __str__(self):
op = '/?' if self.allow_trailer else '//'
return utils.infix_str(self.expr, op, self.separator)
def operand_string(self):
return f'({self})'
def always_succeeds(self):
return self.allow_empty
def _compile(self, out):
staging = out.var('staging', [])
checkpoint = out.var('checkpoint', POS)
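        # `checkpoint` marks the end of the last complete item (or, with
        # allow_trailer, the last separator); POS is rewound to it on exit.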
with out.WHILE(True):
with utils.if_fails(out, self.expr):
# If we're not discarding separators, and if we're also not
# allowing a trailing separator, then we need to pop the last
# separator off of our list.
if not self.discard_separators and not self.allow_trailer:
# But only pop if staging is not empty.
with out.IF(staging):
out += staging.pop()
out += BREAK
out += staging.append(RESULT)
out += checkpoint << POS
with utils.if_fails(out, self.separator):
out += BREAK
if not self.discard_separators:
out += staging.append(RESULT)
if self.allow_trailer:
out += checkpoint << POS
success = [
RESULT << staging,
POS << checkpoint,
STATUS << True,
]
if self.allow_empty:
out.extend(success)
else:
with out.IF(staging):
out.extend(success)
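
# A hypothetical usage sketch (`Number` and `Comma` stand in for real
# grammar expressions): Sep(Number, Comma, allow_trailer=True) would
# accept '1, 2, 3,' and yield [1, 2, 3].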
|
jvs/sourcer
|
sourcer/expressions/sep.py
|
Python
|
mit
| 2,062 | 0.000485 |